[official-gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
66 static const char * const alpha_cpu_name[] =
68 "ev4", "ev5", "ev6"
71 /* Specify how accurate floating-point traps need to be. */
73 enum alpha_trap_precision alpha_tp;
75 /* Specify the floating-point rounding mode. */
77 enum alpha_fp_rounding_mode alpha_fprm;
79 /* Specify which things cause traps. */
81 enum alpha_fp_trap_mode alpha_fptm;
83 /* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
86 struct alpha_compare alpha_compare;
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
91 static int inside_function = FALSE;
93 /* The number of cycles of latency we should assume on memory reads. */
95 int alpha_memory_latency = 3;
97 /* Whether the function needs the GP. */
99 static int alpha_function_needs_gp;
101 /* The alias set for prologue/epilogue register save/restore. */
103 static GTY(()) alias_set_type alpha_sr_alias_set;
105 /* The assembler name of the current function. */
107 static const char *alpha_fnname;
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
120 /* Costs of various operations on the different architectures. */
122 struct alpha_rtx_cost_data
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
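/* A worked illustration of the fractional tuning described above:
   COSTS_N_INSNS (1) expands to 4, so in the size table a single shift
   costs 4 while a DImode multiply costs 4 + 2 = 6.  Synthesizing the
   multiply from one shift is therefore still counted as a win under -Os,
   but synthesizing it from two shifts (total 8) is not, which is exactly
   the effect the "+N" is after.  */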
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS current_function_args_info.num_args
193 #else
194 #define NUM_ARGS current_function_args_info
195 #endif
197 #define REG_PV 27
198 #define REG_RA 26
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
206 #endif
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
213 /* Implement TARGET_HANDLE_OPTION. */
215 static bool
216 alpha_handle_option (size_t code, const char *arg, int value)
218 switch (code)
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
230 case OPT_mtls_size_:
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
236 return true;
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
242 static const char *
243 alpha_mangle_type (const_tree type)
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
252 #endif
254 /* Parse target option strings. */
256 void
257 override_options (void)
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
263 } cpu_table[] = {
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { 0, 0, 0 }
281 int i;
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
 291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
307 if (TARGET_IEEE)
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
311 else
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
318 if (TARGET_IEEE_WITH_INEXACT)
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
322 else
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
329 if (alpha_tp_string)
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
337 else
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
341 if (alpha_fprm_string)
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string,"d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
351 else
352 error ("bad value %qs for -mfp-rounding-mode switch",
353 alpha_fprm_string);
356 if (alpha_fptm_string)
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
370 if (alpha_cpu_string)
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
378 break;
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
384 if (alpha_tune_string)
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
389 alpha_tune = cpu_table [i].processor;
390 break;
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
396 /* Do some sanity checks on the above options. */
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
411 if (alpha_cpu == PROCESSOR_EV6)
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps. Which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
419 if (TARGET_FLOAT_VAX)
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
426 if (alpha_fptm == ALPHA_FPTM_SUI)
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
437 char *end;
438 int lat;
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
450 static int const cache_latency[][4] =
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
462 lat = 3;
464 else
465 lat = cache_latency[alpha_tune][lat-1];
467 else if (! strcmp (alpha_mlat_string, "main"))
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
473 else
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
476 lat = 3;
479 alpha_memory_latency = lat;
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
502 if (align_functions <= 0)
503 align_functions = 16;
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
508 /* Register variables and functions with the garbage collector. */
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524 #endif
527 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
530 zap_mask (HOST_WIDE_INT value)
532 int i;
534 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
535 i++, value >>= 8)
536 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
537 return 0;
539 return 1;
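/* A minimal sketch of what zap_mask accepts, assuming gcc_assert from
   system.h; guarded out so it is never compiled.  Only masks whose bytes
   are entirely 0x00 or 0xff qualify, since those are the masks a single
   ZAP/ZAPNOT instruction can realize.  */
#if 0
static void
zap_mask_examples (void)
{
  gcc_assert (zap_mask (0xff00ff00));  /* every byte is 0x00 or 0xff */
  gcc_assert (zap_mask (-1));          /* all bytes 0xff */
  gcc_assert (! zap_mask (0x0ff0));    /* partial bytes are rejected */
}
#endif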
542 /* Return true if OP is valid for a particular TLS relocation.
543 We are already guaranteed that OP is a CONST. */
546 tls_symbolic_operand_1 (rtx op, int size, int unspec)
548 op = XEXP (op, 0);
550 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
551 return 0;
552 op = XVECEXP (op, 0, 0);
554 if (GET_CODE (op) != SYMBOL_REF)
555 return 0;
557 switch (SYMBOL_REF_TLS_MODEL (op))
559 case TLS_MODEL_LOCAL_DYNAMIC:
560 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
561 case TLS_MODEL_INITIAL_EXEC:
562 return unspec == UNSPEC_TPREL && size == 64;
563 case TLS_MODEL_LOCAL_EXEC:
564 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
565 default:
566 gcc_unreachable ();
570 /* Used by aligned_memory_operand and unaligned_memory_operand to
571 resolve what reload is going to do with OP if it's a register. */
574 resolve_reload_operand (rtx op)
576 if (reload_in_progress)
578 rtx tmp = op;
579 if (GET_CODE (tmp) == SUBREG)
580 tmp = SUBREG_REG (tmp);
581 if (GET_CODE (tmp) == REG
582 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
584 op = reg_equiv_memory_loc[REGNO (tmp)];
585 if (op == 0)
586 return 0;
589 return op;
 592 /* The scalar modes supported differ from the default check-what-c-supports
593 version in that sometimes TFmode is available even when long double
 594 indicates only DFmode. On Unicos/Mk, we have the situation that HImode
595 doesn't map to any C type, but of course we still support that. */
597 static bool
598 alpha_scalar_mode_supported_p (enum machine_mode mode)
600 switch (mode)
602 case QImode:
603 case HImode:
604 case SImode:
605 case DImode:
606 case TImode: /* via optabs.c */
607 return true;
609 case SFmode:
610 case DFmode:
611 return true;
613 case TFmode:
614 return TARGET_HAS_XFLOATING_LIBS;
616 default:
617 return false;
621 /* Alpha implements a couple of integer vector mode operations when
622 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
623 which allows the vectorizer to operate on e.g. move instructions,
624 or when expand_vector_operations can do something useful. */
626 static bool
627 alpha_vector_mode_supported_p (enum machine_mode mode)
629 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
632 /* Return 1 if this function can directly return via $26. */
635 direct_return (void)
637 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
638 && reload_completed
639 && alpha_sa_size () == 0
640 && get_frame_size () == 0
641 && current_function_outgoing_args_size == 0
642 && current_function_pretend_args_size == 0);
645 /* Return the ADDR_VEC associated with a tablejump insn. */
648 alpha_tablejump_addr_vec (rtx insn)
650 rtx tmp;
652 tmp = JUMP_LABEL (insn);
653 if (!tmp)
654 return NULL_RTX;
655 tmp = NEXT_INSN (tmp);
656 if (!tmp)
657 return NULL_RTX;
658 if (GET_CODE (tmp) == JUMP_INSN
659 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
660 return PATTERN (tmp);
661 return NULL_RTX;
664 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
667 alpha_tablejump_best_label (rtx insn)
669 rtx jump_table = alpha_tablejump_addr_vec (insn);
670 rtx best_label = NULL_RTX;
672 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
673 there for edge frequency counts from profile data. */
675 if (jump_table)
677 int n_labels = XVECLEN (jump_table, 1);
678 int best_count = -1;
679 int i, j;
681 for (i = 0; i < n_labels; i++)
683 int count = 1;
685 for (j = i + 1; j < n_labels; j++)
686 if (XEXP (XVECEXP (jump_table, 1, i), 0)
687 == XEXP (XVECEXP (jump_table, 1, j), 0))
688 count++;
690 if (count > best_count)
691 best_count = count, best_label = XVECEXP (jump_table, 1, i);
695 return best_label ? best_label : const0_rtx;
698 /* Return the TLS model to use for SYMBOL. */
700 static enum tls_model
701 tls_symbolic_operand_type (rtx symbol)
703 enum tls_model model;
705 if (GET_CODE (symbol) != SYMBOL_REF)
706 return 0;
707 model = SYMBOL_REF_TLS_MODEL (symbol);
709 /* Local-exec with a 64-bit size is the same code as initial-exec. */
710 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
711 model = TLS_MODEL_INITIAL_EXEC;
713 return model;
716 /* Return true if the function DECL will share the same GP as any
717 function in the current unit of translation. */
719 static bool
720 decl_has_samegp (const_tree decl)
722 /* Functions that are not local can be overridden, and thus may
723 not share the same gp. */
724 if (!(*targetm.binds_local_p) (decl))
725 return false;
727 /* If -msmall-data is in effect, assume that there is only one GP
728 for the module, and so any local symbol has this property. We
729 need explicit relocations to be able to enforce this for symbols
730 not defined in this unit of translation, however. */
731 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
732 return true;
734 /* Functions that are not external are defined in this UoT. */
735 /* ??? Irritatingly, static functions not yet emitted are still
736 marked "external". Apply this to non-static functions only. */
737 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
740 /* Return true if EXP should be placed in the small data section. */
742 static bool
743 alpha_in_small_data_p (const_tree exp)
745 /* We want to merge strings, so we never consider them small data. */
746 if (TREE_CODE (exp) == STRING_CST)
747 return false;
749 /* Functions are never in the small data area. Duh. */
750 if (TREE_CODE (exp) == FUNCTION_DECL)
751 return false;
753 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
755 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
756 if (strcmp (section, ".sdata") == 0
757 || strcmp (section, ".sbss") == 0)
758 return true;
760 else
762 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
764 /* If this is an incomplete type with size 0, then we can't put it
765 in sdata because it might be too big when completed. */
766 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
767 return true;
770 return false;
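/* A concrete illustration, assuming the default g_switch_value of 8 set in
   override_options: a 4-byte `int' or an 8-byte `double' passes the size
   test above and lands in the small data area, while a 16-byte array does
   not; objects explicitly placed in .sdata or .sbss via a section attribute
   are accepted regardless of their size.  */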
773 #if TARGET_ABI_OPEN_VMS
774 static bool
775 alpha_linkage_symbol_p (const char *symname)
777 int symlen = strlen (symname);
779 if (symlen > 4)
780 return strcmp (&symname [symlen - 4], "..lk") == 0;
782 return false;
785 #define LINKAGE_SYMBOL_REF_P(X) \
786 ((GET_CODE (X) == SYMBOL_REF \
787 && alpha_linkage_symbol_p (XSTR (X, 0))) \
788 || (GET_CODE (X) == CONST \
789 && GET_CODE (XEXP (X, 0)) == PLUS \
790 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
791 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
792 #endif
794 /* legitimate_address_p recognizes an RTL expression that is a valid
795 memory address for an instruction. The MODE argument is the
796 machine mode for the MEM expression that wants to use this address.
798 For Alpha, we have either a constant address or the sum of a
799 register and a constant address, or just a register. For DImode,
800 any of those forms can be surrounded with an AND that clear the
801 low-order three bits; this is an "unaligned" access. */
803 bool
804 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
806 /* If this is an ldq_u type address, discard the outer AND. */
807 if (mode == DImode
808 && GET_CODE (x) == AND
809 && GET_CODE (XEXP (x, 1)) == CONST_INT
810 && INTVAL (XEXP (x, 1)) == -8)
811 x = XEXP (x, 0);
813 /* Discard non-paradoxical subregs. */
814 if (GET_CODE (x) == SUBREG
815 && (GET_MODE_SIZE (GET_MODE (x))
816 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
817 x = SUBREG_REG (x);
819 /* Unadorned general registers are valid. */
820 if (REG_P (x)
821 && (strict
822 ? STRICT_REG_OK_FOR_BASE_P (x)
823 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
824 return true;
826 /* Constant addresses (i.e. +/- 32k) are valid. */
827 if (CONSTANT_ADDRESS_P (x))
828 return true;
830 #if TARGET_ABI_OPEN_VMS
831 if (LINKAGE_SYMBOL_REF_P (x))
832 return true;
833 #endif
835 /* Register plus a small constant offset is valid. */
836 if (GET_CODE (x) == PLUS)
838 rtx ofs = XEXP (x, 1);
839 x = XEXP (x, 0);
841 /* Discard non-paradoxical subregs. */
842 if (GET_CODE (x) == SUBREG
843 && (GET_MODE_SIZE (GET_MODE (x))
844 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
845 x = SUBREG_REG (x);
847 if (REG_P (x))
849 if (! strict
850 && NONSTRICT_REG_OK_FP_BASE_P (x)
851 && GET_CODE (ofs) == CONST_INT)
852 return true;
853 if ((strict
854 ? STRICT_REG_OK_FOR_BASE_P (x)
855 : NONSTRICT_REG_OK_FOR_BASE_P (x))
856 && CONSTANT_ADDRESS_P (ofs))
857 return true;
861 /* If we're managing explicit relocations, LO_SUM is valid, as
862 are small data symbols. */
863 else if (TARGET_EXPLICIT_RELOCS)
865 if (small_symbolic_operand (x, Pmode))
866 return true;
868 if (GET_CODE (x) == LO_SUM)
870 rtx ofs = XEXP (x, 1);
871 x = XEXP (x, 0);
873 /* Discard non-paradoxical subregs. */
874 if (GET_CODE (x) == SUBREG
875 && (GET_MODE_SIZE (GET_MODE (x))
876 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
877 x = SUBREG_REG (x);
879 /* Must have a valid base register. */
880 if (! (REG_P (x)
881 && (strict
882 ? STRICT_REG_OK_FOR_BASE_P (x)
883 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
884 return false;
886 /* The symbol must be local. */
887 if (local_symbolic_operand (ofs, Pmode)
888 || dtp32_symbolic_operand (ofs, Pmode)
889 || tp32_symbolic_operand (ofs, Pmode))
890 return true;
894 return false;
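/* For concreteness, the address shapes accepted above, written as RTL
   (register numbers are illustrative):
     (reg:DI 9)                                          base register
     (plus:DI (reg:DI 9) (const_int 4096))               base + 16-bit offset
     (and:DI (plus:DI (reg:DI 9) (const_int 5))
             (const_int -8))                             ldq_u unaligned form
     (lo_sum:DI (reg:DI 9) (symbol_ref:DI "local_var"))  explicit-reloc low part
   The LO_SUM form is accepted only under TARGET_EXPLICIT_RELOCS and only
   for local, dtp32, or tp32 symbols, as checked above.  */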
897 /* Build the SYMBOL_REF for __tls_get_addr. */
899 static GTY(()) rtx tls_get_addr_libfunc;
901 static rtx
902 get_tls_get_addr (void)
904 if (!tls_get_addr_libfunc)
905 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
906 return tls_get_addr_libfunc;
909 /* Try machine-dependent ways of modifying an illegitimate address
910 to be legitimate. If we find one, return the new, valid address. */
913 alpha_legitimize_address (rtx x, rtx scratch,
914 enum machine_mode mode ATTRIBUTE_UNUSED)
916 HOST_WIDE_INT addend;
918 /* If the address is (plus reg const_int) and the CONST_INT is not a
919 valid offset, compute the high part of the constant and add it to
920 the register. Then our address is (plus temp low-part-const). */
921 if (GET_CODE (x) == PLUS
922 && GET_CODE (XEXP (x, 0)) == REG
923 && GET_CODE (XEXP (x, 1)) == CONST_INT
924 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
926 addend = INTVAL (XEXP (x, 1));
927 x = XEXP (x, 0);
928 goto split_addend;
931 /* If the address is (const (plus FOO const_int)), find the low-order
932 part of the CONST_INT. Then load FOO plus any high-order part of the
933 CONST_INT into a register. Our address is (plus reg low-part-const).
934 This is done to reduce the number of GOT entries. */
935 if (can_create_pseudo_p ()
936 && GET_CODE (x) == CONST
937 && GET_CODE (XEXP (x, 0)) == PLUS
938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
940 addend = INTVAL (XEXP (XEXP (x, 0), 1));
941 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
942 goto split_addend;
945 /* If we have a (plus reg const), emit the load as in (2), then add
946 the two registers, and finally generate (plus reg low-part-const) as
947 our address. */
948 if (can_create_pseudo_p ()
949 && GET_CODE (x) == PLUS
950 && GET_CODE (XEXP (x, 0)) == REG
951 && GET_CODE (XEXP (x, 1)) == CONST
952 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
953 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
955 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
956 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
957 XEXP (XEXP (XEXP (x, 1), 0), 0),
958 NULL_RTX, 1, OPTAB_LIB_WIDEN);
959 goto split_addend;
962 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
963 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
965 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
967 switch (tls_symbolic_operand_type (x))
969 case TLS_MODEL_NONE:
970 break;
972 case TLS_MODEL_GLOBAL_DYNAMIC:
973 start_sequence ();
975 r0 = gen_rtx_REG (Pmode, 0);
976 r16 = gen_rtx_REG (Pmode, 16);
977 tga = get_tls_get_addr ();
978 dest = gen_reg_rtx (Pmode);
979 seq = GEN_INT (alpha_next_sequence_number++);
981 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
982 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
983 insn = emit_call_insn (insn);
984 CONST_OR_PURE_CALL_P (insn) = 1;
985 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
987 insn = get_insns ();
988 end_sequence ();
990 emit_libcall_block (insn, dest, r0, x);
991 return dest;
993 case TLS_MODEL_LOCAL_DYNAMIC:
994 start_sequence ();
996 r0 = gen_rtx_REG (Pmode, 0);
997 r16 = gen_rtx_REG (Pmode, 16);
998 tga = get_tls_get_addr ();
999 scratch = gen_reg_rtx (Pmode);
1000 seq = GEN_INT (alpha_next_sequence_number++);
1002 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1003 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1004 insn = emit_call_insn (insn);
1005 CONST_OR_PURE_CALL_P (insn) = 1;
1006 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1008 insn = get_insns ();
1009 end_sequence ();
1011 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1012 UNSPEC_TLSLDM_CALL);
1013 emit_libcall_block (insn, scratch, r0, eqv);
1015 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1016 eqv = gen_rtx_CONST (Pmode, eqv);
1018 if (alpha_tls_size == 64)
1020 dest = gen_reg_rtx (Pmode);
1021 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1022 emit_insn (gen_adddi3 (dest, dest, scratch));
1023 return dest;
1025 if (alpha_tls_size == 32)
1027 insn = gen_rtx_HIGH (Pmode, eqv);
1028 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1029 scratch = gen_reg_rtx (Pmode);
1030 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1032 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1034 case TLS_MODEL_INITIAL_EXEC:
1035 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1036 eqv = gen_rtx_CONST (Pmode, eqv);
1037 tp = gen_reg_rtx (Pmode);
1038 scratch = gen_reg_rtx (Pmode);
1039 dest = gen_reg_rtx (Pmode);
1041 emit_insn (gen_load_tp (tp));
1042 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1043 emit_insn (gen_adddi3 (dest, tp, scratch));
1044 return dest;
1046 case TLS_MODEL_LOCAL_EXEC:
1047 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1048 eqv = gen_rtx_CONST (Pmode, eqv);
1049 tp = gen_reg_rtx (Pmode);
1051 emit_insn (gen_load_tp (tp));
1052 if (alpha_tls_size == 32)
1054 insn = gen_rtx_HIGH (Pmode, eqv);
1055 insn = gen_rtx_PLUS (Pmode, tp, insn);
1056 tp = gen_reg_rtx (Pmode);
1057 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1059 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1061 default:
1062 gcc_unreachable ();
1065 if (local_symbolic_operand (x, Pmode))
1067 if (small_symbolic_operand (x, Pmode))
1068 return x;
1069 else
1071 if (can_create_pseudo_p ())
1072 scratch = gen_reg_rtx (Pmode);
1073 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1074 gen_rtx_HIGH (Pmode, x)));
1075 return gen_rtx_LO_SUM (Pmode, scratch, x);
1080 return NULL;
1082 split_addend:
1084 HOST_WIDE_INT low, high;
1086 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1087 addend -= low;
1088 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1089 addend -= high;
1091 if (addend)
1092 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1093 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1094 1, OPTAB_LIB_WIDEN);
1095 if (high)
1096 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1097 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1098 1, OPTAB_LIB_WIDEN);
1100 return plus_constant (x, low);
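/* A worked example of the split_addend arithmetic above.  For
   addend = 0x123489ab:
     low    = ((addend & 0xffff) ^ 0x8000) - 0x8000  = -0x7655
     addend - low                                    =  0x12350000
     high                                            =  0x12350000
     addend - low - high                             =  0
   so the address is rebuilt as an ldah adding 0x1235 << 16 followed by a
   16-bit displacement of -0x7655, with nothing left over to add.  */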
1104 /* Primarily this is required for TLS symbols, but given that our move
1105 patterns *ought* to be able to handle any symbol at any time, we
1106 should never be spilling symbolic operands to the constant pool, ever. */
1108 static bool
1109 alpha_cannot_force_const_mem (rtx x)
1111 enum rtx_code code = GET_CODE (x);
1112 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1115 /* We do not allow indirect calls to be optimized into sibling calls, nor
1116 can we allow a call to a function with a different GP to be optimized
1117 into a sibcall. */
1119 static bool
1120 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1122 /* Can't do indirect tail calls, since we don't know if the target
1123 uses the same GP. */
1124 if (!decl)
1125 return false;
1127 /* Otherwise, we can make a tail call if the target function shares
1128 the same GP. */
1129 return decl_has_samegp (decl);
1133 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1135 rtx x = *px;
1137 /* Don't re-split. */
1138 if (GET_CODE (x) == LO_SUM)
1139 return -1;
1141 return small_symbolic_operand (x, Pmode) != 0;
1144 static int
1145 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1147 rtx x = *px;
1149 /* Don't re-split. */
1150 if (GET_CODE (x) == LO_SUM)
1151 return -1;
1153 if (small_symbolic_operand (x, Pmode))
1155 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1156 *px = x;
1157 return -1;
1160 return 0;
1164 split_small_symbolic_operand (rtx x)
1166 x = copy_insn (x);
1167 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1168 return x;
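/* The effect of the splitter above on a small-data reference, shown as RTL
   (the PIC register is $29 on Alpha):
     before:  (mem:DI (symbol_ref:DI "small_var"))
     after:   (mem:DI (lo_sum:DI (reg:DI 29) (symbol_ref:DI "small_var")))
   i.e. the symbol becomes a 16-bit gp-relative displacement off the PIC
   register; any LO_SUM already present is left untouched.  */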
1171 /* Indicate that INSN cannot be duplicated. This is true for any insn
1172 that we've marked with gpdisp relocs, since those have to stay in
1173 1-1 correspondence with one another.
1175 Technically we could copy them if we could set up a mapping from one
1176 sequence number to another, across the set of insns to be duplicated.
1177 This seems overly complicated and error-prone since interblock motion
1178 from sched-ebb could move one of the pair of insns to a different block.
1180 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1181 then they'll be in a different block from their ldgp. Which could lead
1182 the bb reorder code to think that it would be ok to copy just the block
1183 containing the call and branch to the block containing the ldgp. */
1185 static bool
1186 alpha_cannot_copy_insn_p (rtx insn)
1188 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1189 return false;
1190 if (recog_memoized (insn) >= 0)
1191 return get_attr_cannot_copy (insn);
1192 else
1193 return false;
1197 /* Try a machine-dependent way of reloading an illegitimate address
1198 operand. If we find one, push the reload and return the new rtx. */
1201 alpha_legitimize_reload_address (rtx x,
1202 enum machine_mode mode ATTRIBUTE_UNUSED,
1203 int opnum, int type,
1204 int ind_levels ATTRIBUTE_UNUSED)
1206 /* We must recognize output that we have already generated ourselves. */
1207 if (GET_CODE (x) == PLUS
1208 && GET_CODE (XEXP (x, 0)) == PLUS
1209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1210 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1211 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1213 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1214 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1215 opnum, type);
1216 return x;
1219 /* We wish to handle large displacements off a base register by
1220 splitting the addend across an ldah and the mem insn. This
1221 cuts number of extra insns needed from 3 to 1. */
1222 if (GET_CODE (x) == PLUS
1223 && GET_CODE (XEXP (x, 0)) == REG
1224 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1225 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1226 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1228 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1229 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1230 HOST_WIDE_INT high
1231 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1233 /* Check for 32-bit overflow. */
1234 if (high + low != val)
1235 return NULL_RTX;
1237 /* Reload the high part into a base reg; leave the low part
1238 in the mem directly. */
1239 x = gen_rtx_PLUS (GET_MODE (x),
1240 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1241 GEN_INT (high)),
1242 GEN_INT (low));
1244 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1245 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1246 opnum, type);
1247 return x;
1250 return NULL_RTX;
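/* A worked example of the reload splitting above: for the address
   (plus (reg) (const_int 0x12348)), whose displacement does not fit in
   16 bits, low = 0x2348 and high = 0x10000, so the address is rewritten as
     (plus (plus (reg) (const_int 0x10000)) (const_int 0x2348))
   The inner PLUS is reloaded into a base register with a single ldah and
   the low part stays in the memory reference, costing one extra insn
   rather than the three a general constant load would take.  */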
1253 /* Compute a (partial) cost for rtx X. Return true if the complete
1254 cost has been computed, and false if subexpressions should be
1255 scanned. In either case, *TOTAL contains the cost result. */
1257 static bool
1258 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1260 enum machine_mode mode = GET_MODE (x);
1261 bool float_mode_p = FLOAT_MODE_P (mode);
1262 const struct alpha_rtx_cost_data *cost_data;
1264 if (optimize_size)
1265 cost_data = &alpha_rtx_cost_size;
1266 else
1267 cost_data = &alpha_rtx_cost_data[alpha_tune];
1269 switch (code)
1271 case CONST_INT:
1272 /* If this is an 8-bit constant, return zero since it can be used
1273 nearly anywhere with no cost. If it is a valid operand for an
1274 ADD or AND, likewise return 0 if we know it will be used in that
1275 context. Otherwise, return 2 since it might be used there later.
1276 All other constants take at least two insns. */
1277 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1279 *total = 0;
1280 return true;
1282 /* FALLTHRU */
1284 case CONST_DOUBLE:
1285 if (x == CONST0_RTX (mode))
1286 *total = 0;
1287 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1288 || (outer_code == AND && and_operand (x, VOIDmode)))
1289 *total = 0;
1290 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1291 *total = 2;
1292 else
1293 *total = COSTS_N_INSNS (2);
1294 return true;
1296 case CONST:
1297 case SYMBOL_REF:
1298 case LABEL_REF:
1299 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1300 *total = COSTS_N_INSNS (outer_code != MEM);
1301 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1302 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1303 else if (tls_symbolic_operand_type (x))
1304 /* Estimate of cost for call_pal rduniq. */
1305 /* ??? How many insns do we emit here? More than one... */
1306 *total = COSTS_N_INSNS (15);
1307 else
1308 /* Otherwise we do a load from the GOT. */
1309 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1310 return true;
1312 case HIGH:
1313 /* This is effectively an add_operand. */
1314 *total = 2;
1315 return true;
1317 case PLUS:
1318 case MINUS:
1319 if (float_mode_p)
1320 *total = cost_data->fp_add;
1321 else if (GET_CODE (XEXP (x, 0)) == MULT
1322 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1324 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1325 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1326 return true;
1328 return false;
1330 case MULT:
1331 if (float_mode_p)
1332 *total = cost_data->fp_mult;
1333 else if (mode == DImode)
1334 *total = cost_data->int_mult_di;
1335 else
1336 *total = cost_data->int_mult_si;
1337 return false;
1339 case ASHIFT:
1340 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1341 && INTVAL (XEXP (x, 1)) <= 3)
1343 *total = COSTS_N_INSNS (1);
1344 return false;
1346 /* FALLTHRU */
1348 case ASHIFTRT:
1349 case LSHIFTRT:
1350 *total = cost_data->int_shift;
1351 return false;
1353 case IF_THEN_ELSE:
1354 if (float_mode_p)
1355 *total = cost_data->fp_add;
1356 else
1357 *total = cost_data->int_cmov;
1358 return false;
1360 case DIV:
1361 case UDIV:
1362 case MOD:
1363 case UMOD:
1364 if (!float_mode_p)
1365 *total = cost_data->int_div;
1366 else if (mode == SFmode)
1367 *total = cost_data->fp_div_sf;
1368 else
1369 *total = cost_data->fp_div_df;
1370 return false;
1372 case MEM:
1373 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1374 return true;
1376 case NEG:
1377 if (! float_mode_p)
1379 *total = COSTS_N_INSNS (1);
1380 return false;
1382 /* FALLTHRU */
1384 case ABS:
1385 if (! float_mode_p)
1387 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1388 return false;
1390 /* FALLTHRU */
1392 case FLOAT:
1393 case UNSIGNED_FLOAT:
1394 case FIX:
1395 case UNSIGNED_FIX:
1396 case FLOAT_TRUNCATE:
1397 *total = cost_data->fp_add;
1398 return false;
1400 case FLOAT_EXTEND:
1401 if (GET_CODE (XEXP (x, 0)) == MEM)
1402 *total = 0;
1403 else
1404 *total = cost_data->fp_add;
1405 return false;
1407 default:
1408 return false;
1412 /* REF is an alignable memory location. Place an aligned SImode
1413 reference into *PALIGNED_MEM and the number of bits to shift into
 1414 *PBITNUM. */
1417 void
1418 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1420 rtx base;
1421 HOST_WIDE_INT disp, offset;
1423 gcc_assert (GET_CODE (ref) == MEM);
1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1428 base = find_replacement (&XEXP (ref, 0));
1429 gcc_assert (memory_address_p (GET_MODE (ref), base));
1431 else
1432 base = XEXP (ref, 0);
1434 if (GET_CODE (base) == PLUS)
1435 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 else
1437 disp = 0;
1439 /* Find the byte offset within an aligned word. If the memory itself is
1440 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1441 will have examined the base register and determined it is aligned, and
1442 thus displacements from it are naturally alignable. */
1443 if (MEM_ALIGN (ref) >= 32)
1444 offset = 0;
1445 else
1446 offset = disp & 3;
1448 /* Access the entire aligned word. */
1449 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1451 /* Convert the byte offset within the word to a bit offset. */
1452 if (WORDS_BIG_ENDIAN)
1453 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1454 else
1455 offset *= 8;
1456 *pbitnum = GEN_INT (offset);
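/* A worked example: a QImode reference at (plus (reg) (const_int 6)) with
   no alignment information gives disp = 6 and offset = disp & 3 = 2, so
   *PALIGNED_MEM becomes the SImode word at displacement 4 and, on this
   little-endian target, *PBITNUM = 2 * 8 = 16; the byte is then extracted
   from bits 16..23 of the loaded word.  */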
 1459 /* Similar, but just get the address. Handle the two reload cases. */
1463 get_unaligned_address (rtx ref)
1465 rtx base;
1466 HOST_WIDE_INT offset = 0;
1468 gcc_assert (GET_CODE (ref) == MEM);
1470 if (reload_in_progress
1471 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1473 base = find_replacement (&XEXP (ref, 0));
1475 gcc_assert (memory_address_p (GET_MODE (ref), base));
1477 else
1478 base = XEXP (ref, 0);
1480 if (GET_CODE (base) == PLUS)
1481 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1483 return plus_constant (base, offset);
1486 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1487 X is always returned in a register. */
1490 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1492 if (GET_CODE (addr) == PLUS)
1494 ofs += INTVAL (XEXP (addr, 1));
1495 addr = XEXP (addr, 0);
1498 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1499 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1502 /* On the Alpha, all (non-symbolic) constants except zero go into
1503 a floating-point register via memory. Note that we cannot
1504 return anything that is not a subset of CLASS, and that some
1505 symbolic constants cannot be dropped to memory. */
1507 enum reg_class
1508 alpha_preferred_reload_class(rtx x, enum reg_class class)
1510 /* Zero is present in any register class. */
1511 if (x == CONST0_RTX (GET_MODE (x)))
1512 return class;
1514 /* These sorts of constants we can easily drop to memory. */
1515 if (GET_CODE (x) == CONST_INT
1516 || GET_CODE (x) == CONST_DOUBLE
1517 || GET_CODE (x) == CONST_VECTOR)
1519 if (class == FLOAT_REGS)
1520 return NO_REGS;
1521 if (class == ALL_REGS)
1522 return GENERAL_REGS;
1523 return class;
1526 /* All other kinds of constants should not (and in the case of HIGH
1527 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1528 secondary reload. */
1529 if (CONSTANT_P (x))
1530 return (class == ALL_REGS ? GENERAL_REGS : class);
1532 return class;
1535 /* Inform reload about cases where moving X with a mode MODE to a register in
1536 CLASS requires an extra scratch or immediate register. Return the class
1537 needed for the immediate register. */
1539 static enum reg_class
1540 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1541 enum machine_mode mode, secondary_reload_info *sri)
1543 /* Loading and storing HImode or QImode values to and from memory
1544 usually requires a scratch register. */
1545 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1547 if (any_memory_operand (x, mode))
1549 if (in_p)
1551 if (!aligned_memory_operand (x, mode))
1552 sri->icode = reload_in_optab[mode];
1554 else
1555 sri->icode = reload_out_optab[mode];
1556 return NO_REGS;
1560 /* We also cannot do integral arithmetic into FP regs, as might result
1561 from register elimination into a DImode fp register. */
1562 if (class == FLOAT_REGS)
1564 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1565 return GENERAL_REGS;
1566 if (in_p && INTEGRAL_MODE_P (mode)
1567 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1568 return GENERAL_REGS;
1571 return NO_REGS;
1574 /* Subfunction of the following function. Update the flags of any MEM
1575 found in part of X. */
1577 static int
1578 alpha_set_memflags_1 (rtx *xp, void *data)
1580 rtx x = *xp, orig = (rtx) data;
1582 if (GET_CODE (x) != MEM)
1583 return 0;
1585 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1586 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1587 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1588 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1589 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1591 /* Sadly, we cannot use alias sets because the extra aliasing
1592 produced by the AND interferes. Given that two-byte quantities
1593 are the only thing we would be able to differentiate anyway,
1594 there does not seem to be any point in convoluting the early
1595 out of the alias check. */
1597 return -1;
1600 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1601 generated to perform a memory operation, look for any MEMs in either
1602 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1603 volatile flags from REF into each of the MEMs found. If REF is not
1604 a MEM, don't do anything. */
1606 void
1607 alpha_set_memflags (rtx insn, rtx ref)
1609 rtx *base_ptr;
1611 if (GET_CODE (ref) != MEM)
1612 return;
1614 /* This is only called from alpha.md, after having had something
1615 generated from one of the insn patterns. So if everything is
1616 zero, the pattern is already up-to-date. */
1617 if (!MEM_VOLATILE_P (ref)
1618 && !MEM_IN_STRUCT_P (ref)
1619 && !MEM_SCALAR_P (ref)
1620 && !MEM_NOTRAP_P (ref)
1621 && !MEM_READONLY_P (ref))
1622 return;
1624 if (INSN_P (insn))
1625 base_ptr = &PATTERN (insn);
1626 else
1627 base_ptr = &insn;
1628 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1631 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1632 int, bool);
1634 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1635 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1636 and return pc_rtx if successful. */
1638 static rtx
1639 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1640 HOST_WIDE_INT c, int n, bool no_output)
1642 HOST_WIDE_INT new;
1643 int i, bits;
1644 /* Use a pseudo if highly optimizing and still generating RTL. */
1645 rtx subtarget
1646 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1647 rtx temp, insn;
1649 /* If this is a sign-extended 32-bit constant, we can do this in at most
1650 three insns, so do it if we have enough insns left. We always have
1651 a sign-extended 32-bit constant when compiling on a narrow machine. */
1653 if (HOST_BITS_PER_WIDE_INT != 64
1654 || c >> 31 == -1 || c >> 31 == 0)
1656 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1657 HOST_WIDE_INT tmp1 = c - low;
1658 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1659 HOST_WIDE_INT extra = 0;
1661 /* If HIGH will be interpreted as negative but the constant is
 1662 positive, we must adjust it to do two ldah insns.
1664 if ((high & 0x8000) != 0 && c >= 0)
1666 extra = 0x4000;
1667 tmp1 -= 0x40000000;
1668 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1671 if (c == low || (low == 0 && extra == 0))
1673 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1674 but that meant that we can't handle INT_MIN on 32-bit machines
1675 (like NT/Alpha), because we recurse indefinitely through
1676 emit_move_insn to gen_movdi. So instead, since we know exactly
1677 what we want, create it explicitly. */
1679 if (no_output)
1680 return pc_rtx;
1681 if (target == NULL)
1682 target = gen_reg_rtx (mode);
1683 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1684 return target;
1686 else if (n >= 2 + (extra != 0))
1688 if (no_output)
1689 return pc_rtx;
1690 if (!can_create_pseudo_p ())
1692 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1693 temp = target;
1695 else
1696 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1697 subtarget, mode);
1699 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1700 This means that if we go through expand_binop, we'll try to
1701 generate extensions, etc, which will require new pseudos, which
1702 will fail during some split phases. The SImode add patterns
1703 still exist, but are not named. So build the insns by hand. */
1705 if (extra != 0)
1707 if (! subtarget)
1708 subtarget = gen_reg_rtx (mode);
1709 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1710 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1711 emit_insn (insn);
1712 temp = subtarget;
1715 if (target == NULL)
1716 target = gen_reg_rtx (mode);
1717 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1718 insn = gen_rtx_SET (VOIDmode, target, insn);
1719 emit_insn (insn);
1720 return target;
1724 /* If we couldn't do it that way, try some other methods. But if we have
1725 no instructions left, don't bother. Likewise, if this is SImode and
1726 we can't make pseudos, we can't do anything since the expand_binop
1727 and expand_unop calls will widen and try to make pseudos. */
1729 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1730 return 0;
1732 /* Next, see if we can load a related constant and then shift and possibly
1733 negate it to get the constant we want. Try this once each increasing
1734 numbers of insns. */
1736 for (i = 1; i < n; i++)
1738 /* First, see if minus some low bits, we've an easy load of
1739 high bits. */
1741 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1742 if (new != 0)
1744 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1745 if (temp)
1747 if (no_output)
1748 return temp;
1749 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1750 target, 0, OPTAB_WIDEN);
1754 /* Next try complementing. */
1755 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1756 if (temp)
1758 if (no_output)
1759 return temp;
1760 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1763 /* Next try to form a constant and do a left shift. We can do this
1764 if some low-order bits are zero; the exact_log2 call below tells
1765 us that information. The bits we are shifting out could be any
1766 value, but here we'll just try the 0- and sign-extended forms of
1767 the constant. To try to increase the chance of having the same
1768 constant in more than one insn, start at the highest number of
1769 bits to shift, but try all possibilities in case a ZAPNOT will
1770 be useful. */
1772 bits = exact_log2 (c & -c);
1773 if (bits > 0)
1774 for (; bits > 0; bits--)
1776 new = c >> bits;
1777 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1778 if (!temp && c < 0)
1780 new = (unsigned HOST_WIDE_INT)c >> bits;
1781 temp = alpha_emit_set_const (subtarget, mode, new,
1782 i, no_output);
1784 if (temp)
1786 if (no_output)
1787 return temp;
1788 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1789 target, 0, OPTAB_WIDEN);
1793 /* Now try high-order zero bits. Here we try the shifted-in bits as
1794 all zero and all ones. Be careful to avoid shifting outside the
1795 mode and to avoid shifting outside the host wide int size. */
1796 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1797 confuse the recursive call and set all of the high 32 bits. */
1799 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1800 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1801 if (bits > 0)
1802 for (; bits > 0; bits--)
1804 new = c << bits;
1805 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1806 if (!temp)
1808 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1809 temp = alpha_emit_set_const (subtarget, mode, new,
1810 i, no_output);
1812 if (temp)
1814 if (no_output)
1815 return temp;
1816 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1817 target, 1, OPTAB_WIDEN);
1821 /* Now try high-order 1 bits. We get that with a sign-extension.
1822 But one bit isn't enough here. Be careful to avoid shifting outside
1823 the mode and to avoid shifting outside the host wide int size. */
1825 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1826 - floor_log2 (~ c) - 2);
1827 if (bits > 0)
1828 for (; bits > 0; bits--)
1830 new = c << bits;
1831 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1832 if (!temp)
1834 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1835 temp = alpha_emit_set_const (subtarget, mode, new,
1836 i, no_output);
1838 if (temp)
1840 if (no_output)
1841 return temp;
1842 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1843 target, 0, OPTAB_WIDEN);
1848 #if HOST_BITS_PER_WIDE_INT == 64
1849 /* Finally, see if can load a value into the target that is the same as the
1850 constant except that all bytes that are 0 are changed to be 0xff. If we
1851 can, then we can do a ZAPNOT to obtain the desired constant. */
1853 new = c;
1854 for (i = 0; i < 64; i += 8)
1855 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1856 new |= (HOST_WIDE_INT) 0xff << i;
1858 /* We are only called for SImode and DImode. If this is SImode, ensure that
1859 we are sign extended to a full word. */
1861 if (mode == SImode)
1862 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1864 if (new != c)
1866 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1867 if (temp)
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1872 target, 0, OPTAB_WIDEN);
1875 #endif
1877 return 0;
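/* Two worked examples of the sign-extended 32-bit path above.
     c = 0x12345678:  low = 0x5678, high = 0x1234, extra = 0, giving
       ldah $r,0x1234($31); lda $r,0x5678($r)               -- 2 insns.
     c = 0x7fff8000:  low = -0x8000 and the raw high part 0x8000 would be
       sign-extended by ldah, so the adjustment above sets extra = 0x4000
       and high = 0x4000, giving
       ldah $r,0x4000($31); ldah $r,0x4000($r); lda $r,-0x8000($r)
                                                            -- 3 insns.  */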
1880 /* Try to output insns to set TARGET equal to the constant C if it can be
1881 done in less than N insns. Do all computations in MODE. Returns the place
1882 where the output has been placed if it can be done and the insns have been
1883 emitted. If it would take more than N insns, zero is returned and no
 1884 insns are emitted. */
1886 static rtx
1887 alpha_emit_set_const (rtx target, enum machine_mode mode,
1888 HOST_WIDE_INT c, int n, bool no_output)
1890 enum machine_mode orig_mode = mode;
1891 rtx orig_target = target;
1892 rtx result = 0;
1893 int i;
 1895 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1896 can't load this constant in one insn, do this in DImode. */
1897 if (!can_create_pseudo_p () && mode == SImode
1898 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1900 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1901 if (result)
1902 return result;
1904 target = no_output ? NULL : gen_lowpart (DImode, target);
1905 mode = DImode;
1907 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1910 mode = DImode;
1913 /* Try 1 insn, then 2, then up to N. */
1914 for (i = 1; i <= n; i++)
1916 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1917 if (result)
1919 rtx insn, set;
1921 if (no_output)
1922 return result;
1924 insn = get_last_insn ();
1925 set = single_set (insn);
1926 if (! CONSTANT_P (SET_SRC (set)))
1927 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1928 break;
1932 /* Allow for the case where we changed the mode of TARGET. */
1933 if (result)
1935 if (result == target)
1936 result = orig_target;
1937 else if (mode != orig_mode)
1938 result = gen_lowpart (orig_mode, result);
1941 return result;
1944 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
 1945 fall back to a straightforward decomposition. We do this to avoid
1946 exponential run times encountered when looking for longer sequences
1947 with alpha_emit_set_const. */
1949 static rtx
1950 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1952 HOST_WIDE_INT d1, d2, d3, d4;
1954 /* Decompose the entire word */
1955 #if HOST_BITS_PER_WIDE_INT >= 64
1956 gcc_assert (c2 == -(c1 < 0));
1957 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1958 c1 -= d1;
1959 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1960 c1 = (c1 - d2) >> 32;
1961 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1962 c1 -= d3;
1963 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1964 gcc_assert (c1 == d4);
1965 #else
1966 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d1;
1968 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d2);
1970 c2 += (d2 < 0);
1971 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1972 c2 -= d3;
1973 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c2 == d4);
1975 #endif
1977 /* Construct the high word */
1978 if (d4)
1980 emit_move_insn (target, GEN_INT (d4));
1981 if (d3)
1982 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1984 else
1985 emit_move_insn (target, GEN_INT (d3));
1987 /* Shift it into place */
1988 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1990 /* Add in the low bits. */
1991 if (d2)
1992 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1993 if (d1)
1994 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1996 return target;
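/* A worked example of the decomposition above.  For
   c = 0x123456789abcdef0 on a 64-bit host:
     d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000
   which is emitted, roughly, as
     ldah $r,0x1234($31)      ; d4
     lda  $r,0x5679($r)       ; + d3
     sll  $r,32,$r            ; shift the high word into place
     ldah $r,-0x6543($r)      ; + d2
     lda  $r,-0x2110($r)      ; + d1
   i.e. five instructions for an arbitrary 64-bit constant.  */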
1999 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2000 the low 64 bits. */
2002 static void
2003 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2005 HOST_WIDE_INT i0, i1;
2007 if (GET_CODE (x) == CONST_VECTOR)
2008 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2011 if (GET_CODE (x) == CONST_INT)
2013 i0 = INTVAL (x);
2014 i1 = -(i0 < 0);
2016 else if (HOST_BITS_PER_WIDE_INT >= 64)
2018 i0 = CONST_DOUBLE_LOW (x);
2019 i1 = -(i0 < 0);
2021 else
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = CONST_DOUBLE_HIGH (x);
2027 *p0 = i0;
2028 *p1 = i1;
2031 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2032 are willing to load the value into a register via a move pattern.
2033 Normally this is all symbolic constants, integral constants that
2034 take three or fewer instructions, and floating-point zero. */
2036 bool
2037 alpha_legitimate_constant_p (rtx x)
2039 enum machine_mode mode = GET_MODE (x);
2040 HOST_WIDE_INT i0, i1;
2042 switch (GET_CODE (x))
2044 case CONST:
2045 case LABEL_REF:
2046 case HIGH:
2047 return true;
2049 case SYMBOL_REF:
2050 /* TLS symbols are never valid. */
2051 return SYMBOL_REF_TLS_MODEL (x) == 0;
2053 case CONST_DOUBLE:
2054 if (x == CONST0_RTX (mode))
2055 return true;
2056 if (FLOAT_MODE_P (mode))
2057 return false;
2058 goto do_integer;
2060 case CONST_VECTOR:
2061 if (x == CONST0_RTX (mode))
2062 return true;
2063 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2064 return false;
2065 if (GET_MODE_SIZE (mode) != 8)
2066 return false;
2067 goto do_integer;
2069 case CONST_INT:
2070 do_integer:
2071 if (TARGET_BUILD_CONSTANTS)
2072 return true;
2073 alpha_extract_integer (x, &i0, &i1);
2074 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2075 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2076 return false;
2078 default:
2079 return false;
2083 /* Operand 1 is known to be a constant, and should require more than one
2084 instruction to load. Emit that multi-part load. */
2086 bool
2087 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2089 HOST_WIDE_INT i0, i1;
2090 rtx temp = NULL_RTX;
2092 alpha_extract_integer (operands[1], &i0, &i1);
2094 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2095 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2097 if (!temp && TARGET_BUILD_CONSTANTS)
2098 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2100 if (temp)
2102 if (!rtx_equal_p (operands[0], temp))
2103 emit_move_insn (operands[0], temp);
2104 return true;
2107 return false;
2110 /* Expand a move instruction; return true if all work is done.
2111 We don't handle non-bwx subword loads here. */
2113 bool
2114 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2116 /* If the output is not a register, the input must be. */
2117 if (GET_CODE (operands[0]) == MEM
2118 && ! reg_or_0_operand (operands[1], mode))
2119 operands[1] = force_reg (mode, operands[1]);
2121 /* Allow legitimize_address to perform some simplifications. */
2122 if (mode == Pmode && symbolic_operand (operands[1], mode))
2124 rtx tmp;
2126 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2127 if (tmp)
2129 if (tmp == operands[0])
2130 return true;
2131 operands[1] = tmp;
2132 return false;
2136 /* Early out for non-constants and valid constants. */
2137 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2138 return false;
2140 /* Split large integers. */
2141 if (GET_CODE (operands[1]) == CONST_INT
2142 || GET_CODE (operands[1]) == CONST_DOUBLE
2143 || GET_CODE (operands[1]) == CONST_VECTOR)
2145 if (alpha_split_const_mov (mode, operands))
2146 return true;
2149 /* Otherwise we've nothing left but to drop the thing to memory. */
2150 operands[1] = force_const_mem (mode, operands[1]);
2151 if (reload_in_progress)
2153 emit_move_insn (operands[0], XEXP (operands[1], 0));
2154 operands[1] = replace_equiv_address (operands[1], operands[0]);
2156 else
2157 operands[1] = validize_mem (operands[1]);
2158 return false;
2161 /* Expand a non-bwx QImode or HImode move instruction;
2162 return true if all work is done. */
2164 bool
2165 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2167 rtx seq;
2169 /* If the output is not a register, the input must be. */
2170 if (MEM_P (operands[0]))
2171 operands[1] = force_reg (mode, operands[1]);
2173 /* Handle four memory cases, unaligned and aligned for either the input
2174 or the output. The only case where we can be called during reload is
2175 for aligned loads; all other cases require temporaries. */
2177 if (any_memory_operand (operands[1], mode))
2179 if (aligned_memory_operand (operands[1], mode))
2181 if (reload_in_progress)
2183 if (mode == QImode)
2184 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2185 else
2186 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2187 emit_insn (seq);
2189 else
2191 rtx aligned_mem, bitnum;
2192 rtx scratch = gen_reg_rtx (SImode);
2193 rtx subtarget;
2194 bool copyout;
2196 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2198 subtarget = operands[0];
2199 if (GET_CODE (subtarget) == REG)
2200 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 else
2202 subtarget = gen_reg_rtx (DImode), copyout = true;
2204 if (mode == QImode)
2205 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2206 bitnum, scratch);
2207 else
2208 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2209 bitnum, scratch);
2210 emit_insn (seq);
2212 if (copyout)
2213 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2216 else
2218 /* Don't pass these as parameters since that makes the generated
2219 code depend on parameter evaluation order which will cause
2220 bootstrap failures. */
2222 rtx temp1, temp2, subtarget, ua;
2223 bool copyout;
2225 temp1 = gen_reg_rtx (DImode);
2226 temp2 = gen_reg_rtx (DImode);
2228 subtarget = operands[0];
2229 if (GET_CODE (subtarget) == REG)
2230 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2231 else
2232 subtarget = gen_reg_rtx (DImode), copyout = true;
2234 ua = get_unaligned_address (operands[1]);
2235 if (mode == QImode)
2236 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2237 else
2238 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2240 alpha_set_memflags (seq, operands[1]);
2241 emit_insn (seq);
2243 if (copyout)
2244 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2246 return true;
2249 if (any_memory_operand (operands[0], mode))
2251 if (aligned_memory_operand (operands[0], mode))
2253 rtx aligned_mem, bitnum;
2254 rtx temp1 = gen_reg_rtx (SImode);
2255 rtx temp2 = gen_reg_rtx (SImode);
2257 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2259 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2260 temp1, temp2));
2262 else
2264 rtx temp1 = gen_reg_rtx (DImode);
2265 rtx temp2 = gen_reg_rtx (DImode);
2266 rtx temp3 = gen_reg_rtx (DImode);
2267 rtx ua = get_unaligned_address (operands[0]);
2269 if (mode == QImode)
2270 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2271 else
2272 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2274 alpha_set_memflags (seq, operands[0]);
2275 emit_insn (seq);
2277 return true;
2280 return false;
2283 /* Implement the movmisalign patterns. One of the operands is a memory
2284 that is not naturally aligned. Emit instructions to load it. */
2286 void
2287 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2289 /* Honor misaligned loads for the cases we promised to handle. */
2290 if (MEM_P (operands[1]))
2292 rtx tmp;
2294 if (register_operand (operands[0], mode))
2295 tmp = operands[0];
2296 else
2297 tmp = gen_reg_rtx (mode);
2299 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2300 if (tmp != operands[0])
2301 emit_move_insn (operands[0], tmp);
2303 else if (MEM_P (operands[0]))
2305 if (!reg_or_0_operand (operands[1], mode))
2306 operands[1] = force_reg (mode, operands[1]);
2307 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2309 else
2310 gcc_unreachable ();
2313 /* Generate an unsigned DImode to FP conversion. This is the same code
2314 optabs would emit if we didn't have TFmode patterns.
2316 For SFmode, this is the only construction I've found that can pass
2317 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2318 intermediates will work, because you'll get intermediate rounding
2319 that ruins the end result. Some of this could be fixed by turning
2320 on round-to-positive-infinity, but that requires diddling the fpsr,
2321 which kills performance. I tried turning this around and converting
2322 to a negative number, so that I could turn on /m, but either I did
2323 it wrong or there's something else, because I wound up with the exact
2324 same single-bit error. There is a branchless form of this same code:
2326 srl $16,1,$1
2327 and $16,1,$2
2328 cmplt $16,0,$3
2329 or $1,$2,$2
2330 cmovge $16,$16,$2
2331 itoft $3,$f10
2332 itoft $2,$f11
2333 cvtqs $f11,$f11
2334 adds $f11,$f11,$f0
2335 fcmoveq $f10,$f11,$f0
2337 I'm not using it because it's the same number of instructions as
2338 this branch-full form, and it has more serialized long latency
2339 instructions on the critical path.
2341 For DFmode, we can avoid rounding errors by breaking up the word
2342 into two pieces, converting them separately, and adding them back:
2344 LC0: .long 0,0x5f800000
2346 itoft $16,$f11
2347 lda $2,LC0
2348 cmplt $16,0,$1
2349 cpyse $f11,$f31,$f10
2350 cpyse $f31,$f11,$f11
2351 s4addq $1,$2,$1
2352 lds $f12,0($1)
2353 cvtqt $f10,$f10
2354 cvtqt $f11,$f11
2355 addt $f12,$f10,$f0
2356 addt $f0,$f11,$f0
2358 This doesn't seem to be a clear-cut win over the optabs form.
2359 It probably all depends on the distribution of numbers being
2360 converted -- in the optabs form, all but high-bit-set has a
2361 much lower minimum execution time. */
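/* As a rough C sketch of what the branch-full form below computes for
   a signed 64-bit input x (illustrative only, not the exact RTL;
   "double" stands for the target SFmode/DFmode type):

	if (x >= 0)
	  result = (double) x;
	else
	  result = 2.0 * (double) (long) (((unsigned long) x >> 1) | (x & 1));

   i.e. halve the value, fold the discarded low bit back in as a sticky
   bit, convert as signed, and double the result.  */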
2363 void
2364 alpha_emit_floatuns (rtx operands[2])
2366 rtx neglab, donelab, i0, i1, f0, in, out;
2367 enum machine_mode mode;
2369 out = operands[0];
2370 in = force_reg (DImode, operands[1]);
2371 mode = GET_MODE (out);
2372 neglab = gen_label_rtx ();
2373 donelab = gen_label_rtx ();
2374 i0 = gen_reg_rtx (DImode);
2375 i1 = gen_reg_rtx (DImode);
2376 f0 = gen_reg_rtx (mode);
2378 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2380 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2381 emit_jump_insn (gen_jump (donelab));
2382 emit_barrier ();
2384 emit_label (neglab);
2386 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2387 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2388 emit_insn (gen_iordi3 (i0, i0, i1));
2389 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2390 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2392 emit_label (donelab);
2395 /* Generate the comparison for a conditional branch. */
2398 alpha_emit_conditional_branch (enum rtx_code code)
2400 enum rtx_code cmp_code, branch_code;
2401 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2402 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2403 rtx tem;
2405 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2407 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2408 op1 = const0_rtx;
2409 alpha_compare.fp_p = 0;
2412 /* The general case: fold the comparison code to the types of compares
2413 that we have, choosing the branch as necessary. */
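/* For example (illustrative): an integer GT test is emitted as cmple
   followed by beq (branch when the reversed LE compare is false), while
   a floating-point GT swaps the operands and becomes cmptlt followed
   by fbne.  */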
2414 switch (code)
2416 case EQ: case LE: case LT: case LEU: case LTU:
2417 case UNORDERED:
2418 /* We have these compares: */
2419 cmp_code = code, branch_code = NE;
2420 break;
2422 case NE:
2423 case ORDERED:
2424 /* These must be reversed. */
2425 cmp_code = reverse_condition (code), branch_code = EQ;
2426 break;
2428 case GE: case GT: case GEU: case GTU:
2430 /* For FP we swap them; for INT we reverse them. */
2430 if (alpha_compare.fp_p)
2432 cmp_code = swap_condition (code);
2433 branch_code = NE;
2434 tem = op0, op0 = op1, op1 = tem;
2436 else
2438 cmp_code = reverse_condition (code);
2439 branch_code = EQ;
2441 break;
2443 default:
2444 gcc_unreachable ();
2447 if (alpha_compare.fp_p)
2449 cmp_mode = DFmode;
2450 if (flag_unsafe_math_optimizations)
2452 /* When we are not as concerned about non-finite values, and we
2453 are comparing against zero, we can branch directly. */
2454 if (op1 == CONST0_RTX (DFmode))
2455 cmp_code = UNKNOWN, branch_code = code;
2456 else if (op0 == CONST0_RTX (DFmode))
2458 /* Undo the swap we probably did just above. */
2459 tem = op0, op0 = op1, op1 = tem;
2460 branch_code = swap_condition (cmp_code);
2461 cmp_code = UNKNOWN;
2464 else
2466 /* ??? We mark the branch mode to be CCmode to prevent the
2467 compare and branch from being combined, since the compare
2468 insn follows IEEE rules that the branch does not. */
2469 branch_mode = CCmode;
2472 else
2474 cmp_mode = DImode;
2476 /* The following optimizations are only for signed compares. */
2477 if (code != LEU && code != LTU && code != GEU && code != GTU)
2479 /* Whee. Compare and branch against 0 directly. */
2480 if (op1 == const0_rtx)
2481 cmp_code = UNKNOWN, branch_code = code;
2483 /* If the constant doesn't fit into an immediate, but can
2484 be generated by lda/ldah, we adjust the argument and
2485 compare against zero, so we can use beq/bne directly. */
2486 /* ??? Don't do this when comparing against symbols, otherwise
2487 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2488 be declared false out of hand (at least for non-weak). */
2489 else if (GET_CODE (op1) == CONST_INT
2490 && (code == EQ || code == NE)
2491 && !(symbolic_operand (op0, VOIDmode)
2492 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2494 rtx n_op1 = GEN_INT (-INTVAL (op1));
2496 if (! satisfies_constraint_I (op1)
2497 && (satisfies_constraint_K (n_op1)
2498 || satisfies_constraint_L (n_op1)))
2499 cmp_code = PLUS, branch_code = code, op1 = n_op1;
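/* Illustrative case: comparing a register against 0x8000 cannot use
   cmpeq's unsigned 8-bit immediate, but -0x8000 fits lda's signed
   16-bit field, so we form op0 + (-0x8000) with lda and then use
   beq/bne on that result.  */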
2503 if (!reg_or_0_operand (op0, DImode))
2504 op0 = force_reg (DImode, op0);
2505 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2506 op1 = force_reg (DImode, op1);
2509 /* Emit an initial compare instruction, if necessary. */
2510 tem = op0;
2511 if (cmp_code != UNKNOWN)
2513 tem = gen_reg_rtx (cmp_mode);
2514 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2517 /* Zero the operands. */
2518 memset (&alpha_compare, 0, sizeof (alpha_compare));
2520 /* Return the branch comparison. */
2521 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2524 /* Certain simplifications can be done to make invalid setcc operations
2525 valid. Return the final comparison, or NULL if we can't work. */
2528 alpha_emit_setcc (enum rtx_code code)
2530 enum rtx_code cmp_code;
2531 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2532 int fp_p = alpha_compare.fp_p;
2533 rtx tmp;
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2538 if (fp_p && GET_MODE (op0) == TFmode)
2540 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2541 op1 = const0_rtx;
2542 fp_p = 0;
2545 if (fp_p && !TARGET_FIX)
2546 return NULL_RTX;
2548 /* The general case: fold the comparison code to the types of compares
2549 that we have, choosing the branch as necessary. */
2551 cmp_code = UNKNOWN;
2552 switch (code)
2554 case EQ: case LE: case LT: case LEU: case LTU:
2555 case UNORDERED:
2556 /* We have these compares. */
2557 if (fp_p)
2558 cmp_code = code, code = NE;
2559 break;
2561 case NE:
2562 if (!fp_p && op1 == const0_rtx)
2563 break;
2564 /* FALLTHRU */
2566 case ORDERED:
2567 cmp_code = reverse_condition (code);
2568 code = EQ;
2569 break;
2571 case GE: case GT: case GEU: case GTU:
2572 /* These normally need swapping, but for integer zero we have
2573 special patterns that recognize swapped operands. */
2574 if (!fp_p && op1 == const0_rtx)
2575 break;
2576 code = swap_condition (code);
2577 if (fp_p)
2578 cmp_code = code, code = NE;
2579 tmp = op0, op0 = op1, op1 = tmp;
2580 break;
2582 default:
2583 gcc_unreachable ();
2586 if (!fp_p)
2588 if (!register_operand (op0, DImode))
2589 op0 = force_reg (DImode, op0);
2590 if (!reg_or_8bit_operand (op1, DImode))
2591 op1 = force_reg (DImode, op1);
2594 /* Emit an initial compare instruction, if necessary. */
2595 if (cmp_code != UNKNOWN)
2597 enum machine_mode mode = fp_p ? DFmode : DImode;
2599 tmp = gen_reg_rtx (mode);
2600 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2601 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2603 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2604 op1 = const0_rtx;
2607 /* Return the setcc comparison. */
2608 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2612 /* Rewrite a comparison against zero CMP of the form
2613 (CODE (cc0) (const_int 0)) so it can be written validly in
2614 a conditional move (if_then_else CMP ...).
2615 If both of the operands that set cc0 are nonzero we must emit
2616 an insn to perform the compare (it can't be done within
2617 the conditional move). */
2620 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2622 enum rtx_code code = GET_CODE (cmp);
2623 enum rtx_code cmov_code = NE;
2624 rtx op0 = alpha_compare.op0;
2625 rtx op1 = alpha_compare.op1;
2626 int fp_p = alpha_compare.fp_p;
2627 enum machine_mode cmp_mode
2628 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2629 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2630 enum machine_mode cmov_mode = VOIDmode;
2631 int local_fast_math = flag_unsafe_math_optimizations;
2632 rtx tem;
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2637 if (fp_p != FLOAT_MODE_P (mode))
2639 enum rtx_code cmp_code;
2641 if (! TARGET_FIX)
2642 return 0;
2644 /* If we have fp<->int register move instructions, do a cmov by
2645 performing the comparison in fp registers, and move the
2646 zero/nonzero value to integer registers, where we can then
2647 use a normal cmov, or vice-versa. */
2649 switch (code)
2651 case EQ: case LE: case LT: case LEU: case LTU:
2652 /* We have these compares. */
2653 cmp_code = code, code = NE;
2654 break;
2656 case NE:
2657 /* This must be reversed. */
2658 cmp_code = EQ, code = EQ;
2659 break;
2661 case GE: case GT: case GEU: case GTU:
2662 /* These normally need swapping, but for integer zero we have
2663 special patterns that recognize swapped operands. */
2664 if (!fp_p && op1 == const0_rtx)
2665 cmp_code = code, code = NE;
2666 else
2668 cmp_code = swap_condition (code);
2669 code = NE;
2670 tem = op0, op0 = op1, op1 = tem;
2672 break;
2674 default:
2675 gcc_unreachable ();
2678 tem = gen_reg_rtx (cmp_op_mode);
2679 emit_insn (gen_rtx_SET (VOIDmode, tem,
2680 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2681 op0, op1)));
2683 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2684 op0 = gen_lowpart (cmp_op_mode, tem);
2685 op1 = CONST0_RTX (cmp_op_mode);
2686 fp_p = !fp_p;
2687 local_fast_math = 1;
2690 /* We may be able to use a conditional move directly.
2691 This avoids emitting spurious compares. */
2692 if (signed_comparison_operator (cmp, VOIDmode)
2693 && (!fp_p || local_fast_math)
2694 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2695 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2697 /* We can't put the comparison inside the conditional move;
2698 emit a compare instruction and put that inside the
2699 conditional move. Make sure we emit only comparisons we have;
2700 swap or reverse as necessary. */
2702 if (!can_create_pseudo_p ())
2703 return NULL_RTX;
2705 switch (code)
2707 case EQ: case LE: case LT: case LEU: case LTU:
2708 /* We have these compares: */
2709 break;
2711 case NE:
2712 /* This must be reversed. */
2713 code = reverse_condition (code);
2714 cmov_code = EQ;
2715 break;
2717 case GE: case GT: case GEU: case GTU:
2718 /* These must be swapped. */
2719 if (op1 != CONST0_RTX (cmp_mode))
2721 code = swap_condition (code);
2722 tem = op0, op0 = op1, op1 = tem;
2724 break;
2726 default:
2727 gcc_unreachable ();
2730 if (!fp_p)
2732 if (!reg_or_0_operand (op0, DImode))
2733 op0 = force_reg (DImode, op0);
2734 if (!reg_or_8bit_operand (op1, DImode))
2735 op1 = force_reg (DImode, op1);
2738 /* ??? We mark the branch mode to be CCmode to prevent the compare
2739 and cmov from being combined, since the compare insn follows IEEE
2740 rules that the cmov does not. */
2741 if (fp_p && !local_fast_math)
2742 cmov_mode = CCmode;
2744 tem = gen_reg_rtx (cmp_op_mode);
2745 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2746 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2749 /* Simplify a conditional move of two constants into a setcc with
2750 arithmetic. This is done with a splitter since combine would
2751 just undo the work if done during code generation. It also catches
2752 cases we wouldn't have before cse. */
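/* Illustrative cases: (cond ? 8 : 0) becomes a setcc followed by an
   sll of 3, and (cond ? 6 : 2) becomes a setcc followed by an s4addq
   adding 2; either way the cmove of two constants disappears.  */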
2755 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2756 rtx t_rtx, rtx f_rtx)
2758 HOST_WIDE_INT t, f, diff;
2759 enum machine_mode mode;
2760 rtx target, subtarget, tmp;
2762 mode = GET_MODE (dest);
2763 t = INTVAL (t_rtx);
2764 f = INTVAL (f_rtx);
2765 diff = t - f;
2767 if (((code == NE || code == EQ) && diff < 0)
2768 || (code == GE || code == GT))
2770 code = reverse_condition (code);
2771 diff = t, t = f, f = diff;
2772 diff = t - f;
2775 subtarget = target = dest;
2776 if (mode != DImode)
2778 target = gen_lowpart (DImode, dest);
2779 if (can_create_pseudo_p ())
2780 subtarget = gen_reg_rtx (DImode);
2781 else
2782 subtarget = target;
2784 /* Below, we must be careful to use copy_rtx on target and subtarget
2785 in intermediate insns, as they may be a subreg rtx, which may not
2786 be shared. */
2788 if (f == 0 && exact_log2 (diff) > 0
2789 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2790 viable over a longer latency cmove. On EV5, the E0 slot is a
2791 scarce resource, and on EV4 shift has the same latency as a cmove. */
2792 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2794 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2795 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2797 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2798 GEN_INT (exact_log2 (t)));
2799 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2801 else if (f == 0 && t == -1)
2803 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2804 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2806 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2808 else if (diff == 1 || diff == 4 || diff == 8)
2810 rtx add_op;
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2815 if (diff == 1)
2816 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2817 else
2819 add_op = GEN_INT (f);
2820 if (sext_add_operand (add_op, mode))
2822 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2823 GEN_INT (diff));
2824 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2825 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2827 else
2828 return 0;
2831 else
2832 return 0;
2834 return 1;
2837 /* Look up the X_floating library function name for the
2838 given operation. */
2840 struct xfloating_op GTY(())
2842 const enum rtx_code code;
2843 const char *const GTY((skip)) osf_func;
2844 const char *const GTY((skip)) vms_func;
2845 rtx libcall;
2848 static GTY(()) struct xfloating_op xfloating_ops[] =
2850 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2851 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2852 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2853 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2854 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2855 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2856 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2857 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2858 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2859 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2860 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2861 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2862 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2863 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2864 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2867 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2869 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2870 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2873 static rtx
2874 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2876 struct xfloating_op *ops = xfloating_ops;
2877 long n = ARRAY_SIZE (xfloating_ops);
2878 long i;
2880 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2882 /* How irritating. Nothing to key off for the main table. */
2883 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2885 ops = vax_cvt_ops;
2886 n = ARRAY_SIZE (vax_cvt_ops);
2889 for (i = 0; i < n; ++i, ++ops)
2890 if (ops->code == code)
2892 rtx func = ops->libcall;
2893 if (!func)
2895 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2896 ? ops->vms_func : ops->osf_func);
2897 ops->libcall = func;
2899 return func;
2902 gcc_unreachable ();
2905 /* Most X_floating operations take the rounding mode as an argument.
2906 Compute that here. */
2908 static int
2909 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2910 enum alpha_fp_rounding_mode round)
2912 int mode;
2914 switch (round)
2916 case ALPHA_FPRM_NORM:
2917 mode = 2;
2918 break;
2919 case ALPHA_FPRM_MINF:
2920 mode = 1;
2921 break;
2922 case ALPHA_FPRM_CHOP:
2923 mode = 0;
2924 break;
2925 case ALPHA_FPRM_DYN:
2926 mode = 4;
2927 break;
2928 default:
2929 gcc_unreachable ();
2931 /* XXX For reference, round to +inf is mode = 3. */
2934 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2935 mode |= 0x10000;
2937 return mode;
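/* For instance, a FLOAT_TRUNCATE with dynamic rounding
   (ALPHA_FPRM_DYN) under trap mode ALPHA_FPTM_N passes
   4 | 0x10000 = 0x10004 as the mode argument.  */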
2940 /* Emit an X_floating library function call.
2942 Note that these functions do not follow normal calling conventions:
2943 TFmode arguments are passed in two integer registers (as opposed to
2944 indirect); TFmode return values appear in R16+R17.
2946 FUNC is the function to call.
2947 TARGET is where the output belongs.
2948 OPERANDS are the inputs.
2949 NOPERANDS is the count of inputs.
2950 EQUIV is the expression equivalent for the function.
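/* For example (as arranged by the code below): a call to _OtsAddX
   passes its two TFmode operands in $16-$17 and $18-$19, the DImode
   rounding-mode argument in $20, and receives the TFmode result back
   in $16-$17.  */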
2953 static void
2954 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2955 int noperands, rtx equiv)
2957 rtx usage = NULL_RTX, tmp, reg;
2958 int regno = 16, i;
2960 start_sequence ();
2962 for (i = 0; i < noperands; ++i)
2964 switch (GET_MODE (operands[i]))
2966 case TFmode:
2967 reg = gen_rtx_REG (TFmode, regno);
2968 regno += 2;
2969 break;
2971 case DFmode:
2972 reg = gen_rtx_REG (DFmode, regno + 32);
2973 regno += 1;
2974 break;
2976 case VOIDmode:
2977 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2978 /* FALLTHRU */
2979 case DImode:
2980 reg = gen_rtx_REG (DImode, regno);
2981 regno += 1;
2982 break;
2984 default:
2985 gcc_unreachable ();
2988 emit_move_insn (reg, operands[i]);
2989 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2992 switch (GET_MODE (target))
2994 case TFmode:
2995 reg = gen_rtx_REG (TFmode, 16);
2996 break;
2997 case DFmode:
2998 reg = gen_rtx_REG (DFmode, 32);
2999 break;
3000 case DImode:
3001 reg = gen_rtx_REG (DImode, 0);
3002 break;
3003 default:
3004 gcc_unreachable ();
3007 tmp = gen_rtx_MEM (QImode, func);
3008 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3009 const0_rtx, const0_rtx));
3010 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3011 CONST_OR_PURE_CALL_P (tmp) = 1;
3013 tmp = get_insns ();
3014 end_sequence ();
3016 emit_libcall_block (tmp, target, reg, equiv);
3019 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3021 void
3022 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3024 rtx func;
3025 int mode;
3026 rtx out_operands[3];
3028 func = alpha_lookup_xfloating_lib_func (code);
3029 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3031 out_operands[0] = operands[1];
3032 out_operands[1] = operands[2];
3033 out_operands[2] = GEN_INT (mode);
3034 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3035 gen_rtx_fmt_ee (code, TFmode, operands[1],
3036 operands[2]));
3039 /* Emit an X_floating library function call for a comparison. */
3041 static rtx
3042 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3044 enum rtx_code cmp_code, res_code;
3045 rtx func, out, operands[2];
3047 /* X_floating library comparison functions return
3048 -1 unordered
3049 0 false
3050 1 true
3051 Convert the compare against the raw return value. */
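/* For instance, an LE comparison calls the LEQ routine and its result
   is then tested with GT against zero (true values come back as 1),
   while UNORDERED calls the EQL routine and tests LT against zero,
   since only an unordered pair produces -1.  */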
3053 cmp_code = *pcode;
3054 switch (cmp_code)
3056 case UNORDERED:
3057 cmp_code = EQ;
3058 res_code = LT;
3059 break;
3060 case ORDERED:
3061 cmp_code = EQ;
3062 res_code = GE;
3063 break;
3064 case NE:
3065 res_code = NE;
3066 break;
3067 case EQ:
3068 case LT:
3069 case GT:
3070 case LE:
3071 case GE:
3072 res_code = GT;
3073 break;
3074 default:
3075 gcc_unreachable ();
3077 *pcode = res_code;
3079 func = alpha_lookup_xfloating_lib_func (cmp_code);
3081 operands[0] = op0;
3082 operands[1] = op1;
3083 out = gen_reg_rtx (DImode);
3085 /* ??? Strange mode for equiv because what's actually returned
3086 is -1,0,1, not a proper boolean value. */
3087 alpha_emit_xfloating_libcall (func, out, operands, 2,
3088 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3090 return out;
3093 /* Emit an X_floating library function call for a conversion. */
3095 void
3096 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3098 int noperands = 1, mode;
3099 rtx out_operands[2];
3100 rtx func;
3101 enum rtx_code code = orig_code;
3103 if (code == UNSIGNED_FIX)
3104 code = FIX;
3106 func = alpha_lookup_xfloating_lib_func (code);
3108 out_operands[0] = operands[1];
3110 switch (code)
3112 case FIX:
3113 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3114 out_operands[1] = GEN_INT (mode);
3115 noperands = 2;
3116 break;
3117 case FLOAT_TRUNCATE:
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3119 out_operands[1] = GEN_INT (mode);
3120 noperands = 2;
3121 break;
3122 default:
3123 break;
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3127 gen_rtx_fmt_e (orig_code,
3128 GET_MODE (operands[0]),
3129 operands[1]));
3132 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3133 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3134 guarantee that the sequence
3135 set (OP[0] OP[2])
3136 set (OP[1] OP[3])
3137 is valid. Naturally, output operand ordering is little-endian.
3138 This is used by *movtf_internal and *movti_internal. */
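/* Illustrative example: splitting a TImode move whose source is the
   register pair $1/$2 and whose destination is a MEM produces
   OP[2] = (reg:DI 1), OP[3] = (reg:DI 2), OP[0] = the MEM at offset 0
   and OP[1] = the MEM at offset 8; FIXUP_OVERLAP swaps the two moves
   when writing OP[0] first would clobber OP[3].  */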
3140 void
3141 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3142 bool fixup_overlap)
3144 switch (GET_CODE (operands[1]))
3146 case REG:
3147 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3148 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3149 break;
3151 case MEM:
3152 operands[3] = adjust_address (operands[1], DImode, 8);
3153 operands[2] = adjust_address (operands[1], DImode, 0);
3154 break;
3156 case CONST_INT:
3157 case CONST_DOUBLE:
3158 gcc_assert (operands[1] == CONST0_RTX (mode));
3159 operands[2] = operands[3] = const0_rtx;
3160 break;
3162 default:
3163 gcc_unreachable ();
3166 switch (GET_CODE (operands[0]))
3168 case REG:
3169 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3170 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3171 break;
3173 case MEM:
3174 operands[1] = adjust_address (operands[0], DImode, 8);
3175 operands[0] = adjust_address (operands[0], DImode, 0);
3176 break;
3178 default:
3179 gcc_unreachable ();
3182 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3184 rtx tmp;
3185 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3186 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3190 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3191 op2 is a register containing the sign bit, operation is the
3192 logical operation to be performed. */
3194 void
3195 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3197 rtx high_bit = operands[2];
3198 rtx scratch;
3199 int move;
3201 alpha_split_tmode_pair (operands, TFmode, false);
3203 /* Detect three flavors of operand overlap. */
3204 move = 1;
3205 if (rtx_equal_p (operands[0], operands[2]))
3206 move = 0;
3207 else if (rtx_equal_p (operands[1], operands[2]))
3209 if (rtx_equal_p (operands[0], high_bit))
3210 move = 2;
3211 else
3212 move = -1;
3215 if (move < 0)
3216 emit_move_insn (operands[0], operands[2]);
3218 /* ??? If the destination overlaps both source tf and high_bit, then
3219 assume source tf is dead in its entirety and use the other half
3220 for a scratch register. Otherwise "scratch" is just the proper
3221 destination register. */
3222 scratch = operands[move < 2 ? 1 : 3];
3224 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3226 if (move > 0)
3228 emit_move_insn (operands[0], operands[2]);
3229 if (move > 1)
3230 emit_move_insn (operands[1], scratch);
3234 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3235 unaligned data:
3237 unsigned: signed:
3238 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3239 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3240 lda r3,X(r11) lda r3,X+2(r11)
3241 extwl r1,r3,r1 extql r1,r3,r1
3242 extwh r2,r3,r2 extqh r2,r3,r2
3243 or r1,r2,r1 or r1,r2,r1
3244 sra r1,48,r1
3246 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3248 lda r3,X(r11) lda r3,X(r11)
3249 extll r1,r3,r1 extll r1,r3,r1
3250 extlh r2,r3,r2 extlh r2,r3,r2
3251 or r1,r2,r1 addl r1,r2,r1
3253 quad: ldq_u r1,X(r11)
3254 ldq_u r2,X+7(r11)
3255 lda r3,X(r11)
3256 extql r1,r3,r1
3257 extqh r2,r3,r2
3258 or r1,r2,r1
3261 void
3262 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3263 HOST_WIDE_INT ofs, int sign)
3265 rtx meml, memh, addr, extl, exth, tmp, mema;
3266 enum machine_mode mode;
3268 if (TARGET_BWX && size == 2)
3270 meml = adjust_address (mem, QImode, ofs);
3271 memh = adjust_address (mem, QImode, ofs+1);
3272 if (BYTES_BIG_ENDIAN)
3273 tmp = meml, meml = memh, memh = tmp;
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276 emit_insn (gen_zero_extendqidi2 (extl, meml));
3277 emit_insn (gen_zero_extendqidi2 (exth, memh));
3278 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3279 NULL, 1, OPTAB_LIB_WIDEN);
3280 addr = expand_simple_binop (DImode, IOR, extl, exth,
3281 NULL, 1, OPTAB_LIB_WIDEN);
3283 if (sign && GET_MODE (tgt) != HImode)
3285 addr = gen_lowpart (HImode, addr);
3286 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3288 else
3290 if (GET_MODE (tgt) != DImode)
3291 addr = gen_lowpart (GET_MODE (tgt), addr);
3292 emit_move_insn (tgt, addr);
3294 return;
3297 meml = gen_reg_rtx (DImode);
3298 memh = gen_reg_rtx (DImode);
3299 addr = gen_reg_rtx (DImode);
3300 extl = gen_reg_rtx (DImode);
3301 exth = gen_reg_rtx (DImode);
3303 mema = XEXP (mem, 0);
3304 if (GET_CODE (mema) == LO_SUM)
3305 mema = force_reg (Pmode, mema);
3307 /* AND addresses cannot be in any alias set, since they may implicitly
3308 alias surrounding code. Ideally we'd have some alias set that
3309 covered all types except those with alignment 8 or higher. */
3311 tmp = change_address (mem, DImode,
3312 gen_rtx_AND (DImode,
3313 plus_constant (mema, ofs),
3314 GEN_INT (-8)));
3315 set_mem_alias_set (tmp, 0);
3316 emit_move_insn (meml, tmp);
3318 tmp = change_address (mem, DImode,
3319 gen_rtx_AND (DImode,
3320 plus_constant (mema, ofs + size - 1),
3321 GEN_INT (-8)));
3322 set_mem_alias_set (tmp, 0);
3323 emit_move_insn (memh, tmp);
3325 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3327 emit_move_insn (addr, plus_constant (mema, -1));
3329 emit_insn (gen_extqh_be (extl, meml, addr));
3330 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3332 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3333 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3334 addr, 1, OPTAB_WIDEN);
3336 else if (sign && size == 2)
3338 emit_move_insn (addr, plus_constant (mema, ofs+2));
3340 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3341 emit_insn (gen_extqh_le (exth, memh, addr));
3343 /* We must use tgt here for the target. Alpha-vms port fails if we use
3344 addr for the target, because addr is marked as a pointer and combine
3345 knows that pointers are always sign-extended 32-bit values. */
3346 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3347 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3348 addr, 1, OPTAB_WIDEN);
3350 else
3352 if (WORDS_BIG_ENDIAN)
3354 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3355 switch ((int) size)
3357 case 2:
3358 emit_insn (gen_extwh_be (extl, meml, addr));
3359 mode = HImode;
3360 break;
3362 case 4:
3363 emit_insn (gen_extlh_be (extl, meml, addr));
3364 mode = SImode;
3365 break;
3367 case 8:
3368 emit_insn (gen_extqh_be (extl, meml, addr));
3369 mode = DImode;
3370 break;
3372 default:
3373 gcc_unreachable ();
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3377 else
3379 emit_move_insn (addr, plus_constant (mema, ofs));
3380 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3381 switch ((int) size)
3383 case 2:
3384 emit_insn (gen_extwh_le (exth, memh, addr));
3385 mode = HImode;
3386 break;
3388 case 4:
3389 emit_insn (gen_extlh_le (exth, memh, addr));
3390 mode = SImode;
3391 break;
3393 case 8:
3394 emit_insn (gen_extqh_le (exth, memh, addr));
3395 mode = DImode;
3396 break;
3398 default:
3399 gcc_unreachable ();
3403 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3404 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3405 sign, OPTAB_WIDEN);
3408 if (addr != tgt)
3409 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3412 /* Similarly, use ins and msk instructions to perform unaligned stores. */
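/* The little-endian unaligned quadword store, in the spirit of the
   load example above (register choices are illustrative, not taken
   from the original source):

	ldq_u r2,X+7(r11)
	ldq_u r1,X(r11)
	lda r3,X(r11)
	insqh r4,r3,r6
	insql r4,r3,r5
	mskqh r2,r3,r2
	mskql r1,r3,r1
	or r2,r6,r2
	or r1,r5,r1
	stq_u r2,X+7(r11)
	stq_u r1,X(r11)
*/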
3414 void
3415 alpha_expand_unaligned_store (rtx dst, rtx src,
3416 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3418 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3420 if (TARGET_BWX && size == 2)
3422 if (src != const0_rtx)
3424 dstl = gen_lowpart (QImode, src);
3425 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3426 NULL, 1, OPTAB_LIB_WIDEN);
3427 dsth = gen_lowpart (QImode, dsth);
3429 else
3430 dstl = dsth = const0_rtx;
3432 meml = adjust_address (dst, QImode, ofs);
3433 memh = adjust_address (dst, QImode, ofs+1);
3434 if (BYTES_BIG_ENDIAN)
3435 addr = meml, meml = memh, memh = addr;
3437 emit_move_insn (meml, dstl);
3438 emit_move_insn (memh, dsth);
3439 return;
3442 dstl = gen_reg_rtx (DImode);
3443 dsth = gen_reg_rtx (DImode);
3444 insl = gen_reg_rtx (DImode);
3445 insh = gen_reg_rtx (DImode);
3447 dsta = XEXP (dst, 0);
3448 if (GET_CODE (dsta) == LO_SUM)
3449 dsta = force_reg (Pmode, dsta);
3451 /* AND addresses cannot be in any alias set, since they may implicitly
3452 alias surrounding code. Ideally we'd have some alias set that
3453 covered all types except those with alignment 8 or higher. */
3455 meml = change_address (dst, DImode,
3456 gen_rtx_AND (DImode,
3457 plus_constant (dsta, ofs),
3458 GEN_INT (-8)));
3459 set_mem_alias_set (meml, 0);
3461 memh = change_address (dst, DImode,
3462 gen_rtx_AND (DImode,
3463 plus_constant (dsta, ofs + size - 1),
3464 GEN_INT (-8)));
3465 set_mem_alias_set (memh, 0);
3467 emit_move_insn (dsth, memh);
3468 emit_move_insn (dstl, meml);
3469 if (WORDS_BIG_ENDIAN)
3471 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3473 if (src != const0_rtx)
3475 switch ((int) size)
3477 case 2:
3478 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3479 break;
3480 case 4:
3481 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3482 break;
3483 case 8:
3484 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3485 break;
3487 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3488 GEN_INT (size*8), addr));
3491 switch ((int) size)
3493 case 2:
3494 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3495 break;
3496 case 4:
3498 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3499 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3500 break;
3502 case 8:
3503 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3504 break;
3507 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3509 else
3511 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3513 if (src != CONST0_RTX (GET_MODE (src)))
3515 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3516 GEN_INT (size*8), addr));
3518 switch ((int) size)
3520 case 2:
3521 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3522 break;
3523 case 4:
3524 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3525 break;
3526 case 8:
3527 emit_insn (gen_insql_le (insl, src, addr));
3528 break;
3532 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3534 switch ((int) size)
3536 case 2:
3537 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3538 break;
3539 case 4:
3541 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3542 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3543 break;
3545 case 8:
3546 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3547 break;
3551 if (src != CONST0_RTX (GET_MODE (src)))
3553 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3554 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3557 if (WORDS_BIG_ENDIAN)
3559 emit_move_insn (meml, dstl);
3560 emit_move_insn (memh, dsth);
3562 else
3564 /* Must store high before low for degenerate case of aligned. */
3565 emit_move_insn (memh, dsth);
3566 emit_move_insn (meml, dstl);
3570 /* The block move code tries to maximize speed by separating loads and
3571 stores at the expense of register pressure: we load all of the data
3572 before we store it back out. There are two secondary effects worth
3573 mentioning, that this speeds copying to/from aligned and unaligned
3574 buffers, and that it makes the code significantly easier to write. */
3576 #define MAX_MOVE_WORDS 8
3578 /* Load an integral number of consecutive unaligned quadwords. */
3580 static void
3581 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3582 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3584 rtx const im8 = GEN_INT (-8);
3585 rtx const i64 = GEN_INT (64);
3586 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3587 rtx sreg, areg, tmp, smema;
3588 HOST_WIDE_INT i;
3590 smema = XEXP (smem, 0);
3591 if (GET_CODE (smema) == LO_SUM)
3592 smema = force_reg (Pmode, smema);
3594 /* Generate all the tmp registers we need. */
3595 for (i = 0; i < words; ++i)
3597 data_regs[i] = out_regs[i];
3598 ext_tmps[i] = gen_reg_rtx (DImode);
3600 data_regs[words] = gen_reg_rtx (DImode);
3602 if (ofs != 0)
3603 smem = adjust_address (smem, GET_MODE (smem), ofs);
3605 /* Load up all of the source data. */
3606 for (i = 0; i < words; ++i)
3608 tmp = change_address (smem, DImode,
3609 gen_rtx_AND (DImode,
3610 plus_constant (smema, 8*i),
3611 im8));
3612 set_mem_alias_set (tmp, 0);
3613 emit_move_insn (data_regs[i], tmp);
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*words - 1),
3619 im8));
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[words], tmp);
3623 /* Extract the half-word fragments. Unfortunately DEC decided to make
3624 extxh with offset zero a noop instead of zeroing the register, so
3625 we must take care of that edge condition ourselves with cmov. */
3627 sreg = copy_addr_to_reg (smema);
3628 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3629 1, OPTAB_WIDEN);
3630 if (WORDS_BIG_ENDIAN)
3631 emit_move_insn (sreg, plus_constant (sreg, 7));
3632 for (i = 0; i < words; ++i)
3634 if (WORDS_BIG_ENDIAN)
3636 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3637 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3639 else
3641 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3642 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3644 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3645 gen_rtx_IF_THEN_ELSE (DImode,
3646 gen_rtx_EQ (DImode, areg,
3647 const0_rtx),
3648 const0_rtx, ext_tmps[i])));
3651 /* Merge the half-words into whole words. */
3652 for (i = 0; i < words; ++i)
3654 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3655 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3659 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3660 may be NULL to store zeros. */
3662 static void
3663 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3664 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3666 rtx const im8 = GEN_INT (-8);
3667 rtx const i64 = GEN_INT (64);
3668 rtx ins_tmps[MAX_MOVE_WORDS];
3669 rtx st_tmp_1, st_tmp_2, dreg;
3670 rtx st_addr_1, st_addr_2, dmema;
3671 HOST_WIDE_INT i;
3673 dmema = XEXP (dmem, 0);
3674 if (GET_CODE (dmema) == LO_SUM)
3675 dmema = force_reg (Pmode, dmema);
3677 /* Generate all the tmp registers we need. */
3678 if (data_regs != NULL)
3679 for (i = 0; i < words; ++i)
3680 ins_tmps[i] = gen_reg_rtx(DImode);
3681 st_tmp_1 = gen_reg_rtx(DImode);
3682 st_tmp_2 = gen_reg_rtx(DImode);
3684 if (ofs != 0)
3685 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3687 st_addr_2 = change_address (dmem, DImode,
3688 gen_rtx_AND (DImode,
3689 plus_constant (dmema, words*8 - 1),
3690 im8));
3691 set_mem_alias_set (st_addr_2, 0);
3693 st_addr_1 = change_address (dmem, DImode,
3694 gen_rtx_AND (DImode, dmema, im8));
3695 set_mem_alias_set (st_addr_1, 0);
3697 /* Load up the destination end bits. */
3698 emit_move_insn (st_tmp_2, st_addr_2);
3699 emit_move_insn (st_tmp_1, st_addr_1);
3701 /* Shift the input data into place. */
3702 dreg = copy_addr_to_reg (dmema);
3703 if (WORDS_BIG_ENDIAN)
3704 emit_move_insn (dreg, plus_constant (dreg, 7));
3705 if (data_regs != NULL)
3707 for (i = words-1; i >= 0; --i)
3709 if (WORDS_BIG_ENDIAN)
3711 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3712 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3714 else
3716 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3720 for (i = words-1; i > 0; --i)
3722 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3723 ins_tmps[i-1], ins_tmps[i-1], 1,
3724 OPTAB_WIDEN);
3728 /* Split and merge the ends with the destination data. */
3729 if (WORDS_BIG_ENDIAN)
3731 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3732 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3734 else
3736 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3737 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3740 if (data_regs != NULL)
3742 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3743 st_tmp_2, 1, OPTAB_WIDEN);
3744 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3745 st_tmp_1, 1, OPTAB_WIDEN);
3748 /* Store it all. */
3749 if (WORDS_BIG_ENDIAN)
3750 emit_move_insn (st_addr_1, st_tmp_1);
3751 else
3752 emit_move_insn (st_addr_2, st_tmp_2);
3753 for (i = words-1; i > 0; --i)
3755 rtx tmp = change_address (dmem, DImode,
3756 gen_rtx_AND (DImode,
3757 plus_constant(dmema,
3758 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3759 im8));
3760 set_mem_alias_set (tmp, 0);
3761 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (st_addr_2, st_tmp_2);
3765 else
3766 emit_move_insn (st_addr_1, st_tmp_1);
3770 /* Expand string/block move operations.
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3778 alpha_expand_block_move (rtx operands[])
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3789 rtx tmp;
3790 unsigned int i, words, ofs, nregs = 0;
3792 if (orig_bytes <= 0)
3793 return 1;
3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3795 return 0;
3797 /* Look for additional alignment information from recorded register info. */
3799 tmp = XEXP (orig_src, 0);
3800 if (GET_CODE (tmp) == REG)
3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3802 else if (GET_CODE (tmp) == PLUS
3803 && GET_CODE (XEXP (tmp, 0)) == REG
3804 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3809 if (a > src_align)
3811 if (a >= 64 && c % 8 == 0)
3812 src_align = 64;
3813 else if (a >= 32 && c % 4 == 0)
3814 src_align = 32;
3815 else if (a >= 16 && c % 2 == 0)
3816 src_align = 16;
3820 tmp = XEXP (orig_dst, 0);
3821 if (GET_CODE (tmp) == REG)
3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && GET_CODE (XEXP (tmp, 0)) == REG
3825 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3830 if (a > dst_align)
3832 if (a >= 64 && c % 8 == 0)
3833 dst_align = 64;
3834 else if (a >= 32 && c % 4 == 0)
3835 dst_align = 32;
3836 else if (a >= 16 && c % 2 == 0)
3837 dst_align = 16;
3841 ofs = 0;
3842 if (src_align >= 64 && bytes >= 8)
3844 words = bytes / 8;
3846 for (i = 0; i < words; ++i)
3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
3849 for (i = 0; i < words; ++i)
3850 emit_move_insn (data_regs[nregs + i],
3851 adjust_address (orig_src, DImode, ofs + i * 8));
3853 nregs += words;
3854 bytes -= words * 8;
3855 ofs += words * 8;
3858 if (src_align >= 32 && bytes >= 4)
3860 words = bytes / 4;
3862 for (i = 0; i < words; ++i)
3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
3865 for (i = 0; i < words; ++i)
3866 emit_move_insn (data_regs[nregs + i],
3867 adjust_address (orig_src, SImode, ofs + i * 4));
3869 nregs += words;
3870 bytes -= words * 4;
3871 ofs += words * 4;
3874 if (bytes >= 8)
3876 words = bytes / 8;
3878 for (i = 0; i < words+1; ++i)
3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3882 words, ofs);
3884 nregs += words;
3885 bytes -= words * 8;
3886 ofs += words * 8;
3889 if (! TARGET_BWX && bytes >= 4)
3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3893 bytes -= 4;
3894 ofs += 4;
3897 if (bytes >= 2)
3899 if (src_align >= 16)
3901 do {
3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3904 bytes -= 2;
3905 ofs += 2;
3906 } while (bytes >= 2);
3908 else if (! TARGET_BWX)
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3912 bytes -= 2;
3913 ofs += 2;
3917 while (bytes > 0)
3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3921 bytes -= 1;
3922 ofs += 1;
3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3927 /* Now save it back out again. */
3929 i = 0, ofs = 0;
3931 /* Write out the data in whatever chunks reading the source allowed. */
3932 if (dst_align >= 64)
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3937 data_regs[i]);
3938 ofs += 8;
3939 i++;
3943 if (dst_align >= 32)
3945 /* If the source has remaining DImode regs, write them out in
3946 two pieces. */
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3953 gen_lowpart (SImode, data_regs[i]));
3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3955 gen_lowpart (SImode, tmp));
3956 ofs += 8;
3957 i++;
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3963 data_regs[i]);
3964 ofs += 4;
3965 i++;
3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3971 /* Write out a remaining block of words using unaligned methods. */
3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
3975 break;
3977 if (words == 1)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3979 else
3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3981 words, ofs);
3983 i += words;
3984 ofs += words * 8;
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3993 ofs += 4;
3994 i++;
3997 if (dst_align >= 16)
3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4001 i++;
4002 ofs += 2;
4004 else
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4008 i++;
4009 ofs += 2;
4012 /* The remainder must be byte copies. */
4013 while (i < nregs)
4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4017 i++;
4018 ofs += 1;
4021 return 1;
4025 alpha_expand_block_clear (rtx operands[])
4027 rtx bytes_rtx = operands[1];
4028 rtx align_rtx = operands[3];
4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
4033 rtx orig_dst = operands[0];
4034 rtx tmp;
4035 int i, words, ofs = 0;
4037 if (orig_bytes <= 0)
4038 return 1;
4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4040 return 0;
4042 /* Look for stricter alignment. */
4043 tmp = XEXP (orig_dst, 0);
4044 if (GET_CODE (tmp) == REG)
4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4046 else if (GET_CODE (tmp) == PLUS
4047 && GET_CODE (XEXP (tmp, 0)) == REG
4048 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4053 if (a > align)
4055 if (a >= 64)
4056 align = a, alignofs = 8 - c % 8;
4057 else if (a >= 32)
4058 align = a, alignofs = 4 - c % 4;
4059 else if (a >= 16)
4060 align = a, alignofs = 2 - c % 2;
4064 /* Handle an unaligned prefix first. */
4066 if (alignofs > 0)
4068 #if HOST_BITS_PER_WIDE_INT >= 64
4069 /* Given that alignofs is bounded by align, the only time BWX could
4070 generate three stores is for a 7 byte fill. Prefer two individual
4071 stores over a load/mask/store sequence. */
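/* Illustrative numbers: clearing a block starting at reg+13, with reg
   known 8-byte aligned and without BWX, gives alignofs = 3; below we
   get inv_alignofs = 5, load the quadword at reg+8, AND it with
   ~(~0 << 40) = 0x000000ffffffffff to zero its top three bytes, and
   store it back (assuming at least three bytes are being cleared).  */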
4072 if ((!TARGET_BWX || alignofs == 7)
4073 && align >= 32
4074 && !(alignofs == 4 && bytes >= 4))
4076 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4077 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4078 rtx mem, tmp;
4079 HOST_WIDE_INT mask;
4081 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4082 set_mem_alias_set (mem, 0);
4084 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4085 if (bytes < alignofs)
4087 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4088 ofs += bytes;
4089 bytes = 0;
4091 else
4093 bytes -= alignofs;
4094 ofs += alignofs;
4096 alignofs = 0;
4098 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4099 NULL_RTX, 1, OPTAB_WIDEN);
4101 emit_move_insn (mem, tmp);
4103 #endif
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4108 bytes -= 1;
4109 ofs += 1;
4110 alignofs -= 1;
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4115 bytes -= 2;
4116 ofs += 2;
4117 alignofs -= 2;
4119 if (alignofs == 4 && bytes >= 4)
4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4122 bytes -= 4;
4123 ofs += 4;
4124 alignofs = 0;
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4129 if (alignofs > 0)
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4136 /* Handle a block of contiguous long-words. */
4138 if (align >= 64 && bytes >= 8)
4140 words = bytes / 8;
4142 for (i = 0; i < words; ++i)
4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4144 const0_rtx);
4146 bytes -= words * 8;
4147 ofs += words * 8;
4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4153 if (align >= 32 && bytes > 16)
4155 rtx orig_dsta;
4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4158 bytes -= 4;
4159 ofs += 4;
4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4165 words = bytes / 8;
4166 for (i = 0; i < words; ++i)
4168 rtx mem
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
4171 plus_constant (orig_dsta, ofs + i*8),
4172 GEN_INT (-8)));
4173 set_mem_alias_set (mem, 0);
4174 emit_move_insn (mem, const0_rtx);
4177 /* Depending on the alignment, the first stq_u may have overlapped
4178 with the initial stl, which means that the last stq_u didn't
4179 write as much as it would appear. Leave those questionable bytes
4180 unaccounted for. */
4181 bytes -= words * 8 - 4;
4182 ofs += words * 8 - 4;
4185 /* Handle a smaller block of aligned words. */
4187 if ((align >= 64 && bytes == 4)
4188 || (align == 32 && bytes >= 4))
4190 words = bytes / 4;
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4194 const0_rtx);
4196 bytes -= words * 4;
4197 ofs += words * 4;
4200 /* An unaligned block uses stq_u stores for as many as possible. */
4202 if (bytes >= 8)
4204 words = bytes / 8;
4206 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4208 bytes -= words * 8;
4209 ofs += words * 8;
4212 /* Next clean up any trailing pieces. */
4214 #if HOST_BITS_PER_WIDE_INT >= 64
4215 /* Count the number of bits in BYTES for which aligned stores could
4216 be emitted. */
4217 words = 0;
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4219 if (bytes & i)
4220 words += 1;
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4226 if (align >= 64)
4228 rtx mem, tmp;
4229 HOST_WIDE_INT mask;
4231 mem = adjust_address (orig_dst, DImode, ofs);
4232 set_mem_alias_set (mem, 0);
4234 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4239 emit_move_insn (mem, tmp);
4240 return 1;
4242 else if (align >= 32 && bytes < 4)
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4247 mem = adjust_address (orig_dst, SImode, ofs);
4248 set_mem_alias_set (mem, 0);
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4255 emit_move_insn (mem, tmp);
4256 return 1;
4259 #endif
4261 if (!TARGET_BWX && bytes >= 4)
4263 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4264 bytes -= 4;
4265 ofs += 4;
4268 if (bytes >= 2)
4270 if (align >= 16)
4272 do {
4273 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4274 const0_rtx);
4275 bytes -= 2;
4276 ofs += 2;
4277 } while (bytes >= 2);
4279 else if (! TARGET_BWX)
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4282 bytes -= 2;
4283 ofs += 2;
4287 while (bytes > 0)
4289 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4290 bytes -= 1;
4291 ofs += 1;
4294 return 1;
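/* Illustration only: the unaligned-prefix case above clears bytes by
   loading the containing aligned word, ANDing it with a keep-mask, and
   storing it back.  A host-side sketch of that mask, assuming 8-bit
   bytes and a 64-bit HOST_WIDE_INT; the helper name is hypothetical.  */

static unsigned long long
block_clear_prefix_mask (int inv_alignofs, int alignofs, long long bytes)
{
  /* Keep the INV_ALIGNOFS bytes that precede the area being cleared.  */
  unsigned long long mask = ~(~0ULL << (inv_alignofs * 8));

  /* If the request ends inside this word, also keep the tail bytes.  */
  if (bytes < alignofs)
    mask |= ~0ULL << ((inv_alignofs + bytes) * 8);

  return mask;
}

/* E.g. clearing 2 bytes at byte offset 5 of an aligned quadword gives
   inv_alignofs == 5 and a mask of 0xff0000ffffffffff: bytes 0-4 and 7
   survive the AND, bytes 5 and 6 become zero.  */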
4297 /* Returns a mask so that zap(x, value) == x & mask. */
4300 alpha_expand_zap_mask (HOST_WIDE_INT value)
4302 rtx result;
4303 int i;
4305 if (HOST_BITS_PER_WIDE_INT >= 64)
4307 HOST_WIDE_INT mask = 0;
4309 for (i = 7; i >= 0; --i)
4311 mask <<= 8;
4312 if (!((value >> i) & 1))
4313 mask |= 0xff;
4316 result = gen_int_mode (mask, DImode);
4318 else
4320 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4322 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4324 for (i = 7; i >= 4; --i)
4326 mask_hi <<= 8;
4327 if (!((value >> i) & 1))
4328 mask_hi |= 0xff;
4331 for (i = 3; i >= 0; --i)
4333 mask_lo <<= 8;
4334 if (!((value >> i) & 1))
4335 mask_lo |= 0xff;
4338 result = immed_double_const (mask_lo, mask_hi, DImode);
4341 return result;
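/* Illustration only: a host-side model of the mask built above, assuming
   a 64-bit integer type; the helper name is hypothetical.  Byte I of the
   result is 0xff exactly when bit I of VALUE is clear, which is why
   zap (x, value) == x & mask.  */

static unsigned long long
zap_mask_model (unsigned long long value)
{
  unsigned long long mask = 0;
  int i;

  for (i = 0; i < 8; i++)
    if (!((value >> i) & 1))
      mask |= 0xffULL << (i * 8);

  return mask;
}

/* E.g. zap_mask_model (0x0f) == 0xffffffff00000000: the low four bytes
   are zapped, the high four pass through unchanged.  */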
4344 void
4345 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4346 enum machine_mode mode,
4347 rtx op0, rtx op1, rtx op2)
4349 op0 = gen_lowpart (mode, op0);
4351 if (op1 == const0_rtx)
4352 op1 = CONST0_RTX (mode);
4353 else
4354 op1 = gen_lowpart (mode, op1);
4356 if (op2 == const0_rtx)
4357 op2 = CONST0_RTX (mode);
4358 else
4359 op2 = gen_lowpart (mode, op2);
4361 emit_insn ((*gen) (op0, op1, op2));
4364 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4365 COND is true. Mark the jump as unlikely to be taken. */
4367 static void
4368 emit_unlikely_jump (rtx cond, rtx label)
4370 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4371 rtx x;
4373 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4374 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4375 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4378 /* A subroutine of the atomic operation splitters. Emit a load-locked
4379 instruction in MODE. */
4381 static void
4382 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4384 rtx (*fn) (rtx, rtx) = NULL;
4385 if (mode == SImode)
4386 fn = gen_load_locked_si;
4387 else if (mode == DImode)
4388 fn = gen_load_locked_di;
4389 emit_insn (fn (reg, mem));
4392 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4393 instruction in MODE. */
4395 static void
4396 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4398 rtx (*fn) (rtx, rtx, rtx) = NULL;
4399 if (mode == SImode)
4400 fn = gen_store_conditional_si;
4401 else if (mode == DImode)
4402 fn = gen_store_conditional_di;
4403 emit_insn (fn (res, mem, val));
4406 /* A subroutine of the atomic operation splitters. Emit an insxl
4407 instruction in MODE. */
4409 static rtx
4410 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4412 rtx ret = gen_reg_rtx (DImode);
4413 rtx (*fn) (rtx, rtx, rtx);
4415 if (WORDS_BIG_ENDIAN)
4417 if (mode == QImode)
4418 fn = gen_insbl_be;
4419 else
4420 fn = gen_inswl_be;
4422 else
4424 if (mode == QImode)
4425 fn = gen_insbl_le;
4426 else
4427 fn = gen_inswl_le;
4429 /* The insbl and inswl patterns require a register operand. */
4430 op1 = force_reg (mode, op1);
4431 emit_insn (fn (ret, op1, op2));
4433 return ret;
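/* Illustration only: what the little-endian insbl/inswl emitted above
   compute, modelled on the host with 64-bit integers; the helper name is
   hypothetical.  Only the low three bits of ADDR (the byte lane) matter,
   and any part of the field that would extend past byte 7 is dropped,
   as it is for the "low" insert instructions.  */

static unsigned long long
insxl_model (unsigned long long val, unsigned long long addr, int size)
{
  unsigned long long field = (size == 1 ? val & 0xff : val & 0xffff);
  return field << ((addr & 7) * 8);
}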
4436 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4437 to perform. MEM is the memory on which to operate. VAL is the second
4438 operand of the binary operator. BEFORE and AFTER are optional locations to
4439 return the value of MEM either before or after the operation. SCRATCH is
4440 a scratch register. */
4442 void
4443 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4444 rtx before, rtx after, rtx scratch)
4446 enum machine_mode mode = GET_MODE (mem);
4447 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4449 emit_insn (gen_memory_barrier ());
4451 label = gen_label_rtx ();
4452 emit_label (label);
4453 label = gen_rtx_LABEL_REF (DImode, label);
4455 if (before == NULL)
4456 before = scratch;
4457 emit_load_locked (mode, before, mem);
4459 if (code == NOT)
4460 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4461 else
4462 x = gen_rtx_fmt_ee (code, mode, before, val);
4463 if (after)
4464 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4465 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4467 emit_store_conditional (mode, cond, mem, scratch);
4469 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4470 emit_unlikely_jump (x, label);
4472 emit_insn (gen_memory_barrier ());
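/* Roughly, for a DImode operand the split above expands to a sequence of
   this shape (SImode uses ldl_l/stl_c instead); this is a sketch of the
   emitted pattern, not literal assembler output:

        mb
   1:   ldq_l   before, 0(mem)
        <op>    scratch, before, val      # AFTER, if requested, gets a copy
        stq_c   scratch, 0(mem)
        beq     scratch, 1b               # retry branch, marked very unlikely
        mb                                                                  */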
4475 /* Expand a compare and swap operation. */
4477 void
4478 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4479 rtx scratch)
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4484 emit_insn (gen_memory_barrier ());
4486 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4487 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 emit_label (XEXP (label1, 0));
4490 emit_load_locked (mode, retval, mem);
4492 x = gen_lowpart (DImode, retval);
4493 if (oldval == const0_rtx)
4494 x = gen_rtx_NE (DImode, x, const0_rtx);
4495 else
4497 x = gen_rtx_EQ (DImode, x, oldval);
4498 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4499 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4501 emit_unlikely_jump (x, label2);
4503 emit_move_insn (scratch, newval);
4504 emit_store_conditional (mode, cond, mem, scratch);
4506 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4507 emit_unlikely_jump (x, label1);
4509 emit_insn (gen_memory_barrier ());
4510 emit_label (XEXP (label2, 0));
4513 void
4514 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4516 enum machine_mode mode = GET_MODE (mem);
4517 rtx addr, align, wdst;
4518 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4520 addr = force_reg (DImode, XEXP (mem, 0));
4521 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4522 NULL_RTX, 1, OPTAB_DIRECT);
4524 oldval = convert_modes (DImode, mode, oldval, 1);
4525 newval = emit_insxl (mode, newval, addr);
4527 wdst = gen_reg_rtx (DImode);
4528 if (mode == QImode)
4529 fn5 = gen_sync_compare_and_swapqi_1;
4530 else
4531 fn5 = gen_sync_compare_and_swaphi_1;
4532 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4534 emit_move_insn (dst, gen_lowpart (mode, wdst));
4537 void
4538 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4539 rtx oldval, rtx newval, rtx align,
4540 rtx scratch, rtx cond)
4542 rtx label1, label2, mem, width, mask, x;
4544 mem = gen_rtx_MEM (DImode, align);
4545 MEM_VOLATILE_P (mem) = 1;
4547 emit_insn (gen_memory_barrier ());
4548 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4549 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 emit_label (XEXP (label1, 0));
4552 emit_load_locked (DImode, scratch, mem);
4554 width = GEN_INT (GET_MODE_BITSIZE (mode));
4555 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4556 if (WORDS_BIG_ENDIAN)
4557 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4558 else
4559 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4561 if (oldval == const0_rtx)
4562 x = gen_rtx_NE (DImode, dest, const0_rtx);
4563 else
4565 x = gen_rtx_EQ (DImode, dest, oldval);
4566 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4567 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4569 emit_unlikely_jump (x, label2);
4571 if (WORDS_BIG_ENDIAN)
4572 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4573 else
4574 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4575 emit_insn (gen_iordi3 (scratch, scratch, newval));
4577 emit_store_conditional (DImode, scratch, mem, scratch);
4579 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4580 emit_unlikely_jump (x, label1);
4582 emit_insn (gen_memory_barrier ());
4583 emit_label (XEXP (label2, 0));
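/* Illustration only: a host-side model of the little-endian byte-lane
   arithmetic performed above on the containing aligned quadword, assuming
   QImode and 64-bit integers; the helper name is hypothetical.  The real
   code does the same with extbl (extract), mskbl (clear) and a final OR
   of the insbl-formed NEWVAL before the stq_c.  */

static int
cas_byte_model (unsigned long long *quad, unsigned long long addr,
                unsigned char oldval, unsigned char newval)
{
  int shift = (addr & 7) * 8;
  unsigned long long lane = (*quad >> shift) & 0xff;   /* extbl */

  if (lane != oldval)
    return 0;                                          /* compare failed */

  *quad &= ~(0xffULL << shift);                        /* mskbl */
  *quad |= (unsigned long long) newval << shift;       /* OR in the new lane */
  return 1;                                            /* value goes to stq_c */
}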
4586 /* Expand an atomic exchange operation. */
4588 void
4589 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4591 enum machine_mode mode = GET_MODE (mem);
4592 rtx label, x, cond = gen_lowpart (DImode, scratch);
4594 emit_insn (gen_memory_barrier ());
4596 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 emit_label (XEXP (label, 0));
4599 emit_load_locked (mode, retval, mem);
4600 emit_move_insn (scratch, val);
4601 emit_store_conditional (mode, cond, mem, scratch);
4603 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4604 emit_unlikely_jump (x, label);
4607 void
4608 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4610 enum machine_mode mode = GET_MODE (mem);
4611 rtx addr, align, wdst;
4612 rtx (*fn4) (rtx, rtx, rtx, rtx);
4614 /* Force the address into a register. */
4615 addr = force_reg (DImode, XEXP (mem, 0));
4617 /* Align it to a multiple of 8. */
4618 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4619 NULL_RTX, 1, OPTAB_DIRECT);
4621 /* Insert val into the correct byte location within the word. */
4622 val = emit_insxl (mode, val, addr);
4624 wdst = gen_reg_rtx (DImode);
4625 if (mode == QImode)
4626 fn4 = gen_sync_lock_test_and_setqi_1;
4627 else
4628 fn4 = gen_sync_lock_test_and_sethi_1;
4629 emit_insn (fn4 (wdst, addr, val, align));
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4634 void
4635 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4636 rtx val, rtx align, rtx scratch)
4638 rtx label, mem, width, mask, x;
4640 mem = gen_rtx_MEM (DImode, align);
4641 MEM_VOLATILE_P (mem) = 1;
4643 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4647 emit_load_locked (DImode, scratch, mem);
4649 width = GEN_INT (GET_MODE_BITSIZE (mode));
4650 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4651 if (WORDS_BIG_ENDIAN)
4653 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4656 else
4658 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4659 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4661 emit_insn (gen_iordi3 (scratch, scratch, val));
4663 emit_store_conditional (DImode, scratch, mem, scratch);
4665 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4666 emit_unlikely_jump (x, label);
4669 /* Adjust the cost of a scheduling dependency. Return the new cost of
4670 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4672 static int
4673 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4675 enum attr_type insn_type, dep_insn_type;
4677 /* If the dependence is an anti-dependence, there is no cost. For an
4678 output dependence, there is sometimes a cost, but it doesn't seem
4679 worth handling those few cases. */
4680 if (REG_NOTE_KIND (link) != 0)
4681 return cost;
4683 /* If we can't recognize the insns, we can't really do anything. */
4684 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4685 return cost;
4687 insn_type = get_attr_type (insn);
4688 dep_insn_type = get_attr_type (dep_insn);
4690 /* Bring in the user-defined memory latency. */
4691 if (dep_insn_type == TYPE_ILD
4692 || dep_insn_type == TYPE_FLD
4693 || dep_insn_type == TYPE_LDSYM)
4694 cost += alpha_memory_latency-1;
4696 /* Everything else handled in DFA bypasses now. */
4698 return cost;
4701 /* The number of instructions that can be issued per cycle. */
4703 static int
4704 alpha_issue_rate (void)
4706 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4709 /* How many alternative schedules to try. This should be as wide as the
4710 scheduling freedom in the DFA, but no wider. Making this value too
4711 large results in extra work for the scheduler.
4713 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4714 alternative schedules. For EV5, we can choose between E0/E1 and
4715 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4717 static int
4718 alpha_multipass_dfa_lookahead (void)
4720 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4723 /* Machine-specific function data. */
4725 struct machine_function GTY(())
4727 /* For unicosmk. */
4728 /* List of call information words for calls from this function. */
4729 struct rtx_def *first_ciw;
4730 struct rtx_def *last_ciw;
4731 int ciw_count;
4733 /* List of deferred case vectors. */
4734 struct rtx_def *addr_list;
4736 /* For OSF. */
4737 const char *some_ld_name;
4739 /* For TARGET_LD_BUGGY_LDGP. */
4740 struct rtx_def *gp_save_rtx;
4743 /* How to allocate a 'struct machine_function'. */
4745 static struct machine_function *
4746 alpha_init_machine_status (void)
4748 return ((struct machine_function *)
4749 ggc_alloc_cleared (sizeof (struct machine_function)));
4752 /* Functions to save and restore alpha_return_addr_rtx. */
4754 /* Start the ball rolling with RETURN_ADDR_RTX. */
4757 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4759 if (count != 0)
4760 return const0_rtx;
4762 return get_hard_reg_initial_val (Pmode, REG_RA);
4765 /* Return or create a memory slot containing the gp value for the current
4766 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4769 alpha_gp_save_rtx (void)
4771 rtx seq, m = cfun->machine->gp_save_rtx;
4773 if (m == NULL)
4775 start_sequence ();
4777 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778 m = validize_mem (m);
4779 emit_move_insn (m, pic_offset_table_rtx);
4781 seq = get_insns ();
4782 end_sequence ();
4784 /* We used to simply emit the sequence after entry_of_function.
4785 However this breaks the CFG if the first instruction in the
4786 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4787 label. Emit the sequence properly on the edge. We are only
4788 invoked from dw2_build_landing_pads and finish_eh_generation
4789 will call commit_edge_insertions thanks to a kludge. */
4790 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4792 cfun->machine->gp_save_rtx = m;
4795 return m;
4798 static int
4799 alpha_ra_ever_killed (void)
4801 rtx top;
4803 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4804 return (int)df_regs_ever_live_p (REG_RA);
4806 push_topmost_sequence ();
4807 top = get_insns ();
4808 pop_topmost_sequence ();
4810 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4814 /* Return the trap mode suffix applicable to the current
4815 instruction, or NULL. */
4817 static const char *
4818 get_trap_mode_suffix (void)
4820 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4822 switch (s)
4824 case TRAP_SUFFIX_NONE:
4825 return NULL;
4827 case TRAP_SUFFIX_SU:
4828 if (alpha_fptm >= ALPHA_FPTM_SU)
4829 return "su";
4830 return NULL;
4832 case TRAP_SUFFIX_SUI:
4833 if (alpha_fptm >= ALPHA_FPTM_SUI)
4834 return "sui";
4835 return NULL;
4837 case TRAP_SUFFIX_V_SV:
4838 switch (alpha_fptm)
4840 case ALPHA_FPTM_N:
4841 return NULL;
4842 case ALPHA_FPTM_U:
4843 return "v";
4844 case ALPHA_FPTM_SU:
4845 case ALPHA_FPTM_SUI:
4846 return "sv";
4847 default:
4848 gcc_unreachable ();
4851 case TRAP_SUFFIX_V_SV_SVI:
4852 switch (alpha_fptm)
4854 case ALPHA_FPTM_N:
4855 return NULL;
4856 case ALPHA_FPTM_U:
4857 return "v";
4858 case ALPHA_FPTM_SU:
4859 return "sv";
4860 case ALPHA_FPTM_SUI:
4861 return "svi";
4862 default:
4863 gcc_unreachable ();
4865 break;
4867 case TRAP_SUFFIX_U_SU_SUI:
4868 switch (alpha_fptm)
4870 case ALPHA_FPTM_N:
4871 return NULL;
4872 case ALPHA_FPTM_U:
4873 return "u";
4874 case ALPHA_FPTM_SU:
4875 return "su";
4876 case ALPHA_FPTM_SUI:
4877 return "sui";
4878 default:
4879 gcc_unreachable ();
4881 break;
4883 default:
4884 gcc_unreachable ();
4886 gcc_unreachable ();
4889 /* Return the rounding mode suffix applicable to the current
4890 instruction, or NULL. */
4892 static const char *
4893 get_round_mode_suffix (void)
4895 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4897 switch (s)
4899 case ROUND_SUFFIX_NONE:
4900 return NULL;
4901 case ROUND_SUFFIX_NORMAL:
4902 switch (alpha_fprm)
4904 case ALPHA_FPRM_NORM:
4905 return NULL;
4906 case ALPHA_FPRM_MINF:
4907 return "m";
4908 case ALPHA_FPRM_CHOP:
4909 return "c";
4910 case ALPHA_FPRM_DYN:
4911 return "d";
4912 default:
4913 gcc_unreachable ();
4915 break;
4917 case ROUND_SUFFIX_C:
4918 return "c";
4920 default:
4921 gcc_unreachable ();
4923 gcc_unreachable ();
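/* As an example of how the two tables above combine: with
   -mfp-trap-mode=su and -mfp-rounding-mode=d, an instruction whose
   pattern allows both suffixes is printed by the %/ operand code below
   as, e.g., "addt/sud" (trap-mode suffix first, then rounding suffix),
   assuming an assembler that wants the slash separator.  */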
4926 /* Locate some local-dynamic symbol still in use by this function
4927 so that we can print its name in some movdi_er_tlsldm pattern. */
4929 static int
4930 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4932 rtx x = *px;
4934 if (GET_CODE (x) == SYMBOL_REF
4935 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4937 cfun->machine->some_ld_name = XSTR (x, 0);
4938 return 1;
4941 return 0;
4944 static const char *
4945 get_some_local_dynamic_name (void)
4947 rtx insn;
4949 if (cfun->machine->some_ld_name)
4950 return cfun->machine->some_ld_name;
4952 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4953 if (INSN_P (insn)
4954 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4955 return cfun->machine->some_ld_name;
4957 gcc_unreachable ();
4960 /* Print an operand. Recognize special options, documented below. */
4962 void
4963 print_operand (FILE *file, rtx x, int code)
4965 int i;
4967 switch (code)
4969 case '~':
4970 /* Print the assembler name of the current function. */
4971 assemble_name (file, alpha_fnname);
4972 break;
4974 case '&':
4975 assemble_name (file, get_some_local_dynamic_name ());
4976 break;
4978 case '/':
4980 const char *trap = get_trap_mode_suffix ();
4981 const char *round = get_round_mode_suffix ();
4983 if (trap || round)
4984 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4985 (trap ? trap : ""), (round ? round : ""));
4986 break;
4989 case ',':
4990 /* Generates single precision instruction suffix. */
4991 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4992 break;
4994 case '-':
4995 /* Generates double precision instruction suffix. */
4996 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4997 break;
4999 case '#':
5000 if (alpha_this_literal_sequence_number == 0)
5001 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5002 fprintf (file, "%d", alpha_this_literal_sequence_number);
5003 break;
5005 case '*':
5006 if (alpha_this_gpdisp_sequence_number == 0)
5007 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5008 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5009 break;
5011 case 'H':
5012 if (GET_CODE (x) == HIGH)
5013 output_addr_const (file, XEXP (x, 0));
5014 else
5015 output_operand_lossage ("invalid %%H value");
5016 break;
5018 case 'J':
5020 const char *lituse;
5022 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5024 x = XVECEXP (x, 0, 0);
5025 lituse = "lituse_tlsgd";
5027 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5029 x = XVECEXP (x, 0, 0);
5030 lituse = "lituse_tlsldm";
5032 else if (GET_CODE (x) == CONST_INT)
5033 lituse = "lituse_jsr";
5034 else
5036 output_operand_lossage ("invalid %%J value");
5037 break;
5040 if (x != const0_rtx)
5041 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5043 break;
5045 case 'j':
5047 const char *lituse;
5049 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5050 lituse = "lituse_jsrdirect";
5051 #else
5052 lituse = "lituse_jsr";
5053 #endif
5055 gcc_assert (INTVAL (x) != 0);
5056 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5058 break;
5059 case 'r':
5060 /* If this operand is the constant zero, write it as "$31". */
5061 if (GET_CODE (x) == REG)
5062 fprintf (file, "%s", reg_names[REGNO (x)]);
5063 else if (x == CONST0_RTX (GET_MODE (x)))
5064 fprintf (file, "$31");
5065 else
5066 output_operand_lossage ("invalid %%r value");
5067 break;
5069 case 'R':
5070 /* Similar, but for floating-point. */
5071 if (GET_CODE (x) == REG)
5072 fprintf (file, "%s", reg_names[REGNO (x)]);
5073 else if (x == CONST0_RTX (GET_MODE (x)))
5074 fprintf (file, "$f31");
5075 else
5076 output_operand_lossage ("invalid %%R value");
5077 break;
5079 case 'N':
5080 /* Write the 1's complement of a constant. */
5081 if (GET_CODE (x) != CONST_INT)
5082 output_operand_lossage ("invalid %%N value");
5084 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5085 break;
5087 case 'P':
5088 /* Write 1 << C, for a constant C. */
5089 if (GET_CODE (x) != CONST_INT)
5090 output_operand_lossage ("invalid %%P value");
5092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5093 break;
5095 case 'h':
5096 /* Write the high-order 16 bits of a constant, sign-extended. */
5097 if (GET_CODE (x) != CONST_INT)
5098 output_operand_lossage ("invalid %%h value");
5100 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5101 break;
5103 case 'L':
5104 /* Write the low-order 16 bits of a constant, sign-extended. */
5105 if (GET_CODE (x) != CONST_INT)
5106 output_operand_lossage ("invalid %%L value");
5108 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5109 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5110 break;
5112 case 'm':
5113 /* Write mask for ZAP insn. */
5114 if (GET_CODE (x) == CONST_DOUBLE)
5116 HOST_WIDE_INT mask = 0;
5117 HOST_WIDE_INT value;
5119 value = CONST_DOUBLE_LOW (x);
5120 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5121 i++, value >>= 8)
5122 if (value & 0xff)
5123 mask |= (1 << i);
5125 value = CONST_DOUBLE_HIGH (x);
5126 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5127 i++, value >>= 8)
5128 if (value & 0xff)
5129 mask |= (1 << (i + sizeof (int)));
5131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5134 else if (GET_CODE (x) == CONST_INT)
5136 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5138 for (i = 0; i < 8; i++, value >>= 8)
5139 if (value & 0xff)
5140 mask |= (1 << i);
5142 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5144 else
5145 output_operand_lossage ("invalid %%m value");
5146 break;
5148 case 'M':
5149 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5150 if (GET_CODE (x) != CONST_INT
5151 || (INTVAL (x) != 8 && INTVAL (x) != 16
5152 && INTVAL (x) != 32 && INTVAL (x) != 64))
5153 output_operand_lossage ("invalid %%M value");
5155 fprintf (file, "%s",
5156 (INTVAL (x) == 8 ? "b"
5157 : INTVAL (x) == 16 ? "w"
5158 : INTVAL (x) == 32 ? "l"
5159 : "q"));
5160 break;
5162 case 'U':
5163 /* Similar, except do it from the mask. */
5164 if (GET_CODE (x) == CONST_INT)
5166 HOST_WIDE_INT value = INTVAL (x);
5168 if (value == 0xff)
5170 fputc ('b', file);
5171 break;
5173 if (value == 0xffff)
5175 fputc ('w', file);
5176 break;
5178 if (value == 0xffffffff)
5180 fputc ('l', file);
5181 break;
5183 if (value == -1)
5185 fputc ('q', file);
5186 break;
5189 else if (HOST_BITS_PER_WIDE_INT == 32
5190 && GET_CODE (x) == CONST_DOUBLE
5191 && CONST_DOUBLE_LOW (x) == 0xffffffff
5192 && CONST_DOUBLE_HIGH (x) == 0)
5194 fputc ('l', file);
5195 break;
5197 output_operand_lossage ("invalid %%U value");
5198 break;
5200 case 's':
5201 /* Write the constant value divided by 8 for little-endian mode or
5202 (56 - value) / 8 for big-endian mode. */
5204 if (GET_CODE (x) != CONST_INT
5205 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5206 ? 56
5207 : 64)
5208 || (INTVAL (x) & 7) != 0)
5209 output_operand_lossage ("invalid %%s value");
5211 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5212 WORDS_BIG_ENDIAN
5213 ? (56 - INTVAL (x)) / 8
5214 : INTVAL (x) / 8);
5215 break;
5217 case 'S':
5218 /* Same, except compute (64 - c) / 8 */
5220 if (GET_CODE (x) != CONST_INT
5221 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5222 || (INTVAL (x) & 7) != 0)
5223 output_operand_lossage ("invalid %%S value");
5225 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5226 break;
5228 case 't':
5230 /* On Unicos/Mk systems: use a DEX expression if the symbol
5231 clashes with a register name. */
5232 int dex = unicosmk_need_dex (x);
5233 if (dex)
5234 fprintf (file, "DEX(%d)", dex);
5235 else
5236 output_addr_const (file, x);
5238 break;
5240 case 'C': case 'D': case 'c': case 'd':
5241 /* Write out comparison name. */
5243 enum rtx_code c = GET_CODE (x);
5245 if (!COMPARISON_P (x))
5246 output_operand_lossage ("invalid %%C value");
5248 else if (code == 'D')
5249 c = reverse_condition (c);
5250 else if (code == 'c')
5251 c = swap_condition (c);
5252 else if (code == 'd')
5253 c = swap_condition (reverse_condition (c));
5255 if (c == LEU)
5256 fprintf (file, "ule");
5257 else if (c == LTU)
5258 fprintf (file, "ult");
5259 else if (c == UNORDERED)
5260 fprintf (file, "un");
5261 else
5262 fprintf (file, "%s", GET_RTX_NAME (c));
5264 break;
5266 case 'E':
5267 /* Write the divide or modulus operator. */
5268 switch (GET_CODE (x))
5270 case DIV:
5271 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5272 break;
5273 case UDIV:
5274 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5275 break;
5276 case MOD:
5277 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5278 break;
5279 case UMOD:
5280 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5281 break;
5282 default:
5283 output_operand_lossage ("invalid %%E value");
5284 break;
5286 break;
5288 case 'A':
5289 /* Write "_u" for unaligned access. */
5290 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5291 fprintf (file, "_u");
5292 break;
5294 case 0:
5295 if (GET_CODE (x) == REG)
5296 fprintf (file, "%s", reg_names[REGNO (x)]);
5297 else if (GET_CODE (x) == MEM)
5298 output_address (XEXP (x, 0));
5299 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5301 switch (XINT (XEXP (x, 0), 1))
5303 case UNSPEC_DTPREL:
5304 case UNSPEC_TPREL:
5305 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5306 break;
5307 default:
5308 output_operand_lossage ("unknown relocation unspec");
5309 break;
5312 else
5313 output_addr_const (file, x);
5314 break;
5316 default:
5317 output_operand_lossage ("invalid %%xn code");
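/* Illustration only: a host-side model of the %h/%L arithmetic above,
   assuming 64-bit signed HOST_WIDE_INTs; the helper names are
   hypothetical.  %L sign-extends the low 16 bits, so a constant whose
   bit 15 is set prints as a negative low part.  */

static long long
operand_low16_model (long long v)     /* what %L prints */
{
  return (v & 0xffff) - 2 * (v & 0x8000);
}

static long long
operand_high16_model (long long v)    /* what %h prints */
{
  return v >> 16;
}

/* E.g. for v == 0x12345678, %h prints 0x1234 and %L prints 0x5678;
   for v == 0x8765, %L prints -30875 (0x8765 - 0x10000).  */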
5321 void
5322 print_operand_address (FILE *file, rtx addr)
5324 int basereg = 31;
5325 HOST_WIDE_INT offset = 0;
5327 if (GET_CODE (addr) == AND)
5328 addr = XEXP (addr, 0);
5330 if (GET_CODE (addr) == PLUS
5331 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5333 offset = INTVAL (XEXP (addr, 1));
5334 addr = XEXP (addr, 0);
5337 if (GET_CODE (addr) == LO_SUM)
5339 const char *reloc16, *reloclo;
5340 rtx op1 = XEXP (addr, 1);
5342 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5344 op1 = XEXP (op1, 0);
5345 switch (XINT (op1, 1))
5347 case UNSPEC_DTPREL:
5348 reloc16 = NULL;
5349 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5350 break;
5351 case UNSPEC_TPREL:
5352 reloc16 = NULL;
5353 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5354 break;
5355 default:
5356 output_operand_lossage ("unknown relocation unspec");
5357 return;
5360 output_addr_const (file, XVECEXP (op1, 0, 0));
5362 else
5364 reloc16 = "gprel";
5365 reloclo = "gprellow";
5366 output_addr_const (file, op1);
5369 if (offset)
5370 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5372 addr = XEXP (addr, 0);
5373 switch (GET_CODE (addr))
5375 case REG:
5376 basereg = REGNO (addr);
5377 break;
5379 case SUBREG:
5380 basereg = subreg_regno (addr);
5381 break;
5383 default:
5384 gcc_unreachable ();
5387 fprintf (file, "($%d)\t\t!%s", basereg,
5388 (basereg == 29 ? reloc16 : reloclo));
5389 return;
5392 switch (GET_CODE (addr))
5394 case REG:
5395 basereg = REGNO (addr);
5396 break;
5398 case SUBREG:
5399 basereg = subreg_regno (addr);
5400 break;
5402 case CONST_INT:
5403 offset = INTVAL (addr);
5404 break;
5406 #if TARGET_ABI_OPEN_VMS
5407 case SYMBOL_REF:
5408 fprintf (file, "%s", XSTR (addr, 0));
5409 return;
5411 case CONST:
5412 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5413 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5414 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5415 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5416 INTVAL (XEXP (XEXP (addr, 0), 1)));
5417 return;
5419 #endif
5420 default:
5421 gcc_unreachable ();
5424 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5427 /* Emit RTL insns to initialize the variable parts of a trampoline at
5428 TRAMP. FNADDR is an RTX for the address of the function's pure
5429 code. CXT is an RTX for the static chain value for the function.
5431 The three offset parameters are for the individual template's
5432 layout. A JMPOFS < 0 indicates that the trampoline does not
5433 contain instructions at all.
5435 We assume here that a function will be called many more times than
5436 its address is taken (e.g., it might be passed to qsort), so we
5437 take the trouble to initialize the "hint" field in the JMP insn.
5438 Note that the hint field is PC (new) + 4 * bits 13:0. */
5440 void
5441 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5442 int fnofs, int cxtofs, int jmpofs)
5444 rtx temp, temp1, addr;
5445 /* VMS really uses DImode pointers in memory at this point. */
5446 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5448 #ifdef POINTERS_EXTEND_UNSIGNED
5449 fnaddr = convert_memory_address (mode, fnaddr);
5450 cxt = convert_memory_address (mode, cxt);
5451 #endif
5453 /* Store function address and CXT. */
5454 addr = memory_address (mode, plus_constant (tramp, fnofs));
5455 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5456 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5457 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5459 /* This has been disabled since the hint only has a 32k range, and in
5460 no existing OS is the stack within 32k of the text segment. */
5461 if (0 && jmpofs >= 0)
5463 /* Compute hint value. */
5464 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5465 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5466 OPTAB_WIDEN);
5467 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5468 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5469 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5470 GEN_INT (0x3fff), 0);
5472 /* Merge in the hint. */
5473 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5474 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5475 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5476 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5477 OPTAB_WIDEN);
5478 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5481 #ifdef ENABLE_EXECUTE_STACK
5482 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5483 0, VOIDmode, 1, tramp, Pmode);
5484 #endif
5486 if (jmpofs >= 0)
5487 emit_insn (gen_imb ());
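/* Illustration only: the hint that the disabled block above would merge
   into the JMP instruction word, modelled on the host with 64-bit
   integers; the helper name is hypothetical.  The hint is a 14-bit word
   displacement from the updated PC (the instruction after the JMP).  */

static unsigned int
jmp_hint_model (unsigned long long fnaddr, unsigned long long tramp,
                int jmpofs)
{
  unsigned long long new_pc = tramp + jmpofs + 4;
  return (unsigned int) ((fnaddr - new_pc) >> 2) & 0x3fff;
}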
5490 /* Determine where to put an argument to a function.
5491 Value is zero to push the argument on the stack,
5492 or a hard register in which to store the argument.
5494 MODE is the argument's machine mode.
5495 TYPE is the data type of the argument (as a tree).
5496 This is null for libcalls where that information may
5497 not be available.
5498 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5499 the preceding args and about the function being called.
5500 NAMED is nonzero if this argument is a named parameter
5501 (otherwise it is an extra parameter matching an ellipsis).
5503 On Alpha the first 6 words of args are normally in registers
5504 and the rest are pushed. */
5507 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5508 int named ATTRIBUTE_UNUSED)
5510 int basereg;
5511 int num_args;
5513 /* Don't get confused and pass small structures in FP registers. */
5514 if (type && AGGREGATE_TYPE_P (type))
5515 basereg = 16;
5516 else
5518 #ifdef ENABLE_CHECKING
5519 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5520 values here. */
5521 gcc_assert (!COMPLEX_MODE_P (mode));
5522 #endif
5524 /* Set up defaults for FP operands passed in FP registers, and
5525 integral operands passed in integer registers. */
5526 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5527 basereg = 32 + 16;
5528 else
5529 basereg = 16;
5532 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5533 the three platforms, so we can't avoid conditional compilation. */
5534 #if TARGET_ABI_OPEN_VMS
5536 if (mode == VOIDmode)
5537 return alpha_arg_info_reg_val (cum);
5539 num_args = cum.num_args;
5540 if (num_args >= 6
5541 || targetm.calls.must_pass_in_stack (mode, type))
5542 return NULL_RTX;
5544 #elif TARGET_ABI_UNICOSMK
5546 int size;
5548 /* If this is the last argument, generate the call info word (CIW). */
5549 /* ??? We don't include the caller's line number in the CIW because
5550 I don't know how to determine it if debug info is turned off. */
5551 if (mode == VOIDmode)
5553 int i;
5554 HOST_WIDE_INT lo;
5555 HOST_WIDE_INT hi;
5556 rtx ciw;
5558 lo = 0;
5560 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5561 if (cum.reg_args_type[i])
5562 lo |= (1 << (7 - i));
5564 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5565 lo |= 7;
5566 else
5567 lo |= cum.num_reg_words;
5569 #if HOST_BITS_PER_WIDE_INT == 32
5570 hi = (cum.num_args << 20) | cum.num_arg_words;
5571 #else
5572 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5573 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5574 hi = 0;
5575 #endif
5576 ciw = immed_double_const (lo, hi, DImode);
5578 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5579 UNSPEC_UMK_LOAD_CIW);
5582 size = ALPHA_ARG_SIZE (mode, type, named);
5583 num_args = cum.num_reg_words;
5584 if (cum.force_stack
5585 || cum.num_reg_words + size > 6
5586 || targetm.calls.must_pass_in_stack (mode, type))
5587 return NULL_RTX;
5588 else if (type && TYPE_MODE (type) == BLKmode)
5590 rtx reg1, reg2;
5592 reg1 = gen_rtx_REG (DImode, num_args + 16);
5593 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5595 /* The argument fits in two registers. Note that we still need to
5596 reserve a register for empty structures. */
5597 if (size == 0)
5598 return NULL_RTX;
5599 else if (size == 1)
5600 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5601 else
5603 reg2 = gen_rtx_REG (DImode, num_args + 17);
5604 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5605 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5609 #elif TARGET_ABI_OSF
5611 if (cum >= 6)
5612 return NULL_RTX;
5613 num_args = cum;
5615 /* VOID is passed as a special flag for "last argument". */
5616 if (type == void_type_node)
5617 basereg = 16;
5618 else if (targetm.calls.must_pass_in_stack (mode, type))
5619 return NULL_RTX;
5621 #else
5622 #error Unhandled ABI
5623 #endif
5625 return gen_rtx_REG (mode, num_args + basereg);
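/* Illustration only: the register number the OSF/1 case above yields for
   the first six argument words, assuming FP registers are enabled and the
   argument is neither an aggregate nor forced onto the stack; the helper
   name is hypothetical.  Integer-class words land in $16-$21, floating-
   point words in $f16-$f21 (hard registers 48-53).  */

static int
osf_arg_regno_model (int words_so_far, int is_float)
{
  int basereg = is_float ? 32 + 16 : 16;
  return words_so_far < 6 ? basereg + words_so_far : -1;  /* -1: on the stack */
}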
5628 static int
5629 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5630 enum machine_mode mode ATTRIBUTE_UNUSED,
5631 tree type ATTRIBUTE_UNUSED,
5632 bool named ATTRIBUTE_UNUSED)
5634 int words = 0;
5636 #if TARGET_ABI_OPEN_VMS
5637 if (cum->num_args < 6
5638 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5639 words = 6 - cum->num_args;
5640 #elif TARGET_ABI_UNICOSMK
5641 /* Never any split arguments. */
5642 #elif TARGET_ABI_OSF
5643 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5644 words = 6 - *cum;
5645 #else
5646 #error Unhandled ABI
5647 #endif
5649 return words * UNITS_PER_WORD;
5653 /* Return true if TYPE must be returned in memory, instead of in registers. */
5655 static bool
5656 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5658 enum machine_mode mode = VOIDmode;
5659 int size;
5661 if (type)
5663 mode = TYPE_MODE (type);
5665 /* All aggregates are returned in memory. */
5666 if (AGGREGATE_TYPE_P (type))
5667 return true;
5670 size = GET_MODE_SIZE (mode);
5671 switch (GET_MODE_CLASS (mode))
5673 case MODE_VECTOR_FLOAT:
5674 /* Pass all float vectors in memory, like an aggregate. */
5675 return true;
5677 case MODE_COMPLEX_FLOAT:
5678 /* We judge complex floats on the size of their element,
5679 not the size of the whole type. */
5680 size = GET_MODE_UNIT_SIZE (mode);
5681 break;
5683 case MODE_INT:
5684 case MODE_FLOAT:
5685 case MODE_COMPLEX_INT:
5686 case MODE_VECTOR_INT:
5687 break;
5689 default:
5690 /* ??? We get called on all sorts of random stuff from
5691 aggregate_value_p. We must return something, but it's not
5692 clear what's safe to return. Pretend it's a struct I
5693 guess. */
5694 return true;
5697 /* Otherwise types must fit in one register. */
5698 return size > UNITS_PER_WORD;
5701 /* Return true if TYPE should be passed by invisible reference. */
5703 static bool
5704 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5705 enum machine_mode mode,
5706 const_tree type ATTRIBUTE_UNUSED,
5707 bool named ATTRIBUTE_UNUSED)
5709 return mode == TFmode || mode == TCmode;
5712 /* Define how to find the value returned by a function. VALTYPE is the
5713 data type of the value (as a tree). If the precise function being
5714 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5715 MODE is set instead of VALTYPE for libcalls.
5717 On Alpha the value is found in $0 for integer functions and
5718 $f0 for floating-point functions. */
5721 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5722 enum machine_mode mode)
5724 unsigned int regnum, dummy;
5725 enum mode_class class;
5727 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5729 if (valtype)
5730 mode = TYPE_MODE (valtype);
5732 class = GET_MODE_CLASS (mode);
5733 switch (class)
5735 case MODE_INT:
5736 PROMOTE_MODE (mode, dummy, valtype);
5737 /* FALLTHRU */
5739 case MODE_COMPLEX_INT:
5740 case MODE_VECTOR_INT:
5741 regnum = 0;
5742 break;
5744 case MODE_FLOAT:
5745 regnum = 32;
5746 break;
5748 case MODE_COMPLEX_FLOAT:
5750 enum machine_mode cmode = GET_MODE_INNER (mode);
5752 return gen_rtx_PARALLEL
5753 (VOIDmode,
5754 gen_rtvec (2,
5755 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5756 const0_rtx),
5757 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5758 GEN_INT (GET_MODE_SIZE (cmode)))));
5761 default:
5762 gcc_unreachable ();
5765 return gen_rtx_REG (mode, regnum);
5768 /* TCmode complex values are passed by invisible reference. We
5769 should not split these values. */
5771 static bool
5772 alpha_split_complex_arg (const_tree type)
5774 return TYPE_MODE (type) != TCmode;
5777 static tree
5778 alpha_build_builtin_va_list (void)
5780 tree base, ofs, space, record, type_decl;
5782 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5783 return ptr_type_node;
5785 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5786 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5787 TREE_CHAIN (record) = type_decl;
5788 TYPE_NAME (record) = type_decl;
5790 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5792 /* Dummy field to prevent alignment warnings. */
5793 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5794 DECL_FIELD_CONTEXT (space) = record;
5795 DECL_ARTIFICIAL (space) = 1;
5796 DECL_IGNORED_P (space) = 1;
5798 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5799 integer_type_node);
5800 DECL_FIELD_CONTEXT (ofs) = record;
5801 TREE_CHAIN (ofs) = space;
5803 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5804 ptr_type_node);
5805 DECL_FIELD_CONTEXT (base) = record;
5806 TREE_CHAIN (base) = ofs;
5808 TYPE_FIELDS (record) = base;
5809 layout_type (record);
5811 va_list_gpr_counter_field = ofs;
5812 return record;
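/* Illustration only: the C-level shape of the record built above and the
   way __offset advances per va_arg (each argument is rounded up to a
   whole 8-byte word, as alpha_gimplify_va_arg_1 below does); names other
   than __base and __offset are hypothetical.  */

struct alpha_va_list_model
{
  void *__base;        /* start of the saved-register/overflow area   */
  int __offset;        /* byte offset of the next argument word       */
  int __pad;           /* stands in for the unnamed dummy field above */
};

static void
va_list_advance_model (struct alpha_va_list_model *ap, unsigned long size)
{
  ap->__offset += (int) ((size + 7) / 8 * 8);
}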
5815 #if TARGET_ABI_OSF
5816 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5817 and constant additions. */
5819 static tree
5820 va_list_skip_additions (tree lhs)
5822 tree rhs, stmt;
5824 if (TREE_CODE (lhs) != SSA_NAME)
5825 return lhs;
5827 for (;;)
5829 stmt = SSA_NAME_DEF_STMT (lhs);
5831 if (TREE_CODE (stmt) == PHI_NODE)
5832 return stmt;
5834 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5835 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5836 return lhs;
5838 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5839 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5840 rhs = TREE_OPERAND (rhs, 0);
5842 if ((TREE_CODE (rhs) != NOP_EXPR
5843 && TREE_CODE (rhs) != CONVERT_EXPR
5844 && ((TREE_CODE (rhs) != PLUS_EXPR
5845 && TREE_CODE (rhs) != POINTER_PLUS_EXPR)
5846 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5847 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5848 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5849 return rhs;
5851 lhs = TREE_OPERAND (rhs, 0);
5855 /* Check if LHS = RHS statement is
5856 LHS = *(ap.__base + ap.__offset + cst)
5858 LHS = *(ap.__base
5859 + ((ap.__offset + cst <= 47)
5860 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5861 If the former, indicate that GPR registers are needed,
5862 if the latter, indicate that FPR registers are needed.
5864 Also look for LHS = (*ptr).field, where ptr is one of the forms
5865 listed above.
5867 On alpha, cfun->va_list_gpr_size is used as size of the needed
5868 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5869 registers are needed and bit 1 set if FPR registers are needed.
5870 Return true if va_list references should not be scanned for the
5871 current statement. */
5873 static bool
5874 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree rhs)
5876 tree base, offset, arg1, arg2;
5877 int offset_arg = 1;
5879 while (handled_component_p (rhs))
5880 rhs = TREE_OPERAND (rhs, 0);
5881 if (TREE_CODE (rhs) != INDIRECT_REF
5882 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5883 return false;
5885 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5886 if (lhs == NULL_TREE
5887 || TREE_CODE (lhs) != POINTER_PLUS_EXPR)
5888 return false;
5890 base = TREE_OPERAND (lhs, 0);
5891 if (TREE_CODE (base) == SSA_NAME)
5892 base = va_list_skip_additions (base);
5894 if (TREE_CODE (base) != COMPONENT_REF
5895 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5897 base = TREE_OPERAND (lhs, 0);
5898 if (TREE_CODE (base) == SSA_NAME)
5899 base = va_list_skip_additions (base);
5901 if (TREE_CODE (base) != COMPONENT_REF
5902 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5903 return false;
5905 offset_arg = 0;
5908 base = get_base_address (base);
5909 if (TREE_CODE (base) != VAR_DECL
5910 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5911 return false;
5913 offset = TREE_OPERAND (lhs, offset_arg);
5914 if (TREE_CODE (offset) == SSA_NAME)
5915 offset = va_list_skip_additions (offset);
5917 if (TREE_CODE (offset) == PHI_NODE)
5919 HOST_WIDE_INT sub;
5921 if (PHI_NUM_ARGS (offset) != 2)
5922 goto escapes;
5924 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5925 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5926 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5928 tree tem = arg1;
5929 arg1 = arg2;
5930 arg2 = tem;
5932 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5933 goto escapes;
5935 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5936 goto escapes;
5938 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5939 if (TREE_CODE (arg2) == MINUS_EXPR)
5940 sub = -sub;
5941 if (sub < -48 || sub > -32)
5942 goto escapes;
5944 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5945 if (arg1 != arg2)
5946 goto escapes;
5948 if (TREE_CODE (arg1) == SSA_NAME)
5949 arg1 = va_list_skip_additions (arg1);
5951 if (TREE_CODE (arg1) != COMPONENT_REF
5952 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5953 || get_base_address (arg1) != base)
5954 goto escapes;
5956 /* Need floating point regs. */
5957 cfun->va_list_fpr_size |= 2;
5959 else if (TREE_CODE (offset) != COMPONENT_REF
5960 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5961 || get_base_address (offset) != base)
5962 goto escapes;
5963 else
5964 /* Need general regs. */
5965 cfun->va_list_fpr_size |= 1;
5966 return false;
5968 escapes:
5969 si->va_list_escapes = true;
5970 return false;
5972 #endif
5974 /* Perform any actions needed for a function that is receiving a
5975 variable number of arguments. */
5977 static void
5978 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5979 tree type, int *pretend_size, int no_rtl)
5981 CUMULATIVE_ARGS cum = *pcum;
5983 /* Skip the current argument. */
5984 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5986 #if TARGET_ABI_UNICOSMK
5987 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5988 arguments on the stack. Unfortunately, it doesn't always store the first
5989 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5990 with stdargs as we always have at least one named argument there. */
5991 if (cum.num_reg_words < 6)
5993 if (!no_rtl)
5995 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5996 emit_insn (gen_arg_home_umk ());
5998 *pretend_size = 0;
6000 #elif TARGET_ABI_OPEN_VMS
6001 /* For VMS, we allocate space for all 6 arg registers plus a count.
6003 However, if NO registers need to be saved, don't allocate any space.
6004 This is not only because we won't need the space, but because AP
6005 includes the current_pretend_args_size and we don't want to mess up
6006 any ap-relative addresses already made. */
6007 if (cum.num_args < 6)
6009 if (!no_rtl)
6011 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6012 emit_insn (gen_arg_home ());
6014 *pretend_size = 7 * UNITS_PER_WORD;
6016 #else
6017 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6018 only push those that are remaining. However, if NO registers need to
6019 be saved, don't allocate any space. This is not only because we won't
6020 need the space, but because AP includes the current_pretend_args_size
6021 and we don't want to mess up any ap-relative addresses already made.
6023 If we are not to use the floating-point registers, save the integer
6024 registers where we would put the floating-point registers. This is
6025 not the most efficient way to implement varargs with just one register
6026 class, but it isn't worth doing anything more efficient in this rare
6027 case. */
6028 if (cum >= 6)
6029 return;
6031 if (!no_rtl)
6033 int count;
6034 alias_set_type set = get_varargs_alias_set ();
6035 rtx tmp;
6037 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6038 if (count > 6 - cum)
6039 count = 6 - cum;
6041 /* Detect whether integer registers or floating-point registers
6042 are needed by the detected va_arg statements. See above for
6043 how these values are computed. Note that the "escape" value
6044 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6045 these bits set. */
6046 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6048 if (cfun->va_list_fpr_size & 1)
6050 tmp = gen_rtx_MEM (BLKmode,
6051 plus_constant (virtual_incoming_args_rtx,
6052 (cum + 6) * UNITS_PER_WORD));
6053 MEM_NOTRAP_P (tmp) = 1;
6054 set_mem_alias_set (tmp, set);
6055 move_block_from_reg (16 + cum, tmp, count);
6058 if (cfun->va_list_fpr_size & 2)
6060 tmp = gen_rtx_MEM (BLKmode,
6061 plus_constant (virtual_incoming_args_rtx,
6062 cum * UNITS_PER_WORD));
6063 MEM_NOTRAP_P (tmp) = 1;
6064 set_mem_alias_set (tmp, set);
6065 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6068 *pretend_size = 12 * UNITS_PER_WORD;
6069 #endif
6072 void
6073 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6075 HOST_WIDE_INT offset;
6076 tree t, offset_field, base_field;
6078 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6079 return;
6081 if (TARGET_ABI_UNICOSMK)
6082 std_expand_builtin_va_start (valist, nextarg);
6084 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6085 up by 48, storing fp arg registers in the first 48 bytes, and the
6086 integer arg registers in the next 48 bytes. This is only done,
6087 however, if any integer registers need to be stored.
6089 If no integer registers need be stored, then we must subtract 48
6090 in order to account for the integer arg registers which are counted
6091 in argsize above, but which are not actually stored on the stack.
6092 Must further be careful here about structures straddling the last
6093 integer argument register; that futzes with pretend_args_size,
6094 which changes the meaning of AP. */
6096 if (NUM_ARGS < 6)
6097 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6098 else
6099 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6101 if (TARGET_ABI_OPEN_VMS)
6103 nextarg = plus_constant (nextarg, offset);
6104 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6105 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6106 make_tree (ptr_type_node, nextarg));
6107 TREE_SIDE_EFFECTS (t) = 1;
6109 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6111 else
6113 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6114 offset_field = TREE_CHAIN (base_field);
6116 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6117 valist, base_field, NULL_TREE);
6118 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6119 valist, offset_field, NULL_TREE);
6121 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6122 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6123 size_int (offset));
6124 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6125 TREE_SIDE_EFFECTS (t) = 1;
6126 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6128 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6129 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6130 offset_field, t);
6131 TREE_SIDE_EFFECTS (t) = 1;
6132 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6136 static tree
6137 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6139 tree type_size, ptr_type, addend, t, addr, internal_post;
6141 /* If the type could not be passed in registers, skip the block
6142 reserved for the registers. */
6143 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6145 t = build_int_cst (TREE_TYPE (offset), 6*8);
6146 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6147 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6148 gimplify_and_add (t, pre_p);
6151 addend = offset;
6152 ptr_type = build_pointer_type (type);
6154 if (TREE_CODE (type) == COMPLEX_TYPE)
6156 tree real_part, imag_part, real_temp;
6158 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6159 offset, pre_p);
6161 /* Copy the value into a new temporary, lest the formal temporary
6162 be reused out from under us. */
6163 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6165 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6166 offset, pre_p);
6168 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6170 else if (TREE_CODE (type) == REAL_TYPE)
6172 tree fpaddend, cond, fourtyeight;
6174 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6175 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6176 addend, fourtyeight);
6177 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6178 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6179 fpaddend, addend);
6182 /* Build the final address and force that value into a temporary. */
6183 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6184 fold_convert (sizetype, addend));
6185 internal_post = NULL;
6186 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6187 append_to_statement_list (internal_post, pre_p);
6189 /* Update the offset field. */
6190 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6191 if (type_size == NULL || TREE_OVERFLOW (type_size))
6192 t = size_zero_node;
6193 else
6195 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6196 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6197 t = size_binop (MULT_EXPR, t, size_int (8));
6199 t = fold_convert (TREE_TYPE (offset), t);
6200 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6201 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6202 gimplify_and_add (t, pre_p);
6204 return build_va_arg_indirect_ref (addr);
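/* Illustration only: the address selection performed above, written as
   plain C, assuming the OSF layout in which the six saved FP argument
   registers sit 48 bytes below the six saved integer registers; the
   helper name is hypothetical.  */

static void *
va_arg_addr_model (char *base, long offset, int is_float)
{
  long addend = offset;

  /* FP arguments still inside the 48-byte register-save window are
     fetched from the FP save area instead.  */
  if (is_float && addend < 6 * 8)
    addend -= 6 * 8;

  return base + addend;
}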
6207 static tree
6208 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6210 tree offset_field, base_field, offset, base, t, r;
6211 bool indirect;
6213 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6214 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6216 base_field = TYPE_FIELDS (va_list_type_node);
6217 offset_field = TREE_CHAIN (base_field);
6218 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6219 valist, base_field, NULL_TREE);
6220 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6221 valist, offset_field, NULL_TREE);
6223 /* Pull the fields of the structure out into temporaries. Since we never
6224 modify the base field, we can use a formal temporary. Sign-extend the
6225 offset field so that it's the proper width for pointer arithmetic. */
6226 base = get_formal_tmp_var (base_field, pre_p);
6228 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6229 offset = get_initialized_tmp_var (t, pre_p, NULL);
6231 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6232 if (indirect)
6233 type = build_pointer_type (type);
6235 /* Find the value. Note that this will be a stable indirection, or
6236 a composite of stable indirections in the case of complex. */
6237 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6239 /* Stuff the offset temporary back into its field. */
6240 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6241 fold_convert (TREE_TYPE (offset_field), offset));
6242 gimplify_and_add (t, pre_p);
6244 if (indirect)
6245 r = build_va_arg_indirect_ref (r);
6247 return r;
6250 /* Builtins. */
6252 enum alpha_builtin
6254 ALPHA_BUILTIN_CMPBGE,
6255 ALPHA_BUILTIN_EXTBL,
6256 ALPHA_BUILTIN_EXTWL,
6257 ALPHA_BUILTIN_EXTLL,
6258 ALPHA_BUILTIN_EXTQL,
6259 ALPHA_BUILTIN_EXTWH,
6260 ALPHA_BUILTIN_EXTLH,
6261 ALPHA_BUILTIN_EXTQH,
6262 ALPHA_BUILTIN_INSBL,
6263 ALPHA_BUILTIN_INSWL,
6264 ALPHA_BUILTIN_INSLL,
6265 ALPHA_BUILTIN_INSQL,
6266 ALPHA_BUILTIN_INSWH,
6267 ALPHA_BUILTIN_INSLH,
6268 ALPHA_BUILTIN_INSQH,
6269 ALPHA_BUILTIN_MSKBL,
6270 ALPHA_BUILTIN_MSKWL,
6271 ALPHA_BUILTIN_MSKLL,
6272 ALPHA_BUILTIN_MSKQL,
6273 ALPHA_BUILTIN_MSKWH,
6274 ALPHA_BUILTIN_MSKLH,
6275 ALPHA_BUILTIN_MSKQH,
6276 ALPHA_BUILTIN_UMULH,
6277 ALPHA_BUILTIN_ZAP,
6278 ALPHA_BUILTIN_ZAPNOT,
6279 ALPHA_BUILTIN_AMASK,
6280 ALPHA_BUILTIN_IMPLVER,
6281 ALPHA_BUILTIN_RPCC,
6282 ALPHA_BUILTIN_THREAD_POINTER,
6283 ALPHA_BUILTIN_SET_THREAD_POINTER,
6285 /* TARGET_MAX */
6286 ALPHA_BUILTIN_MINUB8,
6287 ALPHA_BUILTIN_MINSB8,
6288 ALPHA_BUILTIN_MINUW4,
6289 ALPHA_BUILTIN_MINSW4,
6290 ALPHA_BUILTIN_MAXUB8,
6291 ALPHA_BUILTIN_MAXSB8,
6292 ALPHA_BUILTIN_MAXUW4,
6293 ALPHA_BUILTIN_MAXSW4,
6294 ALPHA_BUILTIN_PERR,
6295 ALPHA_BUILTIN_PKLB,
6296 ALPHA_BUILTIN_PKWB,
6297 ALPHA_BUILTIN_UNPKBL,
6298 ALPHA_BUILTIN_UNPKBW,
6300 /* TARGET_CIX */
6301 ALPHA_BUILTIN_CTTZ,
6302 ALPHA_BUILTIN_CTLZ,
6303 ALPHA_BUILTIN_CTPOP,
6305 ALPHA_BUILTIN_max
6308 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6309 CODE_FOR_builtin_cmpbge,
6310 CODE_FOR_builtin_extbl,
6311 CODE_FOR_builtin_extwl,
6312 CODE_FOR_builtin_extll,
6313 CODE_FOR_builtin_extql,
6314 CODE_FOR_builtin_extwh,
6315 CODE_FOR_builtin_extlh,
6316 CODE_FOR_builtin_extqh,
6317 CODE_FOR_builtin_insbl,
6318 CODE_FOR_builtin_inswl,
6319 CODE_FOR_builtin_insll,
6320 CODE_FOR_builtin_insql,
6321 CODE_FOR_builtin_inswh,
6322 CODE_FOR_builtin_inslh,
6323 CODE_FOR_builtin_insqh,
6324 CODE_FOR_builtin_mskbl,
6325 CODE_FOR_builtin_mskwl,
6326 CODE_FOR_builtin_mskll,
6327 CODE_FOR_builtin_mskql,
6328 CODE_FOR_builtin_mskwh,
6329 CODE_FOR_builtin_msklh,
6330 CODE_FOR_builtin_mskqh,
6331 CODE_FOR_umuldi3_highpart,
6332 CODE_FOR_builtin_zap,
6333 CODE_FOR_builtin_zapnot,
6334 CODE_FOR_builtin_amask,
6335 CODE_FOR_builtin_implver,
6336 CODE_FOR_builtin_rpcc,
6337 CODE_FOR_load_tp,
6338 CODE_FOR_set_tp,
6340 /* TARGET_MAX */
6341 CODE_FOR_builtin_minub8,
6342 CODE_FOR_builtin_minsb8,
6343 CODE_FOR_builtin_minuw4,
6344 CODE_FOR_builtin_minsw4,
6345 CODE_FOR_builtin_maxub8,
6346 CODE_FOR_builtin_maxsb8,
6347 CODE_FOR_builtin_maxuw4,
6348 CODE_FOR_builtin_maxsw4,
6349 CODE_FOR_builtin_perr,
6350 CODE_FOR_builtin_pklb,
6351 CODE_FOR_builtin_pkwb,
6352 CODE_FOR_builtin_unpkbl,
6353 CODE_FOR_builtin_unpkbw,
6355 /* TARGET_CIX */
6356 CODE_FOR_ctzdi2,
6357 CODE_FOR_clzdi2,
6358 CODE_FOR_popcountdi2
6361 struct alpha_builtin_def
6363 const char *name;
6364 enum alpha_builtin code;
6365 unsigned int target_mask;
6366 bool is_const;
6369 static struct alpha_builtin_def const zero_arg_builtins[] = {
6370 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6371 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6374 static struct alpha_builtin_def const one_arg_builtins[] = {
6375 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6376 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6377 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6378 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6379 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6380 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6381 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6382 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6385 static struct alpha_builtin_def const two_arg_builtins[] = {
6386 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6387 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6388 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6389 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6390 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6391 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6392 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6393 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6394 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6395 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6396 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6397 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6398 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6399 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6400 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6401 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6402 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6403 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6404 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6405 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6406 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6407 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6408 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6409 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6410 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6411 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6412 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6413 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6414 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6415 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6416 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6417 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6418 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6419 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
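/* Note that entries carrying MASK_MAX or MASK_CIX above are only
   registered when the corresponding bits are set in target_flags
   (see alpha_add_builtins below).  So, for example,
   __builtin_alpha_perr is presumably available only when the MAX
   extension is enabled (e.g. via -mmax or a cpu that implies it),
   while __builtin_alpha_zapnot is always present.  */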
6422 static GTY(()) tree alpha_v8qi_u;
6423 static GTY(()) tree alpha_v8qi_s;
6424 static GTY(()) tree alpha_v4hi_u;
6425 static GTY(()) tree alpha_v4hi_s;
6427 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6428 functions pointed to by P, with function type FTYPE. */
6430 static void
6431 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6432 tree ftype)
6434 tree decl;
6435 size_t i;
6437 for (i = 0; i < count; ++i, ++p)
6438 if ((target_flags & p->target_mask) == p->target_mask)
6440 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6441 NULL, NULL);
6442 if (p->is_const)
6443 TREE_READONLY (decl) = 1;
6444 TREE_NOTHROW (decl) = 1;
6449 static void
6450 alpha_init_builtins (void)
6452 tree dimode_integer_type_node;
6453 tree ftype, decl;
6455 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6457 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6458 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6459 ftype);
6461 ftype = build_function_type_list (dimode_integer_type_node,
6462 dimode_integer_type_node, NULL_TREE);
6463 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6464 ftype);
6466 ftype = build_function_type_list (dimode_integer_type_node,
6467 dimode_integer_type_node,
6468 dimode_integer_type_node, NULL_TREE);
6469 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6470 ftype);
6472 ftype = build_function_type (ptr_type_node, void_list_node);
6473 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6474 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6475 NULL, NULL);
6476 TREE_NOTHROW (decl) = 1;
6478 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6479 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6480 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6481 NULL, NULL);
6482 TREE_NOTHROW (decl) = 1;
6484 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6485 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6486 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6487 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6490 /* Expand an expression EXP that calls a built-in function,
6491 with result going to TARGET if that's convenient
6492 (and in mode MODE if that's convenient).
6493 SUBTARGET may be used as the target for computing one of EXP's operands.
6494 IGNORE is nonzero if the value is to be ignored. */
6496 static rtx
6497 alpha_expand_builtin (tree exp, rtx target,
6498 rtx subtarget ATTRIBUTE_UNUSED,
6499 enum machine_mode mode ATTRIBUTE_UNUSED,
6500 int ignore ATTRIBUTE_UNUSED)
6502 #define MAX_ARGS 2
6504 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6505 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6506 tree arg;
6507 call_expr_arg_iterator iter;
6508 enum insn_code icode;
6509 rtx op[MAX_ARGS], pat;
6510 int arity;
6511 bool nonvoid;
6513 if (fcode >= ALPHA_BUILTIN_max)
6514 internal_error ("bad builtin fcode");
6515 icode = code_for_builtin[fcode];
6516 if (icode == 0)
6517 internal_error ("bad builtin fcode");
6519 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6521 arity = 0;
6522 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6524 const struct insn_operand_data *insn_op;
6526 if (arg == error_mark_node)
6527 return NULL_RTX;
6528 if (arity > MAX_ARGS)
6529 return NULL_RTX;
6531 insn_op = &insn_data[icode].operand[arity + nonvoid];
6533 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6535 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6536 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6537 arity++;
6540 if (nonvoid)
6542 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6543 if (!target
6544 || GET_MODE (target) != tmode
6545 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6546 target = gen_reg_rtx (tmode);
6549 switch (arity)
6551 case 0:
6552 pat = GEN_FCN (icode) (target);
6553 break;
6554 case 1:
6555 if (nonvoid)
6556 pat = GEN_FCN (icode) (target, op[0]);
6557 else
6558 pat = GEN_FCN (icode) (op[0]);
6559 break;
6560 case 2:
6561 pat = GEN_FCN (icode) (target, op[0], op[1]);
6562 break;
6563 default:
6564 gcc_unreachable ();
6566 if (!pat)
6567 return NULL_RTX;
6568 emit_insn (pat);
6570 if (nonvoid)
6571 return target;
6572 else
6573 return const0_rtx;
6577 /* Several bits below assume HWI >= 64 bits. This should be enforced
6578 by config.gcc. */
6579 #if HOST_BITS_PER_WIDE_INT < 64
6580 # error "HOST_WIDE_INT too small"
6581 #endif
6583 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6584 with an 8-bit output vector. OPINT contains the integer operands; bit N
6585 of OP_CONST is set if OPINT[N] is valid. */
6587 static tree
6588 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6590 if (op_const == 3)
6592 int i, val;
6593 for (i = 0, val = 0; i < 8; ++i)
6595 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6596 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6597 if (c0 >= c1)
6598 val |= 1 << i;
6600 return build_int_cst (long_integer_type_node, val);
6602 else if (op_const == 2 && opint[1] == 0)
6603 return build_int_cst (long_integer_type_node, 0xff);
6604 return NULL;
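/* Worked example of the fold above: with both operands constant,
   __builtin_alpha_cmpbge (0x0102030405060708, 0x0807060504030201)
   compares byte i of each operand; bytes 0-3 of the first operand
   (0x08, 0x07, 0x06, 0x05) are >= the corresponding bytes of the
   second, bytes 4-7 are not, so the folded result is 0x0f.  */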
6607 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6608 specialized form of an AND operation. Other byte manipulation instructions
6609 are defined in terms of this instruction, so this is also used as a
6610 subroutine for other builtins.
6612 OP contains the tree operands; OPINT contains the extracted integer values.
6613 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6614 OPINT is to be considered. */
6616 static tree
6617 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6618 long op_const)
6620 if (op_const & 2)
6622 unsigned HOST_WIDE_INT mask = 0;
6623 int i;
6625 for (i = 0; i < 8; ++i)
6626 if ((opint[1] >> i) & 1)
6627 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6629 if (op_const & 1)
6630 return build_int_cst (long_integer_type_node, opint[0] & mask);
6632 if (op)
6633 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6634 build_int_cst (long_integer_type_node, mask));
6636 else if ((op_const & 1) && opint[0] == 0)
6637 return build_int_cst (long_integer_type_node, 0);
6638 return NULL;
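/* Worked examples of the fold above: __builtin_alpha_zapnot (x, 0x0f)
   with a constant selector folds to x & 0x00000000ffffffff (bytes 0-3
   kept, bytes 4-7 cleared), and with both operands constant
   __builtin_alpha_zapnot (0x1122334455667788, 0x03) folds to 0x7788.  */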
6641 /* Fold the builtins for the EXT family of instructions. */
6643 static tree
6644 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6645 long op_const, unsigned HOST_WIDE_INT bytemask,
6646 bool is_high)
6648 long zap_const = 2;
6649 tree *zap_op = NULL;
6651 if (op_const & 2)
6653 unsigned HOST_WIDE_INT loc;
6655 loc = opint[1] & 7;
6656 if (BYTES_BIG_ENDIAN)
6657 loc ^= 7;
6658 loc *= 8;
6660 if (loc != 0)
6662 if (op_const & 1)
6664 unsigned HOST_WIDE_INT temp = opint[0];
6665 if (is_high)
6666 temp <<= loc;
6667 else
6668 temp >>= loc;
6669 opint[0] = temp;
6670 zap_const = 3;
6673 else
6674 zap_op = op;
6677 opint[1] = bytemask;
6678 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
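/* Worked example (assuming a little-endian target, so the
   BYTES_BIG_ENDIAN adjustment above does not apply):
   __builtin_alpha_extwl (x, 2) shifts x right by 2*8 = 16 bits and
   zapnots with the 0x03 bytemask, so for x == 0x1122334455667788 it
   folds to 0x5566 -- the 16-bit word starting at byte offset 2.  */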
6681 /* Fold the builtins for the INS family of instructions. */
6683 static tree
6684 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6685 long op_const, unsigned HOST_WIDE_INT bytemask,
6686 bool is_high)
6688 if ((op_const & 1) && opint[0] == 0)
6689 return build_int_cst (long_integer_type_node, 0);
6691 if (op_const & 2)
6693 unsigned HOST_WIDE_INT temp, loc, byteloc;
6694 tree *zap_op = NULL;
6696 loc = opint[1] & 7;
6697 if (BYTES_BIG_ENDIAN)
6698 loc ^= 7;
6699 bytemask <<= loc;
6701 temp = opint[0];
6702 if (is_high)
6704 byteloc = (64 - (loc * 8)) & 0x3f;
6705 if (byteloc == 0)
6706 zap_op = op;
6707 else
6708 temp >>= byteloc;
6709 bytemask >>= 8;
6711 else
6713 byteloc = loc * 8;
6714 if (byteloc == 0)
6715 zap_op = op;
6716 else
6717 temp <<= byteloc;
6720 opint[0] = temp;
6721 opint[1] = bytemask;
6722 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6725 return NULL;
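/* Worked example (little-endian): __builtin_alpha_inswl (0x1234, 2)
   shifts the bytemask 0x03 up to 0x0c and the value left by 16 bits,
   then zapnots, giving 0x12340000 -- the 16-bit value placed at byte
   offset 2.  A constant zero first operand folds directly to 0 above.  */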
6728 static tree
6729 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6730 long op_const, unsigned HOST_WIDE_INT bytemask,
6731 bool is_high)
6733 if (op_const & 2)
6735 unsigned HOST_WIDE_INT loc;
6737 loc = opint[1] & 7;
6738 if (BYTES_BIG_ENDIAN)
6739 loc ^= 7;
6740 bytemask <<= loc;
6742 if (is_high)
6743 bytemask >>= 8;
6745 opint[1] = bytemask ^ 0xff;
6748 return alpha_fold_builtin_zapnot (op, opint, op_const);
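/* Worked example (little-endian): for __builtin_alpha_mskbl (x, 0) the
   bytemask 0x01 is inverted to 0xfe, so the fold becomes
   zapnot (x, 0xfe), i.e. x with byte 0 cleared and the remaining
   bytes kept.  */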
6751 static tree
6752 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6754 switch (op_const)
6756 case 3:
6758 unsigned HOST_WIDE_INT l;
6759 HOST_WIDE_INT h;
6761 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6763 #if HOST_BITS_PER_WIDE_INT > 64
6764 # error fixme
6765 #endif
6767 return build_int_cst (long_integer_type_node, h);
6770 case 1:
6771 opint[1] = opint[0];
6772 /* FALLTHRU */
6773 case 2:
6774 /* Note that (X*1) >> 64 == 0. */
6775 if (opint[1] == 0 || opint[1] == 1)
6776 return build_int_cst (long_integer_type_node, 0);
6777 break;
6779 return NULL;
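/* Worked examples of the fold above: with both operands constant,
   __builtin_alpha_umulh (1UL << 63, 4) is the high 64 bits of the
   128-bit product 2^65, i.e. 2.  With only one constant operand the
   fold fires just for 0 and 1, since (X*0) >> 64 and (X*1) >> 64 are
   both zero.  */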
6782 static tree
6783 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6785 tree op0 = fold_convert (vtype, op[0]);
6786 tree op1 = fold_convert (vtype, op[1]);
6787 tree val = fold_build2 (code, vtype, op0, op1);
6788 return fold_convert (long_integer_type_node, val);
6791 static tree
6792 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6794 unsigned HOST_WIDE_INT temp = 0;
6795 int i;
6797 if (op_const != 3)
6798 return NULL;
6800 for (i = 0; i < 8; ++i)
6802 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6803 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6804 if (a >= b)
6805 temp += a - b;
6806 else
6807 temp += b - a;
6810 return build_int_cst (long_integer_type_node, temp);
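/* Worked example: PERR is a sum of absolute byte differences, so with
   both operands constant __builtin_alpha_perr (0x0102, 0x0201) folds
   to |0x02 - 0x01| + |0x01 - 0x02| = 2 (all other byte pairs are
   equal and contribute nothing).  */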
6813 static tree
6814 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6816 unsigned HOST_WIDE_INT temp;
6818 if (op_const == 0)
6819 return NULL;
6821 temp = opint[0] & 0xff;
6822 temp |= (opint[0] >> 24) & 0xff00;
6824 return build_int_cst (long_integer_type_node, temp);
6827 static tree
6828 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6830 unsigned HOST_WIDE_INT temp;
6832 if (op_const == 0)
6833 return NULL;
6835 temp = opint[0] & 0xff;
6836 temp |= (opint[0] >> 8) & 0xff00;
6837 temp |= (opint[0] >> 16) & 0xff0000;
6838 temp |= (opint[0] >> 24) & 0xff000000;
6840 return build_int_cst (long_integer_type_node, temp);
6843 static tree
6844 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6846 unsigned HOST_WIDE_INT temp;
6848 if (op_const == 0)
6849 return NULL;
6851 temp = opint[0] & 0xff;
6852 temp |= (opint[0] & 0xff00) << 24;
6854 return build_int_cst (long_integer_type_node, temp);
6857 static tree
6858 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6860 unsigned HOST_WIDE_INT temp;
6862 if (op_const == 0)
6863 return NULL;
6865 temp = opint[0] & 0xff;
6866 temp |= (opint[0] & 0x0000ff00) << 8;
6867 temp |= (opint[0] & 0x00ff0000) << 16;
6868 temp |= (opint[0] & 0xff000000) << 24;
6870 return build_int_cst (long_integer_type_node, temp);
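/* Worked example for the pack/unpack folds above:
   __builtin_alpha_unpkbw (0x44332211) spreads the four low bytes into
   separate 16-bit lanes, giving 0x0044003300220011, and
   __builtin_alpha_pkwb applied to that value packs them back down to
   0x44332211.  */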
6873 static tree
6874 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6876 unsigned HOST_WIDE_INT temp;
6878 if (op_const == 0)
6879 return NULL;
6881 if (opint[0] == 0)
6882 temp = 64;
6883 else
6884 temp = exact_log2 (opint[0] & -opint[0]);
6886 return build_int_cst (long_integer_type_node, temp);
6889 static tree
6890 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6892 unsigned HOST_WIDE_INT temp;
6894 if (op_const == 0)
6895 return NULL;
6897 if (opint[0] == 0)
6898 temp = 64;
6899 else
6900 temp = 64 - floor_log2 (opint[0]) - 1;
6902 return build_int_cst (long_integer_type_node, temp);
6905 static tree
6906 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6908 unsigned HOST_WIDE_INT temp, op;
6910 if (op_const == 0)
6911 return NULL;
6913 op = opint[0];
6914 temp = 0;
6915 while (op)
6916 temp++, op &= op - 1;
6918 return build_int_cst (long_integer_type_node, temp);
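/* Worked examples for the three counting folds above: for the input
   0x8, cttz folds to 3 and ctlz to 64 - 3 - 1 = 60; ctpop (0xf0)
   folds to 4.  A zero input folds to 64 for both cttz and ctlz.  */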
6921 /* Fold one of our builtin functions. */
6923 static tree
6924 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6926 tree op[MAX_ARGS], t;
6927 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6928 long op_const = 0, arity = 0;
6930 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6932 tree arg = TREE_VALUE (t);
6933 if (arg == error_mark_node)
6934 return NULL;
6935 if (arity >= MAX_ARGS)
6936 return NULL;
6938 op[arity] = arg;
6939 opint[arity] = 0;
6940 if (TREE_CODE (arg) == INTEGER_CST)
6942 op_const |= 1L << arity;
6943 opint[arity] = int_cst_value (arg);
6947 switch (DECL_FUNCTION_CODE (fndecl))
6949 case ALPHA_BUILTIN_CMPBGE:
6950 return alpha_fold_builtin_cmpbge (opint, op_const);
6952 case ALPHA_BUILTIN_EXTBL:
6953 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6954 case ALPHA_BUILTIN_EXTWL:
6955 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6956 case ALPHA_BUILTIN_EXTLL:
6957 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6958 case ALPHA_BUILTIN_EXTQL:
6959 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6960 case ALPHA_BUILTIN_EXTWH:
6961 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6962 case ALPHA_BUILTIN_EXTLH:
6963 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6964 case ALPHA_BUILTIN_EXTQH:
6965 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6967 case ALPHA_BUILTIN_INSBL:
6968 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6969 case ALPHA_BUILTIN_INSWL:
6970 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6971 case ALPHA_BUILTIN_INSLL:
6972 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6973 case ALPHA_BUILTIN_INSQL:
6974 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6975 case ALPHA_BUILTIN_INSWH:
6976 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6977 case ALPHA_BUILTIN_INSLH:
6978 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6979 case ALPHA_BUILTIN_INSQH:
6980 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6982 case ALPHA_BUILTIN_MSKBL:
6983 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6984 case ALPHA_BUILTIN_MSKWL:
6985 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6986 case ALPHA_BUILTIN_MSKLL:
6987 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6988 case ALPHA_BUILTIN_MSKQL:
6989 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6990 case ALPHA_BUILTIN_MSKWH:
6991 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6992 case ALPHA_BUILTIN_MSKLH:
6993 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6994 case ALPHA_BUILTIN_MSKQH:
6995 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6997 case ALPHA_BUILTIN_UMULH:
6998 return alpha_fold_builtin_umulh (opint, op_const);
7000 case ALPHA_BUILTIN_ZAP:
7001 opint[1] ^= 0xff;
7002 /* FALLTHRU */
7003 case ALPHA_BUILTIN_ZAPNOT:
7004 return alpha_fold_builtin_zapnot (op, opint, op_const);
7006 case ALPHA_BUILTIN_MINUB8:
7007 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7008 case ALPHA_BUILTIN_MINSB8:
7009 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7010 case ALPHA_BUILTIN_MINUW4:
7011 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7012 case ALPHA_BUILTIN_MINSW4:
7013 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7014 case ALPHA_BUILTIN_MAXUB8:
7015 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7016 case ALPHA_BUILTIN_MAXSB8:
7017 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7018 case ALPHA_BUILTIN_MAXUW4:
7019 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7020 case ALPHA_BUILTIN_MAXSW4:
7021 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7023 case ALPHA_BUILTIN_PERR:
7024 return alpha_fold_builtin_perr (opint, op_const);
7025 case ALPHA_BUILTIN_PKLB:
7026 return alpha_fold_builtin_pklb (opint, op_const);
7027 case ALPHA_BUILTIN_PKWB:
7028 return alpha_fold_builtin_pkwb (opint, op_const);
7029 case ALPHA_BUILTIN_UNPKBL:
7030 return alpha_fold_builtin_unpkbl (opint, op_const);
7031 case ALPHA_BUILTIN_UNPKBW:
7032 return alpha_fold_builtin_unpkbw (opint, op_const);
7034 case ALPHA_BUILTIN_CTTZ:
7035 return alpha_fold_builtin_cttz (opint, op_const);
7036 case ALPHA_BUILTIN_CTLZ:
7037 return alpha_fold_builtin_ctlz (opint, op_const);
7038 case ALPHA_BUILTIN_CTPOP:
7039 return alpha_fold_builtin_ctpop (opint, op_const);
7041 case ALPHA_BUILTIN_AMASK:
7042 case ALPHA_BUILTIN_IMPLVER:
7043 case ALPHA_BUILTIN_RPCC:
7044 case ALPHA_BUILTIN_THREAD_POINTER:
7045 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7046 /* None of these are foldable at compile-time. */
7047 default:
7048 return NULL;
7052 /* This page contains routines that are used to determine what the function
7053 prologue and epilogue code will do and write them out. */
7055 /* Compute the size of the save area in the stack. */
7057 /* These variables are used for communication between the following functions.
7058 They indicate various things about the current function being compiled
7059 that are used to tell what kind of prologue, epilogue and procedure
7060 descriptor to generate. */
7062 /* What kind of procedure (null, register frame or stack frame) we need. */
7063 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7064 static enum alpha_procedure_types alpha_procedure_type;
7066 /* Register number (either FP or SP) that is used to unwind the frame. */
7067 static int vms_unwind_regno;
7069 /* Register number used to save FP. We need not have one for RA since
7070 we don't modify it for register procedures. This is only defined
7071 for register frame procedures. */
7072 static int vms_save_fp_regno;
7074 /* Register number used to reference objects off our PV. */
7075 static int vms_base_regno;
7077 /* Compute register masks for saved registers. */
7079 static void
7080 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7082 unsigned long imask = 0;
7083 unsigned long fmask = 0;
7084 unsigned int i;
7086 /* When outputting a thunk, we don't have valid register life info,
7087 but assemble_start_function wants to output .frame and .mask
7088 directives. */
7089 if (current_function_is_thunk)
7091 *imaskP = 0;
7092 *fmaskP = 0;
7093 return;
7096 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7097 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7099 /* One for every register we have to save. */
7100 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7101 if (! fixed_regs[i] && ! call_used_regs[i]
7102 && df_regs_ever_live_p (i) && i != REG_RA
7103 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7105 if (i < 32)
7106 imask |= (1UL << i);
7107 else
7108 fmask |= (1UL << (i - 32));
7111 /* We need to restore these for the handler. */
7112 if (current_function_calls_eh_return)
7114 for (i = 0; ; ++i)
7116 unsigned regno = EH_RETURN_DATA_REGNO (i);
7117 if (regno == INVALID_REGNUM)
7118 break;
7119 imask |= 1UL << regno;
7123 /* If any register spilled, then spill the return address also. */
7124 /* ??? This is required by the Digital stack unwind specification
7125 and isn't needed if we're doing Dwarf2 unwinding. */
7126 if (imask || fmask || alpha_ra_ever_killed ())
7127 imask |= (1UL << REG_RA);
7129 *imaskP = imask;
7130 *fmaskP = fmask;
7134 alpha_sa_size (void)
7136 unsigned long mask[2];
7137 int sa_size = 0;
7138 int i, j;
7140 alpha_sa_mask (&mask[0], &mask[1]);
7142 if (TARGET_ABI_UNICOSMK)
7144 if (mask[0] || mask[1])
7145 sa_size = 14;
7147 else
7149 for (j = 0; j < 2; ++j)
7150 for (i = 0; i < 32; ++i)
7151 if ((mask[j] >> i) & 1)
7152 sa_size++;
7155 if (TARGET_ABI_UNICOSMK)
7157 /* We might not need to generate a frame if we don't make any calls
7158 (including calls to __T3E_MISMATCH if this is a vararg function),
7159 don't have any local variables which require stack slots, don't
7160 use alloca and have not determined that we need a frame for other
7161 reasons. */
7163 alpha_procedure_type
7164 = (sa_size || get_frame_size() != 0
7165 || current_function_outgoing_args_size
7166 || current_function_stdarg || current_function_calls_alloca
7167 || frame_pointer_needed)
7168 ? PT_STACK : PT_REGISTER;
7170 /* Always reserve space for saving callee-saved registers if we
7171 need a frame as required by the calling convention. */
7172 if (alpha_procedure_type == PT_STACK)
7173 sa_size = 14;
7175 else if (TARGET_ABI_OPEN_VMS)
7177 /* Start by assuming we can use a register procedure if we don't
7178 make any calls (REG_RA not used) or need to save any
7179 registers and a stack procedure if we do. */
7180 if ((mask[0] >> REG_RA) & 1)
7181 alpha_procedure_type = PT_STACK;
7182 else if (get_frame_size() != 0)
7183 alpha_procedure_type = PT_REGISTER;
7184 else
7185 alpha_procedure_type = PT_NULL;
7187 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7188 made the final decision on stack procedure vs register procedure. */
7189 if (alpha_procedure_type == PT_STACK)
7190 sa_size -= 2;
7192 /* Decide whether to refer to objects off our PV via FP or PV.
7193 If we need FP for something else or if we receive a nonlocal
7194 goto (which expects PV to contain the value), we must use PV.
7195 Otherwise, start by assuming we can use FP. */
7197 vms_base_regno
7198 = (frame_pointer_needed
7199 || current_function_has_nonlocal_label
7200 || alpha_procedure_type == PT_STACK
7201 || current_function_outgoing_args_size)
7202 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7204 /* If we want to copy PV into FP, we need to find some register
7205 in which to save FP. */
7207 vms_save_fp_regno = -1;
7208 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7209 for (i = 0; i < 32; i++)
7210 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7211 vms_save_fp_regno = i;
7213 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7214 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7215 else if (alpha_procedure_type == PT_NULL)
7216 vms_base_regno = REG_PV;
7218 /* Stack unwinding should be done via FP unless we use it for PV. */
7219 vms_unwind_regno = (vms_base_regno == REG_PV
7220 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7222 /* If this is a stack procedure, allow space for saving FP and RA. */
7223 if (alpha_procedure_type == PT_STACK)
7224 sa_size += 2;
7226 else
7228 /* Our size must be even (multiple of 16 bytes). */
7229 if (sa_size & 1)
7230 sa_size++;
7233 return sa_size * 8;
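/* Illustrative example for the OSF path above: if two call-saved
   integer registers are live, the RA slot is added as well, giving
   three 8-byte slots; the count is then rounded up to an even number
   (four) so the save area is a multiple of 16 bytes, and
   alpha_sa_size returns 32.  */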
7236 /* Define the offset between two registers, one to be eliminated,
7237 and the other its replacement, at the start of a routine. */
7239 HOST_WIDE_INT
7240 alpha_initial_elimination_offset (unsigned int from,
7241 unsigned int to ATTRIBUTE_UNUSED)
7243 HOST_WIDE_INT ret;
7245 ret = alpha_sa_size ();
7246 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7248 switch (from)
7250 case FRAME_POINTER_REGNUM:
7251 break;
7253 case ARG_POINTER_REGNUM:
7254 ret += (ALPHA_ROUND (get_frame_size ()
7255 + current_function_pretend_args_size)
7256 - current_function_pretend_args_size);
7257 break;
7259 default:
7260 gcc_unreachable ();
7263 return ret;
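/* Illustrative example (assuming ALPHA_ROUND rounds up to a multiple
   of 16 bytes): with a 32-byte save area, no outgoing args, no pretend
   args and a 40-byte local frame, the FRAME_POINTER elimination offset
   is 32, while the ARG_POINTER offset adds ALPHA_ROUND (40) = 48 for a
   total of 80.  */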
7267 alpha_pv_save_size (void)
7269 alpha_sa_size ();
7270 return alpha_procedure_type == PT_STACK ? 8 : 0;
7274 alpha_using_fp (void)
7276 alpha_sa_size ();
7277 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7280 #if TARGET_ABI_OPEN_VMS
7282 const struct attribute_spec vms_attribute_table[] =
7284 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7285 { "overlaid", 0, 0, true, false, false, NULL },
7286 { "global", 0, 0, true, false, false, NULL },
7287 { "initialize", 0, 0, true, false, false, NULL },
7288 { NULL, 0, 0, false, false, false, NULL }
7291 #endif
7293 static int
7294 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7296 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7300 alpha_find_lo_sum_using_gp (rtx insn)
7302 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7305 static int
7306 alpha_does_function_need_gp (void)
7308 rtx insn;
7310 /* The GP being variable is an OSF abi thing. */
7311 if (! TARGET_ABI_OSF)
7312 return 0;
7314 /* We need the gp to load the address of __mcount. */
7315 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7316 return 1;
7318 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7319 if (current_function_is_thunk)
7320 return 1;
7322 /* The nonlocal receiver pattern assumes that the gp is valid for
7323 the nested function. Reasonable because it's almost always set
7324 correctly already. For the cases where that's wrong, make sure
7325 the nested function loads its gp on entry. */
7326 if (current_function_has_nonlocal_goto)
7327 return 1;
7329 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7330 Even if we are a static function, we still need to do this in case
7331 our address is taken and passed to something like qsort. */
7333 push_topmost_sequence ();
7334 insn = get_insns ();
7335 pop_topmost_sequence ();
7337 for (; insn; insn = NEXT_INSN (insn))
7338 if (INSN_P (insn)
7339 && ! JUMP_TABLE_DATA_P (insn)
7340 && GET_CODE (PATTERN (insn)) != USE
7341 && GET_CODE (PATTERN (insn)) != CLOBBER
7342 && get_attr_usegp (insn))
7343 return 1;
7345 return 0;
7349 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7350 sequences. */
7352 static rtx
7353 set_frame_related_p (void)
7355 rtx seq = get_insns ();
7356 rtx insn;
7358 end_sequence ();
7360 if (!seq)
7361 return NULL_RTX;
7363 if (INSN_P (seq))
7365 insn = seq;
7366 while (insn != NULL_RTX)
7368 RTX_FRAME_RELATED_P (insn) = 1;
7369 insn = NEXT_INSN (insn);
7371 seq = emit_insn (seq);
7373 else
7375 seq = emit_insn (seq);
7376 RTX_FRAME_RELATED_P (seq) = 1;
7378 return seq;
7381 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7383 /* Generates a store with the proper unwind info attached. VALUE is
7384 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7385 contains SP+FRAME_BIAS, and that is the unwind info that should be
7386 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7387 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7389 static void
7390 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7391 HOST_WIDE_INT base_ofs, rtx frame_reg)
7393 rtx addr, mem, insn;
7395 addr = plus_constant (base_reg, base_ofs);
7396 mem = gen_rtx_MEM (DImode, addr);
7397 set_mem_alias_set (mem, alpha_sr_alias_set);
7399 insn = emit_move_insn (mem, value);
7400 RTX_FRAME_RELATED_P (insn) = 1;
7402 if (frame_bias || value != frame_reg)
7404 if (frame_bias)
7406 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7407 mem = gen_rtx_MEM (DImode, addr);
7410 REG_NOTES (insn)
7411 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7412 gen_rtx_SET (VOIDmode, mem, frame_reg),
7413 REG_NOTES (insn));
7417 static void
7418 emit_frame_store (unsigned int regno, rtx base_reg,
7419 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7421 rtx reg = gen_rtx_REG (DImode, regno);
7422 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7425 /* Write function prologue. */
7427 /* On vms we have two kinds of functions:
7429 - stack frame (PROC_STACK)
7430 these are 'normal' functions with local vars and which are
7431 calling other functions
7432 - register frame (PROC_REGISTER)
7433 keeps all data in registers, needs no stack
7435 We must pass this to the assembler so it can generate the
7436 proper pdsc (procedure descriptor)
7437 This is done with the '.pdesc' command.
7439 On non-VMS targets, we don't really differentiate between the two, as we can
7440 simply allocate stack without saving registers. */
7442 void
7443 alpha_expand_prologue (void)
7445 /* Registers to save. */
7446 unsigned long imask = 0;
7447 unsigned long fmask = 0;
7448 /* Stack space needed for pushing registers clobbered by us. */
7449 HOST_WIDE_INT sa_size;
7450 /* Complete stack size needed. */
7451 HOST_WIDE_INT frame_size;
7452 /* Offset from base reg to register save area. */
7453 HOST_WIDE_INT reg_offset;
7454 rtx sa_reg;
7455 int i;
7457 sa_size = alpha_sa_size ();
7459 frame_size = get_frame_size ();
7460 if (TARGET_ABI_OPEN_VMS)
7461 frame_size = ALPHA_ROUND (sa_size
7462 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7463 + frame_size
7464 + current_function_pretend_args_size);
7465 else if (TARGET_ABI_UNICOSMK)
7466 /* We have to allocate space for the DSIB if we generate a frame. */
7467 frame_size = ALPHA_ROUND (sa_size
7468 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7469 + ALPHA_ROUND (frame_size
7470 + current_function_outgoing_args_size);
7471 else
7472 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7473 + sa_size
7474 + ALPHA_ROUND (frame_size
7475 + current_function_pretend_args_size));
7477 if (TARGET_ABI_OPEN_VMS)
7478 reg_offset = 8;
7479 else
7480 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7482 alpha_sa_mask (&imask, &fmask);
7484 /* Emit an insn to reload GP, if needed. */
7485 if (TARGET_ABI_OSF)
7487 alpha_function_needs_gp = alpha_does_function_need_gp ();
7488 if (alpha_function_needs_gp)
7489 emit_insn (gen_prologue_ldgp ());
7492 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7493 the call to mcount ourselves, rather than having the linker do it
7494 magically in response to -pg. Since _mcount has special linkage,
7495 don't represent the call as a call. */
7496 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7497 emit_insn (gen_prologue_mcount ());
7499 if (TARGET_ABI_UNICOSMK)
7500 unicosmk_gen_dsib (&imask);
7502 /* Adjust the stack by the frame size. If the frame size is > 4096
7503 bytes, we need to be sure we probe somewhere in the first and last
7504 4096 bytes (we can probably get away without the latter test) and
7505 every 8192 bytes in between. If the frame size is > 32768, we
7506 do this in a loop. Otherwise, we generate the explicit probe
7507 instructions.
7509 Note that we are only allowed to adjust sp once in the prologue. */
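/* Illustrative example (ignoring the UNICOSMK +64 bias): for a
   20000-byte frame the code below emits probes at sp-4096 and
   sp-12288; since 20000 > 16384, a function that saves no registers
   also gets a final probe at sp-20000 before the single sp
   adjustment of -20000.  */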
7511 if (frame_size <= 32768)
7513 if (frame_size > 4096)
7515 int probed;
7517 for (probed = 4096; probed < frame_size; probed += 8192)
7518 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7519 ? -probed + 64
7520 : -probed)));
7522 /* We only have to do this probe if we aren't saving registers. */
7523 if (sa_size == 0 && frame_size > probed - 4096)
7524 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7527 if (frame_size != 0)
7528 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7529 GEN_INT (TARGET_ABI_UNICOSMK
7530 ? -frame_size + 64
7531 : -frame_size))));
7533 else
7535 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7536 number of 8192 byte blocks to probe. We then probe each block
7537 in the loop and then set SP to the proper location. If the
7538 amount remaining is > 4096, we have to do one more probe if we
7539 are not saving any registers. */
7541 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7542 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7543 rtx ptr = gen_rtx_REG (DImode, 22);
7544 rtx count = gen_rtx_REG (DImode, 23);
7545 rtx seq;
7547 emit_move_insn (count, GEN_INT (blocks));
7548 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7549 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7551 /* Because of the difficulty in emitting a new basic block this
7552 late in the compilation, generate the loop as a single insn. */
7553 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7555 if (leftover > 4096 && sa_size == 0)
7557 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7558 MEM_VOLATILE_P (last) = 1;
7559 emit_move_insn (last, const0_rtx);
7562 if (TARGET_ABI_WINDOWS_NT)
7564 /* For NT stack unwind (done by 'reverse execution'), it's
7565 not OK to take the result of a loop, even though the value
7566 is already in ptr, so we reload it via a single operation
7567 and subtract it from sp.
7569 Yes, that's correct -- we have to reload the whole constant
7570 into a temporary via ldah+lda then subtract from sp. */
7572 HOST_WIDE_INT lo, hi;
7573 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7574 hi = frame_size - lo;
7576 emit_move_insn (ptr, GEN_INT (hi));
7577 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7578 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7579 ptr));
7581 else
7583 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7584 GEN_INT (-leftover)));
7587 /* This alternative is special, because the DWARF code cannot
7588 possibly intuit through the loop above. So we invent this
7589 note for it to look at instead. */
7590 RTX_FRAME_RELATED_P (seq) = 1;
7591 REG_NOTES (seq)
7592 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7593 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7594 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7595 GEN_INT (TARGET_ABI_UNICOSMK
7596 ? -frame_size + 64
7597 : -frame_size))),
7598 REG_NOTES (seq));
7601 if (!TARGET_ABI_UNICOSMK)
7603 HOST_WIDE_INT sa_bias = 0;
7605 /* Cope with very large offsets to the register save area. */
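/* Illustrative example of the biasing below: with reg_offset == 0x9000
   and a small save area, the low 16 bits sign-extend to -0x7000, so
   sa_bias becomes 0x10000 and reg_offset becomes -0x7000; the saves
   then address $24 = sp + 0x10000 with displacements that fit in a
   signed 16-bit field.  */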
7606 sa_reg = stack_pointer_rtx;
7607 if (reg_offset + sa_size > 0x8000)
7609 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7610 rtx sa_bias_rtx;
7612 if (low + sa_size <= 0x8000)
7613 sa_bias = reg_offset - low, reg_offset = low;
7614 else
7615 sa_bias = reg_offset, reg_offset = 0;
7617 sa_reg = gen_rtx_REG (DImode, 24);
7618 sa_bias_rtx = GEN_INT (sa_bias);
7620 if (add_operand (sa_bias_rtx, DImode))
7621 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7622 else
7624 emit_move_insn (sa_reg, sa_bias_rtx);
7625 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7629 /* Save regs in stack order. Beginning with VMS PV. */
7630 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7631 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7633 /* Save register RA next. */
7634 if (imask & (1UL << REG_RA))
7636 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7637 imask &= ~(1UL << REG_RA);
7638 reg_offset += 8;
7641 /* Now save any other registers required to be saved. */
7642 for (i = 0; i < 31; i++)
7643 if (imask & (1UL << i))
7645 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7646 reg_offset += 8;
7649 for (i = 0; i < 31; i++)
7650 if (fmask & (1UL << i))
7652 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7653 reg_offset += 8;
7656 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7658 /* The standard frame on the T3E includes space for saving registers.
7659 We just have to use it. We don't have to save the return address and
7660 the old frame pointer here - they are saved in the DSIB. */
7662 reg_offset = -56;
7663 for (i = 9; i < 15; i++)
7664 if (imask & (1UL << i))
7666 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7667 reg_offset -= 8;
7669 for (i = 2; i < 10; i++)
7670 if (fmask & (1UL << i))
7672 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7673 reg_offset -= 8;
7677 if (TARGET_ABI_OPEN_VMS)
7679 if (alpha_procedure_type == PT_REGISTER)
7680 /* Register frame procedures save the fp.
7681 ?? Ought to have a dwarf2 save for this. */
7682 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7683 hard_frame_pointer_rtx);
7685 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7686 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7687 gen_rtx_REG (DImode, REG_PV)));
7689 if (alpha_procedure_type != PT_NULL
7690 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7691 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7693 /* If we have to allocate space for outgoing args, do it now. */
7694 if (current_function_outgoing_args_size != 0)
7696 rtx seq
7697 = emit_move_insn (stack_pointer_rtx,
7698 plus_constant
7699 (hard_frame_pointer_rtx,
7700 - (ALPHA_ROUND
7701 (current_function_outgoing_args_size))));
7703 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7704 if ! frame_pointer_needed. Setting the bit will change the CFA
7705 computation rule to use sp again, which would be wrong if we had
7706 frame_pointer_needed, as this means sp might move unpredictably
7707 later on.
7709 Also, note that
7710 frame_pointer_needed
7711 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7713 current_function_outgoing_args_size != 0
7714 => alpha_procedure_type != PT_NULL,
7716 so when we are not setting the bit here, we are guaranteed to
7717 have emitted an FRP frame pointer update just before. */
7718 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7721 else if (!TARGET_ABI_UNICOSMK)
7723 /* If we need a frame pointer, set it from the stack pointer. */
7724 if (frame_pointer_needed)
7726 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7727 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7728 else
7729 /* This must always be the last instruction in the
7730 prologue, thus we emit a special move + clobber. */
7731 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7732 stack_pointer_rtx, sa_reg)));
7736 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7737 the prologue, for exception handling reasons, we cannot do this for
7738 any insn that might fault. We could prevent this for mems with a
7739 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7740 have to prevent all such scheduling with a blockage.
7742 Linux, on the other hand, never bothered to implement OSF/1's
7743 exception handling, and so doesn't care about such things. Anyone
7744 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7746 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7747 emit_insn (gen_blockage ());
7750 /* Count the number of .file directives, so that .loc is up to date. */
7751 int num_source_filenames = 0;
7753 /* Output the textual info surrounding the prologue. */
7755 void
7756 alpha_start_function (FILE *file, const char *fnname,
7757 tree decl ATTRIBUTE_UNUSED)
7759 unsigned long imask = 0;
7760 unsigned long fmask = 0;
7761 /* Stack space needed for pushing registers clobbered by us. */
7762 HOST_WIDE_INT sa_size;
7763 /* Complete stack size needed. */
7764 unsigned HOST_WIDE_INT frame_size;
7765 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7766 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7767 ? 524288
7768 : 1UL << 31;
7769 /* Offset from base reg to register save area. */
7770 HOST_WIDE_INT reg_offset;
7771 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7772 int i;
7774 /* Don't emit an extern directive for functions defined in the same file. */
7775 if (TARGET_ABI_UNICOSMK)
7777 tree name_tree;
7778 name_tree = get_identifier (fnname);
7779 TREE_ASM_WRITTEN (name_tree) = 1;
7782 alpha_fnname = fnname;
7783 sa_size = alpha_sa_size ();
7785 frame_size = get_frame_size ();
7786 if (TARGET_ABI_OPEN_VMS)
7787 frame_size = ALPHA_ROUND (sa_size
7788 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7789 + frame_size
7790 + current_function_pretend_args_size);
7791 else if (TARGET_ABI_UNICOSMK)
7792 frame_size = ALPHA_ROUND (sa_size
7793 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7794 + ALPHA_ROUND (frame_size
7795 + current_function_outgoing_args_size);
7796 else
7797 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7798 + sa_size
7799 + ALPHA_ROUND (frame_size
7800 + current_function_pretend_args_size));
7802 if (TARGET_ABI_OPEN_VMS)
7803 reg_offset = 8;
7804 else
7805 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7807 alpha_sa_mask (&imask, &fmask);
7809 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7810 We have to do that before the .ent directive as we cannot switch
7811 files within procedures with native ecoff because line numbers are
7812 linked to procedure descriptors.
7813 Outputting the lineno helps debugging of one line functions as they
7814 would otherwise get no line number at all. Please note that we would
7815 like to put out last_linenum from final.c, but it is not accessible. */
7817 if (write_symbols == SDB_DEBUG)
7819 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7820 ASM_OUTPUT_SOURCE_FILENAME (file,
7821 DECL_SOURCE_FILE (current_function_decl));
7822 #endif
7823 #ifdef SDB_OUTPUT_SOURCE_LINE
7824 if (debug_info_level != DINFO_LEVEL_TERSE)
7825 SDB_OUTPUT_SOURCE_LINE (file,
7826 DECL_SOURCE_LINE (current_function_decl));
7827 #endif
7830 /* Issue function start and label. */
7831 if (TARGET_ABI_OPEN_VMS
7832 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7834 fputs ("\t.ent ", file);
7835 assemble_name (file, fnname);
7836 putc ('\n', file);
7838 /* If the function needs GP, we'll write the "..ng" label there.
7839 Otherwise, do it here. */
7840 if (TARGET_ABI_OSF
7841 && ! alpha_function_needs_gp
7842 && ! current_function_is_thunk)
7844 putc ('$', file);
7845 assemble_name (file, fnname);
7846 fputs ("..ng:\n", file);
7850 strcpy (entry_label, fnname);
7851 if (TARGET_ABI_OPEN_VMS)
7852 strcat (entry_label, "..en");
7854 /* For public functions, the label must be globalized by appending an
7855 additional colon. */
7856 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7857 strcat (entry_label, ":");
7859 ASM_OUTPUT_LABEL (file, entry_label);
7860 inside_function = TRUE;
7862 if (TARGET_ABI_OPEN_VMS)
7863 fprintf (file, "\t.base $%d\n", vms_base_regno);
7865 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7866 && !flag_inhibit_size_directive)
7868 /* Set flags in procedure descriptor to request IEEE-conformant
7869 math-library routines. The value we set it to is PDSC_EXC_IEEE
7870 (/usr/include/pdsc.h). */
7871 fputs ("\t.eflag 48\n", file);
7874 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7875 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7876 alpha_arg_offset = -frame_size + 48;
7878 /* Describe our frame. If the frame size is larger than an integer,
7879 print it as zero to avoid an assembler error. We won't be
7880 properly describing such a frame, but that's the best we can do. */
7881 if (TARGET_ABI_UNICOSMK)
7883 else if (TARGET_ABI_OPEN_VMS)
7884 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7885 HOST_WIDE_INT_PRINT_DEC "\n",
7886 vms_unwind_regno,
7887 frame_size >= (1UL << 31) ? 0 : frame_size,
7888 reg_offset);
7889 else if (!flag_inhibit_size_directive)
7890 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7891 (frame_pointer_needed
7892 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7893 frame_size >= max_frame_size ? 0 : frame_size,
7894 current_function_pretend_args_size);
7896 /* Describe which registers were spilled. */
7897 if (TARGET_ABI_UNICOSMK)
7899 else if (TARGET_ABI_OPEN_VMS)
7901 if (imask)
7902 /* ??? Does VMS care if mask contains ra? The old code didn't
7903 set it, so I don't here. */
7904 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7905 if (fmask)
7906 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7907 if (alpha_procedure_type == PT_REGISTER)
7908 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7910 else if (!flag_inhibit_size_directive)
7912 if (imask)
7914 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7915 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7917 for (i = 0; i < 32; ++i)
7918 if (imask & (1UL << i))
7919 reg_offset += 8;
7922 if (fmask)
7923 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7924 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7927 #if TARGET_ABI_OPEN_VMS
7928 /* Ifdef'ed because link_section is only available then. */
7929 switch_to_section (readonly_data_section);
7930 fprintf (file, "\t.align 3\n");
7931 assemble_name (file, fnname); fputs ("..na:\n", file);
7932 fputs ("\t.ascii \"", file);
7933 assemble_name (file, fnname);
7934 fputs ("\\0\"\n", file);
7935 alpha_need_linkage (fnname, 1);
7936 switch_to_section (text_section);
7937 #endif
7940 /* Emit the .prologue note at the scheduled end of the prologue. */
7942 static void
7943 alpha_output_function_end_prologue (FILE *file)
7945 if (TARGET_ABI_UNICOSMK)
7947 else if (TARGET_ABI_OPEN_VMS)
7948 fputs ("\t.prologue\n", file);
7949 else if (TARGET_ABI_WINDOWS_NT)
7950 fputs ("\t.prologue 0\n", file);
7951 else if (!flag_inhibit_size_directive)
7952 fprintf (file, "\t.prologue %d\n",
7953 alpha_function_needs_gp || current_function_is_thunk);
7956 /* Write function epilogue. */
7958 /* ??? At some point we will want to support full unwind, and so will
7959 need to mark the epilogue as well. At the moment, we just confuse
7960 dwarf2out. */
7961 #undef FRP
7962 #define FRP(exp) exp
7964 void
7965 alpha_expand_epilogue (void)
7967 /* Registers to save. */
7968 unsigned long imask = 0;
7969 unsigned long fmask = 0;
7970 /* Stack space needed for pushing registers clobbered by us. */
7971 HOST_WIDE_INT sa_size;
7972 /* Complete stack size needed. */
7973 HOST_WIDE_INT frame_size;
7974 /* Offset from base reg to register save area. */
7975 HOST_WIDE_INT reg_offset;
7976 int fp_is_frame_pointer, fp_offset;
7977 rtx sa_reg, sa_reg_exp = NULL;
7978 rtx sp_adj1, sp_adj2, mem;
7979 rtx eh_ofs;
7980 int i;
7982 sa_size = alpha_sa_size ();
7984 frame_size = get_frame_size ();
7985 if (TARGET_ABI_OPEN_VMS)
7986 frame_size = ALPHA_ROUND (sa_size
7987 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7988 + frame_size
7989 + current_function_pretend_args_size);
7990 else if (TARGET_ABI_UNICOSMK)
7991 frame_size = ALPHA_ROUND (sa_size
7992 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7993 + ALPHA_ROUND (frame_size
7994 + current_function_outgoing_args_size);
7995 else
7996 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7997 + sa_size
7998 + ALPHA_ROUND (frame_size
7999 + current_function_pretend_args_size));
8001 if (TARGET_ABI_OPEN_VMS)
8003 if (alpha_procedure_type == PT_STACK)
8004 reg_offset = 8;
8005 else
8006 reg_offset = 0;
8008 else
8009 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8011 alpha_sa_mask (&imask, &fmask);
8013 fp_is_frame_pointer
8014 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8015 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8016 fp_offset = 0;
8017 sa_reg = stack_pointer_rtx;
8019 if (current_function_calls_eh_return)
8020 eh_ofs = EH_RETURN_STACKADJ_RTX;
8021 else
8022 eh_ofs = NULL_RTX;
8024 if (!TARGET_ABI_UNICOSMK && sa_size)
8026 /* If we have a frame pointer, restore SP from it. */
8027 if ((TARGET_ABI_OPEN_VMS
8028 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8029 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8030 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8032 /* Cope with very large offsets to the register save area. */
8033 if (reg_offset + sa_size > 0x8000)
8035 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8036 HOST_WIDE_INT bias;
8038 if (low + sa_size <= 0x8000)
8039 bias = reg_offset - low, reg_offset = low;
8040 else
8041 bias = reg_offset, reg_offset = 0;
8043 sa_reg = gen_rtx_REG (DImode, 22);
8044 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8046 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8049 /* Restore registers in order, excepting a true frame pointer. */
8051 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8052 if (! eh_ofs)
8053 set_mem_alias_set (mem, alpha_sr_alias_set);
8054 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8056 reg_offset += 8;
8057 imask &= ~(1UL << REG_RA);
8059 for (i = 0; i < 31; ++i)
8060 if (imask & (1UL << i))
8062 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8063 fp_offset = reg_offset;
8064 else
8066 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8067 set_mem_alias_set (mem, alpha_sr_alias_set);
8068 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8070 reg_offset += 8;
8073 for (i = 0; i < 31; ++i)
8074 if (fmask & (1UL << i))
8076 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8077 set_mem_alias_set (mem, alpha_sr_alias_set);
8078 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8079 reg_offset += 8;
8082 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8084 /* Restore callee-saved general-purpose registers. */
8086 reg_offset = -56;
8088 for (i = 9; i < 15; i++)
8089 if (imask & (1UL << i))
8091 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8092 reg_offset));
8093 set_mem_alias_set (mem, alpha_sr_alias_set);
8094 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8095 reg_offset -= 8;
8098 for (i = 2; i < 10; i++)
8099 if (fmask & (1UL << i))
8101 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8102 reg_offset));
8103 set_mem_alias_set (mem, alpha_sr_alias_set);
8104 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8105 reg_offset -= 8;
8108 /* Restore the return address from the DSIB. */
8110 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8111 set_mem_alias_set (mem, alpha_sr_alias_set);
8112 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8115 if (frame_size || eh_ofs)
8117 sp_adj1 = stack_pointer_rtx;
8119 if (eh_ofs)
8121 sp_adj1 = gen_rtx_REG (DImode, 23);
8122 emit_move_insn (sp_adj1,
8123 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8126 /* If the stack size is large, begin computation into a temporary
8127 register so as not to interfere with a potential fp restore,
8128 which must be consecutive with an SP restore. */
8129 if (frame_size < 32768
8130 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8131 sp_adj2 = GEN_INT (frame_size);
8132 else if (TARGET_ABI_UNICOSMK)
8134 sp_adj1 = gen_rtx_REG (DImode, 23);
8135 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8136 sp_adj2 = const0_rtx;
8138 else if (frame_size < 0x40007fffL)
8140 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8142 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8143 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8144 sp_adj1 = sa_reg;
8145 else
8147 sp_adj1 = gen_rtx_REG (DImode, 23);
8148 FRP (emit_move_insn (sp_adj1, sp_adj2));
8150 sp_adj2 = GEN_INT (low);
8152 else
8154 rtx tmp = gen_rtx_REG (DImode, 23);
8155 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8156 3, false));
8157 if (!sp_adj2)
8159 /* We can't drop new things to memory this late, afaik,
8160 so build it up by pieces. */
8161 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8162 -(frame_size < 0)));
8163 gcc_assert (sp_adj2);
8167 /* From now on, things must be in order. So emit blockages. */
8169 /* Restore the frame pointer. */
8170 if (TARGET_ABI_UNICOSMK)
8172 emit_insn (gen_blockage ());
8173 mem = gen_rtx_MEM (DImode,
8174 plus_constant (hard_frame_pointer_rtx, -16));
8175 set_mem_alias_set (mem, alpha_sr_alias_set);
8176 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8178 else if (fp_is_frame_pointer)
8180 emit_insn (gen_blockage ());
8181 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8182 set_mem_alias_set (mem, alpha_sr_alias_set);
8183 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8185 else if (TARGET_ABI_OPEN_VMS)
8187 emit_insn (gen_blockage ());
8188 FRP (emit_move_insn (hard_frame_pointer_rtx,
8189 gen_rtx_REG (DImode, vms_save_fp_regno)));
8192 /* Restore the stack pointer. */
8193 emit_insn (gen_blockage ());
8194 if (sp_adj2 == const0_rtx)
8195 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8196 else
8197 FRP (emit_move_insn (stack_pointer_rtx,
8198 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8200 else
8202 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8204 emit_insn (gen_blockage ());
8205 FRP (emit_move_insn (hard_frame_pointer_rtx,
8206 gen_rtx_REG (DImode, vms_save_fp_regno)));
8208 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8210 /* Decrement the frame pointer if the function does not have a
8211 frame. */
8213 emit_insn (gen_blockage ());
8214 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8215 hard_frame_pointer_rtx, constm1_rtx)));
8220 /* Output the rest of the textual info surrounding the epilogue. */
8222 void
8223 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8225 rtx insn;
8227 /* We output a nop after noreturn calls at the very end of the function to
8228 ensure that the return address always remains in the caller's code range,
8229 as not doing so might confuse unwinding engines. */
8230 insn = get_last_insn ();
8231 if (!INSN_P (insn))
8232 insn = prev_active_insn (insn);
8233 if (GET_CODE (insn) == CALL_INSN)
8234 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8236 #if TARGET_ABI_OPEN_VMS
8237 alpha_write_linkage (file, fnname, decl);
8238 #endif
8240 /* End the function. */
8241 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8243 fputs ("\t.end ", file);
8244 assemble_name (file, fnname);
8245 putc ('\n', file);
8247 inside_function = FALSE;
8249 /* Output jump tables and the static subroutine information block. */
8250 if (TARGET_ABI_UNICOSMK)
8252 unicosmk_output_ssib (file, fnname);
8253 unicosmk_output_deferred_case_vectors (file);
8257 #if TARGET_ABI_OSF
8258 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8260 In order to avoid the hordes of differences between generated code
8261 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8262 lots of code loading up large constants, generate rtl and emit it
8263 instead of going straight to text.
8265 Not sure why this idea hasn't been explored before... */
8267 static void
8268 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8269 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8270 tree function)
8272 HOST_WIDE_INT hi, lo;
8273 rtx this, insn, funexp;
8275 /* We always require a valid GP. */
8276 emit_insn (gen_prologue_ldgp ());
8277 emit_note (NOTE_INSN_PROLOGUE_END);
8279 /* Find the "this" pointer. If the function returns a structure,
8280 the structure return pointer is in $16. */
8281 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8282 this = gen_rtx_REG (Pmode, 17);
8283 else
8284 this = gen_rtx_REG (Pmode, 16);
8286 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8287 entire constant for the add. */
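/* A worked example with hypothetical values (purely illustrative): for
   DELTA = 0x9000 the code below computes
       lo = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000
       hi = (((0x9000 - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000 = 0x10000
   so hi + lo == DELTA and the adjustment becomes two adds that fit the
   ldah/lda forms.  If DELTA does not survive the sign-extended 32-bit
   split, hi + lo != DELTA and the full constant is loaded into $0
   instead.  */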
8288 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8289 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8290 if (hi + lo == delta)
8292 if (hi)
8293 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8294 if (lo)
8295 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8297 else
8299 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8300 delta, -(delta < 0));
8301 emit_insn (gen_adddi3 (this, this, tmp));
8304 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8305 if (vcall_offset)
8307 rtx tmp, tmp2;
8309 tmp = gen_rtx_REG (Pmode, 0);
8310 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8312 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8313 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8314 if (hi + lo == vcall_offset)
8316 if (hi)
8317 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8319 else
8321 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8322 vcall_offset, -(vcall_offset < 0));
8323 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8324 lo = 0;
8326 if (lo)
8327 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8328 else
8329 tmp2 = tmp;
8330 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8332 emit_insn (gen_adddi3 (this, this, tmp));
8335 /* Generate a tail call to the target function. */
8336 if (! TREE_USED (function))
8338 assemble_external (function);
8339 TREE_USED (function) = 1;
8341 funexp = XEXP (DECL_RTL (function), 0);
8342 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8343 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8344 SIBLING_CALL_P (insn) = 1;
8346 /* Run just enough of rest_of_compilation to get the insns emitted.
8347 There's not really enough bulk here to make other passes such as
8348 instruction scheduling worthwhile. Note that use_thunk calls
8349 assemble_start_function and assemble_end_function. */
8350 insn = get_insns ();
8351 insn_locators_alloc ();
8352 shorten_branches (insn);
8353 final_start_function (insn, file, 1);
8354 final (insn, file, 1);
8355 final_end_function ();
8357 #endif /* TARGET_ABI_OSF */
8359 /* Debugging support. */
8361 #include "gstab.h"
8363 /* Count the number of sdb-related labels generated (to find block
8364 start and end boundaries). */
8366 int sdb_label_count = 0;
8368 /* Name of the file containing the current function. */
8370 static const char *current_function_file = "";
8372 /* Offsets to alpha virtual arg/local debugging pointers. */
8374 long alpha_arg_offset;
8375 long alpha_auto_offset;
8377 /* Emit a new filename to a stream. */
8379 void
8380 alpha_output_filename (FILE *stream, const char *name)
8382 static int first_time = TRUE;
8384 if (first_time)
8386 first_time = FALSE;
8387 ++num_source_filenames;
8388 current_function_file = name;
8389 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8390 output_quoted_string (stream, name);
8391 fprintf (stream, "\n");
8392 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8393 fprintf (stream, "\t#@stabs\n");
8396 else if (write_symbols == DBX_DEBUG)
8397 /* dbxout.c will emit an appropriate .stabs directive. */
8398 return;
8400 else if (name != current_function_file
8401 && strcmp (name, current_function_file) != 0)
8403 if (inside_function && ! TARGET_GAS)
8404 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8405 else
8407 ++num_source_filenames;
8408 current_function_file = name;
8409 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8412 output_quoted_string (stream, name);
8413 fprintf (stream, "\n");
8417 /* Structure to show the current status of registers and memory. */
8419 struct shadow_summary
8421 struct {
8422 unsigned int i : 31; /* Mask of int regs */
8423 unsigned int fp : 31; /* Mask of fp regs */
8424 unsigned int mem : 1; /* mem == imem | fpmem */
8425 } used, defd;
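/* As a concrete illustration of the encoding used by summarize_insn
   below: integer register $9 sets bit 9 of the `i' mask, floating-point
   register $f9 (hard regno 41) sets bit 9 of the `fp' mask, and the
   always-zero registers 31 and 63 are never recorded.  */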
8428 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8429 to the summary structure. SET is nonzero if the insn is setting the
8430 object, otherwise zero. */
8432 static void
8433 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8435 const char *format_ptr;
8436 int i, j;
8438 if (x == 0)
8439 return;
8441 switch (GET_CODE (x))
8443 /* ??? Note that this case would be incorrect if the Alpha had a
8444 ZERO_EXTRACT in SET_DEST. */
8445 case SET:
8446 summarize_insn (SET_SRC (x), sum, 0);
8447 summarize_insn (SET_DEST (x), sum, 1);
8448 break;
8450 case CLOBBER:
8451 summarize_insn (XEXP (x, 0), sum, 1);
8452 break;
8454 case USE:
8455 summarize_insn (XEXP (x, 0), sum, 0);
8456 break;
8458 case ASM_OPERANDS:
8459 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8460 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8461 break;
8463 case PARALLEL:
8464 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8465 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8466 break;
8468 case SUBREG:
8469 summarize_insn (SUBREG_REG (x), sum, 0);
8470 break;
8472 case REG:
8474 int regno = REGNO (x);
8475 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8477 if (regno == 31 || regno == 63)
8478 break;
8480 if (set)
8482 if (regno < 32)
8483 sum->defd.i |= mask;
8484 else
8485 sum->defd.fp |= mask;
8487 else
8489 if (regno < 32)
8490 sum->used.i |= mask;
8491 else
8492 sum->used.fp |= mask;
8495 break;
8497 case MEM:
8498 if (set)
8499 sum->defd.mem = 1;
8500 else
8501 sum->used.mem = 1;
8503 /* Find the regs used in memory address computation: */
8504 summarize_insn (XEXP (x, 0), sum, 0);
8505 break;
8507 case CONST_INT: case CONST_DOUBLE:
8508 case SYMBOL_REF: case LABEL_REF: case CONST:
8509 case SCRATCH: case ASM_INPUT:
8510 break;
8512 /* Handle common unary and binary ops for efficiency. */
8513 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8514 case MOD: case UDIV: case UMOD: case AND: case IOR:
8515 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8516 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8517 case NE: case EQ: case GE: case GT: case LE:
8518 case LT: case GEU: case GTU: case LEU: case LTU:
8519 summarize_insn (XEXP (x, 0), sum, 0);
8520 summarize_insn (XEXP (x, 1), sum, 0);
8521 break;
8523 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8524 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8525 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8526 case SQRT: case FFS:
8527 summarize_insn (XEXP (x, 0), sum, 0);
8528 break;
8530 default:
8531 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8532 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8533 switch (format_ptr[i])
8535 case 'e':
8536 summarize_insn (XEXP (x, i), sum, 0);
8537 break;
8539 case 'E':
8540 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8541 summarize_insn (XVECEXP (x, i, j), sum, 0);
8542 break;
8544 case 'i':
8545 break;
8547 default:
8548 gcc_unreachable ();
8553 /* Ensure a sufficient number of `trapb' insns are in the code when
8554 the user requests code with a trap precision of functions or
8555 instructions.
8557 In naive mode, when the user requests a trap-precision of
8558 "instruction", a trapb is needed after every instruction that may
8559 generate a trap. This ensures that the code is resumption safe but
8560 it is also slow.
8562 When optimizations are turned on, we delay issuing a trapb as long
8563 as possible. In this context, a trap shadow is the sequence of
8564 instructions that starts with a (potentially) trap generating
8565 instruction and extends to the next trapb or call_pal instruction
8566 (but GCC never generates call_pal by itself). We can delay (and
8567 therefore sometimes omit) a trapb subject to the following
8568 conditions:
8570 (a) On entry to the trap shadow, if any Alpha register or memory
8571 location contains a value that is used as an operand value by some
8572 instruction in the trap shadow (live on entry), then no instruction
8573 in the trap shadow may modify the register or memory location.
8575 (b) Within the trap shadow, the computation of the base register
8576 for a memory load or store instruction may not involve using the
8577 result of an instruction that might generate an UNPREDICTABLE
8578 result.
8580 (c) Within the trap shadow, no register may be used more than once
8581 as a destination register. (This is to make life easier for the
8582 trap-handler.)
8584 (d) The trap shadow may not include any branch instructions. */
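/* A hypothetical sketch (not actual compiler output) of rule (c) closing
   a shadow under -mtrap-precision=i with optimization:

       addt $f1,$f2,$f10    # may trap; the shadow opens here
       mult $f3,$f4,$f11    # fine: fresh destination, no operand clobbered
       trapb                # inserted because the next insn would
       addt $f10,$f5,$f10   #   redefine $f10 within the shadow

   alpha_handle_trap_shadows emits the trapb immediately before the
   offending insn and then begins tracking a new shadow.  */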
8586 static void
8587 alpha_handle_trap_shadows (void)
8589 struct shadow_summary shadow;
8590 int trap_pending, exception_nesting;
8591 rtx i, n;
8593 trap_pending = 0;
8594 exception_nesting = 0;
8595 shadow.used.i = 0;
8596 shadow.used.fp = 0;
8597 shadow.used.mem = 0;
8598 shadow.defd = shadow.used;
8600 for (i = get_insns (); i ; i = NEXT_INSN (i))
8602 if (GET_CODE (i) == NOTE)
8604 switch (NOTE_KIND (i))
8606 case NOTE_INSN_EH_REGION_BEG:
8607 exception_nesting++;
8608 if (trap_pending)
8609 goto close_shadow;
8610 break;
8612 case NOTE_INSN_EH_REGION_END:
8613 exception_nesting--;
8614 if (trap_pending)
8615 goto close_shadow;
8616 break;
8618 case NOTE_INSN_EPILOGUE_BEG:
8619 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8620 goto close_shadow;
8621 break;
8624 else if (trap_pending)
8626 if (alpha_tp == ALPHA_TP_FUNC)
8628 if (GET_CODE (i) == JUMP_INSN
8629 && GET_CODE (PATTERN (i)) == RETURN)
8630 goto close_shadow;
8632 else if (alpha_tp == ALPHA_TP_INSN)
8634 if (optimize > 0)
8636 struct shadow_summary sum;
8638 sum.used.i = 0;
8639 sum.used.fp = 0;
8640 sum.used.mem = 0;
8641 sum.defd = sum.used;
8643 switch (GET_CODE (i))
8645 case INSN:
8646 /* Annoyingly, get_attr_trap will die on these. */
8647 if (GET_CODE (PATTERN (i)) == USE
8648 || GET_CODE (PATTERN (i)) == CLOBBER)
8649 break;
8651 summarize_insn (PATTERN (i), &sum, 0);
8653 if ((sum.defd.i & shadow.defd.i)
8654 || (sum.defd.fp & shadow.defd.fp))
8656 /* (c) would be violated */
8657 goto close_shadow;
8660 /* Combine shadow with summary of current insn: */
8661 shadow.used.i |= sum.used.i;
8662 shadow.used.fp |= sum.used.fp;
8663 shadow.used.mem |= sum.used.mem;
8664 shadow.defd.i |= sum.defd.i;
8665 shadow.defd.fp |= sum.defd.fp;
8666 shadow.defd.mem |= sum.defd.mem;
8668 if ((sum.defd.i & shadow.used.i)
8669 || (sum.defd.fp & shadow.used.fp)
8670 || (sum.defd.mem & shadow.used.mem))
8672 /* (a) would be violated (also takes care of (b)) */
8673 gcc_assert (get_attr_trap (i) != TRAP_YES
8674 || (!(sum.defd.i & sum.used.i)
8675 && !(sum.defd.fp & sum.used.fp)));
8677 goto close_shadow;
8679 break;
8681 case JUMP_INSN:
8682 case CALL_INSN:
8683 case CODE_LABEL:
8684 goto close_shadow;
8686 default:
8687 gcc_unreachable ();
8690 else
8692 close_shadow:
8693 n = emit_insn_before (gen_trapb (), i);
8694 PUT_MODE (n, TImode);
8695 PUT_MODE (i, TImode);
8696 trap_pending = 0;
8697 shadow.used.i = 0;
8698 shadow.used.fp = 0;
8699 shadow.used.mem = 0;
8700 shadow.defd = shadow.used;
8705 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8706 && GET_CODE (i) == INSN
8707 && GET_CODE (PATTERN (i)) != USE
8708 && GET_CODE (PATTERN (i)) != CLOBBER
8709 && get_attr_trap (i) == TRAP_YES)
8711 if (optimize && !trap_pending)
8712 summarize_insn (PATTERN (i), &shadow, 0);
8713 trap_pending = 1;
8718 /* Alpha can only issue instruction groups simultaneously if they are
8719 suitably aligned. This is very processor-specific. */
8720 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8721 that are marked "fake". These instructions do not exist on that target,
8722 but it is possible to see these insns with deranged combinations of
8723 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8724 choose a result at random. */
8726 enum alphaev4_pipe {
8727 EV4_STOP = 0,
8728 EV4_IB0 = 1,
8729 EV4_IB1 = 2,
8730 EV4_IBX = 4
8733 enum alphaev5_pipe {
8734 EV5_STOP = 0,
8735 EV5_NONE = 1,
8736 EV5_E01 = 2,
8737 EV5_E0 = 4,
8738 EV5_E1 = 8,
8739 EV5_FAM = 16,
8740 EV5_FA = 32,
8741 EV5_FM = 64
8744 static enum alphaev4_pipe
8745 alphaev4_insn_pipe (rtx insn)
8747 if (recog_memoized (insn) < 0)
8748 return EV4_STOP;
8749 if (get_attr_length (insn) != 4)
8750 return EV4_STOP;
8752 switch (get_attr_type (insn))
8754 case TYPE_ILD:
8755 case TYPE_LDSYM:
8756 case TYPE_FLD:
8757 case TYPE_LD_L:
8758 return EV4_IBX;
8760 case TYPE_IADD:
8761 case TYPE_ILOG:
8762 case TYPE_ICMOV:
8763 case TYPE_ICMP:
8764 case TYPE_FST:
8765 case TYPE_SHIFT:
8766 case TYPE_IMUL:
8767 case TYPE_FBR:
8768 case TYPE_MVI: /* fake */
8769 return EV4_IB0;
8771 case TYPE_IST:
8772 case TYPE_MISC:
8773 case TYPE_IBR:
8774 case TYPE_JSR:
8775 case TYPE_CALLPAL:
8776 case TYPE_FCPYS:
8777 case TYPE_FCMOV:
8778 case TYPE_FADD:
8779 case TYPE_FDIV:
8780 case TYPE_FMUL:
8781 case TYPE_ST_C:
8782 case TYPE_MB:
8783 case TYPE_FSQRT: /* fake */
8784 case TYPE_FTOI: /* fake */
8785 case TYPE_ITOF: /* fake */
8786 return EV4_IB1;
8788 default:
8789 gcc_unreachable ();
8793 static enum alphaev5_pipe
8794 alphaev5_insn_pipe (rtx insn)
8796 if (recog_memoized (insn) < 0)
8797 return EV5_STOP;
8798 if (get_attr_length (insn) != 4)
8799 return EV5_STOP;
8801 switch (get_attr_type (insn))
8803 case TYPE_ILD:
8804 case TYPE_FLD:
8805 case TYPE_LDSYM:
8806 case TYPE_IADD:
8807 case TYPE_ILOG:
8808 case TYPE_ICMOV:
8809 case TYPE_ICMP:
8810 return EV5_E01;
8812 case TYPE_IST:
8813 case TYPE_FST:
8814 case TYPE_SHIFT:
8815 case TYPE_IMUL:
8816 case TYPE_MISC:
8817 case TYPE_MVI:
8818 case TYPE_LD_L:
8819 case TYPE_ST_C:
8820 case TYPE_MB:
8821 case TYPE_FTOI: /* fake */
8822 case TYPE_ITOF: /* fake */
8823 return EV5_E0;
8825 case TYPE_IBR:
8826 case TYPE_JSR:
8827 case TYPE_CALLPAL:
8828 return EV5_E1;
8830 case TYPE_FCPYS:
8831 return EV5_FAM;
8833 case TYPE_FBR:
8834 case TYPE_FCMOV:
8835 case TYPE_FADD:
8836 case TYPE_FDIV:
8837 case TYPE_FSQRT: /* fake */
8838 return EV5_FA;
8840 case TYPE_FMUL:
8841 return EV5_FM;
8843 default:
8844 gcc_unreachable ();
8848 /* IN_USE is a mask of the slots currently filled within the insn group.
8849 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8850 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8852 LEN is, of course, the length of the group in bytes. */
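/* For instance (a hypothetical sequence, ignoring alignment): a load
   classified EV4_IBX followed by an add classified EV4_IB0 first records
   IB0|IBX; when the add also asks for IB0, the IBX bit lets us assume the
   hardware swaps the load into IB1, so both insns still dual-issue and
   IN_USE ends up as IB0|IB1|IBX.  */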
8854 static rtx
8855 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8857 int len, in_use;
8859 len = in_use = 0;
8861 if (! INSN_P (insn)
8862 || GET_CODE (PATTERN (insn)) == CLOBBER
8863 || GET_CODE (PATTERN (insn)) == USE)
8864 goto next_and_done;
8866 while (1)
8868 enum alphaev4_pipe pipe;
8870 pipe = alphaev4_insn_pipe (insn);
8871 switch (pipe)
8873 case EV4_STOP:
8874 /* Force complex instructions to start new groups. */
8875 if (in_use)
8876 goto done;
8878 /* If this is a completely unrecognized insn, it's an asm.
8879 We don't know how long it is, so record length as -1 to
8880 signal a needed realignment. */
8881 if (recog_memoized (insn) < 0)
8882 len = -1;
8883 else
8884 len = get_attr_length (insn);
8885 goto next_and_done;
8887 case EV4_IBX:
8888 if (in_use & EV4_IB0)
8890 if (in_use & EV4_IB1)
8891 goto done;
8892 in_use |= EV4_IB1;
8894 else
8895 in_use |= EV4_IB0 | EV4_IBX;
8896 break;
8898 case EV4_IB0:
8899 if (in_use & EV4_IB0)
8901 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8902 goto done;
8903 in_use |= EV4_IB1;
8905 in_use |= EV4_IB0;
8906 break;
8908 case EV4_IB1:
8909 if (in_use & EV4_IB1)
8910 goto done;
8911 in_use |= EV4_IB1;
8912 break;
8914 default:
8915 gcc_unreachable ();
8917 len += 4;
8919 /* Haifa doesn't do well scheduling branches. */
8920 if (GET_CODE (insn) == JUMP_INSN)
8921 goto next_and_done;
8923 next:
8924 insn = next_nonnote_insn (insn);
8926 if (!insn || ! INSN_P (insn))
8927 goto done;
8929 /* Let Haifa tell us where it thinks insn group boundaries are. */
8930 if (GET_MODE (insn) == TImode)
8931 goto done;
8933 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8934 goto next;
8937 next_and_done:
8938 insn = next_nonnote_insn (insn);
8940 done:
8941 *plen = len;
8942 *pin_use = in_use;
8943 return insn;
8946 /* IN_USE is a mask of the slots currently filled within the insn group.
8947 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8948 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8950 LEN is, of course, the length of the group in bytes. */
8952 static rtx
8953 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8955 int len, in_use;
8957 len = in_use = 0;
8959 if (! INSN_P (insn)
8960 || GET_CODE (PATTERN (insn)) == CLOBBER
8961 || GET_CODE (PATTERN (insn)) == USE)
8962 goto next_and_done;
8964 while (1)
8966 enum alphaev5_pipe pipe;
8968 pipe = alphaev5_insn_pipe (insn);
8969 switch (pipe)
8971 case EV5_STOP:
8972 /* Force complex instructions to start new groups. */
8973 if (in_use)
8974 goto done;
8976 /* If this is a completely unrecognized insn, it's an asm.
8977 We don't know how long it is, so record length as -1 to
8978 signal a needed realignment. */
8979 if (recog_memoized (insn) < 0)
8980 len = -1;
8981 else
8982 len = get_attr_length (insn);
8983 goto next_and_done;
8985 /* ??? At most of the points below we would like to assert that these
8986 situations never happen, as they would indicate an error either in
8987 Haifa or in the scheduling description. Unfortunately, Haifa never
8988 schedules the last instruction of the BB, so we don't have
8989 an accurate TI bit to go off. */
8990 case EV5_E01:
8991 if (in_use & EV5_E0)
8993 if (in_use & EV5_E1)
8994 goto done;
8995 in_use |= EV5_E1;
8997 else
8998 in_use |= EV5_E0 | EV5_E01;
8999 break;
9001 case EV5_E0:
9002 if (in_use & EV5_E0)
9004 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9005 goto done;
9006 in_use |= EV5_E1;
9008 in_use |= EV5_E0;
9009 break;
9011 case EV5_E1:
9012 if (in_use & EV5_E1)
9013 goto done;
9014 in_use |= EV5_E1;
9015 break;
9017 case EV5_FAM:
9018 if (in_use & EV5_FA)
9020 if (in_use & EV5_FM)
9021 goto done;
9022 in_use |= EV5_FM;
9024 else
9025 in_use |= EV5_FA | EV5_FAM;
9026 break;
9028 case EV5_FA:
9029 if (in_use & EV5_FA)
9030 goto done;
9031 in_use |= EV5_FA;
9032 break;
9034 case EV5_FM:
9035 if (in_use & EV5_FM)
9036 goto done;
9037 in_use |= EV5_FM;
9038 break;
9040 case EV5_NONE:
9041 break;
9043 default:
9044 gcc_unreachable ();
9046 len += 4;
9048 /* Haifa doesn't do well scheduling branches. */
9049 /* ??? If this is predicted not-taken, slotting continues, except
9050 that no more IBR, FBR, or JSR insns may be slotted. */
9051 if (GET_CODE (insn) == JUMP_INSN)
9052 goto next_and_done;
9054 next:
9055 insn = next_nonnote_insn (insn);
9057 if (!insn || ! INSN_P (insn))
9058 goto done;
9060 /* Let Haifa tell us where it thinks insn group boundaries are. */
9061 if (GET_MODE (insn) == TImode)
9062 goto done;
9064 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9065 goto next;
9068 next_and_done:
9069 insn = next_nonnote_insn (insn);
9071 done:
9072 *plen = len;
9073 *pin_use = in_use;
9074 return insn;
9077 static rtx
9078 alphaev4_next_nop (int *pin_use)
9080 int in_use = *pin_use;
9081 rtx nop;
9083 if (!(in_use & EV4_IB0))
9085 in_use |= EV4_IB0;
9086 nop = gen_nop ();
9088 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9090 in_use |= EV4_IB1;
9091 nop = gen_nop ();
9093 else if (TARGET_FP && !(in_use & EV4_IB1))
9095 in_use |= EV4_IB1;
9096 nop = gen_fnop ();
9098 else
9099 nop = gen_unop ();
9101 *pin_use = in_use;
9102 return nop;
9105 static rtx
9106 alphaev5_next_nop (int *pin_use)
9108 int in_use = *pin_use;
9109 rtx nop;
9111 if (!(in_use & EV5_E1))
9113 in_use |= EV5_E1;
9114 nop = gen_nop ();
9116 else if (TARGET_FP && !(in_use & EV5_FA))
9118 in_use |= EV5_FA;
9119 nop = gen_fnop ();
9121 else if (TARGET_FP && !(in_use & EV5_FM))
9123 in_use |= EV5_FM;
9124 nop = gen_fnop ();
9126 else
9127 nop = gen_unop ();
9129 *pin_use = in_use;
9130 return nop;
9133 /* The instruction group alignment main loop. */
9135 static void
9136 alpha_align_insns (unsigned int max_align,
9137 rtx (*next_group) (rtx, int *, int *),
9138 rtx (*next_nop) (int *))
9140 /* ALIGN is the known alignment for the insn group. */
9141 unsigned int align;
9142 /* OFS is the offset of the current insn in the insn group. */
9143 int ofs;
9144 int prev_in_use, in_use, len, ldgp;
9145 rtx i, next;
9147 /* Let shorten branches care for assigning alignments to code labels. */
9148 shorten_branches (get_insns ());
9150 if (align_functions < 4)
9151 align = 4;
9152 else if ((unsigned int) align_functions < max_align)
9153 align = align_functions;
9154 else
9155 align = max_align;
9157 ofs = prev_in_use = 0;
9158 i = get_insns ();
9159 if (GET_CODE (i) == NOTE)
9160 i = next_nonnote_insn (i);
9162 ldgp = alpha_function_needs_gp ? 8 : 0;
9164 while (i)
9166 next = (*next_group) (i, &in_use, &len);
9168 /* When we see a label, resync alignment etc. */
9169 if (GET_CODE (i) == CODE_LABEL)
9171 unsigned int new_align = 1 << label_to_alignment (i);
9173 if (new_align >= align)
9175 align = new_align < max_align ? new_align : max_align;
9176 ofs = 0;
9179 else if (ofs & (new_align-1))
9180 ofs = (ofs | (new_align-1)) + 1;
9181 gcc_assert (!len);
9184 /* Handle complex instructions specially. */
9185 else if (in_use == 0)
9187 /* Asms will have length < 0. This is a signal that we have
9188 lost alignment knowledge. Assume, however, that the asm
9189 will not mis-align instructions. */
9190 if (len < 0)
9192 ofs = 0;
9193 align = 4;
9194 len = 0;
9198 /* If the known alignment is smaller than the recognized insn group,
9199 realign the output. */
9200 else if ((int) align < len)
9202 unsigned int new_log_align = len > 8 ? 4 : 3;
9203 rtx prev, where;
9205 where = prev = prev_nonnote_insn (i);
9206 if (!where || GET_CODE (where) != CODE_LABEL)
9207 where = i;
9209 /* Can't realign between a call and its gp reload. */
9210 if (! (TARGET_EXPLICIT_RELOCS
9211 && prev && GET_CODE (prev) == CALL_INSN))
9213 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9214 align = 1 << new_log_align;
9215 ofs = 0;
9219 /* We may not insert padding inside the initial ldgp sequence. */
9220 else if (ldgp > 0)
9221 ldgp -= len;
9223 /* If the group won't fit in the same INT16 as the previous,
9224 we need to add padding to keep the group together. Rather
9225 than simply leaving the insn filling to the assembler, we
9226 can make use of the knowledge of what sorts of instructions
9227 were issued in the previous group to make sure that all of
9228 the added nops are really free. */
9229 else if (ofs + len > (int) align)
9231 int nop_count = (align - ofs) / 4;
9232 rtx where;
9234 /* Insert nops before labels, branches, and calls to truly merge
9235 the execution of the nops with the previous instruction group. */
9236 where = prev_nonnote_insn (i);
9237 if (where)
9239 if (GET_CODE (where) == CODE_LABEL)
9241 rtx where2 = prev_nonnote_insn (where);
9242 if (where2 && GET_CODE (where2) == JUMP_INSN)
9243 where = where2;
9245 else if (GET_CODE (where) == INSN)
9246 where = i;
9248 else
9249 where = i;
9252 emit_insn_before ((*next_nop)(&prev_in_use), where);
9253 while (--nop_count);
9254 ofs = 0;
9257 ofs = (ofs + len) & (align - 1);
9258 prev_in_use = in_use;
9259 i = next;
9263 /* Machine dependent reorg pass. */
9265 static void
9266 alpha_reorg (void)
9268 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9269 alpha_handle_trap_shadows ();
9271 /* Due to the number of extra trapb insns, don't bother fixing up
9272 alignment when trap precision is instruction. Moreover, we can
9273 only do our job when sched2 is run. */
9274 if (optimize && !optimize_size
9275 && alpha_tp != ALPHA_TP_INSN
9276 && flag_schedule_insns_after_reload)
9278 if (alpha_tune == PROCESSOR_EV4)
9279 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9280 else if (alpha_tune == PROCESSOR_EV5)
9281 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9285 #if !TARGET_ABI_UNICOSMK
9287 #ifdef HAVE_STAMP_H
9288 #include <stamp.h>
9289 #endif
9291 static void
9292 alpha_file_start (void)
9294 #ifdef OBJECT_FORMAT_ELF
9295 /* If emitting dwarf2 debug information, we cannot generate a .file
9296 directive to start the file, as it will conflict with dwarf2out
9297 file numbers. So it's only useful when emitting mdebug output. */
9298 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9299 #endif
9301 default_file_start ();
9302 #ifdef MS_STAMP
9303 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9304 #endif
9306 fputs ("\t.set noreorder\n", asm_out_file);
9307 fputs ("\t.set volatile\n", asm_out_file);
9308 if (!TARGET_ABI_OPEN_VMS)
9309 fputs ("\t.set noat\n", asm_out_file);
9310 if (TARGET_EXPLICIT_RELOCS)
9311 fputs ("\t.set nomacro\n", asm_out_file);
9312 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9314 const char *arch;
9316 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9317 arch = "ev6";
9318 else if (TARGET_MAX)
9319 arch = "pca56";
9320 else if (TARGET_BWX)
9321 arch = "ev56";
9322 else if (alpha_cpu == PROCESSOR_EV5)
9323 arch = "ev5";
9324 else
9325 arch = "ev4";
9327 fprintf (asm_out_file, "\t.arch %s\n", arch);
9330 #endif
9332 #ifdef OBJECT_FORMAT_ELF
9333 /* Since we don't have a .dynbss section, we should not allow global
9334 relocations in the .rodata section. */
9336 static int
9337 alpha_elf_reloc_rw_mask (void)
9339 return flag_pic ? 3 : 2;
9342 /* Return a section for X. The only special thing we do here is to
9343 honor small data. */
9345 static section *
9346 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9347 unsigned HOST_WIDE_INT align)
9349 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9350 /* ??? Consider using mergeable sdata sections. */
9351 return sdata_section;
9352 else
9353 return default_elf_select_rtx_section (mode, x, align);
9356 static unsigned int
9357 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9359 unsigned int flags = 0;
9361 if (strcmp (name, ".sdata") == 0
9362 || strncmp (name, ".sdata.", 7) == 0
9363 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9364 || strcmp (name, ".sbss") == 0
9365 || strncmp (name, ".sbss.", 6) == 0
9366 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9367 flags = SECTION_SMALL;
9369 flags |= default_section_type_flags (decl, name, reloc);
9370 return flags;
9372 #endif /* OBJECT_FORMAT_ELF */
9374 /* Structure to collect function names for final output in link section. */
9375 /* Note that items marked with GTY can't be ifdef'ed out. */
9377 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9378 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9380 struct alpha_links GTY(())
9382 int num;
9383 rtx linkage;
9384 enum links_kind lkind;
9385 enum reloc_kind rkind;
9388 struct alpha_funcs GTY(())
9390 int num;
9391 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9392 links;
9395 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9396 splay_tree alpha_links_tree;
9397 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9398 splay_tree alpha_funcs_tree;
9400 static GTY(()) int alpha_funcs_num;
9402 #if TARGET_ABI_OPEN_VMS
9404 /* Return the VMS argument type corresponding to MODE. */
9406 enum avms_arg_type
9407 alpha_arg_type (enum machine_mode mode)
9409 switch (mode)
9411 case SFmode:
9412 return TARGET_FLOAT_VAX ? FF : FS;
9413 case DFmode:
9414 return TARGET_FLOAT_VAX ? FD : FT;
9415 default:
9416 return I64;
9420 /* Return an rtx for an integer representing the VMS Argument Information
9421 register value. */
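/* A sketch of the packing done below: REGVAL starts as the argument count
   from CUM, and the avms_arg_type code of each of the first six arguments
   is ORed in as a 3-bit field at bit 8 + 3*i, so the type of argument 0
   occupies bits <10:8>.  */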
9424 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9426 unsigned HOST_WIDE_INT regval = cum.num_args;
9427 int i;
9429 for (i = 0; i < 6; i++)
9430 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9432 return GEN_INT (regval);
9435 /* Make (or fake) .linkage entry for function call.
9437 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9439 Return a SYMBOL_REF rtx for the linkage. */
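/* For a function `foo', for example, the linkage symbol built below is
   `$foo..lk'; alpha_use_linkage later creates per-caller variants of the
   form `$<N>..foo..lk', where <N> is the calling function's number.  */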
9442 alpha_need_linkage (const char *name, int is_local)
9444 splay_tree_node node;
9445 struct alpha_links *al;
9447 if (name[0] == '*')
9448 name++;
9450 if (is_local)
9452 struct alpha_funcs *cfaf;
9454 if (!alpha_funcs_tree)
9455 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9456 splay_tree_compare_pointers);
9458 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9460 cfaf->links = 0;
9461 cfaf->num = ++alpha_funcs_num;
9463 splay_tree_insert (alpha_funcs_tree,
9464 (splay_tree_key) current_function_decl,
9465 (splay_tree_value) cfaf);
9468 if (alpha_links_tree)
9470 /* Is this name already defined? */
9472 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9473 if (node)
9475 al = (struct alpha_links *) node->value;
9476 if (is_local)
9478 /* Defined here but external assumed. */
9479 if (al->lkind == KIND_EXTERN)
9480 al->lkind = KIND_LOCAL;
9482 else
9484 /* Used here but unused assumed. */
9485 if (al->lkind == KIND_UNUSED)
9486 al->lkind = KIND_LOCAL;
9488 return al->linkage;
9491 else
9492 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9494 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9495 name = ggc_strdup (name);
9497 /* Assume external if no definition. */
9498 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9500 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9501 get_identifier (name);
9503 /* Construct a SYMBOL_REF for us to call. */
9505 size_t name_len = strlen (name);
9506 char *linksym = alloca (name_len + 6);
9507 linksym[0] = '$';
9508 memcpy (linksym + 1, name, name_len);
9509 memcpy (linksym + 1 + name_len, "..lk", 5);
9510 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9511 ggc_alloc_string (linksym, name_len + 5));
9514 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9515 (splay_tree_value) al);
9517 return al->linkage;
9521 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9523 splay_tree_node cfunnode;
9524 struct alpha_funcs *cfaf;
9525 struct alpha_links *al;
9526 const char *name = XSTR (linkage, 0);
9528 cfaf = (struct alpha_funcs *) 0;
9529 al = (struct alpha_links *) 0;
9531 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9532 cfaf = (struct alpha_funcs *) cfunnode->value;
9534 if (cfaf->links)
9536 splay_tree_node lnode;
9538 /* Is this name already defined? */
9540 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9541 if (lnode)
9542 al = (struct alpha_links *) lnode->value;
9544 else
9545 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9547 if (!al)
9549 size_t name_len;
9550 size_t buflen;
9551 char buf [512];
9552 char *linksym;
9553 splay_tree_node node = 0;
9554 struct alpha_links *anl;
9556 if (name[0] == '*')
9557 name++;
9559 name_len = strlen (name);
9561 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9562 al->num = cfaf->num;
9564 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9565 if (node)
9567 anl = (struct alpha_links *) node->value;
9568 al->lkind = anl->lkind;
9571 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9572 buflen = strlen (buf);
9573 linksym = alloca (buflen + 1);
9574 memcpy (linksym, buf, buflen + 1);
9576 al->linkage = gen_rtx_SYMBOL_REF
9577 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9579 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9580 (splay_tree_value) al);
9583 if (rflag)
9584 al->rkind = KIND_CODEADDR;
9585 else
9586 al->rkind = KIND_LINKAGE;
9588 if (lflag)
9589 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9590 else
9591 return al->linkage;
9594 static int
9595 alpha_write_one_linkage (splay_tree_node node, void *data)
9597 const char *const name = (const char *) node->key;
9598 struct alpha_links *link = (struct alpha_links *) node->value;
9599 FILE *stream = (FILE *) data;
9601 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9602 if (link->rkind == KIND_CODEADDR)
9604 if (link->lkind == KIND_LOCAL)
9606 /* Local and used */
9607 fprintf (stream, "\t.quad %s..en\n", name);
9609 else
9611 /* External and used, request code address. */
9612 fprintf (stream, "\t.code_address %s\n", name);
9615 else
9617 if (link->lkind == KIND_LOCAL)
9619 /* Local and used, build linkage pair. */
9620 fprintf (stream, "\t.quad %s..en\n", name);
9621 fprintf (stream, "\t.quad %s\n", name);
9623 else
9625 /* External and used, request linkage pair. */
9626 fprintf (stream, "\t.linkage %s\n", name);
9630 return 0;
9633 static void
9634 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9636 splay_tree_node node;
9637 struct alpha_funcs *func;
9639 fprintf (stream, "\t.link\n");
9640 fprintf (stream, "\t.align 3\n");
9641 in_section = NULL;
9643 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9644 func = (struct alpha_funcs *) node->value;
9646 fputs ("\t.name ", stream);
9647 assemble_name (stream, funname);
9648 fputs ("..na\n", stream);
9649 ASM_OUTPUT_LABEL (stream, funname);
9650 fprintf (stream, "\t.pdesc ");
9651 assemble_name (stream, funname);
9652 fprintf (stream, "..en,%s\n",
9653 alpha_procedure_type == PT_STACK ? "stack"
9654 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9656 if (func->links)
9658 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9659 /* splay_tree_delete (func->links); */
9663 /* Given a decl, a section name, and whether the decl initializer
9664 has relocs, choose attributes for the section. */
9666 #define SECTION_VMS_OVERLAY SECTION_FORGET
9667 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9668 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9670 static unsigned int
9671 vms_section_type_flags (tree decl, const char *name, int reloc)
9673 unsigned int flags = default_section_type_flags (decl, name, reloc);
9675 if (decl && DECL_ATTRIBUTES (decl)
9676 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9677 flags |= SECTION_VMS_OVERLAY;
9678 if (decl && DECL_ATTRIBUTES (decl)
9679 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9680 flags |= SECTION_VMS_GLOBAL;
9681 if (decl && DECL_ATTRIBUTES (decl)
9682 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9683 flags |= SECTION_VMS_INITIALIZE;
9685 return flags;
9688 /* Switch to an arbitrary section NAME with attributes as specified
9689 by FLAGS. ALIGN specifies any known alignment requirements for
9690 the section; 0 if the default should be used. */
9692 static void
9693 vms_asm_named_section (const char *name, unsigned int flags,
9694 tree decl ATTRIBUTE_UNUSED)
9696 fputc ('\n', asm_out_file);
9697 fprintf (asm_out_file, ".section\t%s", name);
9699 if (flags & SECTION_VMS_OVERLAY)
9700 fprintf (asm_out_file, ",OVR");
9701 if (flags & SECTION_VMS_GLOBAL)
9702 fprintf (asm_out_file, ",GBL");
9703 if (flags & SECTION_VMS_INITIALIZE)
9704 fprintf (asm_out_file, ",NOMOD");
9705 if (flags & SECTION_DEBUG)
9706 fprintf (asm_out_file, ",NOWRT");
9708 fputc ('\n', asm_out_file);
9711 /* Record an element in the table of global constructors. SYMBOL is
9712 a SYMBOL_REF of the function to be called; PRIORITY is a number
9713 between 0 and MAX_INIT_PRIORITY.
9715 Differs from default_ctors_section_asm_out_constructor in that the
9716 width of the .ctors entry is always 64 bits, rather than the 32 bits
9717 used by a normal pointer. */
9719 static void
9720 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9722 switch_to_section (ctors_section);
9723 assemble_align (BITS_PER_WORD);
9724 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9727 static void
9728 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9730 switch_to_section (dtors_section);
9731 assemble_align (BITS_PER_WORD);
9732 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9734 #else
9737 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9738 int is_local ATTRIBUTE_UNUSED)
9740 return NULL_RTX;
9744 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9745 tree cfundecl ATTRIBUTE_UNUSED,
9746 int lflag ATTRIBUTE_UNUSED,
9747 int rflag ATTRIBUTE_UNUSED)
9749 return NULL_RTX;
9752 #endif /* TARGET_ABI_OPEN_VMS */
9754 #if TARGET_ABI_UNICOSMK
9756 /* This evaluates to true if we do not know how to pass TYPE solely in
9757 registers. This is the case for all arguments that do not fit in two
9758 registers. */
9760 static bool
9761 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9763 if (type == NULL)
9764 return false;
9766 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9767 return true;
9768 if (TREE_ADDRESSABLE (type))
9769 return true;
9771 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9774 /* Define the offset between two registers, one to be eliminated, and the
9775 other its replacement, at the start of a routine. */
9778 unicosmk_initial_elimination_offset (int from, int to)
9780 int fixed_size;
9782 fixed_size = alpha_sa_size();
9783 if (fixed_size != 0)
9784 fixed_size += 48;
9786 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9787 return -fixed_size;
9788 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9789 return 0;
9790 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9791 return (ALPHA_ROUND (current_function_outgoing_args_size)
9792 + ALPHA_ROUND (get_frame_size()));
9793 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9794 return (ALPHA_ROUND (fixed_size)
9795 + ALPHA_ROUND (get_frame_size()
9796 + current_function_outgoing_args_size));
9797 else
9798 gcc_unreachable ();
9801 /* Output the module name for .ident and .end directives. We have to strip
9802 directories and make sure that the module name starts with a letter
9803 or '$'. */
9805 static void
9806 unicosmk_output_module_name (FILE *file)
9808 const char *name = lbasename (main_input_filename);
9809 unsigned len = strlen (name);
9810 char *clean_name = alloca (len + 2);
9811 char *ptr = clean_name;
9813 /* CAM only accepts module names that start with a letter or '$'. We
9814 prefix the module name with a '$' if necessary. */
9816 if (!ISALPHA (*name))
9817 *ptr++ = '$';
9818 memcpy (ptr, name, len + 1);
9819 clean_symbol_name (clean_name);
9820 fputs (clean_name, file);
9823 /* Output the definition of a common variable. */
9825 void
9826 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9828 tree name_tree;
9829 printf ("T3E__: common %s\n", name);
9831 in_section = NULL;
9832 fputs("\t.endp\n\n\t.psect ", file);
9833 assemble_name(file, name);
9834 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9835 fprintf(file, "\t.byte\t0:%d\n", size);
9837 /* Mark the symbol as defined in this module. */
9838 name_tree = get_identifier (name);
9839 TREE_ASM_WRITTEN (name_tree) = 1;
9842 #define SECTION_PUBLIC SECTION_MACH_DEP
9843 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9844 static int current_section_align;
9846 /* A get_unnamed_section callback for switching to the text section. */
9848 static void
9849 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9851 static int count = 0;
9852 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9855 /* A get_unnamed_section callback for switching to the data section. */
9857 static void
9858 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9860 static int count = 1;
9861 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9864 /* Implement TARGET_ASM_INIT_SECTIONS.
9866 The Cray assembler is really weird with respect to sections. It has only
9867 named sections and you can't reopen a section once it has been closed.
9868 This means that we have to generate unique names whenever we want to
9869 reenter the text or the data section. */
9871 static void
9872 unicosmk_init_sections (void)
9874 text_section = get_unnamed_section (SECTION_CODE,
9875 unicosmk_output_text_section_asm_op,
9876 NULL);
9877 data_section = get_unnamed_section (SECTION_WRITE,
9878 unicosmk_output_data_section_asm_op,
9879 NULL);
9880 readonly_data_section = data_section;
9883 static unsigned int
9884 unicosmk_section_type_flags (tree decl, const char *name,
9885 int reloc ATTRIBUTE_UNUSED)
9887 unsigned int flags = default_section_type_flags (decl, name, reloc);
9889 if (!decl)
9890 return flags;
9892 if (TREE_CODE (decl) == FUNCTION_DECL)
9894 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9895 if (align_functions_log > current_section_align)
9896 current_section_align = align_functions_log;
9898 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9899 flags |= SECTION_MAIN;
9901 else
9902 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9904 if (TREE_PUBLIC (decl))
9905 flags |= SECTION_PUBLIC;
9907 return flags;
9910 /* Generate a section name for decl and associate it with the
9911 declaration. */
9913 static void
9914 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9916 const char *name;
9917 int len;
9919 gcc_assert (decl);
9921 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9922 name = default_strip_name_encoding (name);
9923 len = strlen (name);
9925 if (TREE_CODE (decl) == FUNCTION_DECL)
9927 char *string;
9929 /* It is essential that we prefix the section name here because
9930 otherwise the section names generated for constructors and
9931 destructors confuse collect2. */
9933 string = alloca (len + 6);
9934 sprintf (string, "code@%s", name);
9935 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9937 else if (TREE_PUBLIC (decl))
9938 DECL_SECTION_NAME (decl) = build_string (len, name);
9939 else
9941 char *string;
9943 string = alloca (len + 6);
9944 sprintf (string, "data@%s", name);
9945 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9949 /* Switch to an arbitrary section NAME with attributes as specified
9950 by FLAGS. ALIGN specifies any known alignment requirements for
9951 the section; 0 if the default should be used. */
9953 static void
9954 unicosmk_asm_named_section (const char *name, unsigned int flags,
9955 tree decl ATTRIBUTE_UNUSED)
9957 const char *kind;
9959 /* Close the previous section. */
9961 fputs ("\t.endp\n\n", asm_out_file);
9963 /* Find out what kind of section we are opening. */
9965 if (flags & SECTION_MAIN)
9966 fputs ("\t.start\tmain\n", asm_out_file);
9968 if (flags & SECTION_CODE)
9969 kind = "code";
9970 else if (flags & SECTION_PUBLIC)
9971 kind = "common";
9972 else
9973 kind = "data";
9975 if (current_section_align != 0)
9976 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9977 current_section_align, kind);
9978 else
9979 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9982 static void
9983 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9985 if (DECL_P (decl)
9986 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9987 unicosmk_unique_section (decl, 0);
9990 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9991 in code sections because .align fills unused space with zeroes. */
9993 void
9994 unicosmk_output_align (FILE *file, int align)
9996 if (inside_function)
9997 fprintf (file, "\tgcc@code@align\t%d\n", align);
9998 else
9999 fprintf (file, "\t.align\t%d\n", align);
10002 /* Add a case vector to the current function's list of deferred case
10003 vectors. Case vectors have to be put into a separate section because CAM
10004 does not allow data definitions in code sections. */
10006 void
10007 unicosmk_defer_case_vector (rtx lab, rtx vec)
10009 struct machine_function *machine = cfun->machine;
10011 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10012 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10013 machine->addr_list);
10016 /* Output a case vector. */
10018 static void
10019 unicosmk_output_addr_vec (FILE *file, rtx vec)
10021 rtx lab = XEXP (vec, 0);
10022 rtx body = XEXP (vec, 1);
10023 int vlen = XVECLEN (body, 0);
10024 int idx;
10026 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10028 for (idx = 0; idx < vlen; idx++)
10030 ASM_OUTPUT_ADDR_VEC_ELT
10031 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10035 /* Output current function's deferred case vectors. */
10037 static void
10038 unicosmk_output_deferred_case_vectors (FILE *file)
10040 struct machine_function *machine = cfun->machine;
10041 rtx t;
10043 if (machine->addr_list == NULL_RTX)
10044 return;
10046 switch_to_section (data_section);
10047 for (t = machine->addr_list; t; t = XEXP (t, 1))
10048 unicosmk_output_addr_vec (file, XEXP (t, 0));
10051 /* Generate the name of the SSIB section for the current function. */
10053 #define SSIB_PREFIX "__SSIB_"
10054 #define SSIB_PREFIX_LEN 7
10056 static const char *
10057 unicosmk_ssib_name (void)
10059 /* This is ok since CAM won't be able to deal with names longer than that
10060 anyway. */
10062 static char name[256];
10064 rtx x;
10065 const char *fnname;
10066 int len;
10068 x = DECL_RTL (cfun->decl);
10069 gcc_assert (GET_CODE (x) == MEM);
10070 x = XEXP (x, 0);
10071 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10072 fnname = XSTR (x, 0);
10074 len = strlen (fnname);
10075 if (len + SSIB_PREFIX_LEN > 255)
10076 len = 255 - SSIB_PREFIX_LEN;
10078 strcpy (name, SSIB_PREFIX);
10079 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10080 name[len + SSIB_PREFIX_LEN] = 0;
10082 return name;
10085 /* Set up the dynamic subprogram information block (DSIB) and update the
10086 frame pointer register ($15) for subroutines which have a frame. If the
10087 subroutine doesn't have a frame, simply increment $15. */
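/* The offsets stored below give the DSIB slots this port actually fills,
   relative to the new stack pointer after the 64-byte allocation:
   56 return address, 48 old frame pointer, 32 SSIB pointer, 24 CIW index;
   $15 is then set to point just past the block.  */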
10089 static void
10090 unicosmk_gen_dsib (unsigned long *imaskP)
10092 if (alpha_procedure_type == PT_STACK)
10094 const char *ssib_name;
10095 rtx mem;
10097 /* Allocate 64 bytes for the DSIB. */
10099 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10100 GEN_INT (-64))));
10101 emit_insn (gen_blockage ());
10103 /* Save the return address. */
10105 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10106 set_mem_alias_set (mem, alpha_sr_alias_set);
10107 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10108 (*imaskP) &= ~(1UL << REG_RA);
10110 /* Save the old frame pointer. */
10112 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10113 set_mem_alias_set (mem, alpha_sr_alias_set);
10114 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10115 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10117 emit_insn (gen_blockage ());
10119 /* Store the SSIB pointer. */
10121 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10122 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10123 set_mem_alias_set (mem, alpha_sr_alias_set);
10125 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10126 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10127 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10129 /* Save the CIW index. */
10131 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10132 set_mem_alias_set (mem, alpha_sr_alias_set);
10133 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10135 emit_insn (gen_blockage ());
10137 /* Set the new frame pointer. */
10139 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10140 stack_pointer_rtx, GEN_INT (64))));
10143 else
10145 /* Increment the frame pointer register to indicate that we do not
10146 have a frame. */
10148 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10149 hard_frame_pointer_rtx, const1_rtx)));
10153 /* Output the static subroutine information block for the current
10154 function. */
10156 static void
10157 unicosmk_output_ssib (FILE *file, const char *fnname)
10159 int len;
10160 int i;
10161 rtx x;
10162 rtx ciw;
10163 struct machine_function *machine = cfun->machine;
10165 in_section = NULL;
10166 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10167 unicosmk_ssib_name ());
10169 /* Some required stuff and the function name length. */
10171 len = strlen (fnname);
10172 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10174 /* Saved registers
10175 ??? We don't do that yet. */
10177 fputs ("\t.quad\t0\n", file);
10179 /* Function address. */
10181 fputs ("\t.quad\t", file);
10182 assemble_name (file, fnname);
10183 putc ('\n', file);
10185 fputs ("\t.quad\t0\n", file);
10186 fputs ("\t.quad\t0\n", file);
10188 /* Function name.
10189 ??? We do it the same way Cray CC does it but this could be
10190 simplified. */
10192 for( i = 0; i < len; i++ )
10193 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10194 if( (len % 8) == 0 )
10195 fputs ("\t.quad\t0\n", file);
10196 else
10197 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10199 /* All call information words used in the function. */
10201 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10203 ciw = XEXP (x, 0);
10204 #if HOST_BITS_PER_WIDE_INT == 32
10205 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10206 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10207 #else
10208 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10209 #endif
10213 /* Add a call information word (CIW) to the list of the current function's
10214 CIWs and return its index.
10216 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10219 unicosmk_add_call_info_word (rtx x)
10221 rtx node;
10222 struct machine_function *machine = cfun->machine;
10224 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10225 if (machine->first_ciw == NULL_RTX)
10226 machine->first_ciw = node;
10227 else
10228 XEXP (machine->last_ciw, 1) = node;
10230 machine->last_ciw = node;
10231 ++machine->ciw_count;
10233 return GEN_INT (machine->ciw_count
10234 + strlen (current_function_name ())/8 + 5);
10237 /* The Cray assembler doesn't accept extern declarations for symbols which
10238 are defined in the same file. We have to keep track of all global
10239 symbols which are referenced and/or defined in a source file and output
10240 extern declarations for those which are referenced but not defined at
10241 the end of file. */
10243 /* List of identifiers for which an extern declaration might have to be
10244 emitted. */
10245 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10247 struct unicosmk_extern_list
10249 struct unicosmk_extern_list *next;
10250 const char *name;
10253 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10255 /* Output extern declarations which are required for every asm file. */
10257 static void
10258 unicosmk_output_default_externs (FILE *file)
10260 static const char *const externs[] =
10261 { "__T3E_MISMATCH" };
10263 int i;
10264 int n;
10266 n = ARRAY_SIZE (externs);
10268 for (i = 0; i < n; i++)
10269 fprintf (file, "\t.extern\t%s\n", externs[i]);
10272 /* Output extern declarations for global symbols which have been
10273 referenced but not defined. */
10275 static void
10276 unicosmk_output_externs (FILE *file)
10278 struct unicosmk_extern_list *p;
10279 const char *real_name;
10280 int len;
10281 tree name_tree;
10283 len = strlen (user_label_prefix);
10284 for (p = unicosmk_extern_head; p != 0; p = p->next)
10286 /* We have to strip the encoding and possibly remove user_label_prefix
10287 from the identifier in order to handle -fleading-underscore and
10288 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10289 real_name = default_strip_name_encoding (p->name);
10290 if (len && p->name[0] == '*'
10291 && !memcmp (real_name, user_label_prefix, len))
10292 real_name += len;
10294 name_tree = get_identifier (real_name);
10295 if (! TREE_ASM_WRITTEN (name_tree))
10297 TREE_ASM_WRITTEN (name_tree) = 1;
10298 fputs ("\t.extern\t", file);
10299 assemble_name (file, p->name);
10300 putc ('\n', file);
10305 /* Record an extern. */
10307 void
10308 unicosmk_add_extern (const char *name)
10310 struct unicosmk_extern_list *p;
10312 p = (struct unicosmk_extern_list *)
10313 xmalloc (sizeof (struct unicosmk_extern_list));
10314 p->next = unicosmk_extern_head;
10315 p->name = name;
10316 unicosmk_extern_head = p;
10319 /* The Cray assembler generates incorrect code if identifiers which
10320 conflict with register names are used as instruction operands. We have
10321 to replace such identifiers with DEX expressions. */
10323 /* Structure to collect identifiers which have been replaced by DEX
10324 expressions. */
10325 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10327 struct unicosmk_dex {
10328 struct unicosmk_dex *next;
10329 const char *name;
10332 /* List of identifiers which have been replaced by DEX expressions. The DEX
10333 number is determined by the position in the list. */
10335 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10337 /* The number of elements in the DEX list. */
10339 static int unicosmk_dex_count = 0;
10341 /* Check if NAME must be replaced by a DEX expression. */
10343 static int
10344 unicosmk_special_name (const char *name)
10346 if (name[0] == '*')
10347 ++name;
10349 if (name[0] == '$')
10350 ++name;
10352 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10353 return 0;
10355 switch (name[1])
10357 case '1': case '2':
10358 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10360 case '3':
10361 return (name[2] == '\0'
10362 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10364 default:
10365 return (ISDIGIT (name[1]) && name[2] == '\0');
10369 /* Return the DEX number if X must be replaced by a DEX expression and 0
10370 otherwise. */
10372 static int
10373 unicosmk_need_dex (rtx x)
10374 {
10375   struct unicosmk_dex *dex;
10376   const char *name;
10377   int i;
10379   if (GET_CODE (x) != SYMBOL_REF)
10380     return 0;
10382   name = XSTR (x,0);
10383   if (! unicosmk_special_name (name))
10384     return 0;
10386   i = unicosmk_dex_count;
10387   for (dex = unicosmk_dex_list; dex; dex = dex->next)
10388     {
10389       if (! strcmp (name, dex->name))
10390         return i;
10391       --i;
10392     }
10394   dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10395   dex->name = name;
10396   dex->next = unicosmk_dex_list;
10397   unicosmk_dex_list = dex;
10399   ++unicosmk_dex_count;
10400   return unicosmk_dex_count;
10401 }
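/* DEX numbers start at 1 and grow with the list, so the list head always
   carries the current unicosmk_dex_count; that is why the search above (and
   the output loop in unicosmk_output_dex) walks the list with a counter that
   counts down from unicosmk_dex_count.  */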
10403 /* Output the DEX definitions for this file. */
10405 static void
10406 unicosmk_output_dex (FILE *file)
10407 {
10408   struct unicosmk_dex *dex;
10409   int i;
10411   if (unicosmk_dex_list == NULL)
10412     return;
10414   fprintf (file, "\t.dexstart\n");
10416   i = unicosmk_dex_count;
10417   for (dex = unicosmk_dex_list; dex; dex = dex->next)
10418     {
10419       fprintf (file, "\tDEX (%d) = ", i);
10420       assemble_name (file, dex->name);
10421       putc ('\n', file);
10422       --i;
10423     }
10425   fprintf (file, "\t.dexend\n");
10426 }
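/* Illustrative output when "f1" and then "r2" have been recorded by
   unicosmk_need_dex, in that order:

	.dexstart
	DEX (2) = r2
	DEX (1) = f1
	.dexend  */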
10428 /* Output text to appear at the beginning of an assembler file.  */
10430 static void
10431 unicosmk_file_start (void)
10432 {
10433 int i;
10435 fputs ("\t.ident\t", asm_out_file);
10436 unicosmk_output_module_name (asm_out_file);
10437 fputs ("\n\n", asm_out_file);
10439 /* The Unicos/Mk assembler uses different register names. Instead of trying
10440 to support them, we simply use micro definitions. */
10442 /* CAM has different register names: rN for the integer register N and fN
10443 for the floating-point register N. Instead of trying to use these in
10444 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10445 register. */
10447 for (i = 0; i < 32; ++i)
10448 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10450 for (i = 0; i < 32; ++i)
10451 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
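/* The two loops above emit micro definitions of the form "$7 <- r7" and
   "$f7 <- f7", one pair for each of the 32 integer and 32 floating-point
   registers.  */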
10453 putc ('\n', asm_out_file);
10455 /* The .align directive fills unused space with zeroes, which does not work
10456 in code sections. We define the macro 'gcc@code@align' which uses nops
10457 instead. Note that it assumes that code sections always have the
10458 biggest possible alignment since . refers to the current offset from
10459 the beginning of the section. */
10461 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10462 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10463 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10464 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10465 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10466 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10467 fputs ("\t.endr\n", asm_out_file);
10468 fputs ("\t.endif\n", asm_out_file);
10469 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
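/* "bis r31,r31,r31" is the standard Alpha nop (r31 always reads as zero and
   writes to it are discarded), so the macro pads with 4-byte nops rather
   than zero bytes.  For example, "gcc@code@align 4" at an offset of 8 bytes
   into the section expands to two such nops.  */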
10471 /* Output extern declarations which should always be visible. */
10472 unicosmk_output_default_externs (asm_out_file);
10474 /* Open a dummy section. We always need to be inside a section for the
10475 section-switching code to work correctly.
10476 ??? This should be a module id or something like that. I still have to
10477 figure out what the rules for those are. */
10478 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10479 }
10481 /* Output text to appear at the end of an assembler file. This includes all
10482 pending extern declarations and DEX expressions. */
10484 static void
10485 unicosmk_file_end (void)
10486 {
10487 fputs ("\t.endp\n\n", asm_out_file);
10489 /* Output all pending externs. */
10491 unicosmk_output_externs (asm_out_file);
10493 /* Output dex definitions used for functions whose names conflict with
10494 register names. */
10496 unicosmk_output_dex (asm_out_file);
10498 fputs ("\t.end\t", asm_out_file);
10499 unicosmk_output_module_name (asm_out_file);
10500 putc ('\n', asm_out_file);
10501 }
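/* Illustrative epilogue for one undefined reference "bar" and one
   DEX-replaced name "f1" (the module name is produced by
   unicosmk_output_module_name, defined elsewhere in this file):

	.endp
	.extern	bar
	.dexstart
	DEX (1) = f1
	.dexend
	.end	<module name>  */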
10503 #else
10505 static void
10506 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10507 {}
10509 static void
10510 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10511 {}
10513 static void
10514 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10515                       const char * fnname ATTRIBUTE_UNUSED)
10516 {}
10518 rtx
10519 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10520 {
10521   return NULL_RTX;
10522 }
10524 static int
10525 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10526 {
10527   return 0;
10528 }
10530 #endif /* TARGET_ABI_UNICOSMK */
10532 static void
10533 alpha_init_libfuncs (void)
10534 {
10535   if (TARGET_ABI_UNICOSMK)
10536     {
10537       /* Prevent gcc from generating calls to __divsi3.  */
10538       set_optab_libfunc (sdiv_optab, SImode, 0);
10539       set_optab_libfunc (udiv_optab, SImode, 0);
10541       /* Use the functions provided by the system library
10542          for DImode integer division.  */
10543       set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10544       set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10545     }
10546   else if (TARGET_ABI_OPEN_VMS)
10547     {
10548       /* Use the VMS runtime library functions for division and
10549          remainder.  */
10550       set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10551       set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10552       set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10553       set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10554       set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10555       set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10556       set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10557       set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10558     }
10559 }
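/* On VMS these OTS$ run-time library routines replace the default libgcc
   names for integer division and remainder (__divsi3, __divdi3, __modsi3,
   and so on) in the optab tables.  */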
10562 /* Initialize the GCC target structure. */
10563 #if TARGET_ABI_OPEN_VMS
10564 # undef TARGET_ATTRIBUTE_TABLE
10565 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10566 # undef TARGET_SECTION_TYPE_FLAGS
10567 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10568 #endif
10570 #undef TARGET_IN_SMALL_DATA_P
10571 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10573 #if TARGET_ABI_UNICOSMK
10574 # undef TARGET_INSERT_ATTRIBUTES
10575 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10576 # undef TARGET_SECTION_TYPE_FLAGS
10577 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10578 # undef TARGET_ASM_UNIQUE_SECTION
10579 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10580 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10581 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10582 # undef TARGET_ASM_GLOBALIZE_LABEL
10583 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10584 # undef TARGET_MUST_PASS_IN_STACK
10585 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10586 #endif
10588 #undef TARGET_ASM_ALIGNED_HI_OP
10589 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10590 #undef TARGET_ASM_ALIGNED_DI_OP
10591 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10593 /* Default unaligned ops are provided for ELF systems. To get unaligned
10594 data for non-ELF systems, we have to turn off auto alignment. */
10595 #ifndef OBJECT_FORMAT_ELF
10596 #undef TARGET_ASM_UNALIGNED_HI_OP
10597 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10598 #undef TARGET_ASM_UNALIGNED_SI_OP
10599 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10600 #undef TARGET_ASM_UNALIGNED_DI_OP
10601 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10602 #endif
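/* With the strings above, an unaligned 2-byte datum is emitted roughly as

	.align 0
	.word	value

   so the assembler does not pad to the natural alignment first.  (A sketch
   of the intended effect; the exact operand syntax depends on the value
   being emitted.)  */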
10604 #ifdef OBJECT_FORMAT_ELF
10605 #undef TARGET_ASM_RELOC_RW_MASK
10606 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10607 #undef TARGET_ASM_SELECT_RTX_SECTION
10608 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10609 #undef TARGET_SECTION_TYPE_FLAGS
10610 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10611 #endif
10613 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10614 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10616 #undef TARGET_INIT_LIBFUNCS
10617 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10619 #if TARGET_ABI_UNICOSMK
10620 #undef TARGET_ASM_FILE_START
10621 #define TARGET_ASM_FILE_START unicosmk_file_start
10622 #undef TARGET_ASM_FILE_END
10623 #define TARGET_ASM_FILE_END unicosmk_file_end
10624 #else
10625 #undef TARGET_ASM_FILE_START
10626 #define TARGET_ASM_FILE_START alpha_file_start
10627 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10628 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10629 #endif
10631 #undef TARGET_SCHED_ADJUST_COST
10632 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10633 #undef TARGET_SCHED_ISSUE_RATE
10634 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10635 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10636 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10637 alpha_multipass_dfa_lookahead
10639 #undef TARGET_HAVE_TLS
10640 #define TARGET_HAVE_TLS HAVE_AS_TLS
10642 #undef TARGET_INIT_BUILTINS
10643 #define TARGET_INIT_BUILTINS alpha_init_builtins
10644 #undef TARGET_EXPAND_BUILTIN
10645 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10646 #undef TARGET_FOLD_BUILTIN
10647 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10649 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10650 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10651 #undef TARGET_CANNOT_COPY_INSN_P
10652 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10653 #undef TARGET_CANNOT_FORCE_CONST_MEM
10654 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10656 #if TARGET_ABI_OSF
10657 #undef TARGET_ASM_OUTPUT_MI_THUNK
10658 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10659 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10660 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10661 #undef TARGET_STDARG_OPTIMIZE_HOOK
10662 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10663 #endif
10665 #undef TARGET_RTX_COSTS
10666 #define TARGET_RTX_COSTS alpha_rtx_costs
10667 #undef TARGET_ADDRESS_COST
10668 #define TARGET_ADDRESS_COST hook_int_rtx_0
10670 #undef TARGET_MACHINE_DEPENDENT_REORG
10671 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10673 #undef TARGET_PROMOTE_FUNCTION_ARGS
10674 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10675 #undef TARGET_PROMOTE_FUNCTION_RETURN
10676 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10677 #undef TARGET_PROMOTE_PROTOTYPES
10678 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10679 #undef TARGET_RETURN_IN_MEMORY
10680 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10681 #undef TARGET_PASS_BY_REFERENCE
10682 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10683 #undef TARGET_SETUP_INCOMING_VARARGS
10684 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10685 #undef TARGET_STRICT_ARGUMENT_NAMING
10686 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10687 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10688 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10689 #undef TARGET_SPLIT_COMPLEX_ARG
10690 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10691 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10692 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10693 #undef TARGET_ARG_PARTIAL_BYTES
10694 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10696 #undef TARGET_SECONDARY_RELOAD
10697 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10699 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10700 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10701 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10702 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10704 #undef TARGET_BUILD_BUILTIN_VA_LIST
10705 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10707 /* The Alpha architecture does not require sequential consistency. See
10708 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10709 for an example of how it can be violated in practice. */
10710 #undef TARGET_RELAXED_ORDERING
10711 #define TARGET_RELAXED_ORDERING true
10713 #undef TARGET_DEFAULT_TARGET_FLAGS
10714 #define TARGET_DEFAULT_TARGET_FLAGS \
10715 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10716 #undef TARGET_HANDLE_OPTION
10717 #define TARGET_HANDLE_OPTION alpha_handle_option
10719 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10720 #undef TARGET_MANGLE_TYPE
10721 #define TARGET_MANGLE_TYPE alpha_mangle_type
10722 #endif
10724 struct gcc_target targetm = TARGET_INITIALIZER;
10727 #include "gt-alpha.h"