1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
66 static const char * const alpha_cpu_name[] =
68 "ev4", "ev5", "ev6"
71 /* Specify how accurate floating-point traps need to be. */
73 enum alpha_trap_precision alpha_tp;
75 /* Specify the floating-point rounding mode. */
77 enum alpha_fp_rounding_mode alpha_fprm;
79 /* Specify which things cause traps. */
81 enum alpha_fp_trap_mode alpha_fptm;
83 /* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
86 struct alpha_compare alpha_compare;
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
91 static int inside_function = FALSE;
93 /* The number of cycles of latency we should assume on memory reads. */
95 int alpha_memory_latency = 3;
97 /* Whether the function needs the GP. */
99 static int alpha_function_needs_gp;
101 /* The alias set for prologue/epilogue register save/restore. */
103 static GTY(()) alias_set_type alpha_sr_alias_set;
105 /* The assembler name of the current function. */
107 static const char *alpha_fnname;
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
120 /* Costs of various operations on the different architectures. */
122 struct alpha_rtx_cost_data
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
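/* For example, with these size costs a single multiply (COSTS_N_INSNS (1) + 1)
   is slightly dearer than one shift (COSTS_N_INSNS (1)), so a lone shift is
   preferred in its place, while a two-shift replacement would already cost
   more than keeping the multiply.  */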
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS current_function_args_info.num_args
193 #else
194 #define NUM_ARGS current_function_args_info
195 #endif
197 #define REG_PV 27
198 #define REG_RA 26
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
206 #endif
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
213 /* Implement TARGET_HANDLE_OPTION. */
215 static bool
216 alpha_handle_option (size_t code, const char *arg, int value)
218 switch (code)
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
230 case OPT_mtls_size_:
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
236 return true;
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
242 static const char *
243 alpha_mangle_type (const_tree type)
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
252 #endif
254 /* Parse target option strings. */
256 void
257 override_options (void)
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
263 } cpu_table[] = {
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { 0, 0, 0 }
281 int i;
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
 291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
307 if (TARGET_IEEE)
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
311 else
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
318 if (TARGET_IEEE_WITH_INEXACT)
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
322 else
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
329 if (alpha_tp_string)
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
337 else
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
341 if (alpha_fprm_string)
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string,"d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
351 else
352 error ("bad value %qs for -mfp-rounding-mode switch",
353 alpha_fprm_string);
356 if (alpha_fptm_string)
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
370 if (alpha_cpu_string)
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
378 break;
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
384 if (alpha_tune_string)
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
389 alpha_tune = cpu_table [i].processor;
390 break;
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
396 /* Do some sanity checks on the above options. */
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
411 if (alpha_cpu == PROCESSOR_EV6)
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps. Which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
419 if (TARGET_FLOAT_VAX)
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
426 if (alpha_fptm == ALPHA_FPTM_SUI)
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
437 char *end;
438 int lat;
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
450 static int const cache_latency[][4] =
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
462 lat = 3;
464 else
465 lat = cache_latency[alpha_tune][lat-1];
467 else if (! strcmp (alpha_mlat_string, "main"))
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
473 else
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
476 lat = 3;
479 alpha_memory_latency = lat;
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
502 if (align_functions <= 0)
503 align_functions = 16;
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
508 /* Register variables and functions with the garbage collector. */
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524 #endif
526 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
527 can be optimized to ap = __builtin_next_arg (0). */
528 if (TARGET_ABI_UNICOSMK)
529 targetm.expand_builtin_va_start = NULL;
532 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
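/* For example, 0xffff00000000ffff yields 1 (every byte is 0x00 or 0xff, so it
   is usable as a zap/zapnot byte mask), while 0x00000000ffff0fff yields 0
   because of the 0x0f byte.  */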
535 zap_mask (HOST_WIDE_INT value)
537 int i;
539 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
540 i++, value >>= 8)
541 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
542 return 0;
544 return 1;
547 /* Return true if OP is valid for a particular TLS relocation.
548 We are already guaranteed that OP is a CONST. */
551 tls_symbolic_operand_1 (rtx op, int size, int unspec)
553 op = XEXP (op, 0);
555 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
556 return 0;
557 op = XVECEXP (op, 0, 0);
559 if (GET_CODE (op) != SYMBOL_REF)
560 return 0;
562 switch (SYMBOL_REF_TLS_MODEL (op))
564 case TLS_MODEL_LOCAL_DYNAMIC:
565 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
566 case TLS_MODEL_INITIAL_EXEC:
567 return unspec == UNSPEC_TPREL && size == 64;
568 case TLS_MODEL_LOCAL_EXEC:
569 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
570 default:
571 gcc_unreachable ();
575 /* Used by aligned_memory_operand and unaligned_memory_operand to
576 resolve what reload is going to do with OP if it's a register. */
579 resolve_reload_operand (rtx op)
581 if (reload_in_progress)
583 rtx tmp = op;
584 if (GET_CODE (tmp) == SUBREG)
585 tmp = SUBREG_REG (tmp);
586 if (GET_CODE (tmp) == REG
587 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
589 op = reg_equiv_memory_loc[REGNO (tmp)];
590 if (op == 0)
591 return 0;
594 return op;
 597 /* The scalar modes supported differ from the default check-what-c-supports
598 version in that sometimes TFmode is available even when long double
599 indicates only DFmode. On unicosmk, we have the situation that HImode
600 doesn't map to any C type, but of course we still support that. */
602 static bool
603 alpha_scalar_mode_supported_p (enum machine_mode mode)
605 switch (mode)
607 case QImode:
608 case HImode:
609 case SImode:
610 case DImode:
611 case TImode: /* via optabs.c */
612 return true;
614 case SFmode:
615 case DFmode:
616 return true;
618 case TFmode:
619 return TARGET_HAS_XFLOATING_LIBS;
621 default:
622 return false;
626 /* Alpha implements a couple of integer vector mode operations when
627 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
628 which allows the vectorizer to operate on e.g. move instructions,
629 or when expand_vector_operations can do something useful. */
631 static bool
632 alpha_vector_mode_supported_p (enum machine_mode mode)
634 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
637 /* Return 1 if this function can directly return via $26. */
640 direct_return (void)
642 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
643 && reload_completed
644 && alpha_sa_size () == 0
645 && get_frame_size () == 0
646 && current_function_outgoing_args_size == 0
647 && current_function_pretend_args_size == 0);
650 /* Return the ADDR_VEC associated with a tablejump insn. */
653 alpha_tablejump_addr_vec (rtx insn)
655 rtx tmp;
657 tmp = JUMP_LABEL (insn);
658 if (!tmp)
659 return NULL_RTX;
660 tmp = NEXT_INSN (tmp);
661 if (!tmp)
662 return NULL_RTX;
663 if (GET_CODE (tmp) == JUMP_INSN
664 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
665 return PATTERN (tmp);
666 return NULL_RTX;
669 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
672 alpha_tablejump_best_label (rtx insn)
674 rtx jump_table = alpha_tablejump_addr_vec (insn);
675 rtx best_label = NULL_RTX;
677 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
678 there for edge frequency counts from profile data. */
680 if (jump_table)
682 int n_labels = XVECLEN (jump_table, 1);
683 int best_count = -1;
684 int i, j;
686 for (i = 0; i < n_labels; i++)
688 int count = 1;
690 for (j = i + 1; j < n_labels; j++)
691 if (XEXP (XVECEXP (jump_table, 1, i), 0)
692 == XEXP (XVECEXP (jump_table, 1, j), 0))
693 count++;
695 if (count > best_count)
696 best_count = count, best_label = XVECEXP (jump_table, 1, i);
700 return best_label ? best_label : const0_rtx;
703 /* Return the TLS model to use for SYMBOL. */
705 static enum tls_model
706 tls_symbolic_operand_type (rtx symbol)
708 enum tls_model model;
710 if (GET_CODE (symbol) != SYMBOL_REF)
711 return 0;
712 model = SYMBOL_REF_TLS_MODEL (symbol);
714 /* Local-exec with a 64-bit size is the same code as initial-exec. */
715 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
716 model = TLS_MODEL_INITIAL_EXEC;
718 return model;
721 /* Return true if the function DECL will share the same GP as any
722 function in the current unit of translation. */
724 static bool
725 decl_has_samegp (const_tree decl)
727 /* Functions that are not local can be overridden, and thus may
728 not share the same gp. */
729 if (!(*targetm.binds_local_p) (decl))
730 return false;
732 /* If -msmall-data is in effect, assume that there is only one GP
733 for the module, and so any local symbol has this property. We
734 need explicit relocations to be able to enforce this for symbols
735 not defined in this unit of translation, however. */
736 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
737 return true;
739 /* Functions that are not external are defined in this UoT. */
740 /* ??? Irritatingly, static functions not yet emitted are still
741 marked "external". Apply this to non-static functions only. */
742 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
745 /* Return true if EXP should be placed in the small data section. */
747 static bool
748 alpha_in_small_data_p (const_tree exp)
750 /* We want to merge strings, so we never consider them small data. */
751 if (TREE_CODE (exp) == STRING_CST)
752 return false;
754 /* Functions are never in the small data area. Duh. */
755 if (TREE_CODE (exp) == FUNCTION_DECL)
756 return false;
758 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
760 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
761 if (strcmp (section, ".sdata") == 0
762 || strcmp (section, ".sbss") == 0)
763 return true;
765 else
767 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
769 /* If this is an incomplete type with size 0, then we can't put it
770 in sdata because it might be too big when completed. */
771 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
772 return true;
775 return false;
778 #if TARGET_ABI_OPEN_VMS
779 static bool
780 alpha_linkage_symbol_p (const char *symname)
782 int symlen = strlen (symname);
784 if (symlen > 4)
785 return strcmp (&symname [symlen - 4], "..lk") == 0;
787 return false;
790 #define LINKAGE_SYMBOL_REF_P(X) \
791 ((GET_CODE (X) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (X, 0))) \
793 || (GET_CODE (X) == CONST \
794 && GET_CODE (XEXP (X, 0)) == PLUS \
795 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
796 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
797 #endif
799 /* legitimate_address_p recognizes an RTL expression that is a valid
800 memory address for an instruction. The MODE argument is the
801 machine mode for the MEM expression that wants to use this address.
803 For Alpha, we have either a constant address or the sum of a
804 register and a constant address, or just a register. For DImode,
 805 any of those forms can be surrounded with an AND that clears the
806 low-order three bits; this is an "unaligned" access. */
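/* For example, (reg $16), (plus (reg $16) (const_int 64)), a bare
   CONSTANT_ADDRESS_P constant, and, for DImode,
   (and (plus (reg $16) (const_int 5)) (const_int -8)) as generated for
   ldq_u are all accepted here.  */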
808 bool
809 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
811 /* If this is an ldq_u type address, discard the outer AND. */
812 if (mode == DImode
813 && GET_CODE (x) == AND
814 && GET_CODE (XEXP (x, 1)) == CONST_INT
815 && INTVAL (XEXP (x, 1)) == -8)
816 x = XEXP (x, 0);
818 /* Discard non-paradoxical subregs. */
819 if (GET_CODE (x) == SUBREG
820 && (GET_MODE_SIZE (GET_MODE (x))
821 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
822 x = SUBREG_REG (x);
824 /* Unadorned general registers are valid. */
825 if (REG_P (x)
826 && (strict
827 ? STRICT_REG_OK_FOR_BASE_P (x)
828 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
829 return true;
831 /* Constant addresses (i.e. +/- 32k) are valid. */
832 if (CONSTANT_ADDRESS_P (x))
833 return true;
835 #if TARGET_ABI_OPEN_VMS
836 if (LINKAGE_SYMBOL_REF_P (x))
837 return true;
838 #endif
840 /* Register plus a small constant offset is valid. */
841 if (GET_CODE (x) == PLUS)
843 rtx ofs = XEXP (x, 1);
844 x = XEXP (x, 0);
846 /* Discard non-paradoxical subregs. */
847 if (GET_CODE (x) == SUBREG
848 && (GET_MODE_SIZE (GET_MODE (x))
849 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
850 x = SUBREG_REG (x);
852 if (REG_P (x))
854 if (! strict
855 && NONSTRICT_REG_OK_FP_BASE_P (x)
856 && GET_CODE (ofs) == CONST_INT)
857 return true;
858 if ((strict
859 ? STRICT_REG_OK_FOR_BASE_P (x)
860 : NONSTRICT_REG_OK_FOR_BASE_P (x))
861 && CONSTANT_ADDRESS_P (ofs))
862 return true;
866 /* If we're managing explicit relocations, LO_SUM is valid, as
867 are small data symbols. */
868 else if (TARGET_EXPLICIT_RELOCS)
870 if (small_symbolic_operand (x, Pmode))
871 return true;
873 if (GET_CODE (x) == LO_SUM)
875 rtx ofs = XEXP (x, 1);
876 x = XEXP (x, 0);
878 /* Discard non-paradoxical subregs. */
879 if (GET_CODE (x) == SUBREG
880 && (GET_MODE_SIZE (GET_MODE (x))
881 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
882 x = SUBREG_REG (x);
884 /* Must have a valid base register. */
885 if (! (REG_P (x)
886 && (strict
887 ? STRICT_REG_OK_FOR_BASE_P (x)
888 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
889 return false;
891 /* The symbol must be local. */
892 if (local_symbolic_operand (ofs, Pmode)
893 || dtp32_symbolic_operand (ofs, Pmode)
894 || tp32_symbolic_operand (ofs, Pmode))
895 return true;
899 return false;
902 /* Build the SYMBOL_REF for __tls_get_addr. */
904 static GTY(()) rtx tls_get_addr_libfunc;
906 static rtx
907 get_tls_get_addr (void)
909 if (!tls_get_addr_libfunc)
910 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
911 return tls_get_addr_libfunc;
914 /* Try machine-dependent ways of modifying an illegitimate address
915 to be legitimate. If we find one, return the new, valid address. */
918 alpha_legitimize_address (rtx x, rtx scratch,
919 enum machine_mode mode ATTRIBUTE_UNUSED)
921 HOST_WIDE_INT addend;
923 /* If the address is (plus reg const_int) and the CONST_INT is not a
924 valid offset, compute the high part of the constant and add it to
925 the register. Then our address is (plus temp low-part-const). */
926 if (GET_CODE (x) == PLUS
927 && GET_CODE (XEXP (x, 0)) == REG
928 && GET_CODE (XEXP (x, 1)) == CONST_INT
929 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
931 addend = INTVAL (XEXP (x, 1));
932 x = XEXP (x, 0);
933 goto split_addend;
936 /* If the address is (const (plus FOO const_int)), find the low-order
937 part of the CONST_INT. Then load FOO plus any high-order part of the
938 CONST_INT into a register. Our address is (plus reg low-part-const).
939 This is done to reduce the number of GOT entries. */
940 if (can_create_pseudo_p ()
941 && GET_CODE (x) == CONST
942 && GET_CODE (XEXP (x, 0)) == PLUS
943 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
945 addend = INTVAL (XEXP (XEXP (x, 0), 1));
946 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
947 goto split_addend;
950 /* If we have a (plus reg const), emit the load as in (2), then add
951 the two registers, and finally generate (plus reg low-part-const) as
952 our address. */
953 if (can_create_pseudo_p ()
954 && GET_CODE (x) == PLUS
955 && GET_CODE (XEXP (x, 0)) == REG
956 && GET_CODE (XEXP (x, 1)) == CONST
957 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
958 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
960 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
961 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
962 XEXP (XEXP (XEXP (x, 1), 0), 0),
963 NULL_RTX, 1, OPTAB_LIB_WIDEN);
964 goto split_addend;
967 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
968 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
970 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
972 switch (tls_symbolic_operand_type (x))
974 case TLS_MODEL_NONE:
975 break;
977 case TLS_MODEL_GLOBAL_DYNAMIC:
978 start_sequence ();
980 r0 = gen_rtx_REG (Pmode, 0);
981 r16 = gen_rtx_REG (Pmode, 16);
982 tga = get_tls_get_addr ();
983 dest = gen_reg_rtx (Pmode);
984 seq = GEN_INT (alpha_next_sequence_number++);
986 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
987 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
988 insn = emit_call_insn (insn);
989 CONST_OR_PURE_CALL_P (insn) = 1;
990 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
992 insn = get_insns ();
993 end_sequence ();
995 emit_libcall_block (insn, dest, r0, x);
996 return dest;
998 case TLS_MODEL_LOCAL_DYNAMIC:
999 start_sequence ();
1001 r0 = gen_rtx_REG (Pmode, 0);
1002 r16 = gen_rtx_REG (Pmode, 16);
1003 tga = get_tls_get_addr ();
1004 scratch = gen_reg_rtx (Pmode);
1005 seq = GEN_INT (alpha_next_sequence_number++);
1007 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1008 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1009 insn = emit_call_insn (insn);
1010 CONST_OR_PURE_CALL_P (insn) = 1;
1011 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1013 insn = get_insns ();
1014 end_sequence ();
1016 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1017 UNSPEC_TLSLDM_CALL);
1018 emit_libcall_block (insn, scratch, r0, eqv);
1020 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1021 eqv = gen_rtx_CONST (Pmode, eqv);
1023 if (alpha_tls_size == 64)
1025 dest = gen_reg_rtx (Pmode);
1026 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1027 emit_insn (gen_adddi3 (dest, dest, scratch));
1028 return dest;
1030 if (alpha_tls_size == 32)
1032 insn = gen_rtx_HIGH (Pmode, eqv);
1033 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1034 scratch = gen_reg_rtx (Pmode);
1035 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1037 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1039 case TLS_MODEL_INITIAL_EXEC:
1040 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1041 eqv = gen_rtx_CONST (Pmode, eqv);
1042 tp = gen_reg_rtx (Pmode);
1043 scratch = gen_reg_rtx (Pmode);
1044 dest = gen_reg_rtx (Pmode);
1046 emit_insn (gen_load_tp (tp));
1047 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1048 emit_insn (gen_adddi3 (dest, tp, scratch));
1049 return dest;
1051 case TLS_MODEL_LOCAL_EXEC:
1052 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1053 eqv = gen_rtx_CONST (Pmode, eqv);
1054 tp = gen_reg_rtx (Pmode);
1056 emit_insn (gen_load_tp (tp));
1057 if (alpha_tls_size == 32)
1059 insn = gen_rtx_HIGH (Pmode, eqv);
1060 insn = gen_rtx_PLUS (Pmode, tp, insn);
1061 tp = gen_reg_rtx (Pmode);
1062 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1064 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1066 default:
1067 gcc_unreachable ();
1070 if (local_symbolic_operand (x, Pmode))
1072 if (small_symbolic_operand (x, Pmode))
1073 return x;
1074 else
1076 if (can_create_pseudo_p ())
1077 scratch = gen_reg_rtx (Pmode);
1078 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1079 gen_rtx_HIGH (Pmode, x)));
1080 return gen_rtx_LO_SUM (Pmode, scratch, x);
1085 return NULL;
1087 split_addend:
1089 HOST_WIDE_INT low, high;
1091 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1092 addend -= low;
1093 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1094 addend -= high;
1096 if (addend)
1097 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1098 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1099 1, OPTAB_LIB_WIDEN);
1100 if (high)
1101 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1102 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1103 1, OPTAB_LIB_WIDEN);
1105 return plus_constant (x, low);
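/* For example, an addend of 0x9000 does not fit a signed 16-bit displacement,
   so it is split as high = 0x10000 (added with a single ldah) and
   low = -0x7000, which is folded back into the final displacement by
   plus_constant above.  */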
1109 /* Primarily this is required for TLS symbols, but given that our move
1110 patterns *ought* to be able to handle any symbol at any time, we
1111 should never be spilling symbolic operands to the constant pool, ever. */
1113 static bool
1114 alpha_cannot_force_const_mem (rtx x)
1116 enum rtx_code code = GET_CODE (x);
1117 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1120 /* We do not allow indirect calls to be optimized into sibling calls, nor
1121 can we allow a call to a function with a different GP to be optimized
1122 into a sibcall. */
1124 static bool
1125 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1127 /* Can't do indirect tail calls, since we don't know if the target
1128 uses the same GP. */
1129 if (!decl)
1130 return false;
1132 /* Otherwise, we can make a tail call if the target function shares
1133 the same GP. */
1134 return decl_has_samegp (decl);
1138 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1140 rtx x = *px;
1142 /* Don't re-split. */
1143 if (GET_CODE (x) == LO_SUM)
1144 return -1;
1146 return small_symbolic_operand (x, Pmode) != 0;
1149 static int
1150 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1152 rtx x = *px;
1154 /* Don't re-split. */
1155 if (GET_CODE (x) == LO_SUM)
1156 return -1;
1158 if (small_symbolic_operand (x, Pmode))
1160 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1161 *px = x;
1162 return -1;
1165 return 0;
1169 split_small_symbolic_operand (rtx x)
1171 x = copy_insn (x);
1172 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1173 return x;
1176 /* Indicate that INSN cannot be duplicated. This is true for any insn
1177 that we've marked with gpdisp relocs, since those have to stay in
1178 1-1 correspondence with one another.
1180 Technically we could copy them if we could set up a mapping from one
1181 sequence number to another, across the set of insns to be duplicated.
1182 This seems overly complicated and error-prone since interblock motion
1183 from sched-ebb could move one of the pair of insns to a different block.
1185 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1186 then they'll be in a different block from their ldgp. Which could lead
1187 the bb reorder code to think that it would be ok to copy just the block
1188 containing the call and branch to the block containing the ldgp. */
1190 static bool
1191 alpha_cannot_copy_insn_p (rtx insn)
1193 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1194 return false;
1195 if (recog_memoized (insn) >= 0)
1196 return get_attr_cannot_copy (insn);
1197 else
1198 return false;
1202 /* Try a machine-dependent way of reloading an illegitimate address
1203 operand. If we find one, push the reload and return the new rtx. */
1206 alpha_legitimize_reload_address (rtx x,
1207 enum machine_mode mode ATTRIBUTE_UNUSED,
1208 int opnum, int type,
1209 int ind_levels ATTRIBUTE_UNUSED)
1211 /* We must recognize output that we have already generated ourselves. */
1212 if (GET_CODE (x) == PLUS
1213 && GET_CODE (XEXP (x, 0)) == PLUS
1214 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1215 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1216 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1218 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1219 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1220 opnum, type);
1221 return x;
1224 /* We wish to handle large displacements off a base register by
1225 splitting the addend across an ldah and the mem insn. This
1226 cuts number of extra insns needed from 3 to 1. */
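/* For example, a 0x123456(reg) access splits into high = 0x120000, reloaded
   into a base register with one ldah, and low = 0x3456, which stays in the
   mem as its displacement.  */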
1227 if (GET_CODE (x) == PLUS
1228 && GET_CODE (XEXP (x, 0)) == REG
1229 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1230 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1231 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1233 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1234 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1235 HOST_WIDE_INT high
1236 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1238 /* Check for 32-bit overflow. */
1239 if (high + low != val)
1240 return NULL_RTX;
1242 /* Reload the high part into a base reg; leave the low part
1243 in the mem directly. */
1244 x = gen_rtx_PLUS (GET_MODE (x),
1245 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1246 GEN_INT (high)),
1247 GEN_INT (low));
1249 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1250 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1251 opnum, type);
1252 return x;
1255 return NULL_RTX;
1258 /* Compute a (partial) cost for rtx X. Return true if the complete
1259 cost has been computed, and false if subexpressions should be
1260 scanned. In either case, *TOTAL contains the cost result. */
1262 static bool
1263 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1265 enum machine_mode mode = GET_MODE (x);
1266 bool float_mode_p = FLOAT_MODE_P (mode);
1267 const struct alpha_rtx_cost_data *cost_data;
1269 if (optimize_size)
1270 cost_data = &alpha_rtx_cost_size;
1271 else
1272 cost_data = &alpha_rtx_cost_data[alpha_tune];
1274 switch (code)
1276 case CONST_INT:
1277 /* If this is an 8-bit constant, return zero since it can be used
1278 nearly anywhere with no cost. If it is a valid operand for an
1279 ADD or AND, likewise return 0 if we know it will be used in that
1280 context. Otherwise, return 2 since it might be used there later.
1281 All other constants take at least two insns. */
1282 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1284 *total = 0;
1285 return true;
1287 /* FALLTHRU */
1289 case CONST_DOUBLE:
1290 if (x == CONST0_RTX (mode))
1291 *total = 0;
1292 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1293 || (outer_code == AND && and_operand (x, VOIDmode)))
1294 *total = 0;
1295 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1296 *total = 2;
1297 else
1298 *total = COSTS_N_INSNS (2);
1299 return true;
1301 case CONST:
1302 case SYMBOL_REF:
1303 case LABEL_REF:
1304 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1305 *total = COSTS_N_INSNS (outer_code != MEM);
1306 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1307 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1308 else if (tls_symbolic_operand_type (x))
1309 /* Estimate of cost for call_pal rduniq. */
1310 /* ??? How many insns do we emit here? More than one... */
1311 *total = COSTS_N_INSNS (15);
1312 else
1313 /* Otherwise we do a load from the GOT. */
1314 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1315 return true;
1317 case HIGH:
1318 /* This is effectively an add_operand. */
1319 *total = 2;
1320 return true;
1322 case PLUS:
1323 case MINUS:
1324 if (float_mode_p)
1325 *total = cost_data->fp_add;
1326 else if (GET_CODE (XEXP (x, 0)) == MULT
1327 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1329 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1330 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1331 return true;
1333 return false;
1335 case MULT:
1336 if (float_mode_p)
1337 *total = cost_data->fp_mult;
1338 else if (mode == DImode)
1339 *total = cost_data->int_mult_di;
1340 else
1341 *total = cost_data->int_mult_si;
1342 return false;
1344 case ASHIFT:
1345 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1346 && INTVAL (XEXP (x, 1)) <= 3)
1348 *total = COSTS_N_INSNS (1);
1349 return false;
1351 /* FALLTHRU */
1353 case ASHIFTRT:
1354 case LSHIFTRT:
1355 *total = cost_data->int_shift;
1356 return false;
1358 case IF_THEN_ELSE:
1359 if (float_mode_p)
1360 *total = cost_data->fp_add;
1361 else
1362 *total = cost_data->int_cmov;
1363 return false;
1365 case DIV:
1366 case UDIV:
1367 case MOD:
1368 case UMOD:
1369 if (!float_mode_p)
1370 *total = cost_data->int_div;
1371 else if (mode == SFmode)
1372 *total = cost_data->fp_div_sf;
1373 else
1374 *total = cost_data->fp_div_df;
1375 return false;
1377 case MEM:
1378 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1379 return true;
1381 case NEG:
1382 if (! float_mode_p)
1384 *total = COSTS_N_INSNS (1);
1385 return false;
1387 /* FALLTHRU */
1389 case ABS:
1390 if (! float_mode_p)
1392 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1393 return false;
1395 /* FALLTHRU */
1397 case FLOAT:
1398 case UNSIGNED_FLOAT:
1399 case FIX:
1400 case UNSIGNED_FIX:
1401 case FLOAT_TRUNCATE:
1402 *total = cost_data->fp_add;
1403 return false;
1405 case FLOAT_EXTEND:
1406 if (GET_CODE (XEXP (x, 0)) == MEM)
1407 *total = 0;
1408 else
1409 *total = cost_data->fp_add;
1410 return false;
1412 default:
1413 return false;
1417 /* REF is an alignable memory location. Place an aligned SImode
1418 reference into *PALIGNED_MEM and the number of bits to shift into
1419 *PBITNUM. SCRATCH is a free register for use in reloading out
1420 of range stack slots. */
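/* For example, a QImode reference at byte offset 5 from an aligned base
   yields the SImode word at offset 4 in *PALIGNED_MEM and, on a
   little-endian target, a *PBITNUM of 8.  */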
1422 void
1423 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1425 rtx base;
1426 HOST_WIDE_INT disp, offset;
1428 gcc_assert (GET_CODE (ref) == MEM);
1430 if (reload_in_progress
1431 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1433 base = find_replacement (&XEXP (ref, 0));
1434 gcc_assert (memory_address_p (GET_MODE (ref), base));
1436 else
1437 base = XEXP (ref, 0);
1439 if (GET_CODE (base) == PLUS)
1440 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1441 else
1442 disp = 0;
1444 /* Find the byte offset within an aligned word. If the memory itself is
1445 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1446 will have examined the base register and determined it is aligned, and
1447 thus displacements from it are naturally alignable. */
1448 if (MEM_ALIGN (ref) >= 32)
1449 offset = 0;
1450 else
1451 offset = disp & 3;
1453 /* Access the entire aligned word. */
1454 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1456 /* Convert the byte offset within the word to a bit offset. */
1457 if (WORDS_BIG_ENDIAN)
1458 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1459 else
1460 offset *= 8;
1461 *pbitnum = GEN_INT (offset);
 1464 /* Similar, but just get the address. Handle the two reload cases. */
1468 get_unaligned_address (rtx ref)
1470 rtx base;
1471 HOST_WIDE_INT offset = 0;
1473 gcc_assert (GET_CODE (ref) == MEM);
1475 if (reload_in_progress
1476 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1478 base = find_replacement (&XEXP (ref, 0));
1480 gcc_assert (memory_address_p (GET_MODE (ref), base));
1482 else
1483 base = XEXP (ref, 0);
1485 if (GET_CODE (base) == PLUS)
1486 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1488 return plus_constant (base, offset);
1491 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1492 X is always returned in a register. */
1495 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1497 if (GET_CODE (addr) == PLUS)
1499 ofs += INTVAL (XEXP (addr, 1));
1500 addr = XEXP (addr, 0);
1503 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1504 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1507 /* On the Alpha, all (non-symbolic) constants except zero go into
1508 a floating-point register via memory. Note that we cannot
1509 return anything that is not a subset of CLASS, and that some
1510 symbolic constants cannot be dropped to memory. */
1512 enum reg_class
1513 alpha_preferred_reload_class(rtx x, enum reg_class class)
1515 /* Zero is present in any register class. */
1516 if (x == CONST0_RTX (GET_MODE (x)))
1517 return class;
1519 /* These sorts of constants we can easily drop to memory. */
1520 if (GET_CODE (x) == CONST_INT
1521 || GET_CODE (x) == CONST_DOUBLE
1522 || GET_CODE (x) == CONST_VECTOR)
1524 if (class == FLOAT_REGS)
1525 return NO_REGS;
1526 if (class == ALL_REGS)
1527 return GENERAL_REGS;
1528 return class;
1531 /* All other kinds of constants should not (and in the case of HIGH
1532 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1533 secondary reload. */
1534 if (CONSTANT_P (x))
1535 return (class == ALL_REGS ? GENERAL_REGS : class);
1537 return class;
1540 /* Inform reload about cases where moving X with a mode MODE to a register in
1541 CLASS requires an extra scratch or immediate register. Return the class
1542 needed for the immediate register. */
1544 static enum reg_class
1545 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1546 enum machine_mode mode, secondary_reload_info *sri)
1548 /* Loading and storing HImode or QImode values to and from memory
1549 usually requires a scratch register. */
1550 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1552 if (any_memory_operand (x, mode))
1554 if (in_p)
1556 if (!aligned_memory_operand (x, mode))
1557 sri->icode = reload_in_optab[mode];
1559 else
1560 sri->icode = reload_out_optab[mode];
1561 return NO_REGS;
1565 /* We also cannot do integral arithmetic into FP regs, as might result
1566 from register elimination into a DImode fp register. */
1567 if (class == FLOAT_REGS)
1569 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1570 return GENERAL_REGS;
1571 if (in_p && INTEGRAL_MODE_P (mode)
1572 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1573 return GENERAL_REGS;
1576 return NO_REGS;
1579 /* Subfunction of the following function. Update the flags of any MEM
1580 found in part of X. */
1582 static int
1583 alpha_set_memflags_1 (rtx *xp, void *data)
1585 rtx x = *xp, orig = (rtx) data;
1587 if (GET_CODE (x) != MEM)
1588 return 0;
1590 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1591 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1592 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1593 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1594 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1596 /* Sadly, we cannot use alias sets because the extra aliasing
1597 produced by the AND interferes. Given that two-byte quantities
1598 are the only thing we would be able to differentiate anyway,
1599 there does not seem to be any point in convoluting the early
1600 out of the alias check. */
1602 return -1;
1605 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1606 generated to perform a memory operation, look for any MEMs in either
1607 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1608 volatile flags from REF into each of the MEMs found. If REF is not
1609 a MEM, don't do anything. */
1611 void
1612 alpha_set_memflags (rtx insn, rtx ref)
1614 rtx *base_ptr;
1616 if (GET_CODE (ref) != MEM)
1617 return;
1619 /* This is only called from alpha.md, after having had something
1620 generated from one of the insn patterns. So if everything is
1621 zero, the pattern is already up-to-date. */
1622 if (!MEM_VOLATILE_P (ref)
1623 && !MEM_IN_STRUCT_P (ref)
1624 && !MEM_SCALAR_P (ref)
1625 && !MEM_NOTRAP_P (ref)
1626 && !MEM_READONLY_P (ref))
1627 return;
1629 if (INSN_P (insn))
1630 base_ptr = &PATTERN (insn);
1631 else
1632 base_ptr = &insn;
1633 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1636 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1637 int, bool);
1639 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1640 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1641 and return pc_rtx if successful. */
1643 static rtx
1644 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1645 HOST_WIDE_INT c, int n, bool no_output)
1647 HOST_WIDE_INT new;
1648 int i, bits;
1649 /* Use a pseudo if highly optimizing and still generating RTL. */
1650 rtx subtarget
1651 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1652 rtx temp, insn;
1654 /* If this is a sign-extended 32-bit constant, we can do this in at most
1655 three insns, so do it if we have enough insns left. We always have
1656 a sign-extended 32-bit constant when compiling on a narrow machine. */
1658 if (HOST_BITS_PER_WIDE_INT != 64
1659 || c >> 31 == -1 || c >> 31 == 0)
1661 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1662 HOST_WIDE_INT tmp1 = c - low;
1663 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1664 HOST_WIDE_INT extra = 0;
1666 /* If HIGH will be interpreted as negative but the constant is
 1667 positive, we must adjust it to do two ldah insns. */
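/* For example, c = 0x7fff8000 initially gives high = -0x8000; since c is
   positive we instead use high = 0x4000 together with extra = 0x4000, so the
   value is built as (0x4000 << 16) + (0x4000 << 16) - 0x8000.  */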
1669 if ((high & 0x8000) != 0 && c >= 0)
1671 extra = 0x4000;
1672 tmp1 -= 0x40000000;
1673 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1676 if (c == low || (low == 0 && extra == 0))
1678 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1679 but that meant that we can't handle INT_MIN on 32-bit machines
1680 (like NT/Alpha), because we recurse indefinitely through
1681 emit_move_insn to gen_movdi. So instead, since we know exactly
1682 what we want, create it explicitly. */
1684 if (no_output)
1685 return pc_rtx;
1686 if (target == NULL)
1687 target = gen_reg_rtx (mode);
1688 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1689 return target;
1691 else if (n >= 2 + (extra != 0))
1693 if (no_output)
1694 return pc_rtx;
1695 if (!can_create_pseudo_p ())
1697 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1698 temp = target;
1700 else
1701 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1702 subtarget, mode);
1704 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1705 This means that if we go through expand_binop, we'll try to
1706 generate extensions, etc, which will require new pseudos, which
1707 will fail during some split phases. The SImode add patterns
1708 still exist, but are not named. So build the insns by hand. */
1710 if (extra != 0)
1712 if (! subtarget)
1713 subtarget = gen_reg_rtx (mode);
1714 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1715 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1716 emit_insn (insn);
1717 temp = subtarget;
1720 if (target == NULL)
1721 target = gen_reg_rtx (mode);
1722 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1723 insn = gen_rtx_SET (VOIDmode, target, insn);
1724 emit_insn (insn);
1725 return target;
1729 /* If we couldn't do it that way, try some other methods. But if we have
1730 no instructions left, don't bother. Likewise, if this is SImode and
1731 we can't make pseudos, we can't do anything since the expand_binop
1732 and expand_unop calls will widen and try to make pseudos. */
1734 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1735 return 0;
1737 /* Next, see if we can load a related constant and then shift and possibly
1738 negate it to get the constant we want. Try this once each increasing
1739 numbers of insns. */
1741 for (i = 1; i < n; i++)
1743 /* First, see if minus some low bits, we've an easy load of
1744 high bits. */
1746 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1747 if (new != 0)
1749 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1750 if (temp)
1752 if (no_output)
1753 return temp;
1754 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1755 target, 0, OPTAB_WIDEN);
1759 /* Next try complementing. */
1760 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1761 if (temp)
1763 if (no_output)
1764 return temp;
1765 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1768 /* Next try to form a constant and do a left shift. We can do this
1769 if some low-order bits are zero; the exact_log2 call below tells
1770 us that information. The bits we are shifting out could be any
1771 value, but here we'll just try the 0- and sign-extended forms of
1772 the constant. To try to increase the chance of having the same
1773 constant in more than one insn, start at the highest number of
1774 bits to shift, but try all possibilities in case a ZAPNOT will
1775 be useful. */
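/* C & -C isolates the lowest set bit, so exact_log2 (c & -c) below is the
   number of trailing zero bits; e.g. for c = 0x1200 it yields 9, and the loop
   then tries to load c >> 9, c >> 8, ... and shift the result back up.  */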
1777 bits = exact_log2 (c & -c);
1778 if (bits > 0)
1779 for (; bits > 0; bits--)
1781 new = c >> bits;
1782 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1783 if (!temp && c < 0)
1785 new = (unsigned HOST_WIDE_INT)c >> bits;
1786 temp = alpha_emit_set_const (subtarget, mode, new,
1787 i, no_output);
1789 if (temp)
1791 if (no_output)
1792 return temp;
1793 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1794 target, 0, OPTAB_WIDEN);
1798 /* Now try high-order zero bits. Here we try the shifted-in bits as
1799 all zero and all ones. Be careful to avoid shifting outside the
1800 mode and to avoid shifting outside the host wide int size. */
1801 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1802 confuse the recursive call and set all of the high 32 bits. */
1804 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1805 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1806 if (bits > 0)
1807 for (; bits > 0; bits--)
1809 new = c << bits;
1810 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1811 if (!temp)
1813 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1814 temp = alpha_emit_set_const (subtarget, mode, new,
1815 i, no_output);
1817 if (temp)
1819 if (no_output)
1820 return temp;
1821 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1822 target, 1, OPTAB_WIDEN);
1826 /* Now try high-order 1 bits. We get that with a sign-extension.
1827 But one bit isn't enough here. Be careful to avoid shifting outside
1828 the mode and to avoid shifting outside the host wide int size. */
1830 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1831 - floor_log2 (~ c) - 2);
1832 if (bits > 0)
1833 for (; bits > 0; bits--)
1835 new = c << bits;
1836 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1837 if (!temp)
1839 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1840 temp = alpha_emit_set_const (subtarget, mode, new,
1841 i, no_output);
1843 if (temp)
1845 if (no_output)
1846 return temp;
1847 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1848 target, 0, OPTAB_WIDEN);
1853 #if HOST_BITS_PER_WIDE_INT == 64
1854 /* Finally, see if can load a value into the target that is the same as the
1855 constant except that all bytes that are 0 are changed to be 0xff. If we
1856 can, then we can do a ZAPNOT to obtain the desired constant. */
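/* For example, c = 0x12000034 in SImode gives NEW = 0x12ffff34 (the zero
   inner bytes forced to 0xff), and the final AND constant C | ~NEW has only
   0x00/0xff bytes, so the AND is a single zapnot.  This only wins when NEW
   happens to be cheaper to load than C itself.  */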
1858 new = c;
1859 for (i = 0; i < 64; i += 8)
1860 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1861 new |= (HOST_WIDE_INT) 0xff << i;
1863 /* We are only called for SImode and DImode. If this is SImode, ensure that
1864 we are sign extended to a full word. */
1866 if (mode == SImode)
1867 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1869 if (new != c)
1871 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1872 if (temp)
1874 if (no_output)
1875 return temp;
1876 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1877 target, 0, OPTAB_WIDEN);
1880 #endif
1882 return 0;
1885 /* Try to output insns to set TARGET equal to the constant C if it can be
1886 done in less than N insns. Do all computations in MODE. Returns the place
1887 where the output has been placed if it can be done and the insns have been
1888 emitted. If it would take more than N insns, zero is returned and no
 1889 insns are emitted. */
1891 static rtx
1892 alpha_emit_set_const (rtx target, enum machine_mode mode,
1893 HOST_WIDE_INT c, int n, bool no_output)
1895 enum machine_mode orig_mode = mode;
1896 rtx orig_target = target;
1897 rtx result = 0;
1898 int i;
1900 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1901 can't load this constant in one insn, do this in DImode. */
1902 if (!can_create_pseudo_p () && mode == SImode
1903 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1905 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1906 if (result)
1907 return result;
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1910 mode = DImode;
1912 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1914 target = no_output ? NULL : gen_lowpart (DImode, target);
1915 mode = DImode;
1918 /* Try 1 insn, then 2, then up to N. */
1919 for (i = 1; i <= n; i++)
1921 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1922 if (result)
1924 rtx insn, set;
1926 if (no_output)
1927 return result;
1929 insn = get_last_insn ();
1930 set = single_set (insn);
1931 if (! CONSTANT_P (SET_SRC (set)))
1932 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1933 break;
1937 /* Allow for the case where we changed the mode of TARGET. */
1938 if (result)
1940 if (result == target)
1941 result = orig_target;
1942 else if (mode != orig_mode)
1943 result = gen_lowpart (orig_mode, result);
1946 return result;
1949 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
 1950 fall back to a straightforward decomposition. We do this to avoid
1951 exponential run times encountered when looking for longer sequences
1952 with alpha_emit_set_const. */
1954 static rtx
1955 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1957 HOST_WIDE_INT d1, d2, d3, d4;
1959 /* Decompose the entire word */
1960 #if HOST_BITS_PER_WIDE_INT >= 64
1961 gcc_assert (c2 == -(c1 < 0));
1962 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1963 c1 -= d1;
1964 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1965 c1 = (c1 - d2) >> 32;
1966 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d3;
1968 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d4);
1970 #else
1971 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1972 c1 -= d1;
1973 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c1 == d2);
1975 c2 += (d2 < 0);
1976 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1977 c2 -= d3;
1978 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1979 gcc_assert (c2 == d4);
1980 #endif
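/* In either case c == ((d4 + d3) << 32) + d2 + d1, where d1 and d3 are
   signed 16-bit values and d2 and d4 are signed 32-bit values with their low
   16 bits clear, so each step below corresponds to an lda or ldah immediate.  */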
1982 /* Construct the high word */
1983 if (d4)
1985 emit_move_insn (target, GEN_INT (d4));
1986 if (d3)
1987 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1989 else
1990 emit_move_insn (target, GEN_INT (d3));
1992 /* Shift it into place */
1993 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1995 /* Add in the low bits. */
1996 if (d2)
1997 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1998 if (d1)
1999 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2001 return target;
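/* As a worked instance of the decomposition above (illustrative only):
   for c = 0x123456789abcdef0 on a 64-bit host the pieces come out as

	d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000

   and the emitted sequence computes

	((0x12340000 + 0x5679) << 32) + -0x65430000 + -0x2110
	  = 0x123456789abcdef0

   where every piece is representable as an lda/ldah style signed
   immediate.  */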
2004 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2005 the low 64 bits. */
2007 static void
2008 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2010 HOST_WIDE_INT i0, i1;
2012 if (GET_CODE (x) == CONST_VECTOR)
2013 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2016 if (GET_CODE (x) == CONST_INT)
2018 i0 = INTVAL (x);
2019 i1 = -(i0 < 0);
2021 else if (HOST_BITS_PER_WIDE_INT >= 64)
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = -(i0 < 0);
2026 else
2028 i0 = CONST_DOUBLE_LOW (x);
2029 i1 = CONST_DOUBLE_HIGH (x);
2032 *p0 = i0;
2033 *p1 = i1;
2036 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2037 are willing to load the value into a register via a move pattern.
2038 Normally this is all symbolic constants, integral constants that
2039 take three or fewer instructions, and floating-point zero. */
2041 bool
2042 alpha_legitimate_constant_p (rtx x)
2044 enum machine_mode mode = GET_MODE (x);
2045 HOST_WIDE_INT i0, i1;
2047 switch (GET_CODE (x))
2049 case CONST:
2050 case LABEL_REF:
2051 case HIGH:
2052 return true;
2054 case SYMBOL_REF:
2055 /* TLS symbols are never valid. */
2056 return SYMBOL_REF_TLS_MODEL (x) == 0;
2058 case CONST_DOUBLE:
2059 if (x == CONST0_RTX (mode))
2060 return true;
2061 if (FLOAT_MODE_P (mode))
2062 return false;
2063 goto do_integer;
2065 case CONST_VECTOR:
2066 if (x == CONST0_RTX (mode))
2067 return true;
2068 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2069 return false;
2070 if (GET_MODE_SIZE (mode) != 8)
2071 return false;
2072 goto do_integer;
2074 case CONST_INT:
2075 do_integer:
2076 if (TARGET_BUILD_CONSTANTS)
2077 return true;
2078 alpha_extract_integer (x, &i0, &i1);
2079 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2080 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2081 return false;
2083 default:
2084 return false;
2088 /* Operand 1 is known to be a constant, and should require more than one
2089 instruction to load. Emit that multi-part load. */
2091 bool
2092 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2094 HOST_WIDE_INT i0, i1;
2095 rtx temp = NULL_RTX;
2097 alpha_extract_integer (operands[1], &i0, &i1);
2099 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2100 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2102 if (!temp && TARGET_BUILD_CONSTANTS)
2103 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2105 if (temp)
2107 if (!rtx_equal_p (operands[0], temp))
2108 emit_move_insn (operands[0], temp);
2109 return true;
2112 return false;
2115 /* Expand a move instruction; return true if all work is done.
2116 We don't handle non-bwx subword loads here. */
2118 bool
2119 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2121 /* If the output is not a register, the input must be. */
2122 if (GET_CODE (operands[0]) == MEM
2123 && ! reg_or_0_operand (operands[1], mode))
2124 operands[1] = force_reg (mode, operands[1]);
2126 /* Allow legitimize_address to perform some simplifications. */
2127 if (mode == Pmode && symbolic_operand (operands[1], mode))
2129 rtx tmp;
2131 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2132 if (tmp)
2134 if (tmp == operands[0])
2135 return true;
2136 operands[1] = tmp;
2137 return false;
2141 /* Early out for non-constants and valid constants. */
2142 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2143 return false;
2145 /* Split large integers. */
2146 if (GET_CODE (operands[1]) == CONST_INT
2147 || GET_CODE (operands[1]) == CONST_DOUBLE
2148 || GET_CODE (operands[1]) == CONST_VECTOR)
2150 if (alpha_split_const_mov (mode, operands))
2151 return true;
2154 /* Otherwise we've nothing left but to drop the thing to memory. */
2155 operands[1] = force_const_mem (mode, operands[1]);
2156 if (reload_in_progress)
2158 emit_move_insn (operands[0], XEXP (operands[1], 0));
2159 operands[1] = replace_equiv_address (operands[1], operands[0]);
2161 else
2162 operands[1] = validize_mem (operands[1]);
2163 return false;
2166 /* Expand a non-bwx QImode or HImode move instruction;
2167 return true if all work is done. */
2169 bool
2170 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2172 rtx seq;
2174 /* If the output is not a register, the input must be. */
2175 if (MEM_P (operands[0]))
2176 operands[1] = force_reg (mode, operands[1]);
2178 /* Handle four memory cases, unaligned and aligned for either the input
2179 or the output. The only case where we can be called during reload is
2180 for aligned loads; all other cases require temporaries. */
2182 if (any_memory_operand (operands[1], mode))
2184 if (aligned_memory_operand (operands[1], mode))
2186 if (reload_in_progress)
2188 if (mode == QImode)
2189 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2190 else
2191 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2192 emit_insn (seq);
2194 else
2196 rtx aligned_mem, bitnum;
2197 rtx scratch = gen_reg_rtx (SImode);
2198 rtx subtarget;
2199 bool copyout;
2201 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2203 subtarget = operands[0];
2204 if (GET_CODE (subtarget) == REG)
2205 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2206 else
2207 subtarget = gen_reg_rtx (DImode), copyout = true;
2209 if (mode == QImode)
2210 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2211 bitnum, scratch);
2212 else
2213 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2214 bitnum, scratch);
2215 emit_insn (seq);
2217 if (copyout)
2218 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2221 else
2223 /* Don't pass these as parameters since that makes the generated
2224 code depend on parameter evaluation order which will cause
2225 bootstrap failures. */
2227 rtx temp1, temp2, subtarget, ua;
2228 bool copyout;
2230 temp1 = gen_reg_rtx (DImode);
2231 temp2 = gen_reg_rtx (DImode);
2233 subtarget = operands[0];
2234 if (GET_CODE (subtarget) == REG)
2235 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2236 else
2237 subtarget = gen_reg_rtx (DImode), copyout = true;
2239 ua = get_unaligned_address (operands[1]);
2240 if (mode == QImode)
2241 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2242 else
2243 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2245 alpha_set_memflags (seq, operands[1]);
2246 emit_insn (seq);
2248 if (copyout)
2249 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2251 return true;
2254 if (any_memory_operand (operands[0], mode))
2256 if (aligned_memory_operand (operands[0], mode))
2258 rtx aligned_mem, bitnum;
2259 rtx temp1 = gen_reg_rtx (SImode);
2260 rtx temp2 = gen_reg_rtx (SImode);
2262 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2264 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2265 temp1, temp2));
2267 else
2269 rtx temp1 = gen_reg_rtx (DImode);
2270 rtx temp2 = gen_reg_rtx (DImode);
2271 rtx temp3 = gen_reg_rtx (DImode);
2272 rtx ua = get_unaligned_address (operands[0]);
2274 if (mode == QImode)
2275 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2276 else
2277 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2279 alpha_set_memflags (seq, operands[0]);
2280 emit_insn (seq);
2282 return true;
2285 return false;
2288 /* Implement the movmisalign patterns. One of the operands is a memory
2289 that is not naturally aligned. Emit instructions to load it. */
2291 void
2292 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2294 /* Honor misaligned loads, for those we promised to do so. */
2295 if (MEM_P (operands[1]))
2297 rtx tmp;
2299 if (register_operand (operands[0], mode))
2300 tmp = operands[0];
2301 else
2302 tmp = gen_reg_rtx (mode);
2304 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2305 if (tmp != operands[0])
2306 emit_move_insn (operands[0], tmp);
2308 else if (MEM_P (operands[0]))
2310 if (!reg_or_0_operand (operands[1], mode))
2311 operands[1] = force_reg (mode, operands[1]);
2312 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2314 else
2315 gcc_unreachable ();
2318 /* Generate an unsigned DImode to FP conversion. This is the same code
2319 optabs would emit if we didn't have TFmode patterns.
2321 For SFmode, this is the only construction I've found that can pass
2322 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2323 intermediates will work, because you'll get intermediate rounding
2324 that ruins the end result. Some of this could be fixed by turning
2325 on round-to-positive-infinity, but that requires diddling the fpsr,
2326 which kills performance. I tried turning this around and converting
2327 to a negative number, so that I could turn on /m, but either I did
2328 it wrong or there's something else, because I wound up with the exact
2329 same single-bit error. There is a branch-less form of this same code:
2331 srl $16,1,$1
2332 and $16,1,$2
2333 cmplt $16,0,$3
2334 or $1,$2,$2
2335 cmovge $16,$16,$2
2336 itoft $3,$f10
2337 itoft $2,$f11
2338 cvtqs $f11,$f11
2339 adds $f11,$f11,$f0
2340 fcmoveq $f10,$f11,$f0
2342 I'm not using it because it's the same number of instructions as
2343 this branch-full form, and it has more serialized long latency
2344 instructions on the critical path.
2346 For DFmode, we can avoid rounding errors by breaking up the word
2347 into two pieces, converting them separately, and adding them back:
2349 LC0: .long 0,0x5f800000
2351 itoft $16,$f11
2352 lda $2,LC0
2353 cmplt $16,0,$1
2354 cpyse $f11,$f31,$f10
2355 cpyse $f31,$f11,$f11
2356 s4addq $1,$2,$1
2357 lds $f12,0($1)
2358 cvtqt $f10,$f10
2359 cvtqt $f11,$f11
2360 addt $f12,$f10,$f0
2361 addt $f0,$f11,$f0
2363 This doesn't seem to be a clear-cut win over the optabs form.
2364 It probably all depends on the distribution of numbers being
2365 converted -- in the optabs form, all but high-bit-set has a
2366 much lower minimum execution time. */
2368 void
2369 alpha_emit_floatuns (rtx operands[2])
2371 rtx neglab, donelab, i0, i1, f0, in, out;
2372 enum machine_mode mode;
2374 out = operands[0];
2375 in = force_reg (DImode, operands[1]);
2376 mode = GET_MODE (out);
2377 neglab = gen_label_rtx ();
2378 donelab = gen_label_rtx ();
2379 i0 = gen_reg_rtx (DImode);
2380 i1 = gen_reg_rtx (DImode);
2381 f0 = gen_reg_rtx (mode);
2383 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2385 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2386 emit_jump_insn (gen_jump (donelab));
2387 emit_barrier ();
2389 emit_label (neglab);
2391 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2392 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2393 emit_insn (gen_iordi3 (i0, i0, i1));
2394 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2395 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2397 emit_label (donelab);
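/* In C terms, the branch-full sequence emitted above behaves like the
   following sketch (illustrative only; the name udi_to_fp is made up):

     double udi_to_fp (unsigned long x)
     {
       if ((long) x >= 0)
	 return (double) (long) x;
       unsigned long q = (x >> 1) | (x & 1);
       double f = (double) (long) q;
       return f + f;
     }

   Folding the discarded low bit back into Q keeps the rounding of the
   halved value consistent with rounding X itself, so doubling the result
   does not introduce a second rounding error.  */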
2400 /* Generate the comparison for a conditional branch. */
2403 alpha_emit_conditional_branch (enum rtx_code code)
2405 enum rtx_code cmp_code, branch_code;
2406 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2407 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2408 rtx tem;
2410 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2412 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2413 op1 = const0_rtx;
2414 alpha_compare.fp_p = 0;
2417 /* The general case: fold the comparison code to the types of compares
2418 that we have, choosing the branch as necessary. */
2419 switch (code)
2421 case EQ: case LE: case LT: case LEU: case LTU:
2422 case UNORDERED:
2423 /* We have these compares: */
2424 cmp_code = code, branch_code = NE;
2425 break;
2427 case NE:
2428 case ORDERED:
2429 /* These must be reversed. */
2430 cmp_code = reverse_condition (code), branch_code = EQ;
2431 break;
2433 case GE: case GT: case GEU: case GTU:
2434 /* For FP, we swap them, for INT, we reverse them. */
2435 if (alpha_compare.fp_p)
2437 cmp_code = swap_condition (code);
2438 branch_code = NE;
2439 tem = op0, op0 = op1, op1 = tem;
2441 else
2443 cmp_code = reverse_condition (code);
2444 branch_code = EQ;
2446 break;
2448 default:
2449 gcc_unreachable ();
2452 if (alpha_compare.fp_p)
2454 cmp_mode = DFmode;
2455 if (flag_unsafe_math_optimizations)
2457 /* When we are not as concerned about non-finite values, and we
2458 are comparing against zero, we can branch directly. */
2459 if (op1 == CONST0_RTX (DFmode))
2460 cmp_code = UNKNOWN, branch_code = code;
2461 else if (op0 == CONST0_RTX (DFmode))
2463 /* Undo the swap we probably did just above. */
2464 tem = op0, op0 = op1, op1 = tem;
2465 branch_code = swap_condition (cmp_code);
2466 cmp_code = UNKNOWN;
2469 else
2471 /* ??? We mark the branch mode to be CCmode to prevent the
2472 compare and branch from being combined, since the compare
2473 insn follows IEEE rules that the branch does not. */
2474 branch_mode = CCmode;
2477 else
2479 cmp_mode = DImode;
2481 /* The following optimizations are only for signed compares. */
2482 if (code != LEU && code != LTU && code != GEU && code != GTU)
2484 /* Whee. Compare and branch against 0 directly. */
2485 if (op1 == const0_rtx)
2486 cmp_code = UNKNOWN, branch_code = code;
2488 /* If the constant doesn't fit into an immediate, but can
2489 be generated by lda/ldah, we adjust the argument and
2490 compare against zero, so we can use beq/bne directly. */
2491 /* ??? Don't do this when comparing against symbols, otherwise
2492 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2493 be declared false out of hand (at least for non-weak). */
2494 else if (GET_CODE (op1) == CONST_INT
2495 && (code == EQ || code == NE)
2496 && !(symbolic_operand (op0, VOIDmode)
2497 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2499 rtx n_op1 = GEN_INT (-INTVAL (op1));
2501 if (! satisfies_constraint_I (op1)
2502 && (satisfies_constraint_K (n_op1)
2503 || satisfies_constraint_L (n_op1)))
2504 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2508 if (!reg_or_0_operand (op0, DImode))
2509 op0 = force_reg (DImode, op0);
2510 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2511 op1 = force_reg (DImode, op1);
2514 /* Emit an initial compare instruction, if necessary. */
2515 tem = op0;
2516 if (cmp_code != UNKNOWN)
2518 tem = gen_reg_rtx (cmp_mode);
2519 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2522 /* Zero the operands. */
2523 memset (&alpha_compare, 0, sizeof (alpha_compare));
2525 /* Return the branch comparison. */
2526 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
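/* For example (illustrative only), a signed integer (op0 > op1) branch is
   reversed to LE and branched on EQ by the code above, giving

	cmple	op0,op1,tmp
	beq	tmp,target

   whereas the IEEE case swaps the operands instead and branches on NE:

	cmptlt	op1,op0,ftmp
	fbne	ftmp,target  */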
2529 /* Certain simplifications can be done to make invalid setcc operations
2530 valid. Return the final comparison, or NULL if we can't work. */
2533 alpha_emit_setcc (enum rtx_code code)
2535 enum rtx_code cmp_code;
2536 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2537 int fp_p = alpha_compare.fp_p;
2538 rtx tmp;
2540 /* Zero the operands. */
2541 memset (&alpha_compare, 0, sizeof (alpha_compare));
2543 if (fp_p && GET_MODE (op0) == TFmode)
2545 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2546 op1 = const0_rtx;
2547 fp_p = 0;
2550 if (fp_p && !TARGET_FIX)
2551 return NULL_RTX;
2553 /* The general case: fold the comparison code to the types of compares
2554 that we have, choosing the branch as necessary. */
2556 cmp_code = UNKNOWN;
2557 switch (code)
2559 case EQ: case LE: case LT: case LEU: case LTU:
2560 case UNORDERED:
2561 /* We have these compares. */
2562 if (fp_p)
2563 cmp_code = code, code = NE;
2564 break;
2566 case NE:
2567 if (!fp_p && op1 == const0_rtx)
2568 break;
2569 /* FALLTHRU */
2571 case ORDERED:
2572 cmp_code = reverse_condition (code);
2573 code = EQ;
2574 break;
2576 case GE: case GT: case GEU: case GTU:
2577 /* These normally need swapping, but for integer zero we have
2578 special patterns that recognize swapped operands. */
2579 if (!fp_p && op1 == const0_rtx)
2580 break;
2581 code = swap_condition (code);
2582 if (fp_p)
2583 cmp_code = code, code = NE;
2584 tmp = op0, op0 = op1, op1 = tmp;
2585 break;
2587 default:
2588 gcc_unreachable ();
2591 if (!fp_p)
2593 if (!register_operand (op0, DImode))
2594 op0 = force_reg (DImode, op0);
2595 if (!reg_or_8bit_operand (op1, DImode))
2596 op1 = force_reg (DImode, op1);
2599 /* Emit an initial compare instruction, if necessary. */
2600 if (cmp_code != UNKNOWN)
2602 enum machine_mode mode = fp_p ? DFmode : DImode;
2604 tmp = gen_reg_rtx (mode);
2605 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2606 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2608 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2609 op1 = const0_rtx;
2612 /* Return the setcc comparison. */
2613 return gen_rtx_fmt_ee (code, DImode, op0, op1);
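/* For example (illustrative only): an integer setcc for (op0 > op1) with
   nonzero op1 is rewritten by the code above into (LT op1 op0), i.e. a
   single "cmplt op1,op0,dst".  An IEEE GT first emits the swapped cmptlt
   into an FP temporary and then returns (NE lowpart(tmp) 0), so the final
   setcc happens on the integer side; that is why this path requires
   TARGET_FIX (fp<->int register moves).  */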
2617 /* Rewrite a comparison against zero CMP of the form
2618 (CODE (cc0) (const_int 0)) so it can be written validly in
2619 a conditional move (if_then_else CMP ...).
2620 If both of the operands that set cc0 are nonzero we must emit
2621 an insn to perform the compare (it can't be done within
2622 the conditional move). */
2625 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2627 enum rtx_code code = GET_CODE (cmp);
2628 enum rtx_code cmov_code = NE;
2629 rtx op0 = alpha_compare.op0;
2630 rtx op1 = alpha_compare.op1;
2631 int fp_p = alpha_compare.fp_p;
2632 enum machine_mode cmp_mode
2633 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2634 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2635 enum machine_mode cmov_mode = VOIDmode;
2636 int local_fast_math = flag_unsafe_math_optimizations;
2637 rtx tem;
2639 /* Zero the operands. */
2640 memset (&alpha_compare, 0, sizeof (alpha_compare));
2642 if (fp_p != FLOAT_MODE_P (mode))
2644 enum rtx_code cmp_code;
2646 if (! TARGET_FIX)
2647 return 0;
2649 /* If we have fp<->int register move instructions, do a cmov by
2650 performing the comparison in fp registers, and move the
2651 zero/nonzero value to integer registers, where we can then
2652 use a normal cmov, or vice-versa. */
2654 switch (code)
2656 case EQ: case LE: case LT: case LEU: case LTU:
2657 /* We have these compares. */
2658 cmp_code = code, code = NE;
2659 break;
2661 case NE:
2662 /* This must be reversed. */
2663 cmp_code = EQ, code = EQ;
2664 break;
2666 case GE: case GT: case GEU: case GTU:
2667 /* These normally need swapping, but for integer zero we have
2668 special patterns that recognize swapped operands. */
2669 if (!fp_p && op1 == const0_rtx)
2670 cmp_code = code, code = NE;
2671 else
2673 cmp_code = swap_condition (code);
2674 code = NE;
2675 tem = op0, op0 = op1, op1 = tem;
2677 break;
2679 default:
2680 gcc_unreachable ();
2683 tem = gen_reg_rtx (cmp_op_mode);
2684 emit_insn (gen_rtx_SET (VOIDmode, tem,
2685 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2686 op0, op1)));
2688 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2689 op0 = gen_lowpart (cmp_op_mode, tem);
2690 op1 = CONST0_RTX (cmp_op_mode);
2691 fp_p = !fp_p;
2692 local_fast_math = 1;
2695 /* We may be able to use a conditional move directly.
2696 This avoids emitting spurious compares. */
2697 if (signed_comparison_operator (cmp, VOIDmode)
2698 && (!fp_p || local_fast_math)
2699 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2700 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2702 /* We can't put the comparison inside the conditional move;
2703 emit a compare instruction and put that inside the
2704 conditional move. Make sure we emit only comparisons we have;
2705 swap or reverse as necessary. */
2707 if (!can_create_pseudo_p ())
2708 return NULL_RTX;
2710 switch (code)
2712 case EQ: case LE: case LT: case LEU: case LTU:
2713 /* We have these compares: */
2714 break;
2716 case NE:
2717 /* This must be reversed. */
2718 code = reverse_condition (code);
2719 cmov_code = EQ;
2720 break;
2722 case GE: case GT: case GEU: case GTU:
2723 /* These must be swapped. */
2724 if (op1 != CONST0_RTX (cmp_mode))
2726 code = swap_condition (code);
2727 tem = op0, op0 = op1, op1 = tem;
2729 break;
2731 default:
2732 gcc_unreachable ();
2735 if (!fp_p)
2737 if (!reg_or_0_operand (op0, DImode))
2738 op0 = force_reg (DImode, op0);
2739 if (!reg_or_8bit_operand (op1, DImode))
2740 op1 = force_reg (DImode, op1);
2743 /* ??? We mark the branch mode to be CCmode to prevent the compare
2744 and cmov from being combined, since the compare insn follows IEEE
2745 rules that the cmov does not. */
2746 if (fp_p && !local_fast_math)
2747 cmov_mode = CCmode;
2749 tem = gen_reg_rtx (cmp_op_mode);
2750 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2751 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2754 /* Simplify a conditional move of two constants into a setcc with
2755 arithmetic. This is done with a splitter since combine would
2756 just undo the work if done during code generation. It also catches
2757 cases we wouldn't have before cse. */
2760 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2761 rtx t_rtx, rtx f_rtx)
2763 HOST_WIDE_INT t, f, diff;
2764 enum machine_mode mode;
2765 rtx target, subtarget, tmp;
2767 mode = GET_MODE (dest);
2768 t = INTVAL (t_rtx);
2769 f = INTVAL (f_rtx);
2770 diff = t - f;
2772 if (((code == NE || code == EQ) && diff < 0)
2773 || (code == GE || code == GT))
2775 code = reverse_condition (code);
2776 diff = t, t = f, f = diff;
2777 diff = t - f;
2780 subtarget = target = dest;
2781 if (mode != DImode)
2783 target = gen_lowpart (DImode, dest);
2784 if (can_create_pseudo_p ())
2785 subtarget = gen_reg_rtx (DImode);
2786 else
2787 subtarget = target;
2789 /* Below, we must be careful to use copy_rtx on target and subtarget
2790 in intermediate insns, as they may be a subreg rtx, which may not
2791 be shared. */
2793 if (f == 0 && exact_log2 (diff) > 0
2794 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2795 viable over a longer latency cmove. On EV5, the E0 slot is a
2796 scarce resource, and on EV4 shift has the same latency as a cmove. */
2797 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2799 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2800 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2802 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2803 GEN_INT (exact_log2 (t)));
2804 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2806 else if (f == 0 && t == -1)
2808 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2809 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2811 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2813 else if (diff == 1 || diff == 4 || diff == 8)
2815 rtx add_op;
2817 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2818 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2820 if (diff == 1)
2821 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2822 else
2824 add_op = GEN_INT (f);
2825 if (sext_add_operand (add_op, mode))
2827 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2828 GEN_INT (diff));
2829 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2830 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2832 else
2833 return 0;
2836 else
2837 return 0;
2839 return 1;
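/* A few worked instances of the splitting above (illustrative only):

	(x != 0 ? 8 : 0)  ->  t = (x != 0);  dest = t << 3;
	(x < 0 ? -1 : 0)  ->  t = (x < 0);   dest = -t;
	(x != 0 ? 5 : 1)  ->  t = (x != 0);  dest = t * 4 + 1;

   where the last form maps onto s4addq when the false value is a valid
   sext_add_operand.  */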
2842 /* Look up the X_floating library function name for the
2843 given operation. */
2845 struct xfloating_op GTY(())
2847 const enum rtx_code code;
2848 const char *const GTY((skip)) osf_func;
2849 const char *const GTY((skip)) vms_func;
2850 rtx libcall;
2853 static GTY(()) struct xfloating_op xfloating_ops[] =
2855 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2856 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2857 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2858 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2859 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2860 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2861 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2862 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2863 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2864 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2865 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2866 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2867 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2868 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2869 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2872 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2874 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2875 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2878 static rtx
2879 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2881 struct xfloating_op *ops = xfloating_ops;
2882 long n = ARRAY_SIZE (xfloating_ops);
2883 long i;
2885 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2887 /* How irritating. Nothing to key off for the main table. */
2888 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2890 ops = vax_cvt_ops;
2891 n = ARRAY_SIZE (vax_cvt_ops);
2894 for (i = 0; i < n; ++i, ++ops)
2895 if (ops->code == code)
2897 rtx func = ops->libcall;
2898 if (!func)
2900 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2901 ? ops->vms_func : ops->osf_func);
2902 ops->libcall = func;
2904 return func;
2907 gcc_unreachable ();
2910 /* Most X_floating operations take the rounding mode as an argument.
2911 Compute that here. */
2913 static int
2914 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2915 enum alpha_fp_rounding_mode round)
2917 int mode;
2919 switch (round)
2921 case ALPHA_FPRM_NORM:
2922 mode = 2;
2923 break;
2924 case ALPHA_FPRM_MINF:
2925 mode = 1;
2926 break;
2927 case ALPHA_FPRM_CHOP:
2928 mode = 0;
2929 break;
2930 case ALPHA_FPRM_DYN:
2931 mode = 4;
2932 break;
2933 default:
2934 gcc_unreachable ();
2936 /* XXX For reference, round to +inf is mode = 3. */
2939 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2940 mode |= 0x10000;
2942 return mode;
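/* For instance (illustrative only): with alpha_fprm == ALPHA_FPRM_NORM
   (encoded as 2 above) and alpha_fptm == ALPHA_FPTM_N, a FLOAT_TRUNCATE
   call receives the mode argument 2 | 0x10000 == 0x10002, while an
   arithmetic operation such as PLUS simply receives 2.  */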
2945 /* Emit an X_floating library function call.
2947 Note that these functions do not follow normal calling conventions:
2948 TFmode arguments are passed in two integer registers (as opposed to
2949 indirect); TFmode return values appear in R16+R17.
2951 FUNC is the function to call.
2952 TARGET is where the output belongs.
2953 OPERANDS are the inputs.
2954 NOPERANDS is the count of inputs.
2955 EQUIV is the expression equivalent for the function.
2958 static void
2959 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2960 int noperands, rtx equiv)
2962 rtx usage = NULL_RTX, tmp, reg;
2963 int regno = 16, i;
2965 start_sequence ();
2967 for (i = 0; i < noperands; ++i)
2969 switch (GET_MODE (operands[i]))
2971 case TFmode:
2972 reg = gen_rtx_REG (TFmode, regno);
2973 regno += 2;
2974 break;
2976 case DFmode:
2977 reg = gen_rtx_REG (DFmode, regno + 32);
2978 regno += 1;
2979 break;
2981 case VOIDmode:
2982 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2983 /* FALLTHRU */
2984 case DImode:
2985 reg = gen_rtx_REG (DImode, regno);
2986 regno += 1;
2987 break;
2989 default:
2990 gcc_unreachable ();
2993 emit_move_insn (reg, operands[i]);
2994 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2997 switch (GET_MODE (target))
2999 case TFmode:
3000 reg = gen_rtx_REG (TFmode, 16);
3001 break;
3002 case DFmode:
3003 reg = gen_rtx_REG (DFmode, 32);
3004 break;
3005 case DImode:
3006 reg = gen_rtx_REG (DImode, 0);
3007 break;
3008 default:
3009 gcc_unreachable ();
3012 tmp = gen_rtx_MEM (QImode, func);
3013 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3014 const0_rtx, const0_rtx));
3015 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3016 CONST_OR_PURE_CALL_P (tmp) = 1;
3018 tmp = get_insns ();
3019 end_sequence ();
3021 emit_libcall_block (tmp, target, reg, equiv);
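/* As a concrete register picture (illustrative only): for a two-operand
   TFmode arithmetic call such as _OtsAddX with a trailing DImode rounding
   mode, the loop above assigns

	operand 0 (TFmode)     -> $16,$17
	operand 1 (TFmode)     -> $18,$19
	rounding mode (DImode) -> $20

   and the TFmode result is read back from $16,$17, matching the note on
   the nonstandard calling convention above.  */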
3024 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3026 void
3027 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3029 rtx func;
3030 int mode;
3031 rtx out_operands[3];
3033 func = alpha_lookup_xfloating_lib_func (code);
3034 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3036 out_operands[0] = operands[1];
3037 out_operands[1] = operands[2];
3038 out_operands[2] = GEN_INT (mode);
3039 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3040 gen_rtx_fmt_ee (code, TFmode, operands[1],
3041 operands[2]));
3044 /* Emit an X_floating library function call for a comparison. */
3046 static rtx
3047 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3049 enum rtx_code cmp_code, res_code;
3050 rtx func, out, operands[2], note;
3052 /* X_floating library comparison functions return
3053 -1 unordered
3054 0 false
3055 1 true
3056 Convert the compare against the raw return value. */
3058 cmp_code = *pcode;
3059 switch (cmp_code)
3061 case UNORDERED:
3062 cmp_code = EQ;
3063 res_code = LT;
3064 break;
3065 case ORDERED:
3066 cmp_code = EQ;
3067 res_code = GE;
3068 break;
3069 case NE:
3070 res_code = NE;
3071 break;
3072 case EQ:
3073 case LT:
3074 case GT:
3075 case LE:
3076 case GE:
3077 res_code = GT;
3078 break;
3079 default:
3080 gcc_unreachable ();
3082 *pcode = res_code;
3084 func = alpha_lookup_xfloating_lib_func (cmp_code);
3086 operands[0] = op0;
3087 operands[1] = op1;
3088 out = gen_reg_rtx (DImode);
3090 /* What's actually returned is -1,0,1, not a proper boolean value,
3091 so use an EXPR_LIST as with a generic libcall instead of a
3092 comparison type expression. */
3093 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3094 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3095 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3096 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3098 return out;
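/* So, for example (illustrative only), a TFmode (a <= b) becomes a call to
   _OtsLeqX (OTS$LEQ_X on VMS) followed by a (GT result 0) test, while
   UNORDERED becomes _OtsEqlX followed by (LT result 0), since the library
   reports unordered operands with a return value of -1.  */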
3101 /* Emit an X_floating library function call for a conversion. */
3103 void
3104 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3106 int noperands = 1, mode;
3107 rtx out_operands[2];
3108 rtx func;
3109 enum rtx_code code = orig_code;
3111 if (code == UNSIGNED_FIX)
3112 code = FIX;
3114 func = alpha_lookup_xfloating_lib_func (code);
3116 out_operands[0] = operands[1];
3118 switch (code)
3120 case FIX:
3121 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3122 out_operands[1] = GEN_INT (mode);
3123 noperands = 2;
3124 break;
3125 case FLOAT_TRUNCATE:
3126 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3127 out_operands[1] = GEN_INT (mode);
3128 noperands = 2;
3129 break;
3130 default:
3131 break;
3134 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3135 gen_rtx_fmt_e (orig_code,
3136 GET_MODE (operands[0]),
3137 operands[1]));
3140 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3141 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3142 guarantee that the sequence
3143 set (OP[0] OP[2])
3144 set (OP[1] OP[3])
3145 is valid. Naturally, output operand ordering is little-endian.
3146 This is used by *movtf_internal and *movti_internal. */
3148 void
3149 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3150 bool fixup_overlap)
3152 switch (GET_CODE (operands[1]))
3154 case REG:
3155 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3156 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3157 break;
3159 case MEM:
3160 operands[3] = adjust_address (operands[1], DImode, 8);
3161 operands[2] = adjust_address (operands[1], DImode, 0);
3162 break;
3164 case CONST_INT:
3165 case CONST_DOUBLE:
3166 gcc_assert (operands[1] == CONST0_RTX (mode));
3167 operands[2] = operands[3] = const0_rtx;
3168 break;
3170 default:
3171 gcc_unreachable ();
3174 switch (GET_CODE (operands[0]))
3176 case REG:
3177 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3178 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3179 break;
3181 case MEM:
3182 operands[1] = adjust_address (operands[0], DImode, 8);
3183 operands[0] = adjust_address (operands[0], DImode, 0);
3184 break;
3186 default:
3187 gcc_unreachable ();
3190 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3192 rtx tmp;
3193 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3194 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3198 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3199 op2 is a register containing the sign bit, operation is the
3200 logical operation to be performed. */
3202 void
3203 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3205 rtx high_bit = operands[2];
3206 rtx scratch;
3207 int move;
3209 alpha_split_tmode_pair (operands, TFmode, false);
3211 /* Detect three flavors of operand overlap. */
3212 move = 1;
3213 if (rtx_equal_p (operands[0], operands[2]))
3214 move = 0;
3215 else if (rtx_equal_p (operands[1], operands[2]))
3217 if (rtx_equal_p (operands[0], high_bit))
3218 move = 2;
3219 else
3220 move = -1;
3223 if (move < 0)
3224 emit_move_insn (operands[0], operands[2]);
3226 /* ??? If the destination overlaps both source tf and high_bit, then
3227 assume source tf is dead in its entirety and use the other half
3228 for a scratch register. Otherwise "scratch" is just the proper
3229 destination register. */
3230 scratch = operands[move < 2 ? 1 : 3];
3232 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3234 if (move > 0)
3236 emit_move_insn (operands[0], operands[2]);
3237 if (move > 1)
3238 emit_move_insn (operands[1], scratch);
3242 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3243 unaligned data:
3245 unsigned: signed:
3246 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3248 lda r3,X(r11) lda r3,X+2(r11)
3249 extwl r1,r3,r1 extql r1,r3,r1
3250 extwh r2,r3,r2 extqh r2,r3,r2
3251 or r1,r2,r1 or r1,r2,r1
3252 sra r1,48,r1
3254 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3255 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3256 lda r3,X(r11) lda r3,X(r11)
3257 extll r1,r3,r1 extll r1,r3,r1
3258 extlh r2,r3,r2 extlh r2,r3,r2
3259 or r1,r2,r1 addl r1,r2,r1
3261 quad: ldq_u r1,X(r11)
3262 ldq_u r2,X+7(r11)
3263 lda r3,X(r11)
3264 extql r1,r3,r1
3265 extqh r2,r3,r2
3266 or r1,r2,r1
3269 void
3270 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3271 HOST_WIDE_INT ofs, int sign)
3273 rtx meml, memh, addr, extl, exth, tmp, mema;
3274 enum machine_mode mode;
3276 if (TARGET_BWX && size == 2)
3278 meml = adjust_address (mem, QImode, ofs);
3279 memh = adjust_address (mem, QImode, ofs+1);
3280 if (BYTES_BIG_ENDIAN)
3281 tmp = meml, meml = memh, memh = tmp;
3282 extl = gen_reg_rtx (DImode);
3283 exth = gen_reg_rtx (DImode);
3284 emit_insn (gen_zero_extendqidi2 (extl, meml));
3285 emit_insn (gen_zero_extendqidi2 (exth, memh));
3286 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3287 NULL, 1, OPTAB_LIB_WIDEN);
3288 addr = expand_simple_binop (DImode, IOR, extl, exth,
3289 NULL, 1, OPTAB_LIB_WIDEN);
3291 if (sign && GET_MODE (tgt) != HImode)
3293 addr = gen_lowpart (HImode, addr);
3294 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3296 else
3298 if (GET_MODE (tgt) != DImode)
3299 addr = gen_lowpart (GET_MODE (tgt), addr);
3300 emit_move_insn (tgt, addr);
3302 return;
3305 meml = gen_reg_rtx (DImode);
3306 memh = gen_reg_rtx (DImode);
3307 addr = gen_reg_rtx (DImode);
3308 extl = gen_reg_rtx (DImode);
3309 exth = gen_reg_rtx (DImode);
3311 mema = XEXP (mem, 0);
3312 if (GET_CODE (mema) == LO_SUM)
3313 mema = force_reg (Pmode, mema);
3315 /* AND addresses cannot be in any alias set, since they may implicitly
3316 alias surrounding code. Ideally we'd have some alias set that
3317 covered all types except those with alignment 8 or higher. */
3319 tmp = change_address (mem, DImode,
3320 gen_rtx_AND (DImode,
3321 plus_constant (mema, ofs),
3322 GEN_INT (-8)));
3323 set_mem_alias_set (tmp, 0);
3324 emit_move_insn (meml, tmp);
3326 tmp = change_address (mem, DImode,
3327 gen_rtx_AND (DImode,
3328 plus_constant (mema, ofs + size - 1),
3329 GEN_INT (-8)));
3330 set_mem_alias_set (tmp, 0);
3331 emit_move_insn (memh, tmp);
3333 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3335 emit_move_insn (addr, plus_constant (mema, -1));
3337 emit_insn (gen_extqh_be (extl, meml, addr));
3338 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3340 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3341 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3342 addr, 1, OPTAB_WIDEN);
3344 else if (sign && size == 2)
3346 emit_move_insn (addr, plus_constant (mema, ofs+2));
3348 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3349 emit_insn (gen_extqh_le (exth, memh, addr));
3351 /* We must use tgt here for the target. Alpha-vms port fails if we use
3352 addr for the target, because addr is marked as a pointer and combine
3353 knows that pointers are always sign-extended 32-bit values. */
3354 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3355 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3356 addr, 1, OPTAB_WIDEN);
3358 else
3360 if (WORDS_BIG_ENDIAN)
3362 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3363 switch ((int) size)
3365 case 2:
3366 emit_insn (gen_extwh_be (extl, meml, addr));
3367 mode = HImode;
3368 break;
3370 case 4:
3371 emit_insn (gen_extlh_be (extl, meml, addr));
3372 mode = SImode;
3373 break;
3375 case 8:
3376 emit_insn (gen_extqh_be (extl, meml, addr));
3377 mode = DImode;
3378 break;
3380 default:
3381 gcc_unreachable ();
3383 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3385 else
3387 emit_move_insn (addr, plus_constant (mema, ofs));
3388 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3389 switch ((int) size)
3391 case 2:
3392 emit_insn (gen_extwh_le (exth, memh, addr));
3393 mode = HImode;
3394 break;
3396 case 4:
3397 emit_insn (gen_extlh_le (exth, memh, addr));
3398 mode = SImode;
3399 break;
3401 case 8:
3402 emit_insn (gen_extqh_le (exth, memh, addr));
3403 mode = DImode;
3404 break;
3406 default:
3407 gcc_unreachable ();
3411 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3412 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3413 sign, OPTAB_WIDEN);
3416 if (addr != tgt)
3417 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3420 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3422 void
3423 alpha_expand_unaligned_store (rtx dst, rtx src,
3424 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3426 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3428 if (TARGET_BWX && size == 2)
3430 if (src != const0_rtx)
3432 dstl = gen_lowpart (QImode, src);
3433 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3434 NULL, 1, OPTAB_LIB_WIDEN);
3435 dsth = gen_lowpart (QImode, dsth);
3437 else
3438 dstl = dsth = const0_rtx;
3440 meml = adjust_address (dst, QImode, ofs);
3441 memh = adjust_address (dst, QImode, ofs+1);
3442 if (BYTES_BIG_ENDIAN)
3443 addr = meml, meml = memh, memh = addr;
3445 emit_move_insn (meml, dstl);
3446 emit_move_insn (memh, dsth);
3447 return;
3450 dstl = gen_reg_rtx (DImode);
3451 dsth = gen_reg_rtx (DImode);
3452 insl = gen_reg_rtx (DImode);
3453 insh = gen_reg_rtx (DImode);
3455 dsta = XEXP (dst, 0);
3456 if (GET_CODE (dsta) == LO_SUM)
3457 dsta = force_reg (Pmode, dsta);
3459 /* AND addresses cannot be in any alias set, since they may implicitly
3460 alias surrounding code. Ideally we'd have some alias set that
3461 covered all types except those with alignment 8 or higher. */
3463 meml = change_address (dst, DImode,
3464 gen_rtx_AND (DImode,
3465 plus_constant (dsta, ofs),
3466 GEN_INT (-8)));
3467 set_mem_alias_set (meml, 0);
3469 memh = change_address (dst, DImode,
3470 gen_rtx_AND (DImode,
3471 plus_constant (dsta, ofs + size - 1),
3472 GEN_INT (-8)));
3473 set_mem_alias_set (memh, 0);
3475 emit_move_insn (dsth, memh);
3476 emit_move_insn (dstl, meml);
3477 if (WORDS_BIG_ENDIAN)
3479 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3481 if (src != const0_rtx)
3483 switch ((int) size)
3485 case 2:
3486 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3487 break;
3488 case 4:
3489 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3490 break;
3491 case 8:
3492 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3493 break;
3495 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3496 GEN_INT (size*8), addr));
3499 switch ((int) size)
3501 case 2:
3502 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3503 break;
3504 case 4:
3506 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3507 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3508 break;
3510 case 8:
3511 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3512 break;
3515 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3517 else
3519 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3521 if (src != CONST0_RTX (GET_MODE (src)))
3523 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3524 GEN_INT (size*8), addr));
3526 switch ((int) size)
3528 case 2:
3529 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3530 break;
3531 case 4:
3532 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3533 break;
3534 case 8:
3535 emit_insn (gen_insql_le (insl, src, addr));
3536 break;
3540 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3542 switch ((int) size)
3544 case 2:
3545 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3546 break;
3547 case 4:
3549 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3550 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3551 break;
3553 case 8:
3554 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3555 break;
3559 if (src != CONST0_RTX (GET_MODE (src)))
3561 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3562 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3565 if (WORDS_BIG_ENDIAN)
3567 emit_move_insn (meml, dstl);
3568 emit_move_insn (memh, dsth);
3570 else
3572 /* Must store high before low for degenerate case of aligned. */
3573 emit_move_insn (memh, dsth);
3574 emit_move_insn (meml, dstl);
3578 /* The block move code tries to maximize speed by separating loads and
3579 stores at the expense of register pressure: we load all of the data
3580 before we store it back out. Two secondary effects are worth
3581 mentioning: this speeds copying to/from both aligned and unaligned
3582 buffers, and it makes the code significantly easier to write. */
3584 #define MAX_MOVE_WORDS 8
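/* With MAX_MOVE_WORDS == 8 and UNITS_PER_WORD == 8 on Alpha, the block
   move and clear expanders below handle at most 64 bytes inline; larger
   blocks make them return 0 and leave the copy or clear to the generic
   code.  */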
3586 /* Load an integral number of consecutive unaligned quadwords. */
3588 static void
3589 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3590 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3592 rtx const im8 = GEN_INT (-8);
3593 rtx const i64 = GEN_INT (64);
3594 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3595 rtx sreg, areg, tmp, smema;
3596 HOST_WIDE_INT i;
3598 smema = XEXP (smem, 0);
3599 if (GET_CODE (smema) == LO_SUM)
3600 smema = force_reg (Pmode, smema);
3602 /* Generate all the tmp registers we need. */
3603 for (i = 0; i < words; ++i)
3605 data_regs[i] = out_regs[i];
3606 ext_tmps[i] = gen_reg_rtx (DImode);
3608 data_regs[words] = gen_reg_rtx (DImode);
3610 if (ofs != 0)
3611 smem = adjust_address (smem, GET_MODE (smem), ofs);
3613 /* Load up all of the source data. */
3614 for (i = 0; i < words; ++i)
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*i),
3619 im8));
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[i], tmp);
3624 tmp = change_address (smem, DImode,
3625 gen_rtx_AND (DImode,
3626 plus_constant (smema, 8*words - 1),
3627 im8));
3628 set_mem_alias_set (tmp, 0);
3629 emit_move_insn (data_regs[words], tmp);
3631 /* Extract the half-word fragments. Unfortunately DEC decided to make
3632 extxh with offset zero a noop instead of zeroing the register, so
3633 we must take care of that edge condition ourselves with cmov. */
3635 sreg = copy_addr_to_reg (smema);
3636 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3637 1, OPTAB_WIDEN);
3638 if (WORDS_BIG_ENDIAN)
3639 emit_move_insn (sreg, plus_constant (sreg, 7));
3640 for (i = 0; i < words; ++i)
3642 if (WORDS_BIG_ENDIAN)
3644 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3645 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3647 else
3649 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3650 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3652 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3653 gen_rtx_IF_THEN_ELSE (DImode,
3654 gen_rtx_EQ (DImode, areg,
3655 const0_rtx),
3656 const0_rtx, ext_tmps[i])));
3659 /* Merge the half-words into whole words. */
3660 for (i = 0; i < words; ++i)
3662 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3663 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3667 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3668 may be NULL to store zeros. */
3670 static void
3671 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3672 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3674 rtx const im8 = GEN_INT (-8);
3675 rtx const i64 = GEN_INT (64);
3676 rtx ins_tmps[MAX_MOVE_WORDS];
3677 rtx st_tmp_1, st_tmp_2, dreg;
3678 rtx st_addr_1, st_addr_2, dmema;
3679 HOST_WIDE_INT i;
3681 dmema = XEXP (dmem, 0);
3682 if (GET_CODE (dmema) == LO_SUM)
3683 dmema = force_reg (Pmode, dmema);
3685 /* Generate all the tmp registers we need. */
3686 if (data_regs != NULL)
3687 for (i = 0; i < words; ++i)
3688 ins_tmps[i] = gen_reg_rtx (DImode);
3689 st_tmp_1 = gen_reg_rtx (DImode);
3690 st_tmp_2 = gen_reg_rtx (DImode);
3692 if (ofs != 0)
3693 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3695 st_addr_2 = change_address (dmem, DImode,
3696 gen_rtx_AND (DImode,
3697 plus_constant (dmema, words*8 - 1),
3698 im8));
3699 set_mem_alias_set (st_addr_2, 0);
3701 st_addr_1 = change_address (dmem, DImode,
3702 gen_rtx_AND (DImode, dmema, im8));
3703 set_mem_alias_set (st_addr_1, 0);
3705 /* Load up the destination end bits. */
3706 emit_move_insn (st_tmp_2, st_addr_2);
3707 emit_move_insn (st_tmp_1, st_addr_1);
3709 /* Shift the input data into place. */
3710 dreg = copy_addr_to_reg (dmema);
3711 if (WORDS_BIG_ENDIAN)
3712 emit_move_insn (dreg, plus_constant (dreg, 7));
3713 if (data_regs != NULL)
3715 for (i = words-1; i >= 0; --i)
3717 if (WORDS_BIG_ENDIAN)
3719 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3720 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3722 else
3724 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3725 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3728 for (i = words-1; i > 0; --i)
3730 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3731 ins_tmps[i-1], ins_tmps[i-1], 1,
3732 OPTAB_WIDEN);
3736 /* Split and merge the ends with the destination data. */
3737 if (WORDS_BIG_ENDIAN)
3739 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3740 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3742 else
3744 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3745 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3748 if (data_regs != NULL)
3750 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3751 st_tmp_2, 1, OPTAB_WIDEN);
3752 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3753 st_tmp_1, 1, OPTAB_WIDEN);
3756 /* Store it all. */
3757 if (WORDS_BIG_ENDIAN)
3758 emit_move_insn (st_addr_1, st_tmp_1);
3759 else
3760 emit_move_insn (st_addr_2, st_tmp_2);
3761 for (i = words-1; i > 0; --i)
3763 rtx tmp = change_address (dmem, DImode,
3764 gen_rtx_AND (DImode,
3765 plus_constant(dmema,
3766 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3767 im8));
3768 set_mem_alias_set (tmp, 0);
3769 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3771 if (WORDS_BIG_ENDIAN)
3772 emit_move_insn (st_addr_2, st_tmp_2);
3773 else
3774 emit_move_insn (st_addr_1, st_tmp_1);
3778 /* Expand string/block move operations.
3780 operands[0] is the pointer to the destination.
3781 operands[1] is the pointer to the source.
3782 operands[2] is the number of bytes to move.
3783 operands[3] is the alignment. */
3786 alpha_expand_block_move (rtx operands[])
3788 rtx bytes_rtx = operands[2];
3789 rtx align_rtx = operands[3];
3790 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3791 HOST_WIDE_INT bytes = orig_bytes;
3792 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3793 HOST_WIDE_INT dst_align = src_align;
3794 rtx orig_src = operands[1];
3795 rtx orig_dst = operands[0];
3796 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3797 rtx tmp;
3798 unsigned int i, words, ofs, nregs = 0;
3800 if (orig_bytes <= 0)
3801 return 1;
3802 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3803 return 0;
3805 /* Look for additional alignment information from recorded register info. */
3807 tmp = XEXP (orig_src, 0);
3808 if (GET_CODE (tmp) == REG)
3809 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3810 else if (GET_CODE (tmp) == PLUS
3811 && GET_CODE (XEXP (tmp, 0)) == REG
3812 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3814 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3815 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3817 if (a > src_align)
3819 if (a >= 64 && c % 8 == 0)
3820 src_align = 64;
3821 else if (a >= 32 && c % 4 == 0)
3822 src_align = 32;
3823 else if (a >= 16 && c % 2 == 0)
3824 src_align = 16;
3828 tmp = XEXP (orig_dst, 0);
3829 if (GET_CODE (tmp) == REG)
3830 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3831 else if (GET_CODE (tmp) == PLUS
3832 && GET_CODE (XEXP (tmp, 0)) == REG
3833 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3835 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3836 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3838 if (a > dst_align)
3840 if (a >= 64 && c % 8 == 0)
3841 dst_align = 64;
3842 else if (a >= 32 && c % 4 == 0)
3843 dst_align = 32;
3844 else if (a >= 16 && c % 2 == 0)
3845 dst_align = 16;
3849 ofs = 0;
3850 if (src_align >= 64 && bytes >= 8)
3852 words = bytes / 8;
3854 for (i = 0; i < words; ++i)
3855 data_regs[nregs + i] = gen_reg_rtx (DImode);
3857 for (i = 0; i < words; ++i)
3858 emit_move_insn (data_regs[nregs + i],
3859 adjust_address (orig_src, DImode, ofs + i * 8));
3861 nregs += words;
3862 bytes -= words * 8;
3863 ofs += words * 8;
3866 if (src_align >= 32 && bytes >= 4)
3868 words = bytes / 4;
3870 for (i = 0; i < words; ++i)
3871 data_regs[nregs + i] = gen_reg_rtx (SImode);
3873 for (i = 0; i < words; ++i)
3874 emit_move_insn (data_regs[nregs + i],
3875 adjust_address (orig_src, SImode, ofs + i * 4));
3877 nregs += words;
3878 bytes -= words * 4;
3879 ofs += words * 4;
3882 if (bytes >= 8)
3884 words = bytes / 8;
3886 for (i = 0; i < words+1; ++i)
3887 data_regs[nregs + i] = gen_reg_rtx (DImode);
3889 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3890 words, ofs);
3892 nregs += words;
3893 bytes -= words * 8;
3894 ofs += words * 8;
3897 if (! TARGET_BWX && bytes >= 4)
3899 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3900 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3901 bytes -= 4;
3902 ofs += 4;
3905 if (bytes >= 2)
3907 if (src_align >= 16)
3909 do {
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3912 bytes -= 2;
3913 ofs += 2;
3914 } while (bytes >= 2);
3916 else if (! TARGET_BWX)
3918 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3919 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3920 bytes -= 2;
3921 ofs += 2;
3925 while (bytes > 0)
3927 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3928 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3929 bytes -= 1;
3930 ofs += 1;
3933 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3935 /* Now save it back out again. */
3937 i = 0, ofs = 0;
3939 /* Write out the data in whatever chunks reading the source allowed. */
3940 if (dst_align >= 64)
3942 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3944 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3945 data_regs[i]);
3946 ofs += 8;
3947 i++;
3951 if (dst_align >= 32)
3953 /* If the source has remaining DImode regs, write them out in
3954 two pieces. */
3955 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3957 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3958 NULL_RTX, 1, OPTAB_WIDEN);
3960 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3961 gen_lowpart (SImode, data_regs[i]));
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3963 gen_lowpart (SImode, tmp));
3964 ofs += 8;
3965 i++;
3968 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3970 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3971 data_regs[i]);
3972 ofs += 4;
3973 i++;
3977 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3979 /* Write out a remaining block of words using unaligned methods. */
3981 for (words = 1; i + words < nregs; words++)
3982 if (GET_MODE (data_regs[i + words]) != DImode)
3983 break;
3985 if (words == 1)
3986 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3987 else
3988 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3989 words, ofs);
3991 i += words;
3992 ofs += words * 8;
3995 /* Due to the above, this won't be aligned. */
3996 /* ??? If we have more than one of these, consider constructing full
3997 words in registers and using alpha_expand_unaligned_store_words. */
3998 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4000 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4001 ofs += 4;
4002 i++;
4005 if (dst_align >= 16)
4006 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4008 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4009 i++;
4010 ofs += 2;
4012 else
4013 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4015 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4016 i++;
4017 ofs += 2;
4020 /* The remainder must be byte copies. */
4021 while (i < nregs)
4023 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4024 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4025 i++;
4026 ofs += 1;
4029 return 1;
4033 alpha_expand_block_clear (rtx operands[])
4035 rtx bytes_rtx = operands[1];
4036 rtx align_rtx = operands[3];
4037 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4038 HOST_WIDE_INT bytes = orig_bytes;
4039 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4040 HOST_WIDE_INT alignofs = 0;
4041 rtx orig_dst = operands[0];
4042 rtx tmp;
4043 int i, words, ofs = 0;
4045 if (orig_bytes <= 0)
4046 return 1;
4047 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4048 return 0;
4050 /* Look for stricter alignment. */
4051 tmp = XEXP (orig_dst, 0);
4052 if (GET_CODE (tmp) == REG)
4053 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4054 else if (GET_CODE (tmp) == PLUS
4055 && GET_CODE (XEXP (tmp, 0)) == REG
4056 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4058 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4059 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4061 if (a > align)
4063 if (a >= 64)
4064 align = a, alignofs = 8 - c % 8;
4065 else if (a >= 32)
4066 align = a, alignofs = 4 - c % 4;
4067 else if (a >= 16)
4068 align = a, alignofs = 2 - c % 2;
4072 /* Handle an unaligned prefix first. */
4074 if (alignofs > 0)
4076 #if HOST_BITS_PER_WIDE_INT >= 64
4077 /* Given that alignofs is bounded by align, the only time BWX could
4078 generate three stores is for a 7 byte fill. Prefer two individual
4079 stores over a load/mask/store sequence. */
4080 if ((!TARGET_BWX || alignofs == 7)
4081 && align >= 32
4082 && !(alignofs == 4 && bytes >= 4))
4084 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4085 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4086 rtx mem, tmp;
4087 HOST_WIDE_INT mask;
4089 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4090 set_mem_alias_set (mem, 0);
4092 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4093 if (bytes < alignofs)
4095 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4096 ofs += bytes;
4097 bytes = 0;
4099 else
4101 bytes -= alignofs;
4102 ofs += alignofs;
4104 alignofs = 0;
4106 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4107 NULL_RTX, 1, OPTAB_WIDEN);
4109 emit_move_insn (mem, tmp);
4111 #endif
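/* A worked instance of the masking above (illustrative only): clearing at
   least three bytes starting at an address congruent to 5 mod 8, with
   align >= 64, gives alignofs == 3 and inv_alignofs == 5, so the quadword
   containing the prefix is ANDed with 0x000000ffffffffff, zeroing its top
   three bytes in place while preserving the five bytes below the cleared
   region.  */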
4113 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4115 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4116 bytes -= 1;
4117 ofs += 1;
4118 alignofs -= 1;
4120 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4122 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4123 bytes -= 2;
4124 ofs += 2;
4125 alignofs -= 2;
4127 if (alignofs == 4 && bytes >= 4)
4129 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4130 bytes -= 4;
4131 ofs += 4;
4132 alignofs = 0;
4135 /* If we've not used the extra lead alignment information by now,
4136 we won't be able to. Downgrade align to match what's left over. */
4137 if (alignofs > 0)
4139 alignofs = alignofs & -alignofs;
4140 align = MIN (align, alignofs * BITS_PER_UNIT);
4144 /* Handle a block of contiguous long-words. */
4146 if (align >= 64 && bytes >= 8)
4148 words = bytes / 8;
4150 for (i = 0; i < words; ++i)
4151 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4152 const0_rtx);
4154 bytes -= words * 8;
4155 ofs += words * 8;
4158 /* If the block is large and appropriately aligned, emit a single
4159 store followed by a sequence of stq_u insns. */
4161 if (align >= 32 && bytes > 16)
4163 rtx orig_dsta;
4165 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4166 bytes -= 4;
4167 ofs += 4;
4169 orig_dsta = XEXP (orig_dst, 0);
4170 if (GET_CODE (orig_dsta) == LO_SUM)
4171 orig_dsta = force_reg (Pmode, orig_dsta);
4173 words = bytes / 8;
4174 for (i = 0; i < words; ++i)
4176 rtx mem
4177 = change_address (orig_dst, DImode,
4178 gen_rtx_AND (DImode,
4179 plus_constant (orig_dsta, ofs + i*8),
4180 GEN_INT (-8)));
4181 set_mem_alias_set (mem, 0);
4182 emit_move_insn (mem, const0_rtx);
4185 /* Depending on the alignment, the first stq_u may have overlapped
4186 with the initial stl, which means that the last stq_u didn't
4187 write as much as it would appear. Leave those questionable bytes
4188 unaccounted for. */
4189 bytes -= words * 8 - 4;
4190 ofs += words * 8 - 4;
4193 /* Handle a smaller block of aligned words. */
4195 if ((align >= 64 && bytes == 4)
4196 || (align == 32 && bytes >= 4))
4198 words = bytes / 4;
4200 for (i = 0; i < words; ++i)
4201 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4202 const0_rtx);
4204 bytes -= words * 4;
4205 ofs += words * 4;
4208 /* An unaligned block uses stq_u stores for as many words as possible. */
4210 if (bytes >= 8)
4212 words = bytes / 8;
4214 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4216 bytes -= words * 8;
4217 ofs += words * 8;
4220 /* Next clean up any trailing pieces. */
4222 #if HOST_BITS_PER_WIDE_INT >= 64
4223 /* Count the number of bits in BYTES for which aligned stores could
4224 be emitted. */
4225 words = 0;
4226 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4227 if (bytes & i)
4228 words += 1;
4230 /* If we have appropriate alignment (and it wouldn't take too many
4231 instructions otherwise), mask out the bytes we need. */
4232 if (TARGET_BWX ? words > 2 : bytes > 0)
4234 if (align >= 64)
4236 rtx mem, tmp;
4237 HOST_WIDE_INT mask;
4239 mem = adjust_address (orig_dst, DImode, ofs);
4240 set_mem_alias_set (mem, 0);
4242 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4244 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4245 NULL_RTX, 1, OPTAB_WIDEN);
4247 emit_move_insn (mem, tmp);
4248 return 1;
4250 else if (align >= 32 && bytes < 4)
4252 rtx mem, tmp;
4253 HOST_WIDE_INT mask;
4255 mem = adjust_address (orig_dst, SImode, ofs);
4256 set_mem_alias_set (mem, 0);
4258 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4260 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4261 NULL_RTX, 1, OPTAB_WIDEN);
4263 emit_move_insn (mem, tmp);
4264 return 1;
4267 #endif
4269 if (!TARGET_BWX && bytes >= 4)
4271 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4272 bytes -= 4;
4273 ofs += 4;
4276 if (bytes >= 2)
4278 if (align >= 16)
4280 do {
4281 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4282 const0_rtx);
4283 bytes -= 2;
4284 ofs += 2;
4285 } while (bytes >= 2);
4287 else if (! TARGET_BWX)
4289 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4290 bytes -= 2;
4291 ofs += 2;
4295 while (bytes > 0)
4297 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4298 bytes -= 1;
4299 ofs += 1;
4302 return 1;
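/* An illustrative trace of the decomposition above (an example, not code
   from the original): with a 4-byte-aligned destination, no BWX, and an
   11-byte clear, the aligned-words case emits two stl's for the first 8
   bytes, and the trailing 3 bytes are handled by the final load/and/store
   sequence that masks them out of the last longword.  */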
4305 /* Returns a mask so that zap(x, value) == x & mask. */
4308 alpha_expand_zap_mask (HOST_WIDE_INT value)
4310 rtx result;
4311 int i;
4313 if (HOST_BITS_PER_WIDE_INT >= 64)
4315 HOST_WIDE_INT mask = 0;
4317 for (i = 7; i >= 0; --i)
4319 mask <<= 8;
4320 if (!((value >> i) & 1))
4321 mask |= 0xff;
4324 result = gen_int_mode (mask, DImode);
4326 else
4328 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4330 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4332 for (i = 7; i >= 4; --i)
4334 mask_hi <<= 8;
4335 if (!((value >> i) & 1))
4336 mask_hi |= 0xff;
4339 for (i = 3; i >= 0; --i)
4341 mask_lo <<= 8;
4342 if (!((value >> i) & 1))
4343 mask_lo |= 0xff;
4346 result = immed_double_const (mask_lo, mask_hi, DImode);
4349 return result;
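/* Worked example (illustrative): for VALUE == 0x0f, bits 0-3 are set, so
   bytes 0-3 of the mask stay zero while bytes 4-7 become 0xff.  The loop
   above therefore yields MASK == 0xffffffff00000000, and indeed
   zap (x, 0x0f) == x & 0xffffffff00000000.  */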
4352 void
4353 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4354 enum machine_mode mode,
4355 rtx op0, rtx op1, rtx op2)
4357 op0 = gen_lowpart (mode, op0);
4359 if (op1 == const0_rtx)
4360 op1 = CONST0_RTX (mode);
4361 else
4362 op1 = gen_lowpart (mode, op1);
4364 if (op2 == const0_rtx)
4365 op2 = CONST0_RTX (mode);
4366 else
4367 op2 = gen_lowpart (mode, op2);
4369 emit_insn ((*gen) (op0, op1, op2));
4372 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4373 COND is true. Mark the jump as unlikely to be taken. */
4375 static void
4376 emit_unlikely_jump (rtx cond, rtx label)
4378 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4379 rtx x;
4381 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4382 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4383 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4386 /* A subroutine of the atomic operation splitters. Emit a load-locked
4387 instruction in MODE. */
4389 static void
4390 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4392 rtx (*fn) (rtx, rtx) = NULL;
4393 if (mode == SImode)
4394 fn = gen_load_locked_si;
4395 else if (mode == DImode)
4396 fn = gen_load_locked_di;
4397 emit_insn (fn (reg, mem));
4400 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4401 instruction in MODE. */
4403 static void
4404 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4406 rtx (*fn) (rtx, rtx, rtx) = NULL;
4407 if (mode == SImode)
4408 fn = gen_store_conditional_si;
4409 else if (mode == DImode)
4410 fn = gen_store_conditional_di;
4411 emit_insn (fn (res, mem, val));
4414 /* A subroutine of the atomic operation splitters. Emit an insxl
4415 instruction in MODE. */
4417 static rtx
4418 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4420 rtx ret = gen_reg_rtx (DImode);
4421 rtx (*fn) (rtx, rtx, rtx);
4423 if (WORDS_BIG_ENDIAN)
4425 if (mode == QImode)
4426 fn = gen_insbl_be;
4427 else
4428 fn = gen_inswl_be;
4430 else
4432 if (mode == QImode)
4433 fn = gen_insbl_le;
4434 else
4435 fn = gen_inswl_le;
4437 /* The insbl and inswl patterns require a register operand. */
4438 op1 = force_reg (mode, op1);
4439 emit_insn (fn (ret, op1, op2));
4441 return ret;
4444 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4445 to perform. MEM is the memory on which to operate. VAL is the second
4446 operand of the binary operator. BEFORE and AFTER are optional locations to
4447 return the value of MEM either before or after the operation. SCRATCH is
4448 a scratch register. */
4450 void
4451 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4452 rtx before, rtx after, rtx scratch)
4454 enum machine_mode mode = GET_MODE (mem);
4455 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4457 emit_insn (gen_memory_barrier ());
4459 label = gen_label_rtx ();
4460 emit_label (label);
4461 label = gen_rtx_LABEL_REF (DImode, label);
4463 if (before == NULL)
4464 before = scratch;
4465 emit_load_locked (mode, before, mem);
4467 if (code == NOT)
4468 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4469 else
4470 x = gen_rtx_fmt_ee (code, mode, before, val);
4471 if (after)
4472 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4473 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4475 emit_store_conditional (mode, cond, mem, scratch);
4477 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4478 emit_unlikely_jump (x, label);
4480 emit_insn (gen_memory_barrier ());
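/* Roughly, the splitter above turns an atomic fetch-and-op into the usual
   Alpha load-locked/store-conditional loop.  For a DImode atomic add the
   emitted sequence looks something like this (a sketch; register numbers
   are illustrative, not what the allocator will pick):

	mb
     1:	ldq_l	$1,0($16)	# load-locked old value
	addq	$1,$17,$28	# apply the operation into the scratch reg
	stq_c	$28,0($16)	# store-conditional, $28 becomes 0 on failure
	beq	$28,1b		# unlikely branch back and retry
	mb
*/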
4483 /* Expand a compare and swap operation. */
4485 void
4486 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4487 rtx scratch)
4489 enum machine_mode mode = GET_MODE (mem);
4490 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4492 emit_insn (gen_memory_barrier ());
4494 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4495 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4496 emit_label (XEXP (label1, 0));
4498 emit_load_locked (mode, retval, mem);
4500 x = gen_lowpart (DImode, retval);
4501 if (oldval == const0_rtx)
4502 x = gen_rtx_NE (DImode, x, const0_rtx);
4503 else
4505 x = gen_rtx_EQ (DImode, x, oldval);
4506 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4507 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4509 emit_unlikely_jump (x, label2);
4511 emit_move_insn (scratch, newval);
4512 emit_store_conditional (mode, cond, mem, scratch);
4514 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4515 emit_unlikely_jump (x, label1);
4517 emit_insn (gen_memory_barrier ());
4518 emit_label (XEXP (label2, 0));
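/* The compare-and-swap splitter above uses the same load-locked/
   store-conditional loop, but compares the loaded value against OLDVAL
   first and branches straight to LABEL2 when they differ, so a failed
   comparison performs no store at all; only a failed store-conditional
   loops back to LABEL1.  */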
4521 void
4522 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4524 enum machine_mode mode = GET_MODE (mem);
4525 rtx addr, align, wdst;
4526 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4528 addr = force_reg (DImode, XEXP (mem, 0));
4529 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4530 NULL_RTX, 1, OPTAB_DIRECT);
4532 oldval = convert_modes (DImode, mode, oldval, 1);
4533 newval = emit_insxl (mode, newval, addr);
4535 wdst = gen_reg_rtx (DImode);
4536 if (mode == QImode)
4537 fn5 = gen_sync_compare_and_swapqi_1;
4538 else
4539 fn5 = gen_sync_compare_and_swaphi_1;
4540 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4542 emit_move_insn (dst, gen_lowpart (mode, wdst));
4545 void
4546 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4547 rtx oldval, rtx newval, rtx align,
4548 rtx scratch, rtx cond)
4550 rtx label1, label2, mem, width, mask, x;
4552 mem = gen_rtx_MEM (DImode, align);
4553 MEM_VOLATILE_P (mem) = 1;
4555 emit_insn (gen_memory_barrier ());
4556 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4557 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4558 emit_label (XEXP (label1, 0));
4560 emit_load_locked (DImode, scratch, mem);
4562 width = GEN_INT (GET_MODE_BITSIZE (mode));
4563 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4564 if (WORDS_BIG_ENDIAN)
4565 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4566 else
4567 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4569 if (oldval == const0_rtx)
4570 x = gen_rtx_NE (DImode, dest, const0_rtx);
4571 else
4573 x = gen_rtx_EQ (DImode, dest, oldval);
4574 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4575 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4577 emit_unlikely_jump (x, label2);
4579 if (WORDS_BIG_ENDIAN)
4580 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4581 else
4582 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4583 emit_insn (gen_iordi3 (scratch, scratch, newval));
4585 emit_store_conditional (DImode, scratch, mem, scratch);
4587 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4588 emit_unlikely_jump (x, label1);
4590 emit_insn (gen_memory_barrier ());
4591 emit_label (XEXP (label2, 0));
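/* Because pre-BWX Alphas have no byte- or word-sized load-locked insns,
   the *_12 routines above operate on the containing aligned quadword:
   the address is rounded down with AND -8, the old byte/word is pulled
   out with ext[bw]l and compared, the slot is cleared with msk[bw]l, and
   the new value (already positioned by emit_insxl) is OR-ed back in
   before the store-conditional.  */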
4594 /* Expand an atomic exchange operation. */
4596 void
4597 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4599 enum machine_mode mode = GET_MODE (mem);
4600 rtx label, x, cond = gen_lowpart (DImode, scratch);
4602 emit_insn (gen_memory_barrier ());
4604 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4605 emit_label (XEXP (label, 0));
4607 emit_load_locked (mode, retval, mem);
4608 emit_move_insn (scratch, val);
4609 emit_store_conditional (mode, cond, mem, scratch);
4611 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4612 emit_unlikely_jump (x, label);
4615 void
4616 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4618 enum machine_mode mode = GET_MODE (mem);
4619 rtx addr, align, wdst;
4620 rtx (*fn4) (rtx, rtx, rtx, rtx);
4622 /* Force the address into a register. */
4623 addr = force_reg (DImode, XEXP (mem, 0));
4625 /* Align it to a multiple of 8. */
4626 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4627 NULL_RTX, 1, OPTAB_DIRECT);
4629 /* Insert val into the correct byte location within the word. */
4630 val = emit_insxl (mode, val, addr);
4632 wdst = gen_reg_rtx (DImode);
4633 if (mode == QImode)
4634 fn4 = gen_sync_lock_test_and_setqi_1;
4635 else
4636 fn4 = gen_sync_lock_test_and_sethi_1;
4637 emit_insn (fn4 (wdst, addr, val, align));
4639 emit_move_insn (dst, gen_lowpart (mode, wdst));
4642 void
4643 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4644 rtx val, rtx align, rtx scratch)
4646 rtx label, mem, width, mask, x;
4648 mem = gen_rtx_MEM (DImode, align);
4649 MEM_VOLATILE_P (mem) = 1;
4651 emit_insn (gen_memory_barrier ());
4652 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4653 emit_label (XEXP (label, 0));
4655 emit_load_locked (DImode, scratch, mem);
4657 width = GEN_INT (GET_MODE_BITSIZE (mode));
4658 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4659 if (WORDS_BIG_ENDIAN)
4661 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4662 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4664 else
4666 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4667 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4669 emit_insn (gen_iordi3 (scratch, scratch, val));
4671 emit_store_conditional (DImode, scratch, mem, scratch);
4673 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4674 emit_unlikely_jump (x, label);
4677 /* Adjust the cost of a scheduling dependency. Return the new cost of
4678 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4680 static int
4681 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4683 enum attr_type insn_type, dep_insn_type;
4685 /* If the dependence is an anti-dependence, there is no cost. For an
4686 output dependence, there is sometimes a cost, but it doesn't seem
4687 worth handling those few cases. */
4688 if (REG_NOTE_KIND (link) != 0)
4689 return cost;
4691 /* If we can't recognize the insns, we can't really do anything. */
4692 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4693 return cost;
4695 insn_type = get_attr_type (insn);
4696 dep_insn_type = get_attr_type (dep_insn);
4698 /* Bring in the user-defined memory latency. */
4699 if (dep_insn_type == TYPE_ILD
4700 || dep_insn_type == TYPE_FLD
4701 || dep_insn_type == TYPE_LDSYM)
4702 cost += alpha_memory_latency-1;
4704 /* Everything else handled in DFA bypasses now. */
4706 return cost;
4709 /* The number of instructions that can be issued per cycle. */
4711 static int
4712 alpha_issue_rate (void)
4714 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4717 /* How many alternative schedules to try. This should be as wide as the
4718 scheduling freedom in the DFA, but no wider. Making this value too
4719 large results in extra work for the scheduler.
4721 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4722 alternative schedules. For EV5, we can choose between E0/E1 and
4723 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4725 static int
4726 alpha_multipass_dfa_lookahead (void)
4728 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4731 /* Machine-specific function data. */
4733 struct machine_function GTY(())
4735 /* For unicosmk. */
4736 /* List of call information words for calls from this function. */
4737 struct rtx_def *first_ciw;
4738 struct rtx_def *last_ciw;
4739 int ciw_count;
4741 /* List of deferred case vectors. */
4742 struct rtx_def *addr_list;
4744 /* For OSF. */
4745 const char *some_ld_name;
4747 /* For TARGET_LD_BUGGY_LDGP. */
4748 struct rtx_def *gp_save_rtx;
4751 /* How to allocate a 'struct machine_function'. */
4753 static struct machine_function *
4754 alpha_init_machine_status (void)
4756 return ((struct machine_function *)
4757 ggc_alloc_cleared (sizeof (struct machine_function)));
4760 /* Functions to save and restore alpha_return_addr_rtx. */
4762 /* Start the ball rolling with RETURN_ADDR_RTX. */
4765 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4767 if (count != 0)
4768 return const0_rtx;
4770 return get_hard_reg_initial_val (Pmode, REG_RA);
4773 /* Return or create a memory slot containing the gp value for the current
4774 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4777 alpha_gp_save_rtx (void)
4779 rtx seq, m = cfun->machine->gp_save_rtx;
4781 if (m == NULL)
4783 start_sequence ();
4785 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4786 m = validize_mem (m);
4787 emit_move_insn (m, pic_offset_table_rtx);
4789 seq = get_insns ();
4790 end_sequence ();
4792 /* We used to simply emit the sequence after entry_of_function.
4793 However this breaks the CFG if the first instruction in the
4794 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4795 label. Emit the sequence properly on the edge. We are only
4796 invoked from dw2_build_landing_pads and finish_eh_generation
4797 will call commit_edge_insertions thanks to a kludge. */
4798 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4800 cfun->machine->gp_save_rtx = m;
4803 return m;
4806 static int
4807 alpha_ra_ever_killed (void)
4809 rtx top;
4811 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4812 return (int)df_regs_ever_live_p (REG_RA);
4814 push_topmost_sequence ();
4815 top = get_insns ();
4816 pop_topmost_sequence ();
4818 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4822 /* Return the trap mode suffix applicable to the current
4823 instruction, or NULL. */
4825 static const char *
4826 get_trap_mode_suffix (void)
4828 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4830 switch (s)
4832 case TRAP_SUFFIX_NONE:
4833 return NULL;
4835 case TRAP_SUFFIX_SU:
4836 if (alpha_fptm >= ALPHA_FPTM_SU)
4837 return "su";
4838 return NULL;
4840 case TRAP_SUFFIX_SUI:
4841 if (alpha_fptm >= ALPHA_FPTM_SUI)
4842 return "sui";
4843 return NULL;
4845 case TRAP_SUFFIX_V_SV:
4846 switch (alpha_fptm)
4848 case ALPHA_FPTM_N:
4849 return NULL;
4850 case ALPHA_FPTM_U:
4851 return "v";
4852 case ALPHA_FPTM_SU:
4853 case ALPHA_FPTM_SUI:
4854 return "sv";
4855 default:
4856 gcc_unreachable ();
4859 case TRAP_SUFFIX_V_SV_SVI:
4860 switch (alpha_fptm)
4862 case ALPHA_FPTM_N:
4863 return NULL;
4864 case ALPHA_FPTM_U:
4865 return "v";
4866 case ALPHA_FPTM_SU:
4867 return "sv";
4868 case ALPHA_FPTM_SUI:
4869 return "svi";
4870 default:
4871 gcc_unreachable ();
4873 break;
4875 case TRAP_SUFFIX_U_SU_SUI:
4876 switch (alpha_fptm)
4878 case ALPHA_FPTM_N:
4879 return NULL;
4880 case ALPHA_FPTM_U:
4881 return "u";
4882 case ALPHA_FPTM_SU:
4883 return "su";
4884 case ALPHA_FPTM_SUI:
4885 return "sui";
4886 default:
4887 gcc_unreachable ();
4889 break;
4891 default:
4892 gcc_unreachable ();
4894 gcc_unreachable ();
4897 /* Return the rounding mode suffix applicable to the current
4898 instruction, or NULL. */
4900 static const char *
4901 get_round_mode_suffix (void)
4903 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4905 switch (s)
4907 case ROUND_SUFFIX_NONE:
4908 return NULL;
4909 case ROUND_SUFFIX_NORMAL:
4910 switch (alpha_fprm)
4912 case ALPHA_FPRM_NORM:
4913 return NULL;
4914 case ALPHA_FPRM_MINF:
4915 return "m";
4916 case ALPHA_FPRM_CHOP:
4917 return "c";
4918 case ALPHA_FPRM_DYN:
4919 return "d";
4920 default:
4921 gcc_unreachable ();
4923 break;
4925 case ROUND_SUFFIX_C:
4926 return "c";
4928 default:
4929 gcc_unreachable ();
4931 gcc_unreachable ();
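/* For illustration (assumed option settings, not taken from this file):
   compiling with -mfp-trap-mode=su and -mfp-rounding-mode=d makes the
   '/' operand code below print "sud" (with a leading slash when the
   assembler wants one), turning e.g. "addt" into "addt/sud"; with the
   usual defaults both helpers return NULL and no suffix is printed.  */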
4934 /* Locate some local-dynamic symbol still in use by this function
4935 so that we can print its name in some movdi_er_tlsldm pattern. */
4937 static int
4938 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4940 rtx x = *px;
4942 if (GET_CODE (x) == SYMBOL_REF
4943 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4945 cfun->machine->some_ld_name = XSTR (x, 0);
4946 return 1;
4949 return 0;
4952 static const char *
4953 get_some_local_dynamic_name (void)
4955 rtx insn;
4957 if (cfun->machine->some_ld_name)
4958 return cfun->machine->some_ld_name;
4960 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4961 if (INSN_P (insn)
4962 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4963 return cfun->machine->some_ld_name;
4965 gcc_unreachable ();
4968 /* Print an operand. Recognize special options, documented below. */
4970 void
4971 print_operand (FILE *file, rtx x, int code)
4973 int i;
4975 switch (code)
4977 case '~':
4978 /* Print the assembler name of the current function. */
4979 assemble_name (file, alpha_fnname);
4980 break;
4982 case '&':
4983 assemble_name (file, get_some_local_dynamic_name ());
4984 break;
4986 case '/':
4988 const char *trap = get_trap_mode_suffix ();
4989 const char *round = get_round_mode_suffix ();
4991 if (trap || round)
4992 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4993 (trap ? trap : ""), (round ? round : ""));
4994 break;
4997 case ',':
4998 /* Generate the single-precision instruction suffix. */
4999 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5000 break;
5002 case '-':
5003 /* Generate the double-precision instruction suffix. */
5004 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5005 break;
5007 case '#':
5008 if (alpha_this_literal_sequence_number == 0)
5009 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5010 fprintf (file, "%d", alpha_this_literal_sequence_number);
5011 break;
5013 case '*':
5014 if (alpha_this_gpdisp_sequence_number == 0)
5015 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5016 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5017 break;
5019 case 'H':
5020 if (GET_CODE (x) == HIGH)
5021 output_addr_const (file, XEXP (x, 0));
5022 else
5023 output_operand_lossage ("invalid %%H value");
5024 break;
5026 case 'J':
5028 const char *lituse;
5030 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5032 x = XVECEXP (x, 0, 0);
5033 lituse = "lituse_tlsgd";
5035 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5037 x = XVECEXP (x, 0, 0);
5038 lituse = "lituse_tlsldm";
5040 else if (GET_CODE (x) == CONST_INT)
5041 lituse = "lituse_jsr";
5042 else
5044 output_operand_lossage ("invalid %%J value");
5045 break;
5048 if (x != const0_rtx)
5049 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5051 break;
5053 case 'j':
5055 const char *lituse;
5057 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5058 lituse = "lituse_jsrdirect";
5059 #else
5060 lituse = "lituse_jsr";
5061 #endif
5063 gcc_assert (INTVAL (x) != 0);
5064 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5066 break;
5067 case 'r':
5068 /* If this operand is the constant zero, write it as "$31". */
5069 if (GET_CODE (x) == REG)
5070 fprintf (file, "%s", reg_names[REGNO (x)]);
5071 else if (x == CONST0_RTX (GET_MODE (x)))
5072 fprintf (file, "$31");
5073 else
5074 output_operand_lossage ("invalid %%r value");
5075 break;
5077 case 'R':
5078 /* Similar, but for floating-point. */
5079 if (GET_CODE (x) == REG)
5080 fprintf (file, "%s", reg_names[REGNO (x)]);
5081 else if (x == CONST0_RTX (GET_MODE (x)))
5082 fprintf (file, "$f31");
5083 else
5084 output_operand_lossage ("invalid %%R value");
5085 break;
5087 case 'N':
5088 /* Write the 1's complement of a constant. */
5089 if (GET_CODE (x) != CONST_INT)
5090 output_operand_lossage ("invalid %%N value");
5092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5093 break;
5095 case 'P':
5096 /* Write 1 << C, for a constant C. */
5097 if (GET_CODE (x) != CONST_INT)
5098 output_operand_lossage ("invalid %%P value");
5100 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5101 break;
5103 case 'h':
5104 /* Write the high-order 16 bits of a constant, sign-extended. */
5105 if (GET_CODE (x) != CONST_INT)
5106 output_operand_lossage ("invalid %%h value");
5108 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5109 break;
5111 case 'L':
5112 /* Write the low-order 16 bits of a constant, sign-extended. */
5113 if (GET_CODE (x) != CONST_INT)
5114 output_operand_lossage ("invalid %%L value");
5116 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5117 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5118 break;
5120 case 'm':
5121 /* Write mask for ZAP insn. */
5122 if (GET_CODE (x) == CONST_DOUBLE)
5124 HOST_WIDE_INT mask = 0;
5125 HOST_WIDE_INT value;
5127 value = CONST_DOUBLE_LOW (x);
5128 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5129 i++, value >>= 8)
5130 if (value & 0xff)
5131 mask |= (1 << i);
5133 value = CONST_DOUBLE_HIGH (x);
5134 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5135 i++, value >>= 8)
5136 if (value & 0xff)
5137 mask |= (1 << (i + sizeof (int)));
5139 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5142 else if (GET_CODE (x) == CONST_INT)
5144 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5146 for (i = 0; i < 8; i++, value >>= 8)
5147 if (value & 0xff)
5148 mask |= (1 << i);
5150 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5152 else
5153 output_operand_lossage ("invalid %%m value");
5154 break;
5156 case 'M':
5157 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5158 if (GET_CODE (x) != CONST_INT
5159 || (INTVAL (x) != 8 && INTVAL (x) != 16
5160 && INTVAL (x) != 32 && INTVAL (x) != 64))
5161 output_operand_lossage ("invalid %%M value");
5163 fprintf (file, "%s",
5164 (INTVAL (x) == 8 ? "b"
5165 : INTVAL (x) == 16 ? "w"
5166 : INTVAL (x) == 32 ? "l"
5167 : "q"));
5168 break;
5170 case 'U':
5171 /* Similar, except do it from the mask. */
5172 if (GET_CODE (x) == CONST_INT)
5174 HOST_WIDE_INT value = INTVAL (x);
5176 if (value == 0xff)
5178 fputc ('b', file);
5179 break;
5181 if (value == 0xffff)
5183 fputc ('w', file);
5184 break;
5186 if (value == 0xffffffff)
5188 fputc ('l', file);
5189 break;
5191 if (value == -1)
5193 fputc ('q', file);
5194 break;
5197 else if (HOST_BITS_PER_WIDE_INT == 32
5198 && GET_CODE (x) == CONST_DOUBLE
5199 && CONST_DOUBLE_LOW (x) == 0xffffffff
5200 && CONST_DOUBLE_HIGH (x) == 0)
5202 fputc ('l', file);
5203 break;
5205 output_operand_lossage ("invalid %%U value");
5206 break;
5208 case 's':
5209 /* Write the constant value divided by 8 for little-endian mode or
5210 (56 - value) / 8 for big-endian mode. */
5212 if (GET_CODE (x) != CONST_INT
5213 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5214 ? 56
5215 : 64)
5216 || (INTVAL (x) & 7) != 0)
5217 output_operand_lossage ("invalid %%s value");
5219 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5220 WORDS_BIG_ENDIAN
5221 ? (56 - INTVAL (x)) / 8
5222 : INTVAL (x) / 8);
5223 break;
5225 case 'S':
5226 /* Same, except compute (64 - c) / 8. */
5228 if (GET_CODE (x) != CONST_INT
5229 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5230 || (INTVAL (x) & 7) != 0)
5231 output_operand_lossage ("invalid %%S value");
5233 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5234 break;
5236 case 't':
5238 /* On Unicos/Mk systems: use a DEX expression if the symbol
5239 clashes with a register name. */
5240 int dex = unicosmk_need_dex (x);
5241 if (dex)
5242 fprintf (file, "DEX(%d)", dex);
5243 else
5244 output_addr_const (file, x);
5246 break;
5248 case 'C': case 'D': case 'c': case 'd':
5249 /* Write out comparison name. */
5251 enum rtx_code c = GET_CODE (x);
5253 if (!COMPARISON_P (x))
5254 output_operand_lossage ("invalid %%C value");
5256 else if (code == 'D')
5257 c = reverse_condition (c);
5258 else if (code == 'c')
5259 c = swap_condition (c);
5260 else if (code == 'd')
5261 c = swap_condition (reverse_condition (c));
5263 if (c == LEU)
5264 fprintf (file, "ule");
5265 else if (c == LTU)
5266 fprintf (file, "ult");
5267 else if (c == UNORDERED)
5268 fprintf (file, "un");
5269 else
5270 fprintf (file, "%s", GET_RTX_NAME (c));
5272 break;
5274 case 'E':
5275 /* Write the divide or modulus operator. */
5276 switch (GET_CODE (x))
5278 case DIV:
5279 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5280 break;
5281 case UDIV:
5282 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5283 break;
5284 case MOD:
5285 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5286 break;
5287 case UMOD:
5288 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5289 break;
5290 default:
5291 output_operand_lossage ("invalid %%E value");
5292 break;
5294 break;
5296 case 'A':
5297 /* Write "_u" for unaligned access. */
5298 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5299 fprintf (file, "_u");
5300 break;
5302 case 0:
5303 if (GET_CODE (x) == REG)
5304 fprintf (file, "%s", reg_names[REGNO (x)]);
5305 else if (GET_CODE (x) == MEM)
5306 output_address (XEXP (x, 0));
5307 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5309 switch (XINT (XEXP (x, 0), 1))
5311 case UNSPEC_DTPREL:
5312 case UNSPEC_TPREL:
5313 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5314 break;
5315 default:
5316 output_operand_lossage ("unknown relocation unspec");
5317 break;
5320 else
5321 output_addr_const (file, x);
5322 break;
5324 default:
5325 output_operand_lossage ("invalid %%xn code");
5329 void
5330 print_operand_address (FILE *file, rtx addr)
5332 int basereg = 31;
5333 HOST_WIDE_INT offset = 0;
5335 if (GET_CODE (addr) == AND)
5336 addr = XEXP (addr, 0);
5338 if (GET_CODE (addr) == PLUS
5339 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5341 offset = INTVAL (XEXP (addr, 1));
5342 addr = XEXP (addr, 0);
5345 if (GET_CODE (addr) == LO_SUM)
5347 const char *reloc16, *reloclo;
5348 rtx op1 = XEXP (addr, 1);
5350 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5352 op1 = XEXP (op1, 0);
5353 switch (XINT (op1, 1))
5355 case UNSPEC_DTPREL:
5356 reloc16 = NULL;
5357 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5358 break;
5359 case UNSPEC_TPREL:
5360 reloc16 = NULL;
5361 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5362 break;
5363 default:
5364 output_operand_lossage ("unknown relocation unspec");
5365 return;
5368 output_addr_const (file, XVECEXP (op1, 0, 0));
5370 else
5372 reloc16 = "gprel";
5373 reloclo = "gprellow";
5374 output_addr_const (file, op1);
5377 if (offset)
5378 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5380 addr = XEXP (addr, 0);
5381 switch (GET_CODE (addr))
5383 case REG:
5384 basereg = REGNO (addr);
5385 break;
5387 case SUBREG:
5388 basereg = subreg_regno (addr);
5389 break;
5391 default:
5392 gcc_unreachable ();
5395 fprintf (file, "($%d)\t\t!%s", basereg,
5396 (basereg == 29 ? reloc16 : reloclo));
5397 return;
5400 switch (GET_CODE (addr))
5402 case REG:
5403 basereg = REGNO (addr);
5404 break;
5406 case SUBREG:
5407 basereg = subreg_regno (addr);
5408 break;
5410 case CONST_INT:
5411 offset = INTVAL (addr);
5412 break;
5414 #if TARGET_ABI_OPEN_VMS
5415 case SYMBOL_REF:
5416 fprintf (file, "%s", XSTR (addr, 0));
5417 return;
5419 case CONST:
5420 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5421 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5422 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5423 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5424 INTVAL (XEXP (XEXP (addr, 0), 1)));
5425 return;
5427 #endif
5428 default:
5429 gcc_unreachable ();
5432 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5435 /* Emit RTL insns to initialize the variable parts of a trampoline at
5436 TRAMP. FNADDR is an RTX for the address of the function's pure
5437 code. CXT is an RTX for the static chain value for the function.
5439 The three offset parameters are for the individual template's
5440 layout. A JMPOFS < 0 indicates that the trampoline does not
5441 contain instructions at all.
5443 We assume here that a function will be called many more times than
5444 its address is taken (e.g., it might be passed to qsort), so we
5445 take the trouble to initialize the "hint" field in the JMP insn.
5446 Note that the hint field is PC (new) + 4 * bits 13:0. */
5448 void
5449 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5450 int fnofs, int cxtofs, int jmpofs)
5452 rtx temp, temp1, addr;
5453 /* VMS really uses DImode pointers in memory at this point. */
5454 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5456 #ifdef POINTERS_EXTEND_UNSIGNED
5457 fnaddr = convert_memory_address (mode, fnaddr);
5458 cxt = convert_memory_address (mode, cxt);
5459 #endif
5461 /* Store function address and CXT. */
5462 addr = memory_address (mode, plus_constant (tramp, fnofs));
5463 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5464 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5465 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5467 /* This has been disabled since the hint only has a 32k range, and in
5468 no existing OS is the stack within 32k of the text segment. */
5469 if (0 && jmpofs >= 0)
5471 /* Compute hint value. */
5472 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5473 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5474 OPTAB_WIDEN);
5475 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5476 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5477 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5478 GEN_INT (0x3fff), 0);
5480 /* Merge in the hint. */
5481 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5482 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5483 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5484 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5485 OPTAB_WIDEN);
5486 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5489 #ifdef ENABLE_EXECUTE_STACK
5490 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5491 0, VOIDmode, 1, tramp, Pmode);
5492 #endif
5494 if (jmpofs >= 0)
5495 emit_insn (gen_imb ());
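/* Restating the disabled hint computation above in one formula: the JMP
   hint would be ((fnaddr - (jmp_insn_address + 4)) >> 2) & 0x3fff, i.e.
   the low 14 bits of the instruction-word displacement from the updated
   PC, merged into the low bits of the jmp instruction word.  */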
5498 /* Determine where to put an argument to a function.
5499 Value is zero to push the argument on the stack,
5500 or a hard register in which to store the argument.
5502 MODE is the argument's machine mode.
5503 TYPE is the data type of the argument (as a tree).
5504 This is null for libcalls where that information may
5505 not be available.
5506 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5507 the preceding args and about the function being called.
5508 NAMED is nonzero if this argument is a named parameter
5509 (otherwise it is an extra parameter matching an ellipsis).
5511 On Alpha the first 6 words of args are normally in registers
5512 and the rest are pushed. */
5515 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5516 int named ATTRIBUTE_UNUSED)
5518 int basereg;
5519 int num_args;
5521 /* Don't get confused and pass small structures in FP registers. */
5522 if (type && AGGREGATE_TYPE_P (type))
5523 basereg = 16;
5524 else
5526 #ifdef ENABLE_CHECKING
5527 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5528 values here. */
5529 gcc_assert (!COMPLEX_MODE_P (mode));
5530 #endif
5532 /* Set up defaults for FP operands passed in FP registers, and
5533 integral operands passed in integer registers. */
5534 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5535 basereg = 32 + 16;
5536 else
5537 basereg = 16;
5540 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5541 the three platforms, so we can't avoid conditional compilation. */
5542 #if TARGET_ABI_OPEN_VMS
5544 if (mode == VOIDmode)
5545 return alpha_arg_info_reg_val (cum);
5547 num_args = cum.num_args;
5548 if (num_args >= 6
5549 || targetm.calls.must_pass_in_stack (mode, type))
5550 return NULL_RTX;
5552 #elif TARGET_ABI_UNICOSMK
5554 int size;
5556 /* If this is the last argument, generate the call info word (CIW). */
5557 /* ??? We don't include the caller's line number in the CIW because
5558 I don't know how to determine it if debug info is turned off. */
5559 if (mode == VOIDmode)
5561 int i;
5562 HOST_WIDE_INT lo;
5563 HOST_WIDE_INT hi;
5564 rtx ciw;
5566 lo = 0;
5568 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5569 if (cum.reg_args_type[i])
5570 lo |= (1 << (7 - i));
5572 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5573 lo |= 7;
5574 else
5575 lo |= cum.num_reg_words;
5577 #if HOST_BITS_PER_WIDE_INT == 32
5578 hi = (cum.num_args << 20) | cum.num_arg_words;
5579 #else
5580 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5581 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5582 hi = 0;
5583 #endif
5584 ciw = immed_double_const (lo, hi, DImode);
5586 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5587 UNSPEC_UMK_LOAD_CIW);
5590 size = ALPHA_ARG_SIZE (mode, type, named);
5591 num_args = cum.num_reg_words;
5592 if (cum.force_stack
5593 || cum.num_reg_words + size > 6
5594 || targetm.calls.must_pass_in_stack (mode, type))
5595 return NULL_RTX;
5596 else if (type && TYPE_MODE (type) == BLKmode)
5598 rtx reg1, reg2;
5600 reg1 = gen_rtx_REG (DImode, num_args + 16);
5601 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5603 /* The argument fits in two registers. Note that we still need to
5604 reserve a register for empty structures. */
5605 if (size == 0)
5606 return NULL_RTX;
5607 else if (size == 1)
5608 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5609 else
5611 reg2 = gen_rtx_REG (DImode, num_args + 17);
5612 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5613 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5617 #elif TARGET_ABI_OSF
5619 if (cum >= 6)
5620 return NULL_RTX;
5621 num_args = cum;
5623 /* VOID is passed as a special flag for "last argument". */
5624 if (type == void_type_node)
5625 basereg = 16;
5626 else if (targetm.calls.must_pass_in_stack (mode, type))
5627 return NULL_RTX;
5629 #else
5630 #error Unhandled ABI
5631 #endif
5633 return gen_rtx_REG (mode, num_args + basereg);
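/* Concretely, on the OSF/1 ABI the first six argument words land in
   $16..$21 (basereg 16), or in $f16..$f21 (basereg 32 + 16 = 48) when
   the operand is floating point and FP registers are enabled; anything
   beyond the sixth word is passed on the stack (the NULL_RTX returns
   above).  */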
5636 static int
5637 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5638 enum machine_mode mode ATTRIBUTE_UNUSED,
5639 tree type ATTRIBUTE_UNUSED,
5640 bool named ATTRIBUTE_UNUSED)
5642 int words = 0;
5644 #if TARGET_ABI_OPEN_VMS
5645 if (cum->num_args < 6
5646 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5647 words = 6 - cum->num_args;
5648 #elif TARGET_ABI_UNICOSMK
5649 /* Never any split arguments. */
5650 #elif TARGET_ABI_OSF
5651 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5652 words = 6 - *cum;
5653 #else
5654 #error Unhandled ABI
5655 #endif
5657 return words * UNITS_PER_WORD;
5661 /* Return true if TYPE must be returned in memory, instead of in registers. */
5663 static bool
5664 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5666 enum machine_mode mode = VOIDmode;
5667 int size;
5669 if (type)
5671 mode = TYPE_MODE (type);
5673 /* All aggregates are returned in memory. */
5674 if (AGGREGATE_TYPE_P (type))
5675 return true;
5678 size = GET_MODE_SIZE (mode);
5679 switch (GET_MODE_CLASS (mode))
5681 case MODE_VECTOR_FLOAT:
5682 /* Pass all float vectors in memory, like an aggregate. */
5683 return true;
5685 case MODE_COMPLEX_FLOAT:
5686 /* We judge complex floats on the size of their element,
5687 not the size of the whole type. */
5688 size = GET_MODE_UNIT_SIZE (mode);
5689 break;
5691 case MODE_INT:
5692 case MODE_FLOAT:
5693 case MODE_COMPLEX_INT:
5694 case MODE_VECTOR_INT:
5695 break;
5697 default:
5698 /* ??? We get called on all sorts of random stuff from
5699 aggregate_value_p. We must return something, but it's not
5700 clear what's safe to return. Pretend it's a struct I
5701 guess. */
5702 return true;
5705 /* Otherwise types must fit in one register. */
5706 return size > UNITS_PER_WORD;
5709 /* Return true if TYPE should be passed by invisible reference. */
5711 static bool
5712 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5713 enum machine_mode mode,
5714 const_tree type ATTRIBUTE_UNUSED,
5715 bool named ATTRIBUTE_UNUSED)
5717 return mode == TFmode || mode == TCmode;
5720 /* Define how to find the value returned by a function. VALTYPE is the
5721 data type of the value (as a tree). If the precise function being
5722 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5723 MODE is set instead of VALTYPE for libcalls.
5725 On Alpha the value is found in $0 for integer functions and
5726 $f0 for floating-point functions. */
5729 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5730 enum machine_mode mode)
5732 unsigned int regnum, dummy;
5733 enum mode_class class;
5735 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5737 if (valtype)
5738 mode = TYPE_MODE (valtype);
5740 class = GET_MODE_CLASS (mode);
5741 switch (class)
5743 case MODE_INT:
5744 PROMOTE_MODE (mode, dummy, valtype);
5745 /* FALLTHRU */
5747 case MODE_COMPLEX_INT:
5748 case MODE_VECTOR_INT:
5749 regnum = 0;
5750 break;
5752 case MODE_FLOAT:
5753 regnum = 32;
5754 break;
5756 case MODE_COMPLEX_FLOAT:
5758 enum machine_mode cmode = GET_MODE_INNER (mode);
5760 return gen_rtx_PARALLEL
5761 (VOIDmode,
5762 gen_rtvec (2,
5763 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5764 const0_rtx),
5765 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5766 GEN_INT (GET_MODE_SIZE (cmode)))));
5769 default:
5770 gcc_unreachable ();
5773 return gen_rtx_REG (mode, regnum);
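/* For example: an int or long result comes back in $0 (regnum 0), a
   double in $f0 (regnum 32), and a complex double as the PARALLEL built
   above, with the real part in $f0 and the imaginary part in $f1.  */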
5776 /* TCmode complex values are passed by invisible reference. We
5777 should not split these values. */
5779 static bool
5780 alpha_split_complex_arg (const_tree type)
5782 return TYPE_MODE (type) != TCmode;
5785 static tree
5786 alpha_build_builtin_va_list (void)
5788 tree base, ofs, space, record, type_decl;
5790 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5791 return ptr_type_node;
5793 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5794 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5795 TREE_CHAIN (record) = type_decl;
5796 TYPE_NAME (record) = type_decl;
5798 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5800 /* Dummy field to prevent alignment warnings. */
5801 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5802 DECL_FIELD_CONTEXT (space) = record;
5803 DECL_ARTIFICIAL (space) = 1;
5804 DECL_IGNORED_P (space) = 1;
5806 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5807 integer_type_node);
5808 DECL_FIELD_CONTEXT (ofs) = record;
5809 TREE_CHAIN (ofs) = space;
5811 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5812 ptr_type_node);
5813 DECL_FIELD_CONTEXT (base) = record;
5814 TREE_CHAIN (base) = ofs;
5816 TYPE_FIELDS (record) = base;
5817 layout_type (record);
5819 va_list_gpr_counter_field = ofs;
5820 return record;
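/* A sketch of the record built above, written as ordinary C.  Field order
   follows the TYPE_FIELDS chain constructed here (base, then offset, then
   an unnamed dummy int that only exists to quiet alignment warnings); the
   typedef name and the padding field's name are illustrative only:

	typedef struct __va_list_tag
	{
	  char *__base;
	  int __offset;
	  int __illustrative_pad;
	} __alpha_osf_va_list;
*/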
5823 #if TARGET_ABI_OSF
5824 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5825 and constant additions. */
5827 static tree
5828 va_list_skip_additions (tree lhs)
5830 tree rhs, stmt;
5832 if (TREE_CODE (lhs) != SSA_NAME)
5833 return lhs;
5835 for (;;)
5837 stmt = SSA_NAME_DEF_STMT (lhs);
5839 if (TREE_CODE (stmt) == PHI_NODE)
5840 return stmt;
5842 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5843 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5844 return lhs;
5846 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5847 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5848 rhs = TREE_OPERAND (rhs, 0);
5850 if ((TREE_CODE (rhs) != NOP_EXPR
5851 && TREE_CODE (rhs) != CONVERT_EXPR
5852 && ((TREE_CODE (rhs) != PLUS_EXPR
5853 && TREE_CODE (rhs) != POINTER_PLUS_EXPR)
5854 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5855 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5856 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5857 return rhs;
5859 lhs = TREE_OPERAND (rhs, 0);
5863 /* Check if LHS = RHS statement is
5864 LHS = *(ap.__base + ap.__offset + cst)
5866 LHS = *(ap.__base
5867 + ((ap.__offset + cst <= 47)
5868 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5869 If the former, indicate that GPR registers are needed,
5870 if the latter, indicate that FPR registers are needed.
5872 Also look for LHS = (*ptr).field, where ptr is one of the forms
5873 listed above.
5875 On alpha, cfun->va_list_gpr_size is used as size of the needed
5876 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5877 registers are needed and bit 1 set if FPR registers are needed.
5878 Return true if va_list references should not be scanned for the
5879 current statement. */
5881 static bool
5882 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree rhs)
5884 tree base, offset, arg1, arg2;
5885 int offset_arg = 1;
5887 while (handled_component_p (rhs))
5888 rhs = TREE_OPERAND (rhs, 0);
5889 if (TREE_CODE (rhs) != INDIRECT_REF
5890 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5891 return false;
5893 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5894 if (lhs == NULL_TREE
5895 || TREE_CODE (lhs) != POINTER_PLUS_EXPR)
5896 return false;
5898 base = TREE_OPERAND (lhs, 0);
5899 if (TREE_CODE (base) == SSA_NAME)
5900 base = va_list_skip_additions (base);
5902 if (TREE_CODE (base) != COMPONENT_REF
5903 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5905 base = TREE_OPERAND (lhs, 0);
5906 if (TREE_CODE (base) == SSA_NAME)
5907 base = va_list_skip_additions (base);
5909 if (TREE_CODE (base) != COMPONENT_REF
5910 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5911 return false;
5913 offset_arg = 0;
5916 base = get_base_address (base);
5917 if (TREE_CODE (base) != VAR_DECL
5918 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5919 return false;
5921 offset = TREE_OPERAND (lhs, offset_arg);
5922 if (TREE_CODE (offset) == SSA_NAME)
5923 offset = va_list_skip_additions (offset);
5925 if (TREE_CODE (offset) == PHI_NODE)
5927 HOST_WIDE_INT sub;
5929 if (PHI_NUM_ARGS (offset) != 2)
5930 goto escapes;
5932 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5933 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5934 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5936 tree tem = arg1;
5937 arg1 = arg2;
5938 arg2 = tem;
5940 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5941 goto escapes;
5943 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5944 goto escapes;
5946 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5947 if (TREE_CODE (arg2) == MINUS_EXPR)
5948 sub = -sub;
5949 if (sub < -48 || sub > -32)
5950 goto escapes;
5952 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5953 if (arg1 != arg2)
5954 goto escapes;
5956 if (TREE_CODE (arg1) == SSA_NAME)
5957 arg1 = va_list_skip_additions (arg1);
5959 if (TREE_CODE (arg1) != COMPONENT_REF
5960 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5961 || get_base_address (arg1) != base)
5962 goto escapes;
5964 /* Need floating point regs. */
5965 cfun->va_list_fpr_size |= 2;
5967 else if (TREE_CODE (offset) != COMPONENT_REF
5968 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5969 || get_base_address (offset) != base)
5970 goto escapes;
5971 else
5972 /* Need general regs. */
5973 cfun->va_list_fpr_size |= 1;
5974 return false;
5976 escapes:
5977 si->va_list_escapes = true;
5978 return false;
5980 #endif
5982 /* Perform any needed actions for a function that is receiving a
5983 variable number of arguments. */
5985 static void
5986 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5987 tree type, int *pretend_size, int no_rtl)
5989 CUMULATIVE_ARGS cum = *pcum;
5991 /* Skip the current argument. */
5992 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5994 #if TARGET_ABI_UNICOSMK
5995 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5996 arguments on the stack. Unfortunately, it doesn't always store the first
5997 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5998 with stdargs as we always have at least one named argument there. */
5999 if (cum.num_reg_words < 6)
6001 if (!no_rtl)
6003 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6004 emit_insn (gen_arg_home_umk ());
6006 *pretend_size = 0;
6008 #elif TARGET_ABI_OPEN_VMS
6009 /* For VMS, we allocate space for all 6 arg registers plus a count.
6011 However, if NO registers need to be saved, don't allocate any space.
6012 This is not only because we won't need the space, but because AP
6013 includes the current_pretend_args_size and we don't want to mess up
6014 any ap-relative addresses already made. */
6015 if (cum.num_args < 6)
6017 if (!no_rtl)
6019 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6020 emit_insn (gen_arg_home ());
6022 *pretend_size = 7 * UNITS_PER_WORD;
6024 #else
6025 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6026 only push those that are remaining. However, if NO registers need to
6027 be saved, don't allocate any space. This is not only because we won't
6028 need the space, but because AP includes the current_pretend_args_size
6029 and we don't want to mess up any ap-relative addresses already made.
6031 If we are not to use the floating-point registers, save the integer
6032 registers where we would put the floating-point registers. This is
6033 not the most efficient way to implement varargs with just one register
6034 class, but it isn't worth doing anything more efficient in this rare
6035 case. */
6036 if (cum >= 6)
6037 return;
6039 if (!no_rtl)
6041 int count;
6042 alias_set_type set = get_varargs_alias_set ();
6043 rtx tmp;
6045 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6046 if (count > 6 - cum)
6047 count = 6 - cum;
6049 /* Detect whether integer registers or floating-point registers
6050 are needed by the detected va_arg statements. See above for
6051 how these values are computed. Note that the "escape" value
6052 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6053 these bits set. */
6054 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6056 if (cfun->va_list_fpr_size & 1)
6058 tmp = gen_rtx_MEM (BLKmode,
6059 plus_constant (virtual_incoming_args_rtx,
6060 (cum + 6) * UNITS_PER_WORD));
6061 MEM_NOTRAP_P (tmp) = 1;
6062 set_mem_alias_set (tmp, set);
6063 move_block_from_reg (16 + cum, tmp, count);
6066 if (cfun->va_list_fpr_size & 2)
6068 tmp = gen_rtx_MEM (BLKmode,
6069 plus_constant (virtual_incoming_args_rtx,
6070 cum * UNITS_PER_WORD));
6071 MEM_NOTRAP_P (tmp) = 1;
6072 set_mem_alias_set (tmp, set);
6073 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6076 *pretend_size = 12 * UNITS_PER_WORD;
6077 #endif
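/* On OSF, the code above reserves a 96-byte pretend-args block: the FP
   argument registers are dumped in the first 48 bytes and the integer
   argument registers in the next 48, with only the registers that may
   still hold unnamed arguments actually stored (subject to the
   va_list_gpr/fpr_size bits).  alpha_va_start below depends on exactly
   this 48-byte split.  */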
6080 static void
6081 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6083 HOST_WIDE_INT offset;
6084 tree t, offset_field, base_field;
6086 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6087 return;
6089 if (TARGET_ABI_UNICOSMK)
6090 std_expand_builtin_va_start (valist, nextarg);
6092 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6093 up by 48, storing fp arg registers in the first 48 bytes, and the
6094 integer arg registers in the next 48 bytes. This is only done,
6095 however, if any integer registers need to be stored.
6097 If no integer registers need be stored, then we must subtract 48
6098 in order to account for the integer arg registers which are counted
6099 in argsize above, but which are not actually stored on the stack.
6100 Must further be careful here about structures straddling the last
6101 integer argument register; that futzes with pretend_args_size,
6102 which changes the meaning of AP. */
6104 if (NUM_ARGS < 6)
6105 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6106 else
6107 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6109 if (TARGET_ABI_OPEN_VMS)
6111 nextarg = plus_constant (nextarg, offset);
6112 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6113 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6114 make_tree (ptr_type_node, nextarg));
6115 TREE_SIDE_EFFECTS (t) = 1;
6117 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6119 else
6121 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6122 offset_field = TREE_CHAIN (base_field);
6124 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6125 valist, base_field, NULL_TREE);
6126 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6127 valist, offset_field, NULL_TREE);
6129 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6130 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6131 size_int (offset));
6132 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6133 TREE_SIDE_EFFECTS (t) = 1;
6134 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6136 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6137 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6138 offset_field, t);
6139 TREE_SIDE_EFFECTS (t) = 1;
6140 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6144 static tree
6145 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6147 tree type_size, ptr_type, addend, t, addr, internal_post;
6149 /* If the type could not be passed in registers, skip the block
6150 reserved for the registers. */
6151 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6153 t = build_int_cst (TREE_TYPE (offset), 6*8);
6154 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6155 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6156 gimplify_and_add (t, pre_p);
6159 addend = offset;
6160 ptr_type = build_pointer_type (type);
6162 if (TREE_CODE (type) == COMPLEX_TYPE)
6164 tree real_part, imag_part, real_temp;
6166 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6167 offset, pre_p);
6169 /* Copy the value into a new temporary, lest the formal temporary
6170 be reused out from under us. */
6171 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6173 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6174 offset, pre_p);
6176 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6178 else if (TREE_CODE (type) == REAL_TYPE)
6180 tree fpaddend, cond, fourtyeight;
6182 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6183 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6184 addend, fourtyeight);
6185 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6186 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6187 fpaddend, addend);
6190 /* Build the final address and force that value into a temporary. */
6191 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6192 fold_convert (sizetype, addend));
6193 internal_post = NULL;
6194 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6195 append_to_statement_list (internal_post, pre_p);
6197 /* Update the offset field. */
6198 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6199 if (type_size == NULL || TREE_OVERFLOW (type_size))
6200 t = size_zero_node;
6201 else
6203 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6204 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6205 t = size_binop (MULT_EXPR, t, size_int (8));
6207 t = fold_convert (TREE_TYPE (offset), t);
6208 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6209 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6210 gimplify_and_add (t, pre_p);
6212 return build_va_arg_indirect_ref (addr);
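/* Two details of the arithmetic above, restated for clarity: the offset
   update rounds the argument size up to whole 8-byte slots, i.e.
   t = ((size + 7) / 8) * 8, so even an int or float consumes a full
   slot; and for REAL_TYPE arguments whose offset is still below 48 the
   addend is biased down by 48, redirecting the load into the FP register
   save area that sits just below __base.  */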
6215 static tree
6216 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6218 tree offset_field, base_field, offset, base, t, r;
6219 bool indirect;
6221 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6222 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6224 base_field = TYPE_FIELDS (va_list_type_node);
6225 offset_field = TREE_CHAIN (base_field);
6226 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6227 valist, base_field, NULL_TREE);
6228 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6229 valist, offset_field, NULL_TREE);
6231 /* Pull the fields of the structure out into temporaries. Since we never
6232 modify the base field, we can use a formal temporary. Sign-extend the
6233 offset field so that it's the proper width for pointer arithmetic. */
6234 base = get_formal_tmp_var (base_field, pre_p);
6236 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6237 offset = get_initialized_tmp_var (t, pre_p, NULL);
6239 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6240 if (indirect)
6241 type = build_pointer_type (type);
6243 /* Find the value. Note that this will be a stable indirection, or
6244 a composite of stable indirections in the case of complex. */
6245 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6247 /* Stuff the offset temporary back into its field. */
6248 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6249 fold_convert (TREE_TYPE (offset_field), offset));
6250 gimplify_and_add (t, pre_p);
6252 if (indirect)
6253 r = build_va_arg_indirect_ref (r);
6255 return r;
6258 /* Builtins. */
6260 enum alpha_builtin
6262 ALPHA_BUILTIN_CMPBGE,
6263 ALPHA_BUILTIN_EXTBL,
6264 ALPHA_BUILTIN_EXTWL,
6265 ALPHA_BUILTIN_EXTLL,
6266 ALPHA_BUILTIN_EXTQL,
6267 ALPHA_BUILTIN_EXTWH,
6268 ALPHA_BUILTIN_EXTLH,
6269 ALPHA_BUILTIN_EXTQH,
6270 ALPHA_BUILTIN_INSBL,
6271 ALPHA_BUILTIN_INSWL,
6272 ALPHA_BUILTIN_INSLL,
6273 ALPHA_BUILTIN_INSQL,
6274 ALPHA_BUILTIN_INSWH,
6275 ALPHA_BUILTIN_INSLH,
6276 ALPHA_BUILTIN_INSQH,
6277 ALPHA_BUILTIN_MSKBL,
6278 ALPHA_BUILTIN_MSKWL,
6279 ALPHA_BUILTIN_MSKLL,
6280 ALPHA_BUILTIN_MSKQL,
6281 ALPHA_BUILTIN_MSKWH,
6282 ALPHA_BUILTIN_MSKLH,
6283 ALPHA_BUILTIN_MSKQH,
6284 ALPHA_BUILTIN_UMULH,
6285 ALPHA_BUILTIN_ZAP,
6286 ALPHA_BUILTIN_ZAPNOT,
6287 ALPHA_BUILTIN_AMASK,
6288 ALPHA_BUILTIN_IMPLVER,
6289 ALPHA_BUILTIN_RPCC,
6290 ALPHA_BUILTIN_THREAD_POINTER,
6291 ALPHA_BUILTIN_SET_THREAD_POINTER,
6293 /* TARGET_MAX */
6294 ALPHA_BUILTIN_MINUB8,
6295 ALPHA_BUILTIN_MINSB8,
6296 ALPHA_BUILTIN_MINUW4,
6297 ALPHA_BUILTIN_MINSW4,
6298 ALPHA_BUILTIN_MAXUB8,
6299 ALPHA_BUILTIN_MAXSB8,
6300 ALPHA_BUILTIN_MAXUW4,
6301 ALPHA_BUILTIN_MAXSW4,
6302 ALPHA_BUILTIN_PERR,
6303 ALPHA_BUILTIN_PKLB,
6304 ALPHA_BUILTIN_PKWB,
6305 ALPHA_BUILTIN_UNPKBL,
6306 ALPHA_BUILTIN_UNPKBW,
6308 /* TARGET_CIX */
6309 ALPHA_BUILTIN_CTTZ,
6310 ALPHA_BUILTIN_CTLZ,
6311 ALPHA_BUILTIN_CTPOP,
6313 ALPHA_BUILTIN_max
6316 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6317 CODE_FOR_builtin_cmpbge,
6318 CODE_FOR_builtin_extbl,
6319 CODE_FOR_builtin_extwl,
6320 CODE_FOR_builtin_extll,
6321 CODE_FOR_builtin_extql,
6322 CODE_FOR_builtin_extwh,
6323 CODE_FOR_builtin_extlh,
6324 CODE_FOR_builtin_extqh,
6325 CODE_FOR_builtin_insbl,
6326 CODE_FOR_builtin_inswl,
6327 CODE_FOR_builtin_insll,
6328 CODE_FOR_builtin_insql,
6329 CODE_FOR_builtin_inswh,
6330 CODE_FOR_builtin_inslh,
6331 CODE_FOR_builtin_insqh,
6332 CODE_FOR_builtin_mskbl,
6333 CODE_FOR_builtin_mskwl,
6334 CODE_FOR_builtin_mskll,
6335 CODE_FOR_builtin_mskql,
6336 CODE_FOR_builtin_mskwh,
6337 CODE_FOR_builtin_msklh,
6338 CODE_FOR_builtin_mskqh,
6339 CODE_FOR_umuldi3_highpart,
6340 CODE_FOR_builtin_zap,
6341 CODE_FOR_builtin_zapnot,
6342 CODE_FOR_builtin_amask,
6343 CODE_FOR_builtin_implver,
6344 CODE_FOR_builtin_rpcc,
6345 CODE_FOR_load_tp,
6346 CODE_FOR_set_tp,
6348 /* TARGET_MAX */
6349 CODE_FOR_builtin_minub8,
6350 CODE_FOR_builtin_minsb8,
6351 CODE_FOR_builtin_minuw4,
6352 CODE_FOR_builtin_minsw4,
6353 CODE_FOR_builtin_maxub8,
6354 CODE_FOR_builtin_maxsb8,
6355 CODE_FOR_builtin_maxuw4,
6356 CODE_FOR_builtin_maxsw4,
6357 CODE_FOR_builtin_perr,
6358 CODE_FOR_builtin_pklb,
6359 CODE_FOR_builtin_pkwb,
6360 CODE_FOR_builtin_unpkbl,
6361 CODE_FOR_builtin_unpkbw,
6363 /* TARGET_CIX */
6364 CODE_FOR_ctzdi2,
6365 CODE_FOR_clzdi2,
6366 CODE_FOR_popcountdi2
6369 struct alpha_builtin_def
6371 const char *name;
6372 enum alpha_builtin code;
6373 unsigned int target_mask;
6374 bool is_const;
6377 static struct alpha_builtin_def const zero_arg_builtins[] = {
6378 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6379 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6382 static struct alpha_builtin_def const one_arg_builtins[] = {
6383 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6384 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6385 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6386 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6387 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6388 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6389 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6390 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6393 static struct alpha_builtin_def const two_arg_builtins[] = {
6394 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6395 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6396 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6397 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6398 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6399 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6400 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6401 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6402 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6403 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6404 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6405 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6406 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6407 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6408 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6409 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6410 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6411 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6412 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6413 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6414 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6415 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6416 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6417 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6418 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6419 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6420 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6421 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6422 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6423 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6424 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6425 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6426 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6427 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6430 static GTY(()) tree alpha_v8qi_u;
6431 static GTY(()) tree alpha_v8qi_s;
6432 static GTY(()) tree alpha_v4hi_u;
6433 static GTY(()) tree alpha_v4hi_s;
6435 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6436 functions pointed to by P, with function type FTYPE. */
6438 static void
6439 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6440 tree ftype)
6442 tree decl;
6443 size_t i;
6445 for (i = 0; i < count; ++i, ++p)
6446 if ((target_flags & p->target_mask) == p->target_mask)
6448 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6449 NULL, NULL);
6450 if (p->is_const)
6451 TREE_READONLY (decl) = 1;
6452 TREE_NOTHROW (decl) = 1;
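/* Example of the gating above (added comment): a builtin tagged with
   MASK_MAX, such as __builtin_alpha_minub8, is only registered when the
   MAX extension is enabled (e.g. via -mmax or a CPU that implies it),
   and the MASK_CIX builtins likewise require -mcix; builtins with a
   zero target_mask are always available.  */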
6457 static void
6458 alpha_init_builtins (void)
6460 tree dimode_integer_type_node;
6461 tree ftype, decl;
6463 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6465 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6466 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6467 ftype);
6469 ftype = build_function_type_list (dimode_integer_type_node,
6470 dimode_integer_type_node, NULL_TREE);
6471 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6472 ftype);
6474 ftype = build_function_type_list (dimode_integer_type_node,
6475 dimode_integer_type_node,
6476 dimode_integer_type_node, NULL_TREE);
6477 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6478 ftype);
6480 ftype = build_function_type (ptr_type_node, void_list_node);
6481 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6482 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6483 NULL, NULL);
6484 TREE_NOTHROW (decl) = 1;
6486 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6487 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6488 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6489 NULL, NULL);
6490 TREE_NOTHROW (decl) = 1;
6492 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6493 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6494 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6495 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6498 /* Expand an expression EXP that calls a built-in function,
6499 with result going to TARGET if that's convenient
6500 (and in mode MODE if that's convenient).
6501 SUBTARGET may be used as the target for computing one of EXP's operands.
6502 IGNORE is nonzero if the value is to be ignored. */
6504 static rtx
6505 alpha_expand_builtin (tree exp, rtx target,
6506 rtx subtarget ATTRIBUTE_UNUSED,
6507 enum machine_mode mode ATTRIBUTE_UNUSED,
6508 int ignore ATTRIBUTE_UNUSED)
6510 #define MAX_ARGS 2
6512 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6513 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6514 tree arg;
6515 call_expr_arg_iterator iter;
6516 enum insn_code icode;
6517 rtx op[MAX_ARGS], pat;
6518 int arity;
6519 bool nonvoid;
6521 if (fcode >= ALPHA_BUILTIN_max)
6522 internal_error ("bad builtin fcode");
6523 icode = code_for_builtin[fcode];
6524 if (icode == 0)
6525 internal_error ("bad builtin fcode");
6527 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6529 arity = 0;
6530 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6532 const struct insn_operand_data *insn_op;
6534 if (arg == error_mark_node)
6535 return NULL_RTX;
6536 if (arity > MAX_ARGS)
6537 return NULL_RTX;
6539 insn_op = &insn_data[icode].operand[arity + nonvoid];
6541 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6543 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6544 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6545 arity++;
6548 if (nonvoid)
6550 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6551 if (!target
6552 || GET_MODE (target) != tmode
6553 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6554 target = gen_reg_rtx (tmode);
6557 switch (arity)
6559 case 0:
6560 pat = GEN_FCN (icode) (target);
6561 break;
6562 case 1:
6563 if (nonvoid)
6564 pat = GEN_FCN (icode) (target, op[0]);
6565 else
6566 pat = GEN_FCN (icode) (op[0]);
6567 break;
6568 case 2:
6569 pat = GEN_FCN (icode) (target, op[0], op[1]);
6570 break;
6571 default:
6572 gcc_unreachable ();
6574 if (!pat)
6575 return NULL_RTX;
6576 emit_insn (pat);
6578 if (nonvoid)
6579 return target;
6580 else
6581 return const0_rtx;
6585 /* Several bits below assume HWI >= 64 bits. This should be enforced
6586 by config.gcc. */
6587 #if HOST_BITS_PER_WIDE_INT < 64
6588 # error "HOST_WIDE_INT too small"
6589 #endif
6591 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6592 with an 8-bit output vector. OPINT contains the integer operands; bit N
6593 of OP_CONST is set if OPINT[N] is valid. */
6595 static tree
6596 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6598 if (op_const == 3)
6600 int i, val;
6601 for (i = 0, val = 0; i < 8; ++i)
6603 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6604 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6605 if (c0 >= c1)
6606 val |= 1 << i;
6608 return build_int_cst (long_integer_type_node, val);
6610 else if (op_const == 2 && opint[1] == 0)
6611 return build_int_cst (long_integer_type_node, 0xff);
6612 return NULL;
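/* Worked example (added comment): CMPBGE sets bit I of the result when
   byte I of the first operand is unsigned-greater-or-equal to byte I of
   the second.  With opint[0] == 0x0102030405060708 and
   opint[1] == 0x0807060504030201, bytes 0-3 satisfy >= and bytes 4-7 do
   not, so the fold returns 0x0f.  The op_const == 2 case uses the fact
   that every byte is >= 0, giving 0xff regardless of the first operand.  */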
6615 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6616 specialized form of an AND operation. Other byte manipulation instructions
6617 are defined in terms of this instruction, so this is also used as a
6618 subroutine for other builtins.
6620 OP contains the tree operands; OPINT contains the extracted integer values.
6621 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6622 OPINT may be considered. */
6624 static tree
6625 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6626 long op_const)
6628 if (op_const & 2)
6630 unsigned HOST_WIDE_INT mask = 0;
6631 int i;
6633 for (i = 0; i < 8; ++i)
6634 if ((opint[1] >> i) & 1)
6635 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6637 if (op_const & 1)
6638 return build_int_cst (long_integer_type_node, opint[0] & mask);
6640 if (op)
6641 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6642 build_int_cst (long_integer_type_node, mask));
6644 else if ((op_const & 1) && opint[0] == 0)
6645 return build_int_cst (long_integer_type_node, 0);
6646 return NULL;
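/* Worked example (added comment): bit I of the second operand selects
   byte I, so a byte mask of 0x0f expands to the bit mask
   0x00000000ffffffff.  Hence __builtin_alpha_zapnot (x, 0x0f) folds to
   x & 0xffffffff when x is constant, or to the equivalent BIT_AND_EXPR
   when only the mask is known.  */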
6649 /* Fold the builtins for the EXT family of instructions. */
6651 static tree
6652 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6653 long op_const, unsigned HOST_WIDE_INT bytemask,
6654 bool is_high)
6656 long zap_const = 2;
6657 tree *zap_op = NULL;
6659 if (op_const & 2)
6661 unsigned HOST_WIDE_INT loc;
6663 loc = opint[1] & 7;
6664 if (BYTES_BIG_ENDIAN)
6665 loc ^= 7;
6666 loc *= 8;
6668 if (loc != 0)
6670 if (op_const & 1)
6672 unsigned HOST_WIDE_INT temp = opint[0];
6673 if (is_high)
6674 temp <<= loc;
6675 else
6676 temp >>= loc;
6677 opint[0] = temp;
6678 zap_const = 3;
6681 else
6682 zap_op = op;
6685 opint[1] = bytemask;
6686 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
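/* Worked example (added comment, little-endian case): EXTWL extracts a
   16-bit field, so with both operands constant
   __builtin_alpha_extwl (0x1122334455667788, 2) becomes
   (0x1122334455667788 >> 16) & 0xffff == 0x5566 -- a right shift by
   loc*8 followed by a ZAPNOT with byte mask 0x03.  */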
6689 /* Fold the builtins for the INS family of instructions. */
6691 static tree
6692 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6693 long op_const, unsigned HOST_WIDE_INT bytemask,
6694 bool is_high)
6696 if ((op_const & 1) && opint[0] == 0)
6697 return build_int_cst (long_integer_type_node, 0);
6699 if (op_const & 2)
6701 unsigned HOST_WIDE_INT temp, loc, byteloc;
6702 tree *zap_op = NULL;
6704 loc = opint[1] & 7;
6705 if (BYTES_BIG_ENDIAN)
6706 loc ^= 7;
6707 bytemask <<= loc;
6709 temp = opint[0];
6710 if (is_high)
6712 byteloc = (64 - (loc * 8)) & 0x3f;
6713 if (byteloc == 0)
6714 zap_op = op;
6715 else
6716 temp >>= byteloc;
6717 bytemask >>= 8;
6719 else
6721 byteloc = loc * 8;
6722 if (byteloc == 0)
6723 zap_op = op;
6724 else
6725 temp <<= byteloc;
6728 opint[0] = temp;
6729 opint[1] = bytemask;
6730 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6733 return NULL;
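/* Worked example (added comment, little-endian case): INSWL shifts the
   low 16 bits of the first operand up to byte position loc, so
   __builtin_alpha_inswl (0x1234, 2) folds via ZAPNOT with byte mask
   0x0c to 0x12340000; an all-zero first operand folds to 0 outright.  */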
6736 static tree
6737 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6738 long op_const, unsigned HOST_WIDE_INT bytemask,
6739 bool is_high)
6741 if (op_const & 2)
6743 unsigned HOST_WIDE_INT loc;
6745 loc = opint[1] & 7;
6746 if (BYTES_BIG_ENDIAN)
6747 loc ^= 7;
6748 bytemask <<= loc;
6750 if (is_high)
6751 bytemask >>= 8;
6753 opint[1] = bytemask ^ 0xff;
6756 return alpha_fold_builtin_zapnot (op, opint, op_const);
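/* Worked example (added comment, little-endian case): MSKWL clears the
   16-bit field at byte position loc, so with opint[1] == 2 the byte
   mask 0x03 << 2 == 0x0c is inverted to 0xf3 and the call reduces to
   zapnot (x, 0xf3), i.e. x & ~0x00000000ffff0000.  */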
6759 static tree
6760 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6762 switch (op_const)
6764 case 3:
6766 unsigned HOST_WIDE_INT l;
6767 HOST_WIDE_INT h;
6769 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6771 #if HOST_BITS_PER_WIDE_INT > 64
6772 # error fixme
6773 #endif
6775 return build_int_cst (long_integer_type_node, h);
6778 case 1:
6779 opint[1] = opint[0];
6780 /* FALLTHRU */
6781 case 2:
6782 /* Note that (X*1) >> 64 == 0. */
6783 if (opint[1] == 0 || opint[1] == 1)
6784 return build_int_cst (long_integer_type_node, 0);
6785 break;
6787 return NULL;
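/* Example (added comment): UMULH is the high 64 bits of the unsigned
   128-bit product, e.g. umulh (1UL << 63, 4) == 2.  The partial folds
   rely on (X * 0) >> 64 == 0 and (X * 1) >> 64 == 0 even when X is
   unknown.  */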
6790 static tree
6791 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6793 tree op0 = fold_convert (vtype, op[0]);
6794 tree op1 = fold_convert (vtype, op[1]);
6795 tree val = fold_build2 (code, vtype, op0, op1);
6796 return fold_convert (long_integer_type_node, val);
6799 static tree
6800 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6802 unsigned HOST_WIDE_INT temp = 0;
6803 int i;
6805 if (op_const != 3)
6806 return NULL;
6808 for (i = 0; i < 8; ++i)
6810 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6811 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6812 if (a >= b)
6813 temp += a - b;
6814 else
6815 temp += b - a;
6818 return build_int_cst (long_integer_type_node, temp);
6821 static tree
6822 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6824 unsigned HOST_WIDE_INT temp;
6826 if (op_const == 0)
6827 return NULL;
6829 temp = opint[0] & 0xff;
6830 temp |= (opint[0] >> 24) & 0xff00;
6832 return build_int_cst (long_integer_type_node, temp);
6835 static tree
6836 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6838 unsigned HOST_WIDE_INT temp;
6840 if (op_const == 0)
6841 return NULL;
6843 temp = opint[0] & 0xff;
6844 temp |= (opint[0] >> 8) & 0xff00;
6845 temp |= (opint[0] >> 16) & 0xff0000;
6846 temp |= (opint[0] >> 24) & 0xff000000;
6848 return build_int_cst (long_integer_type_node, temp);
6851 static tree
6852 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6854 unsigned HOST_WIDE_INT temp;
6856 if (op_const == 0)
6857 return NULL;
6859 temp = opint[0] & 0xff;
6860 temp |= (opint[0] & 0xff00) << 24;
6862 return build_int_cst (long_integer_type_node, temp);
6865 static tree
6866 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6868 unsigned HOST_WIDE_INT temp;
6870 if (op_const == 0)
6871 return NULL;
6873 temp = opint[0] & 0xff;
6874 temp |= (opint[0] & 0x0000ff00) << 8;
6875 temp |= (opint[0] & 0x00ff0000) << 16;
6876 temp |= (opint[0] & 0xff000000) << 24;
6878 return build_int_cst (long_integer_type_node, temp);
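/* Worked example (added comment): UNPKBW spreads the low four bytes
   into the even byte positions, e.g. unpkbw (0x44332211) ==
   0x0044003300220011; PKWB performs the inverse packing, and
   PKLB/UNPKBL do the same for a two-byte group.  */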
6881 static tree
6882 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6884 unsigned HOST_WIDE_INT temp;
6886 if (op_const == 0)
6887 return NULL;
6889 if (opint[0] == 0)
6890 temp = 64;
6891 else
6892 temp = exact_log2 (opint[0] & -opint[0]);
6894 return build_int_cst (long_integer_type_node, temp);
6897 static tree
6898 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6900 unsigned HOST_WIDE_INT temp;
6902 if (op_const == 0)
6903 return NULL;
6905 if (opint[0] == 0)
6906 temp = 64;
6907 else
6908 temp = 64 - floor_log2 (opint[0]) - 1;
6910 return build_int_cst (long_integer_type_node, temp);
6913 static tree
6914 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6916 unsigned HOST_WIDE_INT temp, op;
6918 if (op_const == 0)
6919 return NULL;
6921 op = opint[0];
6922 temp = 0;
6923 while (op)
6924 temp++, op &= op - 1;
6926 return build_int_cst (long_integer_type_node, temp);
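/* Examples (added comment): cttz (0x8) == 3, ctlz (1) == 63 and
   ctpop (0xff) == 8; both count folds above are defined to return 64
   for a zero input.  */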
6929 /* Fold one of our builtin functions. */
6931 static tree
6932 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6934 tree op[MAX_ARGS], t;
6935 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6936 long op_const = 0, arity = 0;
6938 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6940 tree arg = TREE_VALUE (t);
6941 if (arg == error_mark_node)
6942 return NULL;
6943 if (arity >= MAX_ARGS)
6944 return NULL;
6946 op[arity] = arg;
6947 opint[arity] = 0;
6948 if (TREE_CODE (arg) == INTEGER_CST)
6950 op_const |= 1L << arity;
6951 opint[arity] = int_cst_value (arg);
6955 switch (DECL_FUNCTION_CODE (fndecl))
6957 case ALPHA_BUILTIN_CMPBGE:
6958 return alpha_fold_builtin_cmpbge (opint, op_const);
6960 case ALPHA_BUILTIN_EXTBL:
6961 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6962 case ALPHA_BUILTIN_EXTWL:
6963 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6964 case ALPHA_BUILTIN_EXTLL:
6965 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6966 case ALPHA_BUILTIN_EXTQL:
6967 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6968 case ALPHA_BUILTIN_EXTWH:
6969 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6970 case ALPHA_BUILTIN_EXTLH:
6971 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6972 case ALPHA_BUILTIN_EXTQH:
6973 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6975 case ALPHA_BUILTIN_INSBL:
6976 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6977 case ALPHA_BUILTIN_INSWL:
6978 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6979 case ALPHA_BUILTIN_INSLL:
6980 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6981 case ALPHA_BUILTIN_INSQL:
6982 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6983 case ALPHA_BUILTIN_INSWH:
6984 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6985 case ALPHA_BUILTIN_INSLH:
6986 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6987 case ALPHA_BUILTIN_INSQH:
6988 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6990 case ALPHA_BUILTIN_MSKBL:
6991 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6992 case ALPHA_BUILTIN_MSKWL:
6993 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6994 case ALPHA_BUILTIN_MSKLL:
6995 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6996 case ALPHA_BUILTIN_MSKQL:
6997 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6998 case ALPHA_BUILTIN_MSKWH:
6999 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7000 case ALPHA_BUILTIN_MSKLH:
7001 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7002 case ALPHA_BUILTIN_MSKQH:
7003 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7005 case ALPHA_BUILTIN_UMULH:
7006 return alpha_fold_builtin_umulh (opint, op_const);
7008 case ALPHA_BUILTIN_ZAP:
7009 opint[1] ^= 0xff;
7010 /* FALLTHRU */
7011 case ALPHA_BUILTIN_ZAPNOT:
7012 return alpha_fold_builtin_zapnot (op, opint, op_const);
7014 case ALPHA_BUILTIN_MINUB8:
7015 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7016 case ALPHA_BUILTIN_MINSB8:
7017 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7018 case ALPHA_BUILTIN_MINUW4:
7019 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7020 case ALPHA_BUILTIN_MINSW4:
7021 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7022 case ALPHA_BUILTIN_MAXUB8:
7023 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7024 case ALPHA_BUILTIN_MAXSB8:
7025 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7026 case ALPHA_BUILTIN_MAXUW4:
7027 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7028 case ALPHA_BUILTIN_MAXSW4:
7029 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7031 case ALPHA_BUILTIN_PERR:
7032 return alpha_fold_builtin_perr (opint, op_const);
7033 case ALPHA_BUILTIN_PKLB:
7034 return alpha_fold_builtin_pklb (opint, op_const);
7035 case ALPHA_BUILTIN_PKWB:
7036 return alpha_fold_builtin_pkwb (opint, op_const);
7037 case ALPHA_BUILTIN_UNPKBL:
7038 return alpha_fold_builtin_unpkbl (opint, op_const);
7039 case ALPHA_BUILTIN_UNPKBW:
7040 return alpha_fold_builtin_unpkbw (opint, op_const);
7042 case ALPHA_BUILTIN_CTTZ:
7043 return alpha_fold_builtin_cttz (opint, op_const);
7044 case ALPHA_BUILTIN_CTLZ:
7045 return alpha_fold_builtin_ctlz (opint, op_const);
7046 case ALPHA_BUILTIN_CTPOP:
7047 return alpha_fold_builtin_ctpop (opint, op_const);
7049 case ALPHA_BUILTIN_AMASK:
7050 case ALPHA_BUILTIN_IMPLVER:
7051 case ALPHA_BUILTIN_RPCC:
7052 case ALPHA_BUILTIN_THREAD_POINTER:
7053 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7054 /* None of these are foldable at compile-time. */
7055 default:
7056 return NULL;
7060 /* This page contains routines that are used to determine what the function
7061 prologue and epilogue code will do and write them out. */
7063 /* Compute the size of the save area in the stack. */
7065 /* These variables are used for communication between the following functions.
7066 They indicate various things about the current function being compiled
7067 that are used to tell what kind of prologue, epilogue and procedure
7068 descriptor to generate. */
7070 /* Nonzero if we need a stack procedure. */
7071 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7072 static enum alpha_procedure_types alpha_procedure_type;
7074 /* Register number (either FP or SP) that is used to unwind the frame. */
7075 static int vms_unwind_regno;
7077 /* Register number used to save FP. We need not have one for RA since
7078 we don't modify it for register procedures. This is only defined
7079 for register frame procedures. */
7080 static int vms_save_fp_regno;
7082 /* Register number used to reference objects off our PV. */
7083 static int vms_base_regno;
7085 /* Compute register masks for saved registers. */
7087 static void
7088 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7090 unsigned long imask = 0;
7091 unsigned long fmask = 0;
7092 unsigned int i;
7094 /* When outputting a thunk, we don't have valid register life info,
7095 but assemble_start_function wants to output .frame and .mask
7096 directives. */
7097 if (current_function_is_thunk)
7099 *imaskP = 0;
7100 *fmaskP = 0;
7101 return;
7104 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7105 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7107 /* One for every register we have to save. */
7108 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7109 if (! fixed_regs[i] && ! call_used_regs[i]
7110 && df_regs_ever_live_p (i) && i != REG_RA
7111 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7113 if (i < 32)
7114 imask |= (1UL << i);
7115 else
7116 fmask |= (1UL << (i - 32));
7119 /* We need to restore these for the handler. */
7120 if (current_function_calls_eh_return)
7122 for (i = 0; ; ++i)
7124 unsigned regno = EH_RETURN_DATA_REGNO (i);
7125 if (regno == INVALID_REGNUM)
7126 break;
7127 imask |= 1UL << regno;
7131 /* If any register spilled, then spill the return address also. */
7132 /* ??? This is required by the Digital stack unwind specification
7133 and isn't needed if we're doing Dwarf2 unwinding. */
7134 if (imask || fmask || alpha_ra_ever_killed ())
7135 imask |= (1UL << REG_RA);
7137 *imaskP = imask;
7138 *fmaskP = fmask;
7142 alpha_sa_size (void)
7144 unsigned long mask[2];
7145 int sa_size = 0;
7146 int i, j;
7148 alpha_sa_mask (&mask[0], &mask[1]);
7150 if (TARGET_ABI_UNICOSMK)
7152 if (mask[0] || mask[1])
7153 sa_size = 14;
7155 else
7157 for (j = 0; j < 2; ++j)
7158 for (i = 0; i < 32; ++i)
7159 if ((mask[j] >> i) & 1)
7160 sa_size++;
7163 if (TARGET_ABI_UNICOSMK)
7165 /* We might not need to generate a frame if we don't make any calls
7166 (including calls to __T3E_MISMATCH if this is a vararg function),
7167 don't have any local variables which require stack slots, don't
7168 use alloca and have not determined that we need a frame for other
7169 reasons. */
7171 alpha_procedure_type
7172 = (sa_size || get_frame_size() != 0
7173 || current_function_outgoing_args_size
7174 || current_function_stdarg || current_function_calls_alloca
7175 || frame_pointer_needed)
7176 ? PT_STACK : PT_REGISTER;
7178 /* Always reserve space for saving callee-saved registers if we
7179 need a frame as required by the calling convention. */
7180 if (alpha_procedure_type == PT_STACK)
7181 sa_size = 14;
7183 else if (TARGET_ABI_OPEN_VMS)
7185 /* Start by assuming we can use a register procedure if we don't
7186 make any calls (REG_RA not used) or need to save any
7187 registers, and a stack procedure if we do. */
7188 if ((mask[0] >> REG_RA) & 1)
7189 alpha_procedure_type = PT_STACK;
7190 else if (get_frame_size() != 0)
7191 alpha_procedure_type = PT_REGISTER;
7192 else
7193 alpha_procedure_type = PT_NULL;
7195 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7196 made the final decision on stack procedure vs register procedure. */
7197 if (alpha_procedure_type == PT_STACK)
7198 sa_size -= 2;
7200 /* Decide whether to refer to objects off our PV via FP or PV.
7201 If we need FP for something else or if we receive a nonlocal
7202 goto (which expects PV to contain the value), we must use PV.
7203 Otherwise, start by assuming we can use FP. */
7205 vms_base_regno
7206 = (frame_pointer_needed
7207 || current_function_has_nonlocal_label
7208 || alpha_procedure_type == PT_STACK
7209 || current_function_outgoing_args_size)
7210 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7212 /* If we want to copy PV into FP, we need to find some register
7213 in which to save FP. */
7215 vms_save_fp_regno = -1;
7216 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7217 for (i = 0; i < 32; i++)
7218 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7219 vms_save_fp_regno = i;
7221 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7222 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7223 else if (alpha_procedure_type == PT_NULL)
7224 vms_base_regno = REG_PV;
7226 /* Stack unwinding should be done via FP unless we use it for PV. */
7227 vms_unwind_regno = (vms_base_regno == REG_PV
7228 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7230 /* If this is a stack procedure, allow space for saving FP and RA. */
7231 if (alpha_procedure_type == PT_STACK)
7232 sa_size += 2;
7234 else
7236 /* Our size must be even (multiple of 16 bytes). */
7237 if (sa_size & 1)
7238 sa_size++;
7241 return sa_size * 8;
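/* Illustrative example (added comment): an OSF function that clobbers
   $9, $10 and $f2 and makes a call ends up with imask bits 9, 10 and 26
   (RA is added once anything else is spilled) plus fmask bit 2, so
   sa_size is 4 registers and this function returns 32 bytes.  Unicos/Mk
   stack procedures always reserve the fixed 14-slot area instead.  */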
7244 /* Define the offset between two registers, one to be eliminated,
7245 and the other its replacement, at the start of a routine. */
7247 HOST_WIDE_INT
7248 alpha_initial_elimination_offset (unsigned int from,
7249 unsigned int to ATTRIBUTE_UNUSED)
7251 HOST_WIDE_INT ret;
7253 ret = alpha_sa_size ();
7254 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7256 switch (from)
7258 case FRAME_POINTER_REGNUM:
7259 break;
7261 case ARG_POINTER_REGNUM:
7262 ret += (ALPHA_ROUND (get_frame_size ()
7263 + current_function_pretend_args_size)
7264 - current_function_pretend_args_size);
7265 break;
7267 default:
7268 gcc_unreachable ();
7271 return ret;
7275 alpha_pv_save_size (void)
7277 alpha_sa_size ();
7278 return alpha_procedure_type == PT_STACK ? 8 : 0;
7282 alpha_using_fp (void)
7284 alpha_sa_size ();
7285 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7288 #if TARGET_ABI_OPEN_VMS
7290 const struct attribute_spec vms_attribute_table[] =
7292 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7293 { "overlaid", 0, 0, true, false, false, NULL },
7294 { "global", 0, 0, true, false, false, NULL },
7295 { "initialize", 0, 0, true, false, false, NULL },
7296 { NULL, 0, 0, false, false, false, NULL }
7299 #endif
7301 static int
7302 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7304 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7308 alpha_find_lo_sum_using_gp (rtx insn)
7310 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7313 static int
7314 alpha_does_function_need_gp (void)
7316 rtx insn;
7318 /* The GP being variable is an OSF ABI thing. */
7319 if (! TARGET_ABI_OSF)
7320 return 0;
7322 /* We need the gp to load the address of __mcount. */
7323 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7324 return 1;
7326 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7327 if (current_function_is_thunk)
7328 return 1;
7330 /* The nonlocal receiver pattern assumes that the gp is valid for
7331 the nested function. Reasonable because it's almost always set
7332 correctly already. For the cases where that's wrong, make sure
7333 the nested function loads its gp on entry. */
7334 if (current_function_has_nonlocal_goto)
7335 return 1;
7337 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7338 Even if we are a static function, we still need to do this in case
7339 our address is taken and passed to something like qsort. */
7341 push_topmost_sequence ();
7342 insn = get_insns ();
7343 pop_topmost_sequence ();
7345 for (; insn; insn = NEXT_INSN (insn))
7346 if (INSN_P (insn)
7347 && ! JUMP_TABLE_DATA_P (insn)
7348 && GET_CODE (PATTERN (insn)) != USE
7349 && GET_CODE (PATTERN (insn)) != CLOBBER
7350 && get_attr_usegp (insn))
7351 return 1;
7353 return 0;
7357 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7358 sequences. */
7360 static rtx
7361 set_frame_related_p (void)
7363 rtx seq = get_insns ();
7364 rtx insn;
7366 end_sequence ();
7368 if (!seq)
7369 return NULL_RTX;
7371 if (INSN_P (seq))
7373 insn = seq;
7374 while (insn != NULL_RTX)
7376 RTX_FRAME_RELATED_P (insn) = 1;
7377 insn = NEXT_INSN (insn);
7379 seq = emit_insn (seq);
7381 else
7383 seq = emit_insn (seq);
7384 RTX_FRAME_RELATED_P (seq) = 1;
7386 return seq;
7389 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7391 /* Generates a store with the proper unwind info attached. VALUE is
7392 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7393 contains SP+FRAME_BIAS, and that is the unwind info that should be
7394 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7395 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7397 static void
7398 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7399 HOST_WIDE_INT base_ofs, rtx frame_reg)
7401 rtx addr, mem, insn;
7403 addr = plus_constant (base_reg, base_ofs);
7404 mem = gen_rtx_MEM (DImode, addr);
7405 set_mem_alias_set (mem, alpha_sr_alias_set);
7407 insn = emit_move_insn (mem, value);
7408 RTX_FRAME_RELATED_P (insn) = 1;
7410 if (frame_bias || value != frame_reg)
7412 if (frame_bias)
7414 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7415 mem = gen_rtx_MEM (DImode, addr);
7418 REG_NOTES (insn)
7419 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7420 gen_rtx_SET (VOIDmode, mem, frame_reg),
7421 REG_NOTES (insn));
7425 static void
7426 emit_frame_store (unsigned int regno, rtx base_reg,
7427 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7429 rtx reg = gen_rtx_REG (DImode, regno);
7430 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7433 /* Write function prologue. */
7435 /* On vms we have two kinds of functions:
7437 - stack frame (PROC_STACK)
7438 these are 'normal' functions with local variables, which call
7439 other functions
7440 - register frame (PROC_REGISTER)
7441 keeps all data in registers, needs no stack
7443 We must pass this to the assembler so it can generate the
7444 proper pdsc (procedure descriptor).
7445 This is done with the '.pdesc' command.
7447 On not-vms, we don't really differentiate between the two, as we can
7448 simply allocate stack without saving registers. */
7450 void
7451 alpha_expand_prologue (void)
7453 /* Registers to save. */
7454 unsigned long imask = 0;
7455 unsigned long fmask = 0;
7456 /* Stack space needed for pushing registers clobbered by us. */
7457 HOST_WIDE_INT sa_size;
7458 /* Complete stack size needed. */
7459 HOST_WIDE_INT frame_size;
7460 /* Offset from base reg to register save area. */
7461 HOST_WIDE_INT reg_offset;
7462 rtx sa_reg;
7463 int i;
7465 sa_size = alpha_sa_size ();
7467 frame_size = get_frame_size ();
7468 if (TARGET_ABI_OPEN_VMS)
7469 frame_size = ALPHA_ROUND (sa_size
7470 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7471 + frame_size
7472 + current_function_pretend_args_size);
7473 else if (TARGET_ABI_UNICOSMK)
7474 /* We have to allocate space for the DSIB if we generate a frame. */
7475 frame_size = ALPHA_ROUND (sa_size
7476 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7477 + ALPHA_ROUND (frame_size
7478 + current_function_outgoing_args_size);
7479 else
7480 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7481 + sa_size
7482 + ALPHA_ROUND (frame_size
7483 + current_function_pretend_args_size));
7485 if (TARGET_ABI_OPEN_VMS)
7486 reg_offset = 8;
7487 else
7488 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7490 alpha_sa_mask (&imask, &fmask);
7492 /* Emit an insn to reload GP, if needed. */
7493 if (TARGET_ABI_OSF)
7495 alpha_function_needs_gp = alpha_does_function_need_gp ();
7496 if (alpha_function_needs_gp)
7497 emit_insn (gen_prologue_ldgp ());
7500 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7501 the call to mcount ourselves, rather than having the linker do it
7502 magically in response to -pg. Since _mcount has special linkage,
7503 don't represent the call as a call. */
7504 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7505 emit_insn (gen_prologue_mcount ());
7507 if (TARGET_ABI_UNICOSMK)
7508 unicosmk_gen_dsib (&imask);
7510 /* Adjust the stack by the frame size. If the frame size is > 4096
7511 bytes, we need to be sure we probe somewhere in the first and last
7512 4096 bytes (we can probably get away without the latter test) and
7513 every 8192 bytes in between. If the frame size is > 32768, we
7514 do this in a loop. Otherwise, we generate the explicit probe
7515 instructions.
7517 Note that we are only allowed to adjust sp once in the prologue. */
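/* Worked example (added comment): with sa_size == 0 and
   frame_size == 20000 the small-frame path below emits probes at
   sp-4096 and sp-12288, then one more at sp-20000 because
   20000 > 16384, and finally the single sp adjustment of -20000.
   Frames above 32768 bytes use the probe loop instead.  */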
7519 if (frame_size <= 32768)
7521 if (frame_size > 4096)
7523 int probed;
7525 for (probed = 4096; probed < frame_size; probed += 8192)
7526 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7527 ? -probed + 64
7528 : -probed)));
7530 /* We only have to do this probe if we aren't saving registers. */
7531 if (sa_size == 0 && frame_size > probed - 4096)
7532 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7535 if (frame_size != 0)
7536 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7537 GEN_INT (TARGET_ABI_UNICOSMK
7538 ? -frame_size + 64
7539 : -frame_size))));
7541 else
7543 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7544 number of 8192 byte blocks to probe. We then probe each block
7545 in the loop and then set SP to the proper location. If the
7546 amount remaining is > 4096, we have to do one more probe if we
7547 are not saving any registers. */
7549 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7550 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7551 rtx ptr = gen_rtx_REG (DImode, 22);
7552 rtx count = gen_rtx_REG (DImode, 23);
7553 rtx seq;
7555 emit_move_insn (count, GEN_INT (blocks));
7556 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7557 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7559 /* Because of the difficulty in emitting a new basic block this
7560 late in the compilation, generate the loop as a single insn. */
7561 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7563 if (leftover > 4096 && sa_size == 0)
7565 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7566 MEM_VOLATILE_P (last) = 1;
7567 emit_move_insn (last, const0_rtx);
7570 if (TARGET_ABI_WINDOWS_NT)
7572 /* For NT stack unwind (done by 'reverse execution'), it's
7573 not OK to take the result of a loop, even though the value
7574 is already in ptr, so we reload it via a single operation
7575 and subtract it from sp.
7577 Yes, that's correct -- we have to reload the whole constant
7578 into a temporary via ldah+lda then subtract from sp. */
7580 HOST_WIDE_INT lo, hi;
7581 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7582 hi = frame_size - lo;
7584 emit_move_insn (ptr, GEN_INT (hi));
7585 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7586 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7587 ptr));
7589 else
7591 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7592 GEN_INT (-leftover)));
7595 /* This alternative is special, because the DWARF code cannot
7596 possibly intuit through the loop above. So we invent this
7597 note for it to look at instead. */
7598 RTX_FRAME_RELATED_P (seq) = 1;
7599 REG_NOTES (seq)
7600 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7601 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7602 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7603 GEN_INT (TARGET_ABI_UNICOSMK
7604 ? -frame_size + 64
7605 : -frame_size))),
7606 REG_NOTES (seq));
7609 if (!TARGET_ABI_UNICOSMK)
7611 HOST_WIDE_INT sa_bias = 0;
7613 /* Cope with very large offsets to the register save area. */
7614 sa_reg = stack_pointer_rtx;
7615 if (reg_offset + sa_size > 0x8000)
7617 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7618 rtx sa_bias_rtx;
7620 if (low + sa_size <= 0x8000)
7621 sa_bias = reg_offset - low, reg_offset = low;
7622 else
7623 sa_bias = reg_offset, reg_offset = 0;
7625 sa_reg = gen_rtx_REG (DImode, 24);
7626 sa_bias_rtx = GEN_INT (sa_bias);
7628 if (add_operand (sa_bias_rtx, DImode))
7629 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7630 else
7632 emit_move_insn (sa_reg, sa_bias_rtx);
7633 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
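/* Illustrative example (added comment): if reg_offset were 0x9000 with
   a small save area, low would be the sign-extended low 16 bits,
   -0x7000, so sa_bias becomes 0x10000 and reg_offset -0x7000; $24 is
   set to sp + 0x10000 and every save then uses a displacement that
   fits in the signed 16-bit offset field of a store.  */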
7637 /* Save regs in stack order. Beginning with VMS PV. */
7638 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7639 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7641 /* Save register RA next. */
7642 if (imask & (1UL << REG_RA))
7644 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7645 imask &= ~(1UL << REG_RA);
7646 reg_offset += 8;
7649 /* Now save any other registers required to be saved. */
7650 for (i = 0; i < 31; i++)
7651 if (imask & (1UL << i))
7653 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7654 reg_offset += 8;
7657 for (i = 0; i < 31; i++)
7658 if (fmask & (1UL << i))
7660 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7661 reg_offset += 8;
7664 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7666 /* The standard frame on the T3E includes space for saving registers.
7667 We just have to use it. We don't have to save the return address and
7668 the old frame pointer here - they are saved in the DSIB. */
7670 reg_offset = -56;
7671 for (i = 9; i < 15; i++)
7672 if (imask & (1UL << i))
7674 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7675 reg_offset -= 8;
7677 for (i = 2; i < 10; i++)
7678 if (fmask & (1UL << i))
7680 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7681 reg_offset -= 8;
7685 if (TARGET_ABI_OPEN_VMS)
7687 if (alpha_procedure_type == PT_REGISTER)
7688 /* Register frame procedures save the fp.
7689 ?? Ought to have a dwarf2 save for this. */
7690 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7691 hard_frame_pointer_rtx);
7693 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7694 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7695 gen_rtx_REG (DImode, REG_PV)));
7697 if (alpha_procedure_type != PT_NULL
7698 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7699 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7701 /* If we have to allocate space for outgoing args, do it now. */
7702 if (current_function_outgoing_args_size != 0)
7704 rtx seq
7705 = emit_move_insn (stack_pointer_rtx,
7706 plus_constant
7707 (hard_frame_pointer_rtx,
7708 - (ALPHA_ROUND
7709 (current_function_outgoing_args_size))));
7711 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7712 if ! frame_pointer_needed. Setting the bit will change the CFA
7713 computation rule to use sp again, which would be wrong if we had
7714 frame_pointer_needed, as this means sp might move unpredictably
7715 later on.
7717 Also, note that
7718 frame_pointer_needed
7719 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7721 current_function_outgoing_args_size != 0
7722 => alpha_procedure_type != PT_NULL,
7724 so when we are not setting the bit here, we are guaranteed to
7725 have emitted an FRP frame pointer update just before. */
7726 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7729 else if (!TARGET_ABI_UNICOSMK)
7731 /* If we need a frame pointer, set it from the stack pointer. */
7732 if (frame_pointer_needed)
7734 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7735 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7736 else
7737 /* This must always be the last instruction in the
7738 prologue, thus we emit a special move + clobber. */
7739 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7740 stack_pointer_rtx, sa_reg)));
7744 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7745 the prologue, for exception handling reasons, we cannot do this for
7746 any insn that might fault. We could prevent this for mems with a
7747 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7748 have to prevent all such scheduling with a blockage.
7750 Linux, on the other hand, never bothered to implement OSF/1's
7751 exception handling, and so doesn't care about such things. Anyone
7752 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7754 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7755 emit_insn (gen_blockage ());
7758 /* Count the number of .file directives, so that .loc is up to date. */
7759 int num_source_filenames = 0;
7761 /* Output the textual info surrounding the prologue. */
7763 void
7764 alpha_start_function (FILE *file, const char *fnname,
7765 tree decl ATTRIBUTE_UNUSED)
7767 unsigned long imask = 0;
7768 unsigned long fmask = 0;
7769 /* Stack space needed for pushing registers clobbered by us. */
7770 HOST_WIDE_INT sa_size;
7771 /* Complete stack size needed. */
7772 unsigned HOST_WIDE_INT frame_size;
7773 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7774 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7775 ? 524288
7776 : 1UL << 31;
7777 /* Offset from base reg to register save area. */
7778 HOST_WIDE_INT reg_offset;
7779 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7780 int i;
7782 /* Don't emit an extern directive for functions defined in the same file. */
7783 if (TARGET_ABI_UNICOSMK)
7785 tree name_tree;
7786 name_tree = get_identifier (fnname);
7787 TREE_ASM_WRITTEN (name_tree) = 1;
7790 alpha_fnname = fnname;
7791 sa_size = alpha_sa_size ();
7793 frame_size = get_frame_size ();
7794 if (TARGET_ABI_OPEN_VMS)
7795 frame_size = ALPHA_ROUND (sa_size
7796 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7797 + frame_size
7798 + current_function_pretend_args_size);
7799 else if (TARGET_ABI_UNICOSMK)
7800 frame_size = ALPHA_ROUND (sa_size
7801 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7802 + ALPHA_ROUND (frame_size
7803 + current_function_outgoing_args_size);
7804 else
7805 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7806 + sa_size
7807 + ALPHA_ROUND (frame_size
7808 + current_function_pretend_args_size));
7810 if (TARGET_ABI_OPEN_VMS)
7811 reg_offset = 8;
7812 else
7813 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7815 alpha_sa_mask (&imask, &fmask);
7817 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7818 We have to do that before the .ent directive as we cannot switch
7819 files within procedures with native ecoff because line numbers are
7820 linked to procedure descriptors.
7821 Outputting the lineno helps debugging of one line functions as they
7822 would otherwise get no line number at all. Please note that we would
7823 like to put out last_linenum from final.c, but it is not accessible. */
7825 if (write_symbols == SDB_DEBUG)
7827 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7828 ASM_OUTPUT_SOURCE_FILENAME (file,
7829 DECL_SOURCE_FILE (current_function_decl));
7830 #endif
7831 #ifdef SDB_OUTPUT_SOURCE_LINE
7832 if (debug_info_level != DINFO_LEVEL_TERSE)
7833 SDB_OUTPUT_SOURCE_LINE (file,
7834 DECL_SOURCE_LINE (current_function_decl));
7835 #endif
7838 /* Issue function start and label. */
7839 if (TARGET_ABI_OPEN_VMS
7840 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7842 fputs ("\t.ent ", file);
7843 assemble_name (file, fnname);
7844 putc ('\n', file);
7846 /* If the function needs GP, we'll write the "..ng" label there.
7847 Otherwise, do it here. */
7848 if (TARGET_ABI_OSF
7849 && ! alpha_function_needs_gp
7850 && ! current_function_is_thunk)
7852 putc ('$', file);
7853 assemble_name (file, fnname);
7854 fputs ("..ng:\n", file);
7858 strcpy (entry_label, fnname);
7859 if (TARGET_ABI_OPEN_VMS)
7860 strcat (entry_label, "..en");
7862 /* For public functions, the label must be globalized by appending an
7863 additional colon. */
7864 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7865 strcat (entry_label, ":");
7867 ASM_OUTPUT_LABEL (file, entry_label);
7868 inside_function = TRUE;
7870 if (TARGET_ABI_OPEN_VMS)
7871 fprintf (file, "\t.base $%d\n", vms_base_regno);
7873 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7874 && !flag_inhibit_size_directive)
7876 /* Set flags in procedure descriptor to request IEEE-conformant
7877 math-library routines. The value we set it to is PDSC_EXC_IEEE
7878 (/usr/include/pdsc.h). */
7879 fputs ("\t.eflag 48\n", file);
7882 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7883 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7884 alpha_arg_offset = -frame_size + 48;
7886 /* Describe our frame. If the frame size is larger than an integer,
7887 print it as zero to avoid an assembler error. We won't be
7888 properly describing such a frame, but that's the best we can do. */
7889 if (TARGET_ABI_UNICOSMK)
7891 else if (TARGET_ABI_OPEN_VMS)
7892 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7893 HOST_WIDE_INT_PRINT_DEC "\n",
7894 vms_unwind_regno,
7895 frame_size >= (1UL << 31) ? 0 : frame_size,
7896 reg_offset);
7897 else if (!flag_inhibit_size_directive)
7898 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7899 (frame_pointer_needed
7900 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7901 frame_size >= max_frame_size ? 0 : frame_size,
7902 current_function_pretend_args_size);
7904 /* Describe which registers were spilled. */
7905 if (TARGET_ABI_UNICOSMK)
7907 else if (TARGET_ABI_OPEN_VMS)
7909 if (imask)
7910 /* ??? Does VMS care if mask contains ra? The old code didn't
7911 set it, so I don't here. */
7912 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7913 if (fmask)
7914 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7915 if (alpha_procedure_type == PT_REGISTER)
7916 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7918 else if (!flag_inhibit_size_directive)
7920 if (imask)
7922 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7923 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7925 for (i = 0; i < 32; ++i)
7926 if (imask & (1UL << i))
7927 reg_offset += 8;
7930 if (fmask)
7931 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7932 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7935 #if TARGET_ABI_OPEN_VMS
7936 /* Ifdef'ed because link_section is only available then. */
7937 switch_to_section (readonly_data_section);
7938 fprintf (file, "\t.align 3\n");
7939 assemble_name (file, fnname); fputs ("..na:\n", file);
7940 fputs ("\t.ascii \"", file);
7941 assemble_name (file, fnname);
7942 fputs ("\\0\"\n", file);
7943 alpha_need_linkage (fnname, 1);
7944 switch_to_section (text_section);
7945 #endif
7948 /* Emit the .prologue note at the scheduled end of the prologue. */
7950 static void
7951 alpha_output_function_end_prologue (FILE *file)
7953 if (TARGET_ABI_UNICOSMK)
7955 else if (TARGET_ABI_OPEN_VMS)
7956 fputs ("\t.prologue\n", file);
7957 else if (TARGET_ABI_WINDOWS_NT)
7958 fputs ("\t.prologue 0\n", file);
7959 else if (!flag_inhibit_size_directive)
7960 fprintf (file, "\t.prologue %d\n",
7961 alpha_function_needs_gp || current_function_is_thunk);
7964 /* Write function epilogue. */
7966 /* ??? At some point we will want to support full unwind, and so will
7967 need to mark the epilogue as well. At the moment, we just confuse
7968 dwarf2out. */
7969 #undef FRP
7970 #define FRP(exp) exp
7972 void
7973 alpha_expand_epilogue (void)
7975 /* Registers to save. */
7976 unsigned long imask = 0;
7977 unsigned long fmask = 0;
7978 /* Stack space needed for pushing registers clobbered by us. */
7979 HOST_WIDE_INT sa_size;
7980 /* Complete stack size needed. */
7981 HOST_WIDE_INT frame_size;
7982 /* Offset from base reg to register save area. */
7983 HOST_WIDE_INT reg_offset;
7984 int fp_is_frame_pointer, fp_offset;
7985 rtx sa_reg, sa_reg_exp = NULL;
7986 rtx sp_adj1, sp_adj2, mem;
7987 rtx eh_ofs;
7988 int i;
7990 sa_size = alpha_sa_size ();
7992 frame_size = get_frame_size ();
7993 if (TARGET_ABI_OPEN_VMS)
7994 frame_size = ALPHA_ROUND (sa_size
7995 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7996 + frame_size
7997 + current_function_pretend_args_size);
7998 else if (TARGET_ABI_UNICOSMK)
7999 frame_size = ALPHA_ROUND (sa_size
8000 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8001 + ALPHA_ROUND (frame_size
8002 + current_function_outgoing_args_size);
8003 else
8004 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8005 + sa_size
8006 + ALPHA_ROUND (frame_size
8007 + current_function_pretend_args_size));
8009 if (TARGET_ABI_OPEN_VMS)
8011 if (alpha_procedure_type == PT_STACK)
8012 reg_offset = 8;
8013 else
8014 reg_offset = 0;
8016 else
8017 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8019 alpha_sa_mask (&imask, &fmask);
8021 fp_is_frame_pointer
8022 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8023 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8024 fp_offset = 0;
8025 sa_reg = stack_pointer_rtx;
8027 if (current_function_calls_eh_return)
8028 eh_ofs = EH_RETURN_STACKADJ_RTX;
8029 else
8030 eh_ofs = NULL_RTX;
8032 if (!TARGET_ABI_UNICOSMK && sa_size)
8034 /* If we have a frame pointer, restore SP from it. */
8035 if ((TARGET_ABI_OPEN_VMS
8036 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8037 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8038 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8040 /* Cope with very large offsets to the register save area. */
8041 if (reg_offset + sa_size > 0x8000)
8043 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8044 HOST_WIDE_INT bias;
8046 if (low + sa_size <= 0x8000)
8047 bias = reg_offset - low, reg_offset = low;
8048 else
8049 bias = reg_offset, reg_offset = 0;
8051 sa_reg = gen_rtx_REG (DImode, 22);
8052 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8054 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8057 /* Restore registers in order, excepting a true frame pointer. */
8059 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8060 if (! eh_ofs)
8061 set_mem_alias_set (mem, alpha_sr_alias_set);
8062 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8064 reg_offset += 8;
8065 imask &= ~(1UL << REG_RA);
8067 for (i = 0; i < 31; ++i)
8068 if (imask & (1UL << i))
8070 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8071 fp_offset = reg_offset;
8072 else
8074 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8075 set_mem_alias_set (mem, alpha_sr_alias_set);
8076 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8078 reg_offset += 8;
8081 for (i = 0; i < 31; ++i)
8082 if (fmask & (1UL << i))
8084 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8085 set_mem_alias_set (mem, alpha_sr_alias_set);
8086 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8087 reg_offset += 8;
8090 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8092 /* Restore callee-saved general-purpose registers. */
8094 reg_offset = -56;
8096 for (i = 9; i < 15; i++)
8097 if (imask & (1UL << i))
8099 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8100 reg_offset));
8101 set_mem_alias_set (mem, alpha_sr_alias_set);
8102 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8103 reg_offset -= 8;
8106 for (i = 2; i < 10; i++)
8107 if (fmask & (1UL << i))
8109 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8110 reg_offset));
8111 set_mem_alias_set (mem, alpha_sr_alias_set);
8112 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8113 reg_offset -= 8;
8116 /* Restore the return address from the DSIB. */
8118 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8119 set_mem_alias_set (mem, alpha_sr_alias_set);
8120 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8123 if (frame_size || eh_ofs)
8125 sp_adj1 = stack_pointer_rtx;
8127 if (eh_ofs)
8129 sp_adj1 = gen_rtx_REG (DImode, 23);
8130 emit_move_insn (sp_adj1,
8131 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8134 /* If the stack size is large, begin computation into a temporary
8135 register so as not to interfere with a potential fp restore,
8136 which must be consecutive with an SP restore. */
8137 if (frame_size < 32768
8138 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8139 sp_adj2 = GEN_INT (frame_size);
8140 else if (TARGET_ABI_UNICOSMK)
8142 sp_adj1 = gen_rtx_REG (DImode, 23);
8143 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8144 sp_adj2 = const0_rtx;
8146 else if (frame_size < 0x40007fffL)
8148 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8150 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8151 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8152 sp_adj1 = sa_reg;
8153 else
8155 sp_adj1 = gen_rtx_REG (DImode, 23);
8156 FRP (emit_move_insn (sp_adj1, sp_adj2));
8158 sp_adj2 = GEN_INT (low);
8160 else
8162 rtx tmp = gen_rtx_REG (DImode, 23);
8163 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8164 3, false));
8165 if (!sp_adj2)
8167 /* We can't drop new things to memory this late, afaik,
8168 so build it up by pieces. */
8169 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8170 -(frame_size < 0)));
8171 gcc_assert (sp_adj2);
8175 /* From now on, things must be in order. So emit blockages. */
8177 /* Restore the frame pointer. */
8178 if (TARGET_ABI_UNICOSMK)
8180 emit_insn (gen_blockage ());
8181 mem = gen_rtx_MEM (DImode,
8182 plus_constant (hard_frame_pointer_rtx, -16));
8183 set_mem_alias_set (mem, alpha_sr_alias_set);
8184 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8186 else if (fp_is_frame_pointer)
8188 emit_insn (gen_blockage ());
8189 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8190 set_mem_alias_set (mem, alpha_sr_alias_set);
8191 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8193 else if (TARGET_ABI_OPEN_VMS)
8195 emit_insn (gen_blockage ());
8196 FRP (emit_move_insn (hard_frame_pointer_rtx,
8197 gen_rtx_REG (DImode, vms_save_fp_regno)));
8200 /* Restore the stack pointer. */
8201 emit_insn (gen_blockage ());
8202 if (sp_adj2 == const0_rtx)
8203 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8204 else
8205 FRP (emit_move_insn (stack_pointer_rtx,
8206 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8208 else
8210 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8212 emit_insn (gen_blockage ());
8213 FRP (emit_move_insn (hard_frame_pointer_rtx,
8214 gen_rtx_REG (DImode, vms_save_fp_regno)));
8216 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8218 /* Decrement the frame pointer if the function does not have a
8219 frame. */
8221 emit_insn (gen_blockage ());
8222 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8223 hard_frame_pointer_rtx, constm1_rtx)));
8228 /* Output the rest of the textual info surrounding the epilogue. */
8230 void
8231 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8233 rtx insn;
8235 /* We output a nop after noreturn calls at the very end of the function to
8236 ensure that the return address always remains in the caller's code range,
8237 as not doing so might confuse unwinding engines. */
8238 insn = get_last_insn ();
8239 if (!INSN_P (insn))
8240 insn = prev_active_insn (insn);
8241 if (GET_CODE (insn) == CALL_INSN)
8242 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
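/* A sketch of the effect (added for illustration, not verbatim GCC output):

       bsr  $26,abort          # noreturn call, last insn of the function
       nop                     # keeps the return address inside this function

   Without the trailing nop, $26 would point one past the function's last
   byte, which some unwinders attribute to the wrong function.  */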
8244 #if TARGET_ABI_OPEN_VMS
8245 alpha_write_linkage (file, fnname, decl);
8246 #endif
8248 /* End the function. */
8249 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8251 fputs ("\t.end ", file);
8252 assemble_name (file, fnname);
8253 putc ('\n', file);
8255 inside_function = FALSE;
8257 /* Output jump tables and the static subroutine information block. */
8258 if (TARGET_ABI_UNICOSMK)
8260 unicosmk_output_ssib (file, fnname);
8261 unicosmk_output_deferred_case_vectors (file);
8265 #if TARGET_ABI_OSF
8266 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8268 In order to avoid the hordes of differences between generated code
8269 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8270 lots of code loading up large constants, generate rtl and emit it
8271 instead of going straight to text.
8273 Not sure why this idea hasn't been explored before... */
8275 static void
8276 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8277 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8278 tree function)
8280 HOST_WIDE_INT hi, lo;
8281 rtx this, insn, funexp;
8283 /* We always require a valid GP. */
8284 emit_insn (gen_prologue_ldgp ());
8285 emit_note (NOTE_INSN_PROLOGUE_END);
8287 /* Find the "this" pointer. If the function returns a structure,
8288 the structure return pointer is in $16. */
8289 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8290 this = gen_rtx_REG (Pmode, 17);
8291 else
8292 this = gen_rtx_REG (Pmode, 16);
8294 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8295 entire constant for the add. */
8296 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8297 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
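/* Worked example (added for illustration): for delta == 0x1000, lo == 0x1000
   and hi == 0, so a single lda suffices.  For a 33-bit delta such as
   0x123456789, lo == 0x6789 but hi == 0x23450000, so hi + lo != delta and
   the full-constant path below is taken instead.  */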
8298 if (hi + lo == delta)
8300 if (hi)
8301 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8302 if (lo)
8303 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8305 else
8307 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8308 delta, -(delta < 0));
8309 emit_insn (gen_adddi3 (this, this, tmp));
8312 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8313 if (vcall_offset)
8315 rtx tmp, tmp2;
8317 tmp = gen_rtx_REG (Pmode, 0);
8318 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8320 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8321 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8322 if (hi + lo == vcall_offset)
8324 if (hi)
8325 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8327 else
8329 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8330 vcall_offset, -(vcall_offset < 0));
8331 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8332 lo = 0;
8334 if (lo)
8335 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8336 else
8337 tmp2 = tmp;
8338 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8340 emit_insn (gen_adddi3 (this, this, tmp));
8343 /* Generate a tail call to the target function. */
8344 if (! TREE_USED (function))
8346 assemble_external (function);
8347 TREE_USED (function) = 1;
8349 funexp = XEXP (DECL_RTL (function), 0);
8350 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8351 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8352 SIBLING_CALL_P (insn) = 1;
8354 /* Run just enough of rest_of_compilation to get the insns emitted.
8355 There's not really enough bulk here to make other passes such as
8356 instruction scheduling worthwhile. Note that use_thunk calls

8357 assemble_start_function and assemble_end_function. */
8358 insn = get_insns ();
8359 insn_locators_alloc ();
8360 shorten_branches (insn);
8361 final_start_function (insn, file, 1);
8362 final (insn, file, 1);
8363 final_end_function ();
8365 #endif /* TARGET_ABI_OSF */
8367 /* Debugging support. */
8369 #include "gstab.h"
8371 /* Count the number of sdb related labels that are generated (to find block
8372 start and end boundaries). */
8374 int sdb_label_count = 0;
8376 /* Name of the file containing the current function. */
8378 static const char *current_function_file = "";
8380 /* Offsets to alpha virtual arg/local debugging pointers. */
8382 long alpha_arg_offset;
8383 long alpha_auto_offset;
8385 /* Emit a new filename to a stream. */
8387 void
8388 alpha_output_filename (FILE *stream, const char *name)
8390 static int first_time = TRUE;
8392 if (first_time)
8394 first_time = FALSE;
8395 ++num_source_filenames;
8396 current_function_file = name;
8397 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8398 output_quoted_string (stream, name);
8399 fprintf (stream, "\n");
8400 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8401 fprintf (stream, "\t#@stabs\n");
8404 else if (write_symbols == DBX_DEBUG)
8405 /* dbxout.c will emit an appropriate .stabs directive. */
8406 return;
8408 else if (name != current_function_file
8409 && strcmp (name, current_function_file) != 0)
8411 if (inside_function && ! TARGET_GAS)
8412 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8413 else
8415 ++num_source_filenames;
8416 current_function_file = name;
8417 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8420 output_quoted_string (stream, name);
8421 fprintf (stream, "\n");
8425 /* Structure to show the current status of registers and memory. */
8427 struct shadow_summary
8429 struct {
8430 unsigned int i : 31; /* Mask of int regs */
8431 unsigned int fp : 31; /* Mask of fp regs */
8432 unsigned int mem : 1; /* mem == imem | fpmem */
8433 } used, defd;
8436 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8437 to the summary structure. SET is nonzero if the insn is setting the
8438 object, otherwise zero. */
8440 static void
8441 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8443 const char *format_ptr;
8444 int i, j;
8446 if (x == 0)
8447 return;
8449 switch (GET_CODE (x))
8451 /* ??? Note that this case would be incorrect if the Alpha had a
8452 ZERO_EXTRACT in SET_DEST. */
8453 case SET:
8454 summarize_insn (SET_SRC (x), sum, 0);
8455 summarize_insn (SET_DEST (x), sum, 1);
8456 break;
8458 case CLOBBER:
8459 summarize_insn (XEXP (x, 0), sum, 1);
8460 break;
8462 case USE:
8463 summarize_insn (XEXP (x, 0), sum, 0);
8464 break;
8466 case ASM_OPERANDS:
8467 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8468 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8469 break;
8471 case PARALLEL:
8472 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8473 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8474 break;
8476 case SUBREG:
8477 summarize_insn (SUBREG_REG (x), sum, 0);
8478 break;
8480 case REG:
8482 int regno = REGNO (x);
8483 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8485 if (regno == 31 || regno == 63)
8486 break;
8488 if (set)
8490 if (regno < 32)
8491 sum->defd.i |= mask;
8492 else
8493 sum->defd.fp |= mask;
8495 else
8497 if (regno < 32)
8498 sum->used.i |= mask;
8499 else
8500 sum->used.fp |= mask;
8503 break;
8505 case MEM:
8506 if (set)
8507 sum->defd.mem = 1;
8508 else
8509 sum->used.mem = 1;
8511 /* Find the regs used in memory address computation: */
8512 summarize_insn (XEXP (x, 0), sum, 0);
8513 break;
8515 case CONST_INT: case CONST_DOUBLE:
8516 case SYMBOL_REF: case LABEL_REF: case CONST:
8517 case SCRATCH: case ASM_INPUT:
8518 break;
8520 /* Handle common unary and binary ops for efficiency. */
8521 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8522 case MOD: case UDIV: case UMOD: case AND: case IOR:
8523 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8524 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8525 case NE: case EQ: case GE: case GT: case LE:
8526 case LT: case GEU: case GTU: case LEU: case LTU:
8527 summarize_insn (XEXP (x, 0), sum, 0);
8528 summarize_insn (XEXP (x, 1), sum, 0);
8529 break;
8531 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8532 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8533 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8534 case SQRT: case FFS:
8535 summarize_insn (XEXP (x, 0), sum, 0);
8536 break;
8538 default:
8539 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8540 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8541 switch (format_ptr[i])
8543 case 'e':
8544 summarize_insn (XEXP (x, i), sum, 0);
8545 break;
8547 case 'E':
8548 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8549 summarize_insn (XVECEXP (x, i, j), sum, 0);
8550 break;
8552 case 'i':
8553 break;
8555 default:
8556 gcc_unreachable ();
8561 /* Ensure a sufficient number of `trapb' insns are in the code when
8562 the user requests code with a trap precision of functions or
8563 instructions.
8565 In naive mode, when the user requests a trap-precision of
8566 "instruction", a trapb is needed after every instruction that may
8567 generate a trap. This ensures that the code is resumption safe but
8568 it is also slow.
8570 When optimizations are turned on, we delay issuing a trapb as long
8571 as possible. In this context, a trap shadow is the sequence of
8572 instructions that starts with a (potentially) trap generating
8573 instruction and extends to the next trapb or call_pal instruction
8574 (but GCC never generates call_pal by itself). We can delay (and
8575 therefore sometimes omit) a trapb subject to the following
8576 conditions:
8578 (a) On entry to the trap shadow, if any Alpha register or memory
8579 location contains a value that is used as an operand value by some
8580 instruction in the trap shadow (live on entry), then no instruction
8581 in the trap shadow may modify the register or memory location.
8583 (b) Within the trap shadow, the computation of the base register
8584 for a memory load or store instruction may not involve using the
8585 result of an instruction that might generate an UNPREDICTABLE
8586 result.
8588 (c) Within the trap shadow, no register may be used more than once
8589 as a destination register. (This is to make life easier for the
8590 trap-handler.)
8592 (d) The trap shadow may not include any branch instructions. */
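/* An illustrative (made-up) shadow, not from the sources: with
   -mtrap-precision=i and optimization,

       addt/su $f1,$f2,$f3    # may trap, opens the shadow
       mult/su $f4,$f5,$f3    # writes $f3 a second time

   the second write violates condition (c), so alpha_handle_trap_shadows
   inserts a trapb between the two instructions; had the multiply written
   $f6 instead, the trapb could have been delayed further.  */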
8594 static void
8595 alpha_handle_trap_shadows (void)
8597 struct shadow_summary shadow;
8598 int trap_pending, exception_nesting;
8599 rtx i, n;
8601 trap_pending = 0;
8602 exception_nesting = 0;
8603 shadow.used.i = 0;
8604 shadow.used.fp = 0;
8605 shadow.used.mem = 0;
8606 shadow.defd = shadow.used;
8608 for (i = get_insns (); i ; i = NEXT_INSN (i))
8610 if (GET_CODE (i) == NOTE)
8612 switch (NOTE_KIND (i))
8614 case NOTE_INSN_EH_REGION_BEG:
8615 exception_nesting++;
8616 if (trap_pending)
8617 goto close_shadow;
8618 break;
8620 case NOTE_INSN_EH_REGION_END:
8621 exception_nesting--;
8622 if (trap_pending)
8623 goto close_shadow;
8624 break;
8626 case NOTE_INSN_EPILOGUE_BEG:
8627 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8628 goto close_shadow;
8629 break;
8632 else if (trap_pending)
8634 if (alpha_tp == ALPHA_TP_FUNC)
8636 if (GET_CODE (i) == JUMP_INSN
8637 && GET_CODE (PATTERN (i)) == RETURN)
8638 goto close_shadow;
8640 else if (alpha_tp == ALPHA_TP_INSN)
8642 if (optimize > 0)
8644 struct shadow_summary sum;
8646 sum.used.i = 0;
8647 sum.used.fp = 0;
8648 sum.used.mem = 0;
8649 sum.defd = sum.used;
8651 switch (GET_CODE (i))
8653 case INSN:
8654 /* Annoyingly, get_attr_trap will die on these. */
8655 if (GET_CODE (PATTERN (i)) == USE
8656 || GET_CODE (PATTERN (i)) == CLOBBER)
8657 break;
8659 summarize_insn (PATTERN (i), &sum, 0);
8661 if ((sum.defd.i & shadow.defd.i)
8662 || (sum.defd.fp & shadow.defd.fp))
8664 /* (c) would be violated */
8665 goto close_shadow;
8668 /* Combine shadow with summary of current insn: */
8669 shadow.used.i |= sum.used.i;
8670 shadow.used.fp |= sum.used.fp;
8671 shadow.used.mem |= sum.used.mem;
8672 shadow.defd.i |= sum.defd.i;
8673 shadow.defd.fp |= sum.defd.fp;
8674 shadow.defd.mem |= sum.defd.mem;
8676 if ((sum.defd.i & shadow.used.i)
8677 || (sum.defd.fp & shadow.used.fp)
8678 || (sum.defd.mem & shadow.used.mem))
8680 /* (a) would be violated (also takes care of (b)) */
8681 gcc_assert (get_attr_trap (i) != TRAP_YES
8682 || (!(sum.defd.i & sum.used.i)
8683 && !(sum.defd.fp & sum.used.fp)));
8685 goto close_shadow;
8687 break;
8689 case JUMP_INSN:
8690 case CALL_INSN:
8691 case CODE_LABEL:
8692 goto close_shadow;
8694 default:
8695 gcc_unreachable ();
8698 else
8700 close_shadow:
8701 n = emit_insn_before (gen_trapb (), i);
8702 PUT_MODE (n, TImode);
8703 PUT_MODE (i, TImode);
8704 trap_pending = 0;
8705 shadow.used.i = 0;
8706 shadow.used.fp = 0;
8707 shadow.used.mem = 0;
8708 shadow.defd = shadow.used;
8713 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8714 && GET_CODE (i) == INSN
8715 && GET_CODE (PATTERN (i)) != USE
8716 && GET_CODE (PATTERN (i)) != CLOBBER
8717 && get_attr_trap (i) == TRAP_YES)
8719 if (optimize && !trap_pending)
8720 summarize_insn (PATTERN (i), &shadow, 0);
8721 trap_pending = 1;
8726 /* Alpha can only issue instruction groups simultaneously if they are
8727 suitably aligned. This is very processor-specific. */
8728 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8729 that are marked "fake". These instructions do not exist on that target,
8730 but it is possible to see these insns with deranged combinations of
8731 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8732 choose a result at random. */
8734 enum alphaev4_pipe {
8735 EV4_STOP = 0,
8736 EV4_IB0 = 1,
8737 EV4_IB1 = 2,
8738 EV4_IBX = 4
8741 enum alphaev5_pipe {
8742 EV5_STOP = 0,
8743 EV5_NONE = 1,
8744 EV5_E01 = 2,
8745 EV5_E0 = 4,
8746 EV5_E1 = 8,
8747 EV5_FAM = 16,
8748 EV5_FA = 32,
8749 EV5_FM = 64
8752 static enum alphaev4_pipe
8753 alphaev4_insn_pipe (rtx insn)
8755 if (recog_memoized (insn) < 0)
8756 return EV4_STOP;
8757 if (get_attr_length (insn) != 4)
8758 return EV4_STOP;
8760 switch (get_attr_type (insn))
8762 case TYPE_ILD:
8763 case TYPE_LDSYM:
8764 case TYPE_FLD:
8765 case TYPE_LD_L:
8766 return EV4_IBX;
8768 case TYPE_IADD:
8769 case TYPE_ILOG:
8770 case TYPE_ICMOV:
8771 case TYPE_ICMP:
8772 case TYPE_FST:
8773 case TYPE_SHIFT:
8774 case TYPE_IMUL:
8775 case TYPE_FBR:
8776 case TYPE_MVI: /* fake */
8777 return EV4_IB0;
8779 case TYPE_IST:
8780 case TYPE_MISC:
8781 case TYPE_IBR:
8782 case TYPE_JSR:
8783 case TYPE_CALLPAL:
8784 case TYPE_FCPYS:
8785 case TYPE_FCMOV:
8786 case TYPE_FADD:
8787 case TYPE_FDIV:
8788 case TYPE_FMUL:
8789 case TYPE_ST_C:
8790 case TYPE_MB:
8791 case TYPE_FSQRT: /* fake */
8792 case TYPE_FTOI: /* fake */
8793 case TYPE_ITOF: /* fake */
8794 return EV4_IB1;
8796 default:
8797 gcc_unreachable ();
8801 static enum alphaev5_pipe
8802 alphaev5_insn_pipe (rtx insn)
8804 if (recog_memoized (insn) < 0)
8805 return EV5_STOP;
8806 if (get_attr_length (insn) != 4)
8807 return EV5_STOP;
8809 switch (get_attr_type (insn))
8811 case TYPE_ILD:
8812 case TYPE_FLD:
8813 case TYPE_LDSYM:
8814 case TYPE_IADD:
8815 case TYPE_ILOG:
8816 case TYPE_ICMOV:
8817 case TYPE_ICMP:
8818 return EV5_E01;
8820 case TYPE_IST:
8821 case TYPE_FST:
8822 case TYPE_SHIFT:
8823 case TYPE_IMUL:
8824 case TYPE_MISC:
8825 case TYPE_MVI:
8826 case TYPE_LD_L:
8827 case TYPE_ST_C:
8828 case TYPE_MB:
8829 case TYPE_FTOI: /* fake */
8830 case TYPE_ITOF: /* fake */
8831 return EV5_E0;
8833 case TYPE_IBR:
8834 case TYPE_JSR:
8835 case TYPE_CALLPAL:
8836 return EV5_E1;
8838 case TYPE_FCPYS:
8839 return EV5_FAM;
8841 case TYPE_FBR:
8842 case TYPE_FCMOV:
8843 case TYPE_FADD:
8844 case TYPE_FDIV:
8845 case TYPE_FSQRT: /* fake */
8846 return EV5_FA;
8848 case TYPE_FMUL:
8849 return EV5_FM;
8851 default:
8852 gcc_unreachable ();
8856 /* IN_USE is a mask of the slots currently filled within the insn group.
8857 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8858 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8860 LEN is, of course, the length of the group in bytes. */
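/* For instance (illustration only): a load (TYPE_ILD -> EV4_IBX) followed by
   an add (TYPE_IADD -> EV4_IB0) still forms a single group, because the code
   below re-slots the IBX insn into IB1 when the add claims IB0.  */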
8862 static rtx
8863 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8865 int len, in_use;
8867 len = in_use = 0;
8869 if (! INSN_P (insn)
8870 || GET_CODE (PATTERN (insn)) == CLOBBER
8871 || GET_CODE (PATTERN (insn)) == USE)
8872 goto next_and_done;
8874 while (1)
8876 enum alphaev4_pipe pipe;
8878 pipe = alphaev4_insn_pipe (insn);
8879 switch (pipe)
8881 case EV4_STOP:
8882 /* Force complex instructions to start new groups. */
8883 if (in_use)
8884 goto done;
8886 /* If this is a completely unrecognized insn, it's an asm.
8887 We don't know how long it is, so record length as -1 to
8888 signal a needed realignment. */
8889 if (recog_memoized (insn) < 0)
8890 len = -1;
8891 else
8892 len = get_attr_length (insn);
8893 goto next_and_done;
8895 case EV4_IBX:
8896 if (in_use & EV4_IB0)
8898 if (in_use & EV4_IB1)
8899 goto done;
8900 in_use |= EV4_IB1;
8902 else
8903 in_use |= EV4_IB0 | EV4_IBX;
8904 break;
8906 case EV4_IB0:
8907 if (in_use & EV4_IB0)
8909 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8910 goto done;
8911 in_use |= EV4_IB1;
8913 in_use |= EV4_IB0;
8914 break;
8916 case EV4_IB1:
8917 if (in_use & EV4_IB1)
8918 goto done;
8919 in_use |= EV4_IB1;
8920 break;
8922 default:
8923 gcc_unreachable ();
8925 len += 4;
8927 /* Haifa doesn't do well scheduling branches. */
8928 if (GET_CODE (insn) == JUMP_INSN)
8929 goto next_and_done;
8931 next:
8932 insn = next_nonnote_insn (insn);
8934 if (!insn || ! INSN_P (insn))
8935 goto done;
8937 /* Let Haifa tell us where it thinks insn group boundaries are. */
8938 if (GET_MODE (insn) == TImode)
8939 goto done;
8941 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8942 goto next;
8945 next_and_done:
8946 insn = next_nonnote_insn (insn);
8948 done:
8949 *plen = len;
8950 *pin_use = in_use;
8951 return insn;
8954 /* IN_USE is a mask of the slots currently filled within the insn group.
8955 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8956 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8958 LEN is, of course, the length of the group in bytes. */
8960 static rtx
8961 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8963 int len, in_use;
8965 len = in_use = 0;
8967 if (! INSN_P (insn)
8968 || GET_CODE (PATTERN (insn)) == CLOBBER
8969 || GET_CODE (PATTERN (insn)) == USE)
8970 goto next_and_done;
8972 while (1)
8974 enum alphaev5_pipe pipe;
8976 pipe = alphaev5_insn_pipe (insn);
8977 switch (pipe)
8979 case EV5_STOP:
8980 /* Force complex instructions to start new groups. */
8981 if (in_use)
8982 goto done;
8984 /* If this is a completely unrecognized insn, it's an asm.
8985 We don't know how long it is, so record length as -1 to
8986 signal a needed realignment. */
8987 if (recog_memoized (insn) < 0)
8988 len = -1;
8989 else
8990 len = get_attr_length (insn);
8991 goto next_and_done;
8993 /* ??? Most of the places below, we would like to assert can never
8994 happen, as it would indicate an error either in Haifa, or
8995 in the scheduling description. Unfortunately, Haifa never
8996 schedules the last instruction of the BB, so we don't have
8997 an accurate TI bit to go off. */
8998 case EV5_E01:
8999 if (in_use & EV5_E0)
9001 if (in_use & EV5_E1)
9002 goto done;
9003 in_use |= EV5_E1;
9005 else
9006 in_use |= EV5_E0 | EV5_E01;
9007 break;
9009 case EV5_E0:
9010 if (in_use & EV5_E0)
9012 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9013 goto done;
9014 in_use |= EV5_E1;
9016 in_use |= EV5_E0;
9017 break;
9019 case EV5_E1:
9020 if (in_use & EV5_E1)
9021 goto done;
9022 in_use |= EV5_E1;
9023 break;
9025 case EV5_FAM:
9026 if (in_use & EV5_FA)
9028 if (in_use & EV5_FM)
9029 goto done;
9030 in_use |= EV5_FM;
9032 else
9033 in_use |= EV5_FA | EV5_FAM;
9034 break;
9036 case EV5_FA:
9037 if (in_use & EV5_FA)
9038 goto done;
9039 in_use |= EV5_FA;
9040 break;
9042 case EV5_FM:
9043 if (in_use & EV5_FM)
9044 goto done;
9045 in_use |= EV5_FM;
9046 break;
9048 case EV5_NONE:
9049 break;
9051 default:
9052 gcc_unreachable ();
9054 len += 4;
9056 /* Haifa doesn't do well scheduling branches. */
9057 /* ??? If this is predicted not-taken, slotting continues, except
9058 that no more IBR, FBR, or JSR insns may be slotted. */
9059 if (GET_CODE (insn) == JUMP_INSN)
9060 goto next_and_done;
9062 next:
9063 insn = next_nonnote_insn (insn);
9065 if (!insn || ! INSN_P (insn))
9066 goto done;
9068 /* Let Haifa tell us where it thinks insn group boundaries are. */
9069 if (GET_MODE (insn) == TImode)
9070 goto done;
9072 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9073 goto next;
9076 next_and_done:
9077 insn = next_nonnote_insn (insn);
9079 done:
9080 *plen = len;
9081 *pin_use = in_use;
9082 return insn;
9085 static rtx
9086 alphaev4_next_nop (int *pin_use)
9088 int in_use = *pin_use;
9089 rtx nop;
9091 if (!(in_use & EV4_IB0))
9093 in_use |= EV4_IB0;
9094 nop = gen_nop ();
9096 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9098 in_use |= EV4_IB1;
9099 nop = gen_nop ();
9101 else if (TARGET_FP && !(in_use & EV4_IB1))
9103 in_use |= EV4_IB1;
9104 nop = gen_fnop ();
9106 else
9107 nop = gen_unop ();
9109 *pin_use = in_use;
9110 return nop;
9113 static rtx
9114 alphaev5_next_nop (int *pin_use)
9116 int in_use = *pin_use;
9117 rtx nop;
9119 if (!(in_use & EV5_E1))
9121 in_use |= EV5_E1;
9122 nop = gen_nop ();
9124 else if (TARGET_FP && !(in_use & EV5_FA))
9126 in_use |= EV5_FA;
9127 nop = gen_fnop ();
9129 else if (TARGET_FP && !(in_use & EV5_FM))
9131 in_use |= EV5_FM;
9132 nop = gen_fnop ();
9134 else
9135 nop = gen_unop ();
9137 *pin_use = in_use;
9138 return nop;
9141 /* The instruction group alignment main loop. */
9143 static void
9144 alpha_align_insns (unsigned int max_align,
9145 rtx (*next_group) (rtx, int *, int *),
9146 rtx (*next_nop) (int *))
9148 /* ALIGN is the known alignment for the insn group. */
9149 unsigned int align;
9150 /* OFS is the offset of the current insn in the insn group. */
9151 int ofs;
9152 int prev_in_use, in_use, len, ldgp;
9153 rtx i, next;
9155 /* Let shorten branches care for assigning alignments to code labels. */
9156 shorten_branches (get_insns ());
9158 if (align_functions < 4)
9159 align = 4;
9160 else if ((unsigned int) align_functions < max_align)
9161 align = align_functions;
9162 else
9163 align = max_align;
9165 ofs = prev_in_use = 0;
9166 i = get_insns ();
9167 if (GET_CODE (i) == NOTE)
9168 i = next_nonnote_insn (i);
9170 ldgp = alpha_function_needs_gp ? 8 : 0;
9172 while (i)
9174 next = (*next_group) (i, &in_use, &len);
9176 /* When we see a label, resync alignment etc. */
9177 if (GET_CODE (i) == CODE_LABEL)
9179 unsigned int new_align = 1 << label_to_alignment (i);
9181 if (new_align >= align)
9183 align = new_align < max_align ? new_align : max_align;
9184 ofs = 0;
9187 else if (ofs & (new_align-1))
9188 ofs = (ofs | (new_align-1)) + 1;
9189 gcc_assert (!len);
9192 /* Handle complex instructions specially. */
9193 else if (in_use == 0)
9195 /* Asms will have length < 0. This is a signal that we have
9196 lost alignment knowledge. Assume, however, that the asm
9197 will not mis-align instructions. */
9198 if (len < 0)
9200 ofs = 0;
9201 align = 4;
9202 len = 0;
9206 /* If the known alignment is smaller than the recognized insn group,
9207 realign the output. */
9208 else if ((int) align < len)
9210 unsigned int new_log_align = len > 8 ? 4 : 3;
9211 rtx prev, where;
9213 where = prev = prev_nonnote_insn (i);
9214 if (!where || GET_CODE (where) != CODE_LABEL)
9215 where = i;
9217 /* Can't realign between a call and its gp reload. */
9218 if (! (TARGET_EXPLICIT_RELOCS
9219 && prev && GET_CODE (prev) == CALL_INSN))
9221 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9222 align = 1 << new_log_align;
9223 ofs = 0;
9227 /* We may not insert padding inside the initial ldgp sequence. */
9228 else if (ldgp > 0)
9229 ldgp -= len;
9231 /* If the group won't fit in the same INT16 as the previous,
9232 we need to add padding to keep the group together. Rather
9233 than simply leaving the insn filling to the assembler, we
9234 can make use of the knowledge of what sorts of instructions
9235 were issued in the previous group to make sure that all of
9236 the added nops are really free. */
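/* As a hypothetical example: if the previous EV5 group used both integer
   pipes but neither floating-point pipe, alphaev5_next_nop hands back an
   fnop on an FP-enabled target, so the padding can dual-issue with the old
   group instead of forcing a new one.  */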
9237 else if (ofs + len > (int) align)
9239 int nop_count = (align - ofs) / 4;
9240 rtx where;
9242 /* Insert nops before labels, branches, and calls to truly merge
9243 the execution of the nops with the previous instruction group. */
9244 where = prev_nonnote_insn (i);
9245 if (where)
9247 if (GET_CODE (where) == CODE_LABEL)
9249 rtx where2 = prev_nonnote_insn (where);
9250 if (where2 && GET_CODE (where2) == JUMP_INSN)
9251 where = where2;
9253 else if (GET_CODE (where) == INSN)
9254 where = i;
9256 else
9257 where = i;
9260 emit_insn_before ((*next_nop)(&prev_in_use), where);
9261 while (--nop_count);
9262 ofs = 0;
9265 ofs = (ofs + len) & (align - 1);
9266 prev_in_use = in_use;
9267 i = next;
9271 /* Machine dependent reorg pass. */
9273 static void
9274 alpha_reorg (void)
9276 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9277 alpha_handle_trap_shadows ();
9279 /* Due to the number of extra trapb insns, don't bother fixing up
9280 alignment when trap precision is instruction. Moreover, we can
9281 only do our job when sched2 is run. */
9282 if (optimize && !optimize_size
9283 && alpha_tp != ALPHA_TP_INSN
9284 && flag_schedule_insns_after_reload)
9286 if (alpha_tune == PROCESSOR_EV4)
9287 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9288 else if (alpha_tune == PROCESSOR_EV5)
9289 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9293 #if !TARGET_ABI_UNICOSMK
9295 #ifdef HAVE_STAMP_H
9296 #include <stamp.h>
9297 #endif
9299 static void
9300 alpha_file_start (void)
9302 #ifdef OBJECT_FORMAT_ELF
9303 /* If emitting dwarf2 debug information, we cannot generate a .file
9304 directive to start the file, as it will conflict with dwarf2out
9305 file numbers. So it's only useful when emitting mdebug output. */
9306 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9307 #endif
9309 default_file_start ();
9310 #ifdef MS_STAMP
9311 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9312 #endif
9314 fputs ("\t.set noreorder\n", asm_out_file);
9315 fputs ("\t.set volatile\n", asm_out_file);
9316 if (!TARGET_ABI_OPEN_VMS)
9317 fputs ("\t.set noat\n", asm_out_file);
9318 if (TARGET_EXPLICIT_RELOCS)
9319 fputs ("\t.set nomacro\n", asm_out_file);
9320 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9322 const char *arch;
9324 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9325 arch = "ev6";
9326 else if (TARGET_MAX)
9327 arch = "pca56";
9328 else if (TARGET_BWX)
9329 arch = "ev56";
9330 else if (alpha_cpu == PROCESSOR_EV5)
9331 arch = "ev5";
9332 else
9333 arch = "ev4";
9335 fprintf (asm_out_file, "\t.arch %s\n", arch);
9338 #endif
9340 #ifdef OBJECT_FORMAT_ELF
9341 /* Since we don't have a .dynbss section, we should not allow global
9342 relocations in the .rodata section. */
9344 static int
9345 alpha_elf_reloc_rw_mask (void)
9347 return flag_pic ? 3 : 2;
9350 /* Return a section for X. The only special thing we do here is to
9351 honor small data. */
9353 static section *
9354 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9355 unsigned HOST_WIDE_INT align)
9357 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9358 /* ??? Consider using mergeable sdata sections. */
9359 return sdata_section;
9360 else
9361 return default_elf_select_rtx_section (mode, x, align);
9364 static unsigned int
9365 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9367 unsigned int flags = 0;
9369 if (strcmp (name, ".sdata") == 0
9370 || strncmp (name, ".sdata.", 7) == 0
9371 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9372 || strcmp (name, ".sbss") == 0
9373 || strncmp (name, ".sbss.", 6) == 0
9374 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9375 flags = SECTION_SMALL;
9377 flags |= default_section_type_flags (decl, name, reloc);
9378 return flags;
9380 #endif /* OBJECT_FORMAT_ELF */
9382 /* Structure to collect function names for final output in link section. */
9383 /* Note that items marked with GTY can't be ifdef'ed out. */
9385 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9386 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9388 struct alpha_links GTY(())
9390 int num;
9391 rtx linkage;
9392 enum links_kind lkind;
9393 enum reloc_kind rkind;
9396 struct alpha_funcs GTY(())
9398 int num;
9399 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9400 links;
9403 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9404 splay_tree alpha_links_tree;
9405 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9406 splay_tree alpha_funcs_tree;
9408 static GTY(()) int alpha_funcs_num;
9410 #if TARGET_ABI_OPEN_VMS
9412 /* Return the VMS argument type corresponding to MODE. */
9414 enum avms_arg_type
9415 alpha_arg_type (enum machine_mode mode)
9417 switch (mode)
9419 case SFmode:
9420 return TARGET_FLOAT_VAX ? FF : FS;
9421 case DFmode:
9422 return TARGET_FLOAT_VAX ? FD : FT;
9423 default:
9424 return I64;
9428 /* Return an rtx for an integer representing the VMS Argument Information
9429 register value. */
9432 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9434 unsigned HOST_WIDE_INT regval = cum.num_args;
9435 int i;
9437 for (i = 0; i < 6; i++)
9438 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9440 return GEN_INT (regval);
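/* Layout note (added for clarity): the low 8 bits of the AI value hold the
   argument count, and each of the first six arguments gets a 3-bit type
   code, with argument N (0-based) occupying bits 8 + 3*N through 10 + 3*N.  */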
9443 /* Make (or fake) .linkage entry for function call.
9445 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9447 Return a SYMBOL_REF rtx for the linkage. */
9450 alpha_need_linkage (const char *name, int is_local)
9452 splay_tree_node node;
9453 struct alpha_links *al;
9455 if (name[0] == '*')
9456 name++;
9458 if (is_local)
9460 struct alpha_funcs *cfaf;
9462 if (!alpha_funcs_tree)
9463 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9464 splay_tree_compare_pointers);
9466 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9468 cfaf->links = 0;
9469 cfaf->num = ++alpha_funcs_num;
9471 splay_tree_insert (alpha_funcs_tree,
9472 (splay_tree_key) current_function_decl,
9473 (splay_tree_value) cfaf);
9476 if (alpha_links_tree)
9478 /* Is this name already defined? */
9480 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9481 if (node)
9483 al = (struct alpha_links *) node->value;
9484 if (is_local)
9486 /* Defined here but external assumed. */
9487 if (al->lkind == KIND_EXTERN)
9488 al->lkind = KIND_LOCAL;
9490 else
9492 /* Used here but unused assumed. */
9493 if (al->lkind == KIND_UNUSED)
9494 al->lkind = KIND_LOCAL;
9496 return al->linkage;
9499 else
9500 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9502 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9503 name = ggc_strdup (name);
9505 /* Assume external if no definition. */
9506 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9508 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9509 get_identifier (name);
9511 /* Construct a SYMBOL_REF for us to call. */
9513 size_t name_len = strlen (name);
9514 char *linksym = alloca (name_len + 6);
9515 linksym[0] = '$';
9516 memcpy (linksym + 1, name, name_len);
9517 memcpy (linksym + 1 + name_len, "..lk", 5);
9518 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9519 ggc_alloc_string (linksym, name_len + 5));
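/* Illustrative note (not in the original sources): for a function named
   "foo" this builds the symbol "$foo..lk"; the name_len + 6 allocation
   covers the '$', the name, "..lk" and the terminating NUL.  */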
9522 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9523 (splay_tree_value) al);
9525 return al->linkage;
9529 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9531 splay_tree_node cfunnode;
9532 struct alpha_funcs *cfaf;
9533 struct alpha_links *al;
9534 const char *name = XSTR (linkage, 0);
9536 cfaf = (struct alpha_funcs *) 0;
9537 al = (struct alpha_links *) 0;
9539 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9540 cfaf = (struct alpha_funcs *) cfunnode->value;
9542 if (cfaf->links)
9544 splay_tree_node lnode;
9546 /* Is this name already defined? */
9548 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9549 if (lnode)
9550 al = (struct alpha_links *) lnode->value;
9552 else
9553 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9555 if (!al)
9557 size_t name_len;
9558 size_t buflen;
9559 char buf [512];
9560 char *linksym;
9561 splay_tree_node node = 0;
9562 struct alpha_links *anl;
9564 if (name[0] == '*')
9565 name++;
9567 name_len = strlen (name);
9569 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9570 al->num = cfaf->num;
9572 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9573 if (node)
9575 anl = (struct alpha_links *) node->value;
9576 al->lkind = anl->lkind;
9579 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9580 buflen = strlen (buf);
9581 linksym = alloca (buflen + 1);
9582 memcpy (linksym, buf, buflen + 1);
9584 al->linkage = gen_rtx_SYMBOL_REF
9585 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9587 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9588 (splay_tree_value) al);
9591 if (rflag)
9592 al->rkind = KIND_CODEADDR;
9593 else
9594 al->rkind = KIND_LINKAGE;
9596 if (lflag)
9597 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9598 else
9599 return al->linkage;
9602 static int
9603 alpha_write_one_linkage (splay_tree_node node, void *data)
9605 const char *const name = (const char *) node->key;
9606 struct alpha_links *link = (struct alpha_links *) node->value;
9607 FILE *stream = (FILE *) data;
9609 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9610 if (link->rkind == KIND_CODEADDR)
9612 if (link->lkind == KIND_LOCAL)
9614 /* Local and used */
9615 fprintf (stream, "\t.quad %s..en\n", name);
9617 else
9619 /* External and used, request code address. */
9620 fprintf (stream, "\t.code_address %s\n", name);
9623 else
9625 if (link->lkind == KIND_LOCAL)
9627 /* Local and used, build linkage pair. */
9628 fprintf (stream, "\t.quad %s..en\n", name);
9629 fprintf (stream, "\t.quad %s\n", name);
9631 else
9633 /* External and used, request linkage pair. */
9634 fprintf (stream, "\t.linkage %s\n", name);
9638 return 0;
9641 static void
9642 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9644 splay_tree_node node;
9645 struct alpha_funcs *func;
9647 fprintf (stream, "\t.link\n");
9648 fprintf (stream, "\t.align 3\n");
9649 in_section = NULL;
9651 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9652 func = (struct alpha_funcs *) node->value;
9654 fputs ("\t.name ", stream);
9655 assemble_name (stream, funname);
9656 fputs ("..na\n", stream);
9657 ASM_OUTPUT_LABEL (stream, funname);
9658 fprintf (stream, "\t.pdesc ");
9659 assemble_name (stream, funname);
9660 fprintf (stream, "..en,%s\n",
9661 alpha_procedure_type == PT_STACK ? "stack"
9662 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9664 if (func->links)
9666 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9667 /* splay_tree_delete (func->links); */
9671 /* Given a decl, a section name, and whether the decl initializer
9672 has relocs, choose attributes for the section. */
9674 #define SECTION_VMS_OVERLAY SECTION_FORGET
9675 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9676 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9678 static unsigned int
9679 vms_section_type_flags (tree decl, const char *name, int reloc)
9681 unsigned int flags = default_section_type_flags (decl, name, reloc);
9683 if (decl && DECL_ATTRIBUTES (decl)
9684 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9685 flags |= SECTION_VMS_OVERLAY;
9686 if (decl && DECL_ATTRIBUTES (decl)
9687 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9688 flags |= SECTION_VMS_GLOBAL;
9689 if (decl && DECL_ATTRIBUTES (decl)
9690 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9691 flags |= SECTION_VMS_INITIALIZE;
9693 return flags;
9696 /* Switch to an arbitrary section NAME with attributes as specified
9697 by FLAGS. ALIGN specifies any known alignment requirements for
9698 the section; 0 if the default should be used. */
9700 static void
9701 vms_asm_named_section (const char *name, unsigned int flags,
9702 tree decl ATTRIBUTE_UNUSED)
9704 fputc ('\n', asm_out_file);
9705 fprintf (asm_out_file, ".section\t%s", name);
9707 if (flags & SECTION_VMS_OVERLAY)
9708 fprintf (asm_out_file, ",OVR");
9709 if (flags & SECTION_VMS_GLOBAL)
9710 fprintf (asm_out_file, ",GBL");
9711 if (flags & SECTION_VMS_INITIALIZE)
9712 fprintf (asm_out_file, ",NOMOD");
9713 if (flags & SECTION_DEBUG)
9714 fprintf (asm_out_file, ",NOWRT");
9716 fputc ('\n', asm_out_file);
9719 /* Record an element in the table of global constructors. SYMBOL is
9720 a SYMBOL_REF of the function to be called; PRIORITY is a number
9721 between 0 and MAX_INIT_PRIORITY.
9723 Differs from default_ctors_section_asm_out_constructor in that the
9724 width of the .ctors entry is always 64 bits, rather than the 32 bits
9725 used by a normal pointer. */
9727 static void
9728 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9730 switch_to_section (ctors_section);
9731 assemble_align (BITS_PER_WORD);
9732 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9735 static void
9736 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9738 switch_to_section (dtors_section);
9739 assemble_align (BITS_PER_WORD);
9740 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9742 #else
9745 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9746 int is_local ATTRIBUTE_UNUSED)
9748 return NULL_RTX;
9752 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9753 tree cfundecl ATTRIBUTE_UNUSED,
9754 int lflag ATTRIBUTE_UNUSED,
9755 int rflag ATTRIBUTE_UNUSED)
9757 return NULL_RTX;
9760 #endif /* TARGET_ABI_OPEN_VMS */
9762 #if TARGET_ABI_UNICOSMK
9764 /* This evaluates to true if we do not know how to pass TYPE solely in
9765 registers. This is the case for all arguments that do not fit in two
9766 registers. */
9768 static bool
9769 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9771 if (type == NULL)
9772 return false;
9774 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9775 return true;
9776 if (TREE_ADDRESSABLE (type))
9777 return true;
9779 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
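/* Rough example (assuming ALPHA_ARG_SIZE counts 8-byte words): a 16-byte
   struct occupies two words and can still be passed in registers, while a
   24-byte struct needs three and is forced onto the stack.  */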
9782 /* Define the offset between two registers, one to be eliminated, and the
9783 other its replacement, at the start of a routine. */
9786 unicosmk_initial_elimination_offset (int from, int to)
9788 int fixed_size;
9790 fixed_size = alpha_sa_size();
9791 if (fixed_size != 0)
9792 fixed_size += 48;
9794 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9795 return -fixed_size;
9796 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9797 return 0;
9798 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9799 return (ALPHA_ROUND (current_function_outgoing_args_size)
9800 + ALPHA_ROUND (get_frame_size()));
9801 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9802 return (ALPHA_ROUND (fixed_size)
9803 + ALPHA_ROUND (get_frame_size()
9804 + current_function_outgoing_args_size));
9805 else
9806 gcc_unreachable ();
9809 /* Output the module name for .ident and .end directives. We have to strip
9810 directories and make sure that the module name starts with a letter
9811 or '$'. */
9813 static void
9814 unicosmk_output_module_name (FILE *file)
9816 const char *name = lbasename (main_input_filename);
9817 unsigned len = strlen (name);
9818 char *clean_name = alloca (len + 2);
9819 char *ptr = clean_name;
9821 /* CAM only accepts module names that start with a letter or '$'. We
9822 prefix the module name with a '$' if necessary. */
9824 if (!ISALPHA (*name))
9825 *ptr++ = '$';
9826 memcpy (ptr, name, len + 1);
9827 clean_symbol_name (clean_name);
9828 fputs (clean_name, file);
9831 /* Output the definition of a common variable. */
9833 void
9834 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9836 tree name_tree;
9837 printf ("T3E__: common %s\n", name);
9839 in_section = NULL;
9840 fputs("\t.endp\n\n\t.psect ", file);
9841 assemble_name(file, name);
9842 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9843 fprintf(file, "\t.byte\t0:%d\n", size);
9845 /* Mark the symbol as defined in this module. */
9846 name_tree = get_identifier (name);
9847 TREE_ASM_WRITTEN (name_tree) = 1;
9850 #define SECTION_PUBLIC SECTION_MACH_DEP
9851 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9852 static int current_section_align;
9854 /* A get_unnamed_section callback for switching to the text section. */
9856 static void
9857 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9859 static int count = 0;
9860 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9863 /* A get_unnamed_section callback for switching to the data section. */
9865 static void
9866 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9868 static int count = 1;
9869 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9872 /* Implement TARGET_ASM_INIT_SECTIONS.
9874 The Cray assembler is really weird with respect to sections. It has only
9875 named sections and you can't reopen a section once it has been closed.
9876 This means that we have to generate unique names whenever we want to
9877 reenter the text or the data section. */
9879 static void
9880 unicosmk_init_sections (void)
9882 text_section = get_unnamed_section (SECTION_CODE,
9883 unicosmk_output_text_section_asm_op,
9884 NULL);
9885 data_section = get_unnamed_section (SECTION_WRITE,
9886 unicosmk_output_data_section_asm_op,
9887 NULL);
9888 readonly_data_section = data_section;
9891 static unsigned int
9892 unicosmk_section_type_flags (tree decl, const char *name,
9893 int reloc ATTRIBUTE_UNUSED)
9895 unsigned int flags = default_section_type_flags (decl, name, reloc);
9897 if (!decl)
9898 return flags;
9900 if (TREE_CODE (decl) == FUNCTION_DECL)
9902 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9903 if (align_functions_log > current_section_align)
9904 current_section_align = align_functions_log;
9906 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9907 flags |= SECTION_MAIN;
9909 else
9910 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9912 if (TREE_PUBLIC (decl))
9913 flags |= SECTION_PUBLIC;
9915 return flags;
9918 /* Generate a section name for decl and associate it with the
9919 declaration. */
9921 static void
9922 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9924 const char *name;
9925 int len;
9927 gcc_assert (decl);
9929 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9930 name = default_strip_name_encoding (name);
9931 len = strlen (name);
9933 if (TREE_CODE (decl) == FUNCTION_DECL)
9935 char *string;
9937 /* It is essential that we prefix the section name here because
9938 otherwise the section names generated for constructors and
9939 destructors confuse collect2. */
9941 string = alloca (len + 6);
9942 sprintf (string, "code@%s", name);
9943 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9945 else if (TREE_PUBLIC (decl))
9946 DECL_SECTION_NAME (decl) = build_string (len, name);
9947 else
9949 char *string;
9951 string = alloca (len + 6);
9952 sprintf (string, "data@%s", name);
9953 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9957 /* Switch to an arbitrary section NAME with attributes as specified
9958 by FLAGS. ALIGN specifies any known alignment requirements for
9959 the section; 0 if the default should be used. */
9961 static void
9962 unicosmk_asm_named_section (const char *name, unsigned int flags,
9963 tree decl ATTRIBUTE_UNUSED)
9965 const char *kind;
9967 /* Close the previous section. */
9969 fputs ("\t.endp\n\n", asm_out_file);
9971 /* Find out what kind of section we are opening. */
9973 if (flags & SECTION_MAIN)
9974 fputs ("\t.start\tmain\n", asm_out_file);
9976 if (flags & SECTION_CODE)
9977 kind = "code";
9978 else if (flags & SECTION_PUBLIC)
9979 kind = "common";
9980 else
9981 kind = "data";
9983 if (current_section_align != 0)
9984 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9985 current_section_align, kind);
9986 else
9987 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9990 static void
9991 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9993 if (DECL_P (decl)
9994 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9995 unicosmk_unique_section (decl, 0);
9998 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9999 in code sections because .align fills unused space with zeroes. */
10001 void
10002 unicosmk_output_align (FILE *file, int align)
10004 if (inside_function)
10005 fprintf (file, "\tgcc@code@align\t%d\n", align);
10006 else
10007 fprintf (file, "\t.align\t%d\n", align);
10010 /* Add a case vector to the current function's list of deferred case
10011 vectors. Case vectors have to be put into a separate section because CAM
10012 does not allow data definitions in code sections. */
10014 void
10015 unicosmk_defer_case_vector (rtx lab, rtx vec)
10017 struct machine_function *machine = cfun->machine;
10019 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10020 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10021 machine->addr_list);
10024 /* Output a case vector. */
10026 static void
10027 unicosmk_output_addr_vec (FILE *file, rtx vec)
10029 rtx lab = XEXP (vec, 0);
10030 rtx body = XEXP (vec, 1);
10031 int vlen = XVECLEN (body, 0);
10032 int idx;
10034 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10036 for (idx = 0; idx < vlen; idx++)
10038 ASM_OUTPUT_ADDR_VEC_ELT
10039 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10043 /* Output current function's deferred case vectors. */
10045 static void
10046 unicosmk_output_deferred_case_vectors (FILE *file)
10048 struct machine_function *machine = cfun->machine;
10049 rtx t;
10051 if (machine->addr_list == NULL_RTX)
10052 return;
10054 switch_to_section (data_section);
10055 for (t = machine->addr_list; t; t = XEXP (t, 1))
10056 unicosmk_output_addr_vec (file, XEXP (t, 0));
10059 /* Generate the name of the SSIB section for the current function. */
10061 #define SSIB_PREFIX "__SSIB_"
10062 #define SSIB_PREFIX_LEN 7
10064 static const char *
10065 unicosmk_ssib_name (void)
10067 /* This is ok since CAM won't be able to deal with names longer than that
10068 anyway. */
10070 static char name[256];
10072 rtx x;
10073 const char *fnname;
10074 int len;
10076 x = DECL_RTL (cfun->decl);
10077 gcc_assert (GET_CODE (x) == MEM);
10078 x = XEXP (x, 0);
10079 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10080 fnname = XSTR (x, 0);
10082 len = strlen (fnname);
10083 if (len + SSIB_PREFIX_LEN > 255)
10084 len = 255 - SSIB_PREFIX_LEN;
10086 strcpy (name, SSIB_PREFIX);
10087 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10088 name[len + SSIB_PREFIX_LEN] = 0;
10090 return name;
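/* E.g. for a function "foo" this yields "__SSIB_foo"; longer names are
   truncated so that prefix plus name stay within 255 characters
   (illustrative note).  */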
10093 /* Set up the dynamic subprogram information block (DSIB) and update the
10094 frame pointer register ($15) for subroutines which have a frame. If the
10095 subroutine doesn't have a frame, simply increment $15. */
10097 static void
10098 unicosmk_gen_dsib (unsigned long *imaskP)
10100 if (alpha_procedure_type == PT_STACK)
10102 const char *ssib_name;
10103 rtx mem;
10105 /* Allocate 64 bytes for the DSIB. */
10107 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10108 GEN_INT (-64))));
10109 emit_insn (gen_blockage ());
10111 /* Save the return address. */
10113 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10114 set_mem_alias_set (mem, alpha_sr_alias_set);
10115 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10116 (*imaskP) &= ~(1UL << REG_RA);
10118 /* Save the old frame pointer. */
10120 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10121 set_mem_alias_set (mem, alpha_sr_alias_set);
10122 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10123 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10125 emit_insn (gen_blockage ());
10127 /* Store the SSIB pointer. */
10129 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10130 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10131 set_mem_alias_set (mem, alpha_sr_alias_set);
10133 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10134 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10135 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10137 /* Save the CIW index. */
10139 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10140 set_mem_alias_set (mem, alpha_sr_alias_set);
10141 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10143 emit_insn (gen_blockage ());
10145 /* Set the new frame pointer. */
10147 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10148 stack_pointer_rtx, GEN_INT (64))));
10151 else
10153 /* Increment the frame pointer register to indicate that we do not
10154 have a frame. */
10156 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10157 hard_frame_pointer_rtx, const1_rtx)));
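/* For the PT_STACK case above, the resulting DSIB layout (summarized from
   the stores emitted there) is: 64 bytes are carved out below the old $sp;
   the return address is saved at sp+56, the old frame pointer at sp+48, the
   SSIB pointer at sp+32, the CIW index at sp+24, and the new frame pointer
   then points at the old $sp (sp+64).  */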
10161 /* Output the static subroutine information block for the current
10162 function. */
10164 static void
10165 unicosmk_output_ssib (FILE *file, const char *fnname)
10167 int len;
10168 int i;
10169 rtx x;
10170 rtx ciw;
10171 struct machine_function *machine = cfun->machine;
10173 in_section = NULL;
10174 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10175 unicosmk_ssib_name ());
10177 /* Some required stuff and the function name length. */
10179 len = strlen (fnname);
10180 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10182 /* Saved registers
10183 ??? We don't do that yet. */
10185 fputs ("\t.quad\t0\n", file);
10187 /* Function address. */
10189 fputs ("\t.quad\t", file);
10190 assemble_name (file, fnname);
10191 putc ('\n', file);
10193 fputs ("\t.quad\t0\n", file);
10194 fputs ("\t.quad\t0\n", file);
10196 /* Function name.
10197 ??? We do it the same way Cray CC does it but this could be
10198 simplified. */
10200 for( i = 0; i < len; i++ )
10201 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10202 if( (len % 8) == 0 )
10203 fputs ("\t.quad\t0\n", file);
10204 else
10205 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10207 /* All call information words used in the function. */
10209 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10211 ciw = XEXP (x, 0);
10212 #if HOST_BITS_PER_WIDE_INT == 32
10213 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10214 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10215 #else
10216 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10217 #endif
10221 /* Add a call information word (CIW) to the list of the current function's
10222 CIWs and return its index.
10224 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10227 unicosmk_add_call_info_word (rtx x)
10229 rtx node;
10230 struct machine_function *machine = cfun->machine;
10232 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10233 if (machine->first_ciw == NULL_RTX)
10234 machine->first_ciw = node;
10235 else
10236 XEXP (machine->last_ciw, 1) = node;
10238 machine->last_ciw = node;
10239 ++machine->ciw_count;
10241 return GEN_INT (machine->ciw_count
10242 + strlen (current_function_name ())/8 + 5);
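/* The value handed back appears to be the zero-based quadword index of this
   CIW within the SSIB emitted by unicosmk_output_ssib: five fixed quadwords,
   then the padded function name, then the CIWs themselves (a hedged reading
   of the layout, not stated in the original sources).  */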
10245 /* The Cray assembler doesn't accept extern declarations for symbols which
10246 are defined in the same file. We have to keep track of all global
10247 symbols which are referenced and/or defined in a source file and output
10248 extern declarations for those which are referenced but not defined at
10249 the end of file. */
10251 /* List of identifiers for which an extern declaration might have to be
10252 emitted. */
10253 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10255 struct unicosmk_extern_list
10257 struct unicosmk_extern_list *next;
10258 const char *name;
10261 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10263 /* Output extern declarations which are required for every asm file. */
10265 static void
10266 unicosmk_output_default_externs (FILE *file)
10268 static const char *const externs[] =
10269 { "__T3E_MISMATCH" };
10271 int i;
10272 int n;
10274 n = ARRAY_SIZE (externs);
10276 for (i = 0; i < n; i++)
10277 fprintf (file, "\t.extern\t%s\n", externs[i]);
10280 /* Output extern declarations for global symbols which have been
10281 referenced but not defined. */
10283 static void
10284 unicosmk_output_externs (FILE *file)
10286 struct unicosmk_extern_list *p;
10287 const char *real_name;
10288 int len;
10289 tree name_tree;
10291 len = strlen (user_label_prefix);
10292 for (p = unicosmk_extern_head; p != 0; p = p->next)
10294 /* We have to strip the encoding and possibly remove user_label_prefix
10295 from the identifier in order to handle -fleading-underscore and
10296 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10297 real_name = default_strip_name_encoding (p->name);
10298 if (len && p->name[0] == '*'
10299 && !memcmp (real_name, user_label_prefix, len))
10300 real_name += len;
10302 name_tree = get_identifier (real_name);
10303 if (! TREE_ASM_WRITTEN (name_tree))
10305 TREE_ASM_WRITTEN (name_tree) = 1;
10306 fputs ("\t.extern\t", file);
10307 assemble_name (file, p->name);
10308 putc ('\n', file);
10313 /* Record an extern. */
10315 void
10316 unicosmk_add_extern (const char *name)
10318 struct unicosmk_extern_list *p;
10320 p = (struct unicosmk_extern_list *)
10321 xmalloc (sizeof (struct unicosmk_extern_list));
10322 p->next = unicosmk_extern_head;
10323 p->name = name;
10324 unicosmk_extern_head = p;
10327 /* The Cray assembler generates incorrect code if identifiers which
10328 conflict with register names are used as instruction operands. We have
10329 to replace such identifiers with DEX expressions. */
10331 /* Structure to collect identifiers which have been replaced by DEX
10332 expressions. */
10333 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10335 struct unicosmk_dex {
10336 struct unicosmk_dex *next;
10337 const char *name;
10340 /* List of identifiers which have been replaced by DEX expressions. The DEX
10341 number is determined by the position in the list. */
10343 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10345 /* The number of elements in the DEX list. */
10347 static int unicosmk_dex_count = 0;
10349 /* Check if NAME must be replaced by a DEX expression. */
10351 static int
10352 unicosmk_special_name (const char *name)
10354 if (name[0] == '*')
10355 ++name;
10357 if (name[0] == '$')
10358 ++name;
10360 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10361 return 0;
10363 switch (name[1])
10365 case '1': case '2':
10366 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10368 case '3':
10369 return (name[2] == '\0'
10370 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10372 default:
10373 return (ISDIGIT (name[1]) && name[2] == '\0');
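  /* In effect the names treated as special here are exactly r0-r31 and
     f0-f31 (upper- or lower-case first letter), optionally preceded by the
     '*' encoding marker and/or a '$': e.g. "r31", "$f5" and "*R10" qualify,
     while "r32", "f310" or "rx" do not.  */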
10377 /* Return the DEX number if X must be replaced by a DEX expression and 0
10378 otherwise. */
10380 static int
10381 unicosmk_need_dex (rtx x)
10383 struct unicosmk_dex *dex;
10384 const char *name;
10385 int i;
10387 if (GET_CODE (x) != SYMBOL_REF)
10388 return 0;
10390 name = XSTR (x,0);
10391 if (! unicosmk_special_name (name))
10392 return 0;
10394 i = unicosmk_dex_count;
10395 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10397 if (! strcmp (name, dex->name))
10398 return i;
10399 --i;
10402 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10403 dex->name = name;
10404 dex->next = unicosmk_dex_list;
10405 unicosmk_dex_list = dex;
10407 ++unicosmk_dex_count;
10408 return unicosmk_dex_count;
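/* Note on the numbering used above: new names are pushed onto the front of
   unicosmk_dex_list, so the entry at the head always carries the current
   value of unicosmk_dex_count and the very first name recorded keeps DEX
   number 1.  unicosmk_output_dex below walks the list in the same order,
   which keeps the printed definitions consistent with the numbers returned
   here.  */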
10411 /* Output the DEX definitions for this file. */
10413 static void
10414 unicosmk_output_dex (FILE *file)
10416 struct unicosmk_dex *dex;
10417 int i;
10419 if (unicosmk_dex_list == NULL)
10420 return;
10422 fprintf (file, "\t.dexstart\n");
10424 i = unicosmk_dex_count;
10425 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10427 fprintf (file, "\tDEX (%d) = ", i);
10428 assemble_name (file, dex->name);
10429 putc ('\n', file);
10430 --i;
10433 fprintf (file, "\t.dexend\n");
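/* With two names recorded this produces, newest entry first:

	.dexstart
	DEX (2) = <name added second>
	DEX (1) = <name added first>
	.dexend
*/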
10436 /* Output text to appear at the beginning of an assembler file. */
10438 static void
10439 unicosmk_file_start (void)
10441 int i;
10443 fputs ("\t.ident\t", asm_out_file);
10444 unicosmk_output_module_name (asm_out_file);
10445 fputs ("\n\n", asm_out_file);
10447 /* The Unicos/Mk assembler uses different register names. Instead of trying
10448 to support them, we simply use micro definitions. */
10450 /* CAM has different register names: rN for the integer register N and fN
10451 for the floating-point register N. Instead of trying to use these in
10452 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10453 register. */
10455 for (i = 0; i < 32; ++i)
10456 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10458 for (i = 0; i < 32; ++i)
10459 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10461 putc ('\n', asm_out_file);
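/* The two loops above emit one micro definition per register, i.e.

	$0 <- r0
	...
	$31 <- r31
	$f0 <- f0
	...
	$f31 <- f31

   so the $-style register names generated from alpha.md resolve to CAM's
   rN/fN names.  */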
10463 /* The .align directive fills unused space with zeroes, which does not work
10464 in code sections. We define the macro 'gcc@code@align' which uses nops
10465 instead. Note that it assumes that code sections always have the
10466 biggest possible alignment since . refers to the current offset from
10467 the beginning of the section. */
10469 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10470 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10471 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10472 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10473 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10474 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10475 fputs ("\t.endr\n", asm_out_file);
10476 fputs ("\t.endif\n", asm_out_file);
10477 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
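/* Each Alpha instruction is four bytes long, which is why the .repeat count
   above divides the distance to the requested 2**n boundary by 4; the filler
   instruction "bis r31,r31,r31" (or of the always-zero register into itself)
   is the conventional integer nop.  */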
10479 /* Output extern declarations which should always be visible. */
10480 unicosmk_output_default_externs (asm_out_file);
10482 /* Open a dummy section. We always need to be inside a section for the
10483 section-switching code to work correctly.
10484 ??? This should be a module id or something like that. I still have to
10485 figure out what the rules for those are. */
10486 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10489 /* Output text to appear at the end of an assembler file. This includes all
10490 pending extern declarations and DEX expressions. */
10492 static void
10493 unicosmk_file_end (void)
10495 fputs ("\t.endp\n\n", asm_out_file);
10497 /* Output all pending externs. */
10499 unicosmk_output_externs (asm_out_file);
10501 /* Output dex definitions used for functions whose names conflict with
10502 register names. */
10504 unicosmk_output_dex (asm_out_file);
10506 fputs ("\t.end\t", asm_out_file);
10507 unicosmk_output_module_name (asm_out_file);
10508 putc ('\n', asm_out_file);
10511 #else
10513 static void
10514 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10517 static void
10518 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10521 static void
10522 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10523 const char * fnname ATTRIBUTE_UNUSED)
10527 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10529 return NULL_RTX;
10532 static int
10533 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10535 return 0;
10538 #endif /* TARGET_ABI_UNICOSMK */
10540 static void
10541 alpha_init_libfuncs (void)
10543 if (TARGET_ABI_UNICOSMK)
10545 /* Prevent gcc from generating calls to __divsi3. */
10546 set_optab_libfunc (sdiv_optab, SImode, 0);
10547 set_optab_libfunc (udiv_optab, SImode, 0);
10549 /* Use the functions provided by the system library
10550 for DImode integer division. */
10551 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10552 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10554 else if (TARGET_ABI_OPEN_VMS)
10556 /* Use the VMS runtime library functions for division and
10557 remainder. */
10558 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10559 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10560 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10561 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10562 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10563 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10564 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10565 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10570 /* Initialize the GCC target structure. */
10571 #if TARGET_ABI_OPEN_VMS
10572 # undef TARGET_ATTRIBUTE_TABLE
10573 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10574 # undef TARGET_SECTION_TYPE_FLAGS
10575 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10576 #endif
10578 #undef TARGET_IN_SMALL_DATA_P
10579 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10581 #if TARGET_ABI_UNICOSMK
10582 # undef TARGET_INSERT_ATTRIBUTES
10583 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10584 # undef TARGET_SECTION_TYPE_FLAGS
10585 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10586 # undef TARGET_ASM_UNIQUE_SECTION
10587 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10588 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10589 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10590 # undef TARGET_ASM_GLOBALIZE_LABEL
10591 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10592 # undef TARGET_MUST_PASS_IN_STACK
10593 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10594 #endif
10596 #undef TARGET_ASM_ALIGNED_HI_OP
10597 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10598 #undef TARGET_ASM_ALIGNED_DI_OP
10599 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10601 /* Default unaligned ops are provided for ELF systems. To get unaligned
10602 data for non-ELF systems, we have to turn off auto alignment. */
10603 #ifndef OBJECT_FORMAT_ELF
10604 #undef TARGET_ASM_UNALIGNED_HI_OP
10605 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10606 #undef TARGET_ASM_UNALIGNED_SI_OP
10607 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10608 #undef TARGET_ASM_UNALIGNED_DI_OP
10609 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10610 #endif
10612 #ifdef OBJECT_FORMAT_ELF
10613 #undef TARGET_ASM_RELOC_RW_MASK
10614 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10615 #undef TARGET_ASM_SELECT_RTX_SECTION
10616 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10617 #undef TARGET_SECTION_TYPE_FLAGS
10618 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10619 #endif
10621 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10622 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10624 #undef TARGET_INIT_LIBFUNCS
10625 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10627 #if TARGET_ABI_UNICOSMK
10628 #undef TARGET_ASM_FILE_START
10629 #define TARGET_ASM_FILE_START unicosmk_file_start
10630 #undef TARGET_ASM_FILE_END
10631 #define TARGET_ASM_FILE_END unicosmk_file_end
10632 #else
10633 #undef TARGET_ASM_FILE_START
10634 #define TARGET_ASM_FILE_START alpha_file_start
10635 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10636 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10637 #endif
10639 #undef TARGET_SCHED_ADJUST_COST
10640 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10641 #undef TARGET_SCHED_ISSUE_RATE
10642 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10643 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10644 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10645 alpha_multipass_dfa_lookahead
10647 #undef TARGET_HAVE_TLS
10648 #define TARGET_HAVE_TLS HAVE_AS_TLS
10650 #undef TARGET_INIT_BUILTINS
10651 #define TARGET_INIT_BUILTINS alpha_init_builtins
10652 #undef TARGET_EXPAND_BUILTIN
10653 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10654 #undef TARGET_FOLD_BUILTIN
10655 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10657 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10658 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10659 #undef TARGET_CANNOT_COPY_INSN_P
10660 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10661 #undef TARGET_CANNOT_FORCE_CONST_MEM
10662 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10664 #if TARGET_ABI_OSF
10665 #undef TARGET_ASM_OUTPUT_MI_THUNK
10666 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10667 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10668 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10669 #undef TARGET_STDARG_OPTIMIZE_HOOK
10670 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10671 #endif
10673 #undef TARGET_RTX_COSTS
10674 #define TARGET_RTX_COSTS alpha_rtx_costs
10675 #undef TARGET_ADDRESS_COST
10676 #define TARGET_ADDRESS_COST hook_int_rtx_0
10678 #undef TARGET_MACHINE_DEPENDENT_REORG
10679 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10681 #undef TARGET_PROMOTE_FUNCTION_ARGS
10682 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10683 #undef TARGET_PROMOTE_FUNCTION_RETURN
10684 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10685 #undef TARGET_PROMOTE_PROTOTYPES
10686 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10687 #undef TARGET_RETURN_IN_MEMORY
10688 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10689 #undef TARGET_PASS_BY_REFERENCE
10690 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10691 #undef TARGET_SETUP_INCOMING_VARARGS
10692 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10693 #undef TARGET_STRICT_ARGUMENT_NAMING
10694 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10695 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10696 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10697 #undef TARGET_SPLIT_COMPLEX_ARG
10698 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10699 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10700 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10701 #undef TARGET_ARG_PARTIAL_BYTES
10702 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10704 #undef TARGET_SECONDARY_RELOAD
10705 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10707 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10708 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10709 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10710 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10712 #undef TARGET_BUILD_BUILTIN_VA_LIST
10713 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10715 #undef TARGET_EXPAND_BUILTIN_VA_START
10716 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10718 /* The Alpha architecture does not require sequential consistency. See
10719 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10720 for an example of how it can be violated in practice. */
10721 #undef TARGET_RELAXED_ORDERING
10722 #define TARGET_RELAXED_ORDERING true
10724 #undef TARGET_DEFAULT_TARGET_FLAGS
10725 #define TARGET_DEFAULT_TARGET_FLAGS \
10726 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10727 #undef TARGET_HANDLE_OPTION
10728 #define TARGET_HANDLE_OPTION alpha_handle_option
10730 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10731 #undef TARGET_MANGLE_TYPE
10732 #define TARGET_MANGLE_TYPE alpha_mangle_type
10733 #endif
10735 struct gcc_target targetm = TARGET_INITIALIZER;
10738 #include "gt-alpha.h"