[official-gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #define IN_TARGET_CODE 1
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "stringpool.h"
32 #include "attribs.h"
33 #include "memmodel.h"
34 #include "gimple.h"
35 #include "df.h"
36 #include "predict.h"
37 #include "tm_p.h"
38 #include "ssa.h"
39 #include "expmed.h"
40 #include "optabs.h"
41 #include "regs.h"
42 #include "emit-rtl.h"
43 #include "recog.h"
44 #include "diagnostic-core.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "stor-layout.h"
48 #include "calls.h"
49 #include "varasm.h"
50 #include "output.h"
51 #include "insn-attr.h"
52 #include "explow.h"
53 #include "expr.h"
54 #include "reload.h"
55 #include "except.h"
56 #include "common/common-target.h"
57 #include "debug.h"
58 #include "langhooks.h"
59 #include "cfgrtl.h"
60 #include "tree-pass.h"
61 #include "context.h"
62 #include "gimple-iterator.h"
63 #include "gimplify.h"
64 #include "tree-stdarg.h"
65 #include "tm-constrs.h"
66 #include "libfuncs.h"
67 #include "params.h"
68 #include "builtins.h"
69 #include "rtl-iter.h"
70 #include "flags.h"
72 /* This file should be included last. */
73 #include "target-def.h"
75 /* Specify which cpu to schedule for. */
76 enum processor_type alpha_tune;
78 /* Which cpu we're generating code for. */
79 enum processor_type alpha_cpu;
81 static const char * const alpha_cpu_name[] =
83 "ev4", "ev5", "ev6"
86 /* Specify how accurate floating-point traps need to be. */
88 enum alpha_trap_precision alpha_tp;
90 /* Specify the floating-point rounding mode. */
92 enum alpha_fp_rounding_mode alpha_fprm;
94 /* Specify which things cause traps. */
96 enum alpha_fp_trap_mode alpha_fptm;
98 /* Nonzero if inside of a function, because the Alpha asm can't
99 handle .files inside of functions. */
101 static int inside_function = FALSE;
103 /* The number of cycles of latency we should assume on memory reads. */
105 static int alpha_memory_latency = 3;
107 /* Whether the function needs the GP. */
109 static int alpha_function_needs_gp;
111 /* The assembler name of the current function. */
113 static const char *alpha_fnname;
115 /* The next explicit relocation sequence number. */
116 extern GTY(()) int alpha_next_sequence_number;
117 int alpha_next_sequence_number = 1;
119 /* The literal and gpdisp sequence numbers for this insn, as printed
120 by %# and %* respectively. */
121 extern GTY(()) int alpha_this_literal_sequence_number;
122 extern GTY(()) int alpha_this_gpdisp_sequence_number;
123 int alpha_this_literal_sequence_number;
124 int alpha_this_gpdisp_sequence_number;
126 /* Costs of various operations on the different architectures. */
128 struct alpha_rtx_cost_data
130 unsigned char fp_add;
131 unsigned char fp_mult;
132 unsigned char fp_div_sf;
133 unsigned char fp_div_df;
134 unsigned char int_mult_si;
135 unsigned char int_mult_di;
136 unsigned char int_shift;
137 unsigned char int_cmov;
138 unsigned short int_div;
141 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
143 { /* EV4 */
144 COSTS_N_INSNS (6), /* fp_add */
145 COSTS_N_INSNS (6), /* fp_mult */
146 COSTS_N_INSNS (34), /* fp_div_sf */
147 COSTS_N_INSNS (63), /* fp_div_df */
148 COSTS_N_INSNS (23), /* int_mult_si */
149 COSTS_N_INSNS (23), /* int_mult_di */
150 COSTS_N_INSNS (2), /* int_shift */
151 COSTS_N_INSNS (2), /* int_cmov */
152 COSTS_N_INSNS (97), /* int_div */
154 { /* EV5 */
155 COSTS_N_INSNS (4), /* fp_add */
156 COSTS_N_INSNS (4), /* fp_mult */
157 COSTS_N_INSNS (15), /* fp_div_sf */
158 COSTS_N_INSNS (22), /* fp_div_df */
159 COSTS_N_INSNS (8), /* int_mult_si */
160 COSTS_N_INSNS (12), /* int_mult_di */
161 COSTS_N_INSNS (1) + 1, /* int_shift */
162 COSTS_N_INSNS (1), /* int_cmov */
163 COSTS_N_INSNS (83), /* int_div */
165 { /* EV6 */
166 COSTS_N_INSNS (4), /* fp_add */
167 COSTS_N_INSNS (4), /* fp_mult */
168 COSTS_N_INSNS (12), /* fp_div_sf */
169 COSTS_N_INSNS (15), /* fp_div_df */
170 COSTS_N_INSNS (7), /* int_mult_si */
171 COSTS_N_INSNS (7), /* int_mult_di */
172 COSTS_N_INSNS (1), /* int_shift */
173 COSTS_N_INSNS (2), /* int_cmov */
174 COSTS_N_INSNS (86), /* int_div */
178 /* Similar but tuned for code size instead of execution latency. The
179 extra +N is fractional cost tuning based on latency. It's used to
180 encourage use of cheaper insns like shift, but only if there's just
181 one of them. */
183 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
185 COSTS_N_INSNS (1), /* fp_add */
186 COSTS_N_INSNS (1), /* fp_mult */
187 COSTS_N_INSNS (1), /* fp_div_sf */
188 COSTS_N_INSNS (1) + 1, /* fp_div_df */
189 COSTS_N_INSNS (1) + 1, /* int_mult_si */
190 COSTS_N_INSNS (1) + 2, /* int_mult_di */
191 COSTS_N_INSNS (1), /* int_shift */
192 COSTS_N_INSNS (1), /* int_cmov */
193 COSTS_N_INSNS (6), /* int_div */
196 /* Get the number of args of a function in one of two ways. */
197 #if TARGET_ABI_OPEN_VMS
198 #define NUM_ARGS crtl->args.info.num_args
199 #else
200 #define NUM_ARGS crtl->args.info
201 #endif
203 #define REG_PV 27
204 #define REG_RA 26
206 /* Declarations of static functions. */
207 static struct machine_function *alpha_init_machine_status (void);
208 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
209 static void alpha_handle_trap_shadows (void);
210 static void alpha_align_insns (void);
211 static void alpha_override_options_after_change (void);
213 #if TARGET_ABI_OPEN_VMS
214 static void alpha_write_linkage (FILE *, const char *);
215 static bool vms_valid_pointer_mode (scalar_int_mode);
216 #else
217 #define vms_patch_builtins() gcc_unreachable()
218 #endif
220 static unsigned int
221 rest_of_handle_trap_shadows (void)
223 alpha_handle_trap_shadows ();
224 return 0;
227 namespace {
229 const pass_data pass_data_handle_trap_shadows =
231 RTL_PASS,
232 "trap_shadows", /* name */
233 OPTGROUP_NONE, /* optinfo_flags */
234 TV_NONE, /* tv_id */
235 0, /* properties_required */
236 0, /* properties_provided */
237 0, /* properties_destroyed */
238 0, /* todo_flags_start */
239 TODO_df_finish, /* todo_flags_finish */
242 class pass_handle_trap_shadows : public rtl_opt_pass
244 public:
245 pass_handle_trap_shadows(gcc::context *ctxt)
246 : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
249 /* opt_pass methods: */
250 virtual bool gate (function *)
252 return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
255 virtual unsigned int execute (function *)
257 return rest_of_handle_trap_shadows ();
260 }; // class pass_handle_trap_shadows
262 } // anon namespace
264 rtl_opt_pass *
265 make_pass_handle_trap_shadows (gcc::context *ctxt)
267 return new pass_handle_trap_shadows (ctxt);
270 static unsigned int
271 rest_of_align_insns (void)
273 alpha_align_insns ();
274 return 0;
277 namespace {
279 const pass_data pass_data_align_insns =
281 RTL_PASS,
282 "align_insns", /* name */
283 OPTGROUP_NONE, /* optinfo_flags */
284 TV_NONE, /* tv_id */
285 0, /* properties_required */
286 0, /* properties_provided */
287 0, /* properties_destroyed */
288 0, /* todo_flags_start */
289 TODO_df_finish, /* todo_flags_finish */
292 class pass_align_insns : public rtl_opt_pass
294 public:
295 pass_align_insns(gcc::context *ctxt)
296 : rtl_opt_pass(pass_data_align_insns, ctxt)
299 /* opt_pass methods: */
300 virtual bool gate (function *)
302 /* Due to the number of extra trapb insns, don't bother fixing up
303 alignment when trap precision is instruction. Moreover, we can
304 only do our job when sched2 is run. */
305 return ((alpha_tune == PROCESSOR_EV4
306 || alpha_tune == PROCESSOR_EV5)
307 && optimize && !optimize_size
308 && alpha_tp != ALPHA_TP_INSN
309 && flag_schedule_insns_after_reload);
312 virtual unsigned int execute (function *)
314 return rest_of_align_insns ();
317 }; // class pass_align_insns
319 } // anon namespace
321 rtl_opt_pass *
322 make_pass_align_insns (gcc::context *ctxt)
324 return new pass_align_insns (ctxt);
327 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
328 /* Implement TARGET_MANGLE_TYPE. */
330 static const char *
331 alpha_mangle_type (const_tree type)
333 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
334 && TARGET_LONG_DOUBLE_128)
335 return "g";
337 /* For all other types, use normal C++ mangling. */
338 return NULL;
340 #endif
342 /* Parse target option strings. */
344 static void
345 alpha_option_override (void)
347 static const struct cpu_table {
348 const char *const name;
349 const enum processor_type processor;
350 const int flags;
351 const unsigned short line_size; /* in bytes */
352 const unsigned short l1_size; /* in kb. */
353 const unsigned short l2_size; /* in kb. */
354 } cpu_table[] = {
355 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
356 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
357 had 64k to 8M 8-byte direct Bcache. */
358 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
359 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
360 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
362 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
363 and 1M to 16M 64 byte L3 (not modeled).
364 PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
365 PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
366 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
367 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
368 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
369 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
370 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
371 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
372 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
374 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
375 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
376 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
377 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
378 64, 64, 16*1024 },
379 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
380 64, 64, 16*1024 }
383 int const ct_size = ARRAY_SIZE (cpu_table);
384 int line_size = 0, l1_size = 0, l2_size = 0;
385 int i;
387 #ifdef SUBTARGET_OVERRIDE_OPTIONS
388 SUBTARGET_OVERRIDE_OPTIONS;
389 #endif
391 /* Default to full IEEE compliance mode for Go language. */
392 if (strcmp (lang_hooks.name, "GNU Go") == 0
393 && !(target_flags_explicit & MASK_IEEE))
394 target_flags |= MASK_IEEE;
396 alpha_fprm = ALPHA_FPRM_NORM;
397 alpha_tp = ALPHA_TP_PROG;
398 alpha_fptm = ALPHA_FPTM_N;
400 if (TARGET_IEEE)
402 alpha_tp = ALPHA_TP_INSN;
403 alpha_fptm = ALPHA_FPTM_SU;
405 if (TARGET_IEEE_WITH_INEXACT)
407 alpha_tp = ALPHA_TP_INSN;
408 alpha_fptm = ALPHA_FPTM_SUI;
411 if (alpha_tp_string)
413 if (! strcmp (alpha_tp_string, "p"))
414 alpha_tp = ALPHA_TP_PROG;
415 else if (! strcmp (alpha_tp_string, "f"))
416 alpha_tp = ALPHA_TP_FUNC;
417 else if (! strcmp (alpha_tp_string, "i"))
418 alpha_tp = ALPHA_TP_INSN;
419 else
420 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
423 if (alpha_fprm_string)
425 if (! strcmp (alpha_fprm_string, "n"))
426 alpha_fprm = ALPHA_FPRM_NORM;
427 else if (! strcmp (alpha_fprm_string, "m"))
428 alpha_fprm = ALPHA_FPRM_MINF;
429 else if (! strcmp (alpha_fprm_string, "c"))
430 alpha_fprm = ALPHA_FPRM_CHOP;
431 else if (! strcmp (alpha_fprm_string,"d"))
432 alpha_fprm = ALPHA_FPRM_DYN;
433 else
434 error ("bad value %qs for -mfp-rounding-mode switch",
435 alpha_fprm_string);
438 if (alpha_fptm_string)
440 if (strcmp (alpha_fptm_string, "n") == 0)
441 alpha_fptm = ALPHA_FPTM_N;
442 else if (strcmp (alpha_fptm_string, "u") == 0)
443 alpha_fptm = ALPHA_FPTM_U;
444 else if (strcmp (alpha_fptm_string, "su") == 0)
445 alpha_fptm = ALPHA_FPTM_SU;
446 else if (strcmp (alpha_fptm_string, "sui") == 0)
447 alpha_fptm = ALPHA_FPTM_SUI;
448 else
449 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
452 if (alpha_cpu_string)
454 for (i = 0; i < ct_size; i++)
455 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
457 alpha_tune = alpha_cpu = cpu_table[i].processor;
458 line_size = cpu_table[i].line_size;
459 l1_size = cpu_table[i].l1_size;
460 l2_size = cpu_table[i].l2_size;
461 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
462 target_flags |= cpu_table[i].flags;
463 break;
465 if (i == ct_size)
466 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
469 if (alpha_tune_string)
471 for (i = 0; i < ct_size; i++)
472 if (! strcmp (alpha_tune_string, cpu_table [i].name))
474 alpha_tune = cpu_table[i].processor;
475 line_size = cpu_table[i].line_size;
476 l1_size = cpu_table[i].l1_size;
477 l2_size = cpu_table[i].l2_size;
478 break;
480 if (i == ct_size)
481 error ("bad value %qs for -mtune switch", alpha_tune_string);
484 if (line_size)
485 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
486 global_options.x_param_values,
487 global_options_set.x_param_values);
488 if (l1_size)
489 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
490 global_options.x_param_values,
491 global_options_set.x_param_values);
492 if (l2_size)
493 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
494 global_options.x_param_values,
495 global_options_set.x_param_values);
497 /* Do some sanity checks on the above options. */
499 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
500 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
502 warning (0, "fp software completion requires -mtrap-precision=i");
503 alpha_tp = ALPHA_TP_INSN;
506 if (alpha_cpu == PROCESSOR_EV6)
508 /* Except for EV6 pass 1 (not released), we always have precise
509 arithmetic traps. Which means we can do software completion
510 without minding trap shadows. */
511 alpha_tp = ALPHA_TP_PROG;
514 if (TARGET_FLOAT_VAX)
516 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
518 warning (0, "rounding mode not supported for VAX floats");
519 alpha_fprm = ALPHA_FPRM_NORM;
521 if (alpha_fptm == ALPHA_FPTM_SUI)
523 warning (0, "trap mode not supported for VAX floats");
524 alpha_fptm = ALPHA_FPTM_SU;
526 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
527 warning (0, "128-bit long double not supported for VAX floats");
528 target_flags &= ~MASK_LONG_DOUBLE_128;
532 char *end;
533 int lat;
535 if (!alpha_mlat_string)
536 alpha_mlat_string = "L1";
538 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
539 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
541 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
542 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
543 && alpha_mlat_string[2] == '\0')
545 static int const cache_latency[][4] =
547 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
548 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
549 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
552 lat = alpha_mlat_string[1] - '0';
553 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
555 warning (0, "L%d cache latency unknown for %s",
556 lat, alpha_cpu_name[alpha_tune]);
557 lat = 3;
559 else
560 lat = cache_latency[alpha_tune][lat-1];
562 else if (! strcmp (alpha_mlat_string, "main"))
564 /* Most current memories have about 370ns latency. This is
565 a reasonable guess for a fast cpu. */
566 lat = 150;
568 else
570 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
571 lat = 3;
574 alpha_memory_latency = lat;
577 /* Default the definition of "small data" to 8 bytes. */
578 if (!global_options_set.x_g_switch_value)
579 g_switch_value = 8;
581 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
582 if (flag_pic == 1)
583 target_flags |= MASK_SMALL_DATA;
584 else if (flag_pic == 2)
585 target_flags &= ~MASK_SMALL_DATA;
587 alpha_override_options_after_change ();
589 /* Register variables and functions with the garbage collector. */
591 /* Set up function hooks. */
592 init_machine_status = alpha_init_machine_status;
594 /* Tell the compiler when we're using VAX floating point. */
595 if (TARGET_FLOAT_VAX)
597 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
598 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
599 REAL_MODE_FORMAT (TFmode) = NULL;
602 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
603 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
604 target_flags |= MASK_LONG_DOUBLE_128;
605 #endif
609 /* Implement targetm.override_options_after_change. */
611 static void
612 alpha_override_options_after_change (void)
614 /* Align labels and loops for optimal branching. */
615 /* ??? Kludge these by not doing anything if we don't optimize. */
616 if (optimize > 0)
618 if (flag_align_loops && !str_align_loops)
619 str_align_loops = "16";
620 if (flag_align_jumps && !str_align_jumps)
621 str_align_jumps = "16";
623 if (flag_align_functions && !str_align_functions)
624 str_align_functions = "16";
627 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
630 zap_mask (HOST_WIDE_INT value)
632 int i;
634 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
635 i++, value >>= 8)
636 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
637 return 0;
639 return 1;
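/* Added illustrative note: zap_mask (0x00ff00ffffff0000) is 1, since
   every byte is either 0x00 or 0xff, whereas zap_mask (0xf0) is 0,
   because the low byte is neither.  Such values are exactly the
   constants whose AND can be implemented with a ZAP/ZAPNOT byte
   mask.  */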
642 /* Return true if OP is valid for a particular TLS relocation.
643 We are already guaranteed that OP is a CONST. */
646 tls_symbolic_operand_1 (rtx op, int size, int unspec)
648 op = XEXP (op, 0);
650 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
651 return 0;
652 op = XVECEXP (op, 0, 0);
654 if (GET_CODE (op) != SYMBOL_REF)
655 return 0;
657 switch (SYMBOL_REF_TLS_MODEL (op))
659 case TLS_MODEL_LOCAL_DYNAMIC:
660 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
661 case TLS_MODEL_INITIAL_EXEC:
662 return unspec == UNSPEC_TPREL && size == 64;
663 case TLS_MODEL_LOCAL_EXEC:
664 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
665 default:
666 gcc_unreachable ();
670 /* Used by aligned_memory_operand and unaligned_memory_operand to
671 resolve what reload is going to do with OP if it's a register. */
674 resolve_reload_operand (rtx op)
676 if (reload_in_progress)
678 rtx tmp = op;
679 if (SUBREG_P (tmp))
680 tmp = SUBREG_REG (tmp);
681 if (REG_P (tmp)
682 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
684 op = reg_equiv_memory_loc (REGNO (tmp));
685 if (op == 0)
686 return 0;
689 return op;
 692 /* The scalar modes supported differ from the default check-what-c-supports
693 version in that sometimes TFmode is available even when long double
694 indicates only DFmode. */
696 static bool
697 alpha_scalar_mode_supported_p (scalar_mode mode)
699 switch (mode)
701 case E_QImode:
702 case E_HImode:
703 case E_SImode:
704 case E_DImode:
705 case E_TImode: /* via optabs.c */
706 return true;
708 case E_SFmode:
709 case E_DFmode:
710 return true;
712 case E_TFmode:
713 return TARGET_HAS_XFLOATING_LIBS;
715 default:
716 return false;
720 /* Alpha implements a couple of integer vector mode operations when
721 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
722 which allows the vectorizer to operate on e.g. move instructions,
723 or when expand_vector_operations can do something useful. */
725 static bool
726 alpha_vector_mode_supported_p (machine_mode mode)
728 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
731 /* Return 1 if this function can directly return via $26. */
734 direct_return (void)
736 return (TARGET_ABI_OSF
737 && reload_completed
738 && alpha_sa_size () == 0
739 && get_frame_size () == 0
740 && crtl->outgoing_args_size == 0
741 && crtl->args.pretend_args_size == 0);
744 /* Return the TLS model to use for SYMBOL. */
746 static enum tls_model
747 tls_symbolic_operand_type (rtx symbol)
749 enum tls_model model;
751 if (GET_CODE (symbol) != SYMBOL_REF)
752 return TLS_MODEL_NONE;
753 model = SYMBOL_REF_TLS_MODEL (symbol);
755 /* Local-exec with a 64-bit size is the same code as initial-exec. */
756 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
757 model = TLS_MODEL_INITIAL_EXEC;
759 return model;
762 /* Return true if the function DECL will share the same GP as any
763 function in the current unit of translation. */
765 static bool
766 decl_has_samegp (const_tree decl)
768 /* Functions that are not local can be overridden, and thus may
769 not share the same gp. */
770 if (!(*targetm.binds_local_p) (decl))
771 return false;
773 /* If -msmall-data is in effect, assume that there is only one GP
774 for the module, and so any local symbol has this property. We
775 need explicit relocations to be able to enforce this for symbols
776 not defined in this unit of translation, however. */
777 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
778 return true;
780 /* Functions that are not external are defined in this UoT. */
781 /* ??? Irritatingly, static functions not yet emitted are still
782 marked "external". Apply this to non-static functions only. */
783 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
786 /* Return true if EXP should be placed in the small data section. */
788 static bool
789 alpha_in_small_data_p (const_tree exp)
791 /* We want to merge strings, so we never consider them small data. */
792 if (TREE_CODE (exp) == STRING_CST)
793 return false;
795 /* Functions are never in the small data area. Duh. */
796 if (TREE_CODE (exp) == FUNCTION_DECL)
797 return false;
799 /* COMMON symbols are never small data. */
800 if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
801 return false;
803 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
805 const char *section = DECL_SECTION_NAME (exp);
806 if (strcmp (section, ".sdata") == 0
807 || strcmp (section, ".sbss") == 0)
808 return true;
810 else
812 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
814 /* If this is an incomplete type with size 0, then we can't put it
815 in sdata because it might be too big when completed. */
816 if (size > 0 && size <= g_switch_value)
817 return true;
820 return false;
823 #if TARGET_ABI_OPEN_VMS
824 static bool
825 vms_valid_pointer_mode (scalar_int_mode mode)
827 return (mode == SImode || mode == DImode);
830 static bool
831 alpha_linkage_symbol_p (const char *symname)
833 int symlen = strlen (symname);
835 if (symlen > 4)
836 return strcmp (&symname [symlen - 4], "..lk") == 0;
838 return false;
841 #define LINKAGE_SYMBOL_REF_P(X) \
842 ((GET_CODE (X) == SYMBOL_REF \
843 && alpha_linkage_symbol_p (XSTR (X, 0))) \
844 || (GET_CODE (X) == CONST \
845 && GET_CODE (XEXP (X, 0)) == PLUS \
846 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
847 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
848 #endif
850 /* legitimate_address_p recognizes an RTL expression that is a valid
851 memory address for an instruction. The MODE argument is the
852 machine mode for the MEM expression that wants to use this address.
854 For Alpha, we have either a constant address or the sum of a
855 register and a constant address, or just a register. For DImode,
 856    any of those forms can be surrounded with an AND that clears the
857 low-order three bits; this is an "unaligned" access. */
859 static bool
860 alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
862 /* If this is an ldq_u type address, discard the outer AND. */
863 if (mode == DImode
864 && GET_CODE (x) == AND
865 && CONST_INT_P (XEXP (x, 1))
866 && INTVAL (XEXP (x, 1)) == -8)
867 x = XEXP (x, 0);
869 /* Discard non-paradoxical subregs. */
870 if (SUBREG_P (x)
871 && (GET_MODE_SIZE (GET_MODE (x))
872 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
873 x = SUBREG_REG (x);
875 /* Unadorned general registers are valid. */
876 if (REG_P (x)
877 && (strict
878 ? STRICT_REG_OK_FOR_BASE_P (x)
879 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
880 return true;
882 /* Constant addresses (i.e. +/- 32k) are valid. */
883 if (CONSTANT_ADDRESS_P (x))
884 return true;
886 #if TARGET_ABI_OPEN_VMS
887 if (LINKAGE_SYMBOL_REF_P (x))
888 return true;
889 #endif
891 /* Register plus a small constant offset is valid. */
892 if (GET_CODE (x) == PLUS)
894 rtx ofs = XEXP (x, 1);
895 x = XEXP (x, 0);
897 /* Discard non-paradoxical subregs. */
898 if (SUBREG_P (x)
899 && (GET_MODE_SIZE (GET_MODE (x))
900 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
901 x = SUBREG_REG (x);
903 if (REG_P (x))
905 if (! strict
906 && NONSTRICT_REG_OK_FP_BASE_P (x)
907 && CONST_INT_P (ofs))
908 return true;
909 if ((strict
910 ? STRICT_REG_OK_FOR_BASE_P (x)
911 : NONSTRICT_REG_OK_FOR_BASE_P (x))
912 && CONSTANT_ADDRESS_P (ofs))
913 return true;
917 /* If we're managing explicit relocations, LO_SUM is valid, as are small
918 data symbols. Avoid explicit relocations of modes larger than word
 919    mode since e.g. $LC0+8($1) can fold around +/- 32k offset.  */
920 else if (TARGET_EXPLICIT_RELOCS
921 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
923 if (small_symbolic_operand (x, Pmode))
924 return true;
926 if (GET_CODE (x) == LO_SUM)
928 rtx ofs = XEXP (x, 1);
929 x = XEXP (x, 0);
931 /* Discard non-paradoxical subregs. */
932 if (SUBREG_P (x)
933 && (GET_MODE_SIZE (GET_MODE (x))
934 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
935 x = SUBREG_REG (x);
937 /* Must have a valid base register. */
938 if (! (REG_P (x)
939 && (strict
940 ? STRICT_REG_OK_FOR_BASE_P (x)
941 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
942 return false;
944 /* The symbol must be local. */
945 if (local_symbolic_operand (ofs, Pmode)
946 || dtp32_symbolic_operand (ofs, Pmode)
947 || tp32_symbolic_operand (ofs, Pmode))
948 return true;
952 return false;
955 /* Build the SYMBOL_REF for __tls_get_addr. */
957 static GTY(()) rtx tls_get_addr_libfunc;
959 static rtx
960 get_tls_get_addr (void)
962 if (!tls_get_addr_libfunc)
963 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
964 return tls_get_addr_libfunc;
967 /* Try machine-dependent ways of modifying an illegitimate address
968 to be legitimate. If we find one, return the new, valid address. */
970 static rtx
971 alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
973 HOST_WIDE_INT addend;
975 /* If the address is (plus reg const_int) and the CONST_INT is not a
976 valid offset, compute the high part of the constant and add it to
977 the register. Then our address is (plus temp low-part-const). */
978 if (GET_CODE (x) == PLUS
979 && REG_P (XEXP (x, 0))
980 && CONST_INT_P (XEXP (x, 1))
981 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
983 addend = INTVAL (XEXP (x, 1));
984 x = XEXP (x, 0);
985 goto split_addend;
988 /* If the address is (const (plus FOO const_int)), find the low-order
989 part of the CONST_INT. Then load FOO plus any high-order part of the
990 CONST_INT into a register. Our address is (plus reg low-part-const).
991 This is done to reduce the number of GOT entries. */
992 if (can_create_pseudo_p ()
993 && GET_CODE (x) == CONST
994 && GET_CODE (XEXP (x, 0)) == PLUS
995 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
997 addend = INTVAL (XEXP (XEXP (x, 0), 1));
998 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
999 goto split_addend;
1002 /* If we have a (plus reg const), emit the load as in (2), then add
1003 the two registers, and finally generate (plus reg low-part-const) as
1004 our address. */
1005 if (can_create_pseudo_p ()
1006 && GET_CODE (x) == PLUS
1007 && REG_P (XEXP (x, 0))
1008 && GET_CODE (XEXP (x, 1)) == CONST
1009 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1010 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
1012 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1013 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1014 XEXP (XEXP (XEXP (x, 1), 0), 0),
1015 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1016 goto split_addend;
1019 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
 1020      Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
1021 around +/- 32k offset. */
1022 if (TARGET_EXPLICIT_RELOCS
1023 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1024 && symbolic_operand (x, Pmode))
1026 rtx r0, r16, eqv, tga, tp, dest, seq;
1027 rtx_insn *insn;
1029 switch (tls_symbolic_operand_type (x))
1031 case TLS_MODEL_NONE:
1032 break;
1034 case TLS_MODEL_GLOBAL_DYNAMIC:
1036 start_sequence ();
1038 r0 = gen_rtx_REG (Pmode, 0);
1039 r16 = gen_rtx_REG (Pmode, 16);
1040 tga = get_tls_get_addr ();
1041 dest = gen_reg_rtx (Pmode);
1042 seq = GEN_INT (alpha_next_sequence_number++);
1044 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1045 rtx val = gen_call_value_osf_tlsgd (r0, tga, seq);
1046 insn = emit_call_insn (val);
1047 RTL_CONST_CALL_P (insn) = 1;
1048 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1050 insn = get_insns ();
1051 end_sequence ();
1053 emit_libcall_block (insn, dest, r0, x);
1054 return dest;
1057 case TLS_MODEL_LOCAL_DYNAMIC:
1059 start_sequence ();
1061 r0 = gen_rtx_REG (Pmode, 0);
1062 r16 = gen_rtx_REG (Pmode, 16);
1063 tga = get_tls_get_addr ();
1064 scratch = gen_reg_rtx (Pmode);
1065 seq = GEN_INT (alpha_next_sequence_number++);
1067 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1068 rtx val = gen_call_value_osf_tlsldm (r0, tga, seq);
1069 insn = emit_call_insn (val);
1070 RTL_CONST_CALL_P (insn) = 1;
1071 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1073 insn = get_insns ();
1074 end_sequence ();
1076 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1077 UNSPEC_TLSLDM_CALL);
1078 emit_libcall_block (insn, scratch, r0, eqv);
1080 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1081 eqv = gen_rtx_CONST (Pmode, eqv);
1083 if (alpha_tls_size == 64)
1085 dest = gen_reg_rtx (Pmode);
1086 emit_insn (gen_rtx_SET (dest, eqv));
1087 emit_insn (gen_adddi3 (dest, dest, scratch));
1088 return dest;
1090 if (alpha_tls_size == 32)
1092 rtx temp = gen_rtx_HIGH (Pmode, eqv);
1093 temp = gen_rtx_PLUS (Pmode, scratch, temp);
1094 scratch = gen_reg_rtx (Pmode);
1095 emit_insn (gen_rtx_SET (scratch, temp));
1097 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1100 case TLS_MODEL_INITIAL_EXEC:
1101 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1102 eqv = gen_rtx_CONST (Pmode, eqv);
1103 tp = gen_reg_rtx (Pmode);
1104 scratch = gen_reg_rtx (Pmode);
1105 dest = gen_reg_rtx (Pmode);
1107 emit_insn (gen_get_thread_pointerdi (tp));
1108 emit_insn (gen_rtx_SET (scratch, eqv));
1109 emit_insn (gen_adddi3 (dest, tp, scratch));
1110 return dest;
1112 case TLS_MODEL_LOCAL_EXEC:
1113 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1114 eqv = gen_rtx_CONST (Pmode, eqv);
1115 tp = gen_reg_rtx (Pmode);
1117 emit_insn (gen_get_thread_pointerdi (tp));
1118 if (alpha_tls_size == 32)
1120 rtx temp = gen_rtx_HIGH (Pmode, eqv);
1121 temp = gen_rtx_PLUS (Pmode, tp, temp);
1122 tp = gen_reg_rtx (Pmode);
1123 emit_insn (gen_rtx_SET (tp, temp));
1125 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1127 default:
1128 gcc_unreachable ();
1131 if (local_symbolic_operand (x, Pmode))
1133 if (small_symbolic_operand (x, Pmode))
1134 return x;
1135 else
1137 if (can_create_pseudo_p ())
1138 scratch = gen_reg_rtx (Pmode);
1139 emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x)));
1140 return gen_rtx_LO_SUM (Pmode, scratch, x);
1145 return NULL;
1147 split_addend:
1149 HOST_WIDE_INT low, high;
1151 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1152 addend -= low;
1153 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1154 addend -= high;
1156 if (addend)
1157 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1158 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1159 1, OPTAB_LIB_WIDEN);
1160 if (high)
1161 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1162 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1163 1, OPTAB_LIB_WIDEN);
1165 return plus_constant (Pmode, x, low);
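   /* Worked example (added illustrative commentary): for ADDEND = 0x8000
      the code above yields LOW = -0x8000 and HIGH = 0x10000, so the high
      part is added with a single ldah (which adds a sign-extended 16-bit
      value shifted left by 16) and the final address keeps the signed
      16-bit displacement -0x8000.  Sign-extending each piece is what
      keeps both the ldah operand and the displacement in range.  */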
1170 /* Try machine-dependent ways of modifying an illegitimate address
1171 to be legitimate. Return X or the new, valid address. */
1173 static rtx
1174 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1175 machine_mode mode)
1177 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1178 return new_x ? new_x : x;
1181 /* Return true if ADDR has an effect that depends on the machine mode it
1182 is used for. On the Alpha this is true only for the unaligned modes.
1183 We can simplify the test since we know that the address must be valid. */
1185 static bool
1186 alpha_mode_dependent_address_p (const_rtx addr,
1187 addr_space_t as ATTRIBUTE_UNUSED)
1189 return GET_CODE (addr) == AND;
1192 /* Primarily this is required for TLS symbols, but given that our move
1193 patterns *ought* to be able to handle any symbol at any time, we
1194 should never be spilling symbolic operands to the constant pool, ever. */
1196 static bool
1197 alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1199 enum rtx_code code = GET_CODE (x);
1200 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1203 /* We do not allow indirect calls to be optimized into sibling calls, nor
1204 can we allow a call to a function with a different GP to be optimized
1205 into a sibcall. */
1207 static bool
1208 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1210 /* Can't do indirect tail calls, since we don't know if the target
1211 uses the same GP. */
1212 if (!decl)
1213 return false;
1215 /* Otherwise, we can make a tail call if the target function shares
1216 the same GP. */
1217 return decl_has_samegp (decl);
1220 bool
1221 some_small_symbolic_operand_int (rtx x)
1223 subrtx_var_iterator::array_type array;
1224 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
1226 rtx x = *iter;
1227 /* Don't re-split. */
1228 if (GET_CODE (x) == LO_SUM)
1229 iter.skip_subrtxes ();
1230 else if (small_symbolic_operand (x, Pmode))
1231 return true;
1233 return false;
1237 split_small_symbolic_operand (rtx x)
1239 x = copy_insn (x);
1240 subrtx_ptr_iterator::array_type array;
1241 FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
1243 rtx *ptr = *iter;
1244 rtx x = *ptr;
1245 /* Don't re-split. */
1246 if (GET_CODE (x) == LO_SUM)
1247 iter.skip_subrtxes ();
1248 else if (small_symbolic_operand (x, Pmode))
1250 *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1251 iter.skip_subrtxes ();
1254 return x;
1257 /* Indicate that INSN cannot be duplicated. This is true for any insn
1258 that we've marked with gpdisp relocs, since those have to stay in
1259 1-1 correspondence with one another.
1261 Technically we could copy them if we could set up a mapping from one
1262 sequence number to another, across the set of insns to be duplicated.
1263 This seems overly complicated and error-prone since interblock motion
1264 from sched-ebb could move one of the pair of insns to a different block.
1266 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1267 then they'll be in a different block from their ldgp. Which could lead
1268 the bb reorder code to think that it would be ok to copy just the block
1269 containing the call and branch to the block containing the ldgp. */
1271 static bool
1272 alpha_cannot_copy_insn_p (rtx_insn *insn)
1274 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1275 return false;
1276 if (recog_memoized (insn) >= 0)
1277 return get_attr_cannot_copy (insn);
1278 else
1279 return false;
1283 /* Try a machine-dependent way of reloading an illegitimate address
1284 operand. If we find one, push the reload and return the new rtx. */
1287 alpha_legitimize_reload_address (rtx x,
1288 machine_mode mode ATTRIBUTE_UNUSED,
1289 int opnum, int type,
1290 int ind_levels ATTRIBUTE_UNUSED)
1292 /* We must recognize output that we have already generated ourselves. */
1293 if (GET_CODE (x) == PLUS
1294 && GET_CODE (XEXP (x, 0)) == PLUS
1295 && REG_P (XEXP (XEXP (x, 0), 0))
1296 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1297 && CONST_INT_P (XEXP (x, 1)))
1299 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1300 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1301 opnum, (enum reload_type) type);
1302 return x;
1305 /* We wish to handle large displacements off a base register by
1306 splitting the addend across an ldah and the mem insn. This
 1307      cuts the number of extra insns needed from 3 to 1.  */
1308 if (GET_CODE (x) == PLUS
1309 && REG_P (XEXP (x, 0))
1310 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1311 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1312 && CONST_INT_P (XEXP (x, 1)))
1314 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1315 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1316 HOST_WIDE_INT high
1317 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1319 /* Check for 32-bit overflow. */
1320 if (high + low != val)
1321 return NULL_RTX;
1323 /* Reload the high part into a base reg; leave the low part
1324 in the mem directly. */
1325 x = gen_rtx_PLUS (GET_MODE (x),
1326 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1327 GEN_INT (high)),
1328 GEN_INT (low));
1330 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1331 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1332 opnum, (enum reload_type) type);
1333 return x;
1336 return NULL_RTX;
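/* Added illustrative note: a displacement of 0x12345678 splits into
   HIGH = 0x12340000 and LOW = 0x5678, so the high part is reloaded into
   the base register while the mem insn keeps a 16-bit displacement.
   A value such as 0x7fff8000 fails the HIGH + LOW != VAL check above
   (the sign-extended pieces wrap past 32 bits), so we return NULL_RTX
   and let the generic reload machinery handle it.  */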
1339 /* Return the cost of moving between registers of various classes. Moving
1340 between FLOAT_REGS and anything else except float regs is expensive.
1341 In fact, we make it quite expensive because we really don't want to
1342 do these moves unless it is clearly worth it. Optimizations may
1343 reduce the impact of not being able to allocate a pseudo to a
1344 hard register. */
1346 static int
1347 alpha_register_move_cost (machine_mode /*mode*/,
1348 reg_class_t from, reg_class_t to)
1350 if ((from == FLOAT_REGS) == (to == FLOAT_REGS))
1351 return 2;
1353 if (TARGET_FIX)
1354 return (from == FLOAT_REGS) ? 6 : 8;
1356 return 4 + 2 * alpha_memory_latency;
1359 /* Return the cost of moving data of MODE from a register to
1360 or from memory. On the Alpha, bump this up a bit. */
1362 static int
1363 alpha_memory_move_cost (machine_mode /*mode*/, reg_class_t /*regclass*/,
1364 bool /*in*/)
1366 return 2 * alpha_memory_latency;
1369 /* Compute a (partial) cost for rtx X. Return true if the complete
1370 cost has been computed, and false if subexpressions should be
1371 scanned. In either case, *TOTAL contains the cost result. */
1373 static bool
1374 alpha_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
1375 bool speed)
1377 int code = GET_CODE (x);
1378 bool float_mode_p = FLOAT_MODE_P (mode);
1379 const struct alpha_rtx_cost_data *cost_data;
1381 if (!speed)
1382 cost_data = &alpha_rtx_cost_size;
1383 else
1384 cost_data = &alpha_rtx_cost_data[alpha_tune];
1386 switch (code)
1388 case CONST_INT:
1389 /* If this is an 8-bit constant, return zero since it can be used
1390 nearly anywhere with no cost. If it is a valid operand for an
1391 ADD or AND, likewise return 0 if we know it will be used in that
1392 context. Otherwise, return 2 since it might be used there later.
1393 All other constants take at least two insns. */
1394 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1396 *total = 0;
1397 return true;
1399 /* FALLTHRU */
1401 case CONST_DOUBLE:
1402 case CONST_WIDE_INT:
1403 if (x == CONST0_RTX (mode))
1404 *total = 0;
1405 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1406 || (outer_code == AND && and_operand (x, VOIDmode)))
1407 *total = 0;
1408 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1409 *total = 2;
1410 else
1411 *total = COSTS_N_INSNS (2);
1412 return true;
1414 case CONST:
1415 case SYMBOL_REF:
1416 case LABEL_REF:
1417 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1418 *total = COSTS_N_INSNS (outer_code != MEM);
1419 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1420 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1421 else if (tls_symbolic_operand_type (x))
1422 /* Estimate of cost for call_pal rduniq. */
1423 /* ??? How many insns do we emit here? More than one... */
1424 *total = COSTS_N_INSNS (15);
1425 else
1426 /* Otherwise we do a load from the GOT. */
1427 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1428 return true;
1430 case HIGH:
1431 /* This is effectively an add_operand. */
1432 *total = 2;
1433 return true;
1435 case PLUS:
1436 case MINUS:
1437 if (float_mode_p)
1438 *total = cost_data->fp_add;
1439 else if (GET_CODE (XEXP (x, 0)) == ASHIFT
1440 && const23_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1442 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode,
1443 (enum rtx_code) outer_code, opno, speed)
1444 + rtx_cost (XEXP (x, 1), mode,
1445 (enum rtx_code) outer_code, opno, speed)
1446 + COSTS_N_INSNS (1));
1447 return true;
1449 return false;
1451 case MULT:
1452 if (float_mode_p)
1453 *total = cost_data->fp_mult;
1454 else if (mode == DImode)
1455 *total = cost_data->int_mult_di;
1456 else
1457 *total = cost_data->int_mult_si;
1458 return false;
1460 case ASHIFT:
1461 if (CONST_INT_P (XEXP (x, 1))
1462 && INTVAL (XEXP (x, 1)) <= 3)
1464 *total = COSTS_N_INSNS (1);
1465 return false;
1467 /* FALLTHRU */
1469 case ASHIFTRT:
1470 case LSHIFTRT:
1471 *total = cost_data->int_shift;
1472 return false;
1474 case IF_THEN_ELSE:
1475 if (float_mode_p)
1476 *total = cost_data->fp_add;
1477 else
1478 *total = cost_data->int_cmov;
1479 return false;
1481 case DIV:
1482 case UDIV:
1483 case MOD:
1484 case UMOD:
1485 if (!float_mode_p)
1486 *total = cost_data->int_div;
1487 else if (mode == SFmode)
1488 *total = cost_data->fp_div_sf;
1489 else
1490 *total = cost_data->fp_div_df;
1491 return false;
1493 case MEM:
1494 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1495 return true;
1497 case NEG:
1498 if (! float_mode_p)
1500 *total = COSTS_N_INSNS (1);
1501 return false;
1503 /* FALLTHRU */
1505 case ABS:
1506 if (! float_mode_p)
1508 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1509 return false;
1511 /* FALLTHRU */
1513 case FLOAT:
1514 case UNSIGNED_FLOAT:
1515 case FIX:
1516 case UNSIGNED_FIX:
1517 case FLOAT_TRUNCATE:
1518 *total = cost_data->fp_add;
1519 return false;
1521 case FLOAT_EXTEND:
1522 if (MEM_P (XEXP (x, 0)))
1523 *total = 0;
1524 else
1525 *total = cost_data->fp_add;
1526 return false;
1528 default:
1529 return false;
1533 /* REF is an alignable memory location. Place an aligned SImode
1534 reference into *PALIGNED_MEM and the number of bits to shift into
 1535    *PBITNUM.  */
1538 void
1539 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1541 rtx base;
1542 HOST_WIDE_INT disp, offset;
1544 gcc_assert (MEM_P (ref));
1546 if (reload_in_progress)
1548 base = find_replacement (&XEXP (ref, 0));
1549 gcc_assert (memory_address_p (GET_MODE (ref), base));
1551 else
1552 base = XEXP (ref, 0);
1554 if (GET_CODE (base) == PLUS)
1555 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1556 else
1557 disp = 0;
1559 /* Find the byte offset within an aligned word. If the memory itself is
1560 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1561 will have examined the base register and determined it is aligned, and
1562 thus displacements from it are naturally alignable. */
1563 if (MEM_ALIGN (ref) >= 32)
1564 offset = 0;
1565 else
1566 offset = disp & 3;
 1568   /* The location should not cross an aligned word boundary.  */
1569 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1570 <= GET_MODE_SIZE (SImode));
1572 /* Access the entire aligned word. */
1573 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1575 /* Convert the byte offset within the word to a bit offset. */
1576 offset *= BITS_PER_UNIT;
1577 *pbitnum = GEN_INT (offset);
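  /* Added illustrative note: for an HImode reference at displacement 6
     from an aligned base (and no stronger MEM_ALIGN information), OFFSET
     is 6 & 3 = 2, so the enclosing aligned SImode word two bytes below
     is accessed and *PBITNUM becomes 16, the bit position of the
     halfword within that word.  */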
 1580 /* Similar, but just get the address.  Handle the two reload cases.  */
1584 get_unaligned_address (rtx ref)
1586 rtx base;
1587 HOST_WIDE_INT offset = 0;
1589 gcc_assert (MEM_P (ref));
1591 if (reload_in_progress)
1593 base = find_replacement (&XEXP (ref, 0));
1594 gcc_assert (memory_address_p (GET_MODE (ref), base));
1596 else
1597 base = XEXP (ref, 0);
1599 if (GET_CODE (base) == PLUS)
1600 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1602 return plus_constant (Pmode, base, offset);
1605 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1606 X is always returned in a register. */
1609 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1611 if (GET_CODE (addr) == PLUS)
1613 ofs += INTVAL (XEXP (addr, 1));
1614 addr = XEXP (addr, 0);
1617 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1618 NULL_RTX, 1, OPTAB_LIB_WIDEN);
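/* Added illustrative note: with ADDR = (plus r9 6) and OFS = 3, the
   displacement is folded so that the result is r9 + 1; its low three
   bits agree with those of r9 + 6 + 3, which is all the unaligned
   sequences require.  */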
1621 /* On the Alpha, all (non-symbolic) constants except zero go into
1622 a floating-point register via memory. Note that we cannot
1623 return anything that is not a subset of RCLASS, and that some
1624 symbolic constants cannot be dropped to memory. */
1626 enum reg_class
1627 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1629 /* Zero is present in any register class. */
1630 if (x == CONST0_RTX (GET_MODE (x)))
1631 return rclass;
1633 /* These sorts of constants we can easily drop to memory. */
1634 if (CONST_SCALAR_INT_P (x)
1635 || CONST_DOUBLE_P (x)
1636 || GET_CODE (x) == CONST_VECTOR)
1638 if (rclass == FLOAT_REGS)
1639 return NO_REGS;
1640 if (rclass == ALL_REGS)
1641 return GENERAL_REGS;
1642 return rclass;
1645 /* All other kinds of constants should not (and in the case of HIGH
1646 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1647 secondary reload. */
1648 if (CONSTANT_P (x))
1649 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1651 return rclass;
1654 /* Inform reload about cases where moving X with a mode MODE to a register in
1655 RCLASS requires an extra scratch or immediate register. Return the class
1656 needed for the immediate register. */
1658 static reg_class_t
1659 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1660 machine_mode mode, secondary_reload_info *sri)
1662 enum reg_class rclass = (enum reg_class) rclass_i;
1664 /* Loading and storing HImode or QImode values to and from memory
1665 usually requires a scratch register. */
1666 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1668 if (any_memory_operand (x, mode))
1670 if (in_p)
1672 if (!aligned_memory_operand (x, mode))
1673 sri->icode = direct_optab_handler (reload_in_optab, mode);
1675 else
1676 sri->icode = direct_optab_handler (reload_out_optab, mode);
1677 return NO_REGS;
1681 /* We also cannot do integral arithmetic into FP regs, as might result
1682 from register elimination into a DImode fp register. */
1683 if (rclass == FLOAT_REGS)
1685 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1686 return GENERAL_REGS;
1687 if (in_p && INTEGRAL_MODE_P (mode)
1688 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1689 return GENERAL_REGS;
1692 return NO_REGS;
1695 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
1697 If we are copying between general and FP registers, we need a memory
1698 location unless the FIX extension is available. */
1700 static bool
1701 alpha_secondary_memory_needed (machine_mode, reg_class_t class1,
1702 reg_class_t class2)
1704 return (!TARGET_FIX
1705 && ((class1 == FLOAT_REGS && class2 != FLOAT_REGS)
1706 || (class2 == FLOAT_REGS && class1 != FLOAT_REGS)));
1709 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. If MODE is
1710 floating-point, use it. Otherwise, widen to a word like the default.
1711 This is needed because we always store integers in FP registers in
1712 quadword format. This whole area is very tricky! */
1714 static machine_mode
1715 alpha_secondary_memory_needed_mode (machine_mode mode)
1717 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1718 return mode;
1719 if (GET_MODE_SIZE (mode) >= 4)
1720 return mode;
1721 return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
1724 /* Given SEQ, which is an INSN list, look for any MEMs in either
 1725    a SET_DEST or a SET_SRC and copy the volatile, notrap, and readonly
 1726    flags from REF into each of the MEMs found.  If REF is not
1727 a MEM, don't do anything. */
1729 void
1730 alpha_set_memflags (rtx seq, rtx ref)
1732 rtx_insn *insn;
1734 if (!MEM_P (ref))
1735 return;
1737 /* This is only called from alpha.md, after having had something
1738 generated from one of the insn patterns. So if everything is
1739 zero, the pattern is already up-to-date. */
1740 if (!MEM_VOLATILE_P (ref)
1741 && !MEM_NOTRAP_P (ref)
1742 && !MEM_READONLY_P (ref))
1743 return;
1745 subrtx_var_iterator::array_type array;
1746 for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
1747 if (INSN_P (insn))
1748 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1750 rtx x = *iter;
1751 if (MEM_P (x))
1753 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
1754 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
1755 MEM_READONLY_P (x) = MEM_READONLY_P (ref);
1756 /* Sadly, we cannot use alias sets because the extra
1757 aliasing produced by the AND interferes. Given that
1758 two-byte quantities are the only thing we would be
1759 able to differentiate anyway, there does not seem to
1760 be any point in convoluting the early out of the
1761 alias check. */
1762 iter.skip_subrtxes ();
1765 else
1766 gcc_unreachable ();
1769 static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
1770 int, bool);
1772 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1773 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1774 and return pc_rtx if successful. */
1776 static rtx
1777 alpha_emit_set_const_1 (rtx target, machine_mode mode,
1778 HOST_WIDE_INT c, int n, bool no_output)
1780 HOST_WIDE_INT new_const;
1781 int i, bits;
1782 /* Use a pseudo if highly optimizing and still generating RTL. */
1783 rtx subtarget
1784 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1785 rtx temp, insn;
1787 /* If this is a sign-extended 32-bit constant, we can do this in at most
1788 three insns, so do it if we have enough insns left. */
1790 if (c >> 31 == -1 || c >> 31 == 0)
1792 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1793 HOST_WIDE_INT tmp1 = c - low;
1794 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1795 HOST_WIDE_INT extra = 0;
1797 /* If HIGH will be interpreted as negative but the constant is
1798 positive, we must adjust it to do two ldha insns. */
1800 if ((high & 0x8000) != 0 && c >= 0)
1802 extra = 0x4000;
1803 tmp1 -= 0x40000000;
1804 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
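	  /* Added illustrative example: for c = 0x7fff8000 the unadjusted
	     HIGH would be -0x8000, an ldah that overshoots the positive
	     constant.  The adjustment above instead gives HIGH = 0x4000
	     and EXTRA = 0x4000, so the value is built with two ldah insns
	     adding 0x40000000 each, followed by an lda of LOW = -0x8000.  */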
1807 if (c == low || (low == 0 && extra == 0))
1809 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1810 but that meant that we can't handle INT_MIN on 32-bit machines
1811 (like NT/Alpha), because we recurse indefinitely through
1812 emit_move_insn to gen_movdi. So instead, since we know exactly
1813 what we want, create it explicitly. */
1815 if (no_output)
1816 return pc_rtx;
1817 if (target == NULL)
1818 target = gen_reg_rtx (mode);
1819 emit_insn (gen_rtx_SET (target, GEN_INT (c)));
1820 return target;
1822 else if (n >= 2 + (extra != 0))
1824 if (no_output)
1825 return pc_rtx;
1826 if (!can_create_pseudo_p ())
1828 emit_insn (gen_rtx_SET (target, GEN_INT (high << 16)));
1829 temp = target;
1831 else
1832 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1833 subtarget, mode);
1835 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1836 This means that if we go through expand_binop, we'll try to
1837 generate extensions, etc, which will require new pseudos, which
1838 will fail during some split phases. The SImode add patterns
1839 still exist, but are not named. So build the insns by hand. */
1841 if (extra != 0)
1843 if (! subtarget)
1844 subtarget = gen_reg_rtx (mode);
1845 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1846 insn = gen_rtx_SET (subtarget, insn);
1847 emit_insn (insn);
1848 temp = subtarget;
1851 if (target == NULL)
1852 target = gen_reg_rtx (mode);
1853 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1854 insn = gen_rtx_SET (target, insn);
1855 emit_insn (insn);
1856 return target;
1860 /* If we couldn't do it that way, try some other methods. But if we have
1861 no instructions left, don't bother. Likewise, if this is SImode and
1862 we can't make pseudos, we can't do anything since the expand_binop
1863 and expand_unop calls will widen and try to make pseudos. */
1865 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1866 return 0;
1868 /* Next, see if we can load a related constant and then shift and possibly
1869 negate it to get the constant we want. Try this once each increasing
1870 numbers of insns. */
1872 for (i = 1; i < n; i++)
 1874       /* First, see if, minus some low bits, we have an easy load of the
 1875 	 high bits.  */
1877 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1878 if (new_const != 0)
1880 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1881 if (temp)
1883 if (no_output)
1884 return temp;
1885 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1886 target, 0, OPTAB_WIDEN);
1890 /* Next try complementing. */
1891 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1892 if (temp)
1894 if (no_output)
1895 return temp;
1896 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1899 /* Next try to form a constant and do a left shift. We can do this
1900 if some low-order bits are zero; the exact_log2 call below tells
1901 us that information. The bits we are shifting out could be any
1902 value, but here we'll just try the 0- and sign-extended forms of
1903 the constant. To try to increase the chance of having the same
1904 constant in more than one insn, start at the highest number of
1905 bits to shift, but try all possibilities in case a ZAPNOT will
1906 be useful. */
1908 bits = exact_log2 (c & -c);
1909 if (bits > 0)
1910 for (; bits > 0; bits--)
1912 new_const = c >> bits;
1913 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1914 if (!temp && c < 0)
1916 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1917 temp = alpha_emit_set_const (subtarget, mode, new_const,
1918 i, no_output);
1920 if (temp)
1922 if (no_output)
1923 return temp;
1924 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1925 target, 0, OPTAB_WIDEN);
1929 /* Now try high-order zero bits. Here we try the shifted-in bits as
1930 all zero and all ones. Be careful to avoid shifting outside the
1931 mode and to avoid shifting outside the host wide int size. */
1933 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1934 - floor_log2 (c) - 1);
1935 if (bits > 0)
1936 for (; bits > 0; bits--)
1938 new_const = c << bits;
1939 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1940 if (!temp)
1942 new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
1943 temp = alpha_emit_set_const (subtarget, mode, new_const,
1944 i, no_output);
1946 if (temp)
1948 if (no_output)
1949 return temp;
1950 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1951 target, 1, OPTAB_WIDEN);
1955 /* Now try high-order 1 bits. We get that with a sign-extension.
1956 But one bit isn't enough here. Be careful to avoid shifting outside
1957 the mode and to avoid shifting outside the host wide int size. */
1959 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1960 - floor_log2 (~ c) - 2);
1961 if (bits > 0)
1962 for (; bits > 0; bits--)
1964 new_const = c << bits;
1965 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1966 if (!temp)
1968 new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
1969 temp = alpha_emit_set_const (subtarget, mode, new_const,
1970 i, no_output);
1972 if (temp)
1974 if (no_output)
1975 return temp;
1976 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1977 target, 0, OPTAB_WIDEN);
 1982   /* Finally, see if we can load a value into the target that is the same as the
1983 constant except that all bytes that are 0 are changed to be 0xff. If we
1984 can, then we can do a ZAPNOT to obtain the desired constant. */
1986 new_const = c;
1987 for (i = 0; i < 64; i += 8)
1988 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1989 new_const |= (HOST_WIDE_INT) 0xff << i;
1991 /* We are only called for SImode and DImode. If this is SImode, ensure that
1992 we are sign extended to a full word. */
1994 if (mode == SImode)
1995 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1997 if (new_const != c)
1999 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
2000 if (temp)
2002 if (no_output)
2003 return temp;
2004 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
2005 target, 0, OPTAB_WIDEN);
2009 return 0;
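/* A host-side sketch of the byte-fill (zapnot) trick above, assuming a
   64-bit host and the standard <stdint.h>/<assert.h> names; it only
   demonstrates why AND-ing the filled constant with (C | ~NEW_CONST),
   a single zapnot on the target, recovers C.  */

#include <assert.h>
#include <stdint.h>

static uint64_t
zapnot_fill_sketch (uint64_t c)
{
  uint64_t filled = c, mask;
  int i;

  /* Turn every all-zero byte of C into 0xff, as the loop above does.  */
  for (i = 0; i < 64; i += 8)
    if ((filled & ((uint64_t) 0xff << i)) == 0)
      filled |= (uint64_t) 0xff << i;

  /* MASK has 0x00 in the bytes that were zero and 0xff everywhere else,
     so the AND undoes the fill.  */
  mask = c | ~filled;
  assert ((filled & mask) == c);
  return filled & mask;
}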
2012 /* Try to output insns to set TARGET equal to the constant C if it can be
2013 done in at most N insns. Do all computations in MODE. Returns the place
2014 where the output has been placed if it can be done and the insns have been
2015 emitted. If it would take more than N insns, zero is returned and no
2016 insns are emitted. */
2018 static rtx
2019 alpha_emit_set_const (rtx target, machine_mode mode,
2020 HOST_WIDE_INT c, int n, bool no_output)
2022 machine_mode orig_mode = mode;
2023 rtx orig_target = target;
2024 rtx result = 0;
2025 int i;
2027 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
2028 can't load this constant in one insn, then do it in DImode. */
2029 if (!can_create_pseudo_p () && mode == SImode
2030 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
2032 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
2033 if (result)
2034 return result;
2036 target = no_output ? NULL : gen_lowpart (DImode, target);
2037 mode = DImode;
2039 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
2041 target = no_output ? NULL : gen_lowpart (DImode, target);
2042 mode = DImode;
2045 /* Try 1 insn, then 2, then up to N. */
2046 for (i = 1; i <= n; i++)
2048 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
2049 if (result)
2051 rtx_insn *insn;
2052 rtx set;
2054 if (no_output)
2055 return result;
2057 insn = get_last_insn ();
2058 set = single_set (insn);
2059 if (! CONSTANT_P (SET_SRC (set)))
2060 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2061 break;
2065 /* Allow for the case where we changed the mode of TARGET. */
2066 if (result)
2068 if (result == target)
2069 result = orig_target;
2070 else if (mode != orig_mode)
2071 result = gen_lowpart (orig_mode, result);
2074 return result;
2077 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2078 fall back to a straightforward decomposition. We do this to avoid
2079 exponential run times encountered when looking for longer sequences
2080 with alpha_emit_set_const. */
2082 static rtx
2083 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
2085 HOST_WIDE_INT d1, d2, d3, d4;
2087 /* Decompose the entire word */
2089 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2090 c1 -= d1;
2091 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2092 c1 = (c1 - d2) >> 32;
2093 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2094 c1 -= d3;
2095 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2096 gcc_assert (c1 == d4);
2098 /* Construct the high word */
2099 if (d4)
2101 emit_move_insn (target, GEN_INT (d4));
2102 if (d3)
2103 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2105 else
2106 emit_move_insn (target, GEN_INT (d3));
2108 /* Shift it into place */
2109 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2111 /* Add in the low bits. */
2112 if (d2)
2113 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2114 if (d1)
2115 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2117 return target;
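/* A self-contained sketch of the decomposition above, mirroring the
   HOST_WIDE_INT arithmetic on a 64-bit two's-complement host (assumed,
   as is an arithmetic right shift of negative values).  The real routine
   emits lda/ldah/sll insns; here the pieces are simply reassembled to
   show that D1..D4 reconstruct the original constant.  */

#include <assert.h>

static long long
long_const_sketch (long long c)
{
  long long d1, d2, d3, d4, x = c, r;

  d1 = ((x & 0xffff) ^ 0x8000) - 0x8000;             /* low 16, sign-extended */
  x -= d1;
  d2 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000; /* next 32 */
  x = (x - d2) >> 32;
  d3 = ((x & 0xffff) ^ 0x8000) - 0x8000;
  x -= d3;
  d4 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000;
  assert (x == d4);

  r = d4 + d3;       /* build the high word */
  r <<= 32;          /* shift it into place */
  r += d2;           /* add in the low bits */
  r += d1;
  assert (r == c);
  return r;
}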
2120 /* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits. */
2122 static HOST_WIDE_INT
2123 alpha_extract_integer (rtx x)
2125 if (GET_CODE (x) == CONST_VECTOR)
2126 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2128 gcc_assert (CONST_INT_P (x));
2130 return INTVAL (x);
2133 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
2134 we are willing to load the value into a register via a move pattern.
2135 Normally this is all symbolic constants, integral constants that
2136 take three or fewer instructions, and floating-point zero. */
2138 bool
2139 alpha_legitimate_constant_p (machine_mode mode, rtx x)
2141 HOST_WIDE_INT i0;
2143 switch (GET_CODE (x))
2145 case LABEL_REF:
2146 case HIGH:
2147 return true;
2149 case CONST:
2150 if (GET_CODE (XEXP (x, 0)) == PLUS
2151 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
2152 x = XEXP (XEXP (x, 0), 0);
2153 else
2154 return true;
2156 if (GET_CODE (x) != SYMBOL_REF)
2157 return true;
2158 /* FALLTHRU */
2160 case SYMBOL_REF:
2161 /* TLS symbols are never valid. */
2162 return SYMBOL_REF_TLS_MODEL (x) == 0;
2164 case CONST_WIDE_INT:
2165 if (TARGET_BUILD_CONSTANTS)
2166 return true;
2167 if (x == CONST0_RTX (mode))
2168 return true;
2169 mode = DImode;
2170 gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2);
2171 i0 = CONST_WIDE_INT_ELT (x, 1);
2172 if (alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL)
2173 return false;
2174 i0 = CONST_WIDE_INT_ELT (x, 0);
2175 goto do_integer;
2177 case CONST_DOUBLE:
2178 if (x == CONST0_RTX (mode))
2179 return true;
2180 return false;
2182 case CONST_VECTOR:
2183 if (x == CONST0_RTX (mode))
2184 return true;
2185 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2186 return false;
2187 if (GET_MODE_SIZE (mode) != 8)
2188 return false;
2189 /* FALLTHRU */
2191 case CONST_INT:
2192 if (TARGET_BUILD_CONSTANTS)
2193 return true;
2194 i0 = alpha_extract_integer (x);
2195 do_integer:
2196 return alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL;
2198 default:
2199 return false;
2203 /* Operand 1 is known to be a constant, and should require more than one
2204 instruction to load. Emit that multi-part load. */
2206 bool
2207 alpha_split_const_mov (machine_mode mode, rtx *operands)
2209 HOST_WIDE_INT i0;
2210 rtx temp = NULL_RTX;
2212 i0 = alpha_extract_integer (operands[1]);
2214 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2216 if (!temp && TARGET_BUILD_CONSTANTS)
2217 temp = alpha_emit_set_long_const (operands[0], i0);
2219 if (temp)
2221 if (!rtx_equal_p (operands[0], temp))
2222 emit_move_insn (operands[0], temp);
2223 return true;
2226 return false;
2229 /* Expand a move instruction; return true if all work is done.
2230 We don't handle non-bwx subword loads here. */
2232 bool
2233 alpha_expand_mov (machine_mode mode, rtx *operands)
2235 rtx tmp;
2237 /* If the output is not a register, the input must be. */
2238 if (MEM_P (operands[0])
2239 && ! reg_or_0_operand (operands[1], mode))
2240 operands[1] = force_reg (mode, operands[1]);
2242 /* Allow legitimize_address to perform some simplifications. */
2243 if (mode == Pmode && symbolic_operand (operands[1], mode))
2245 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2246 if (tmp)
2248 if (tmp == operands[0])
2249 return true;
2250 operands[1] = tmp;
2251 return false;
2255 /* Early out for non-constants and valid constants. */
2256 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2257 return false;
2259 /* Split large integers. */
2260 if (CONST_INT_P (operands[1])
2261 || GET_CODE (operands[1]) == CONST_VECTOR)
2263 if (alpha_split_const_mov (mode, operands))
2264 return true;
2267 /* Otherwise we've nothing left but to drop the thing to memory. */
2268 tmp = force_const_mem (mode, operands[1]);
2270 if (tmp == NULL_RTX)
2271 return false;
2273 if (reload_in_progress)
2275 emit_move_insn (operands[0], XEXP (tmp, 0));
2276 operands[1] = replace_equiv_address (tmp, operands[0]);
2278 else
2279 operands[1] = validize_mem (tmp);
2280 return false;
2283 /* Expand a non-bwx QImode or HImode move instruction;
2284 return true if all work is done. */
2286 bool
2287 alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
2289 rtx seq;
2291 /* If the output is not a register, the input must be. */
2292 if (MEM_P (operands[0]))
2293 operands[1] = force_reg (mode, operands[1]);
2295 /* Handle four memory cases, unaligned and aligned for either the input
2296 or the output. The only case where we can be called during reload is
2297 for aligned loads; all other cases require temporaries. */
2299 if (any_memory_operand (operands[1], mode))
2301 if (aligned_memory_operand (operands[1], mode))
2303 if (reload_in_progress)
2305 if (mode == QImode)
2306 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2307 else
2308 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2309 emit_insn (seq);
2311 else
2313 rtx aligned_mem, bitnum;
2314 rtx scratch = gen_reg_rtx (SImode);
2315 rtx subtarget;
2316 bool copyout;
2318 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2320 subtarget = operands[0];
2321 if (REG_P (subtarget))
2322 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2323 else
2324 subtarget = gen_reg_rtx (DImode), copyout = true;
2326 if (mode == QImode)
2327 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2328 bitnum, scratch);
2329 else
2330 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2331 bitnum, scratch);
2332 emit_insn (seq);
2334 if (copyout)
2335 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2338 else
2340 /* Don't pass these as parameters since that makes the generated
2341 code depend on parameter evaluation order which will cause
2342 bootstrap failures. */
2344 rtx temp1, temp2, subtarget, ua;
2345 bool copyout;
2347 temp1 = gen_reg_rtx (DImode);
2348 temp2 = gen_reg_rtx (DImode);
2350 subtarget = operands[0];
2351 if (REG_P (subtarget))
2352 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2353 else
2354 subtarget = gen_reg_rtx (DImode), copyout = true;
2356 ua = get_unaligned_address (operands[1]);
2357 if (mode == QImode)
2358 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2359 else
2360 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2362 alpha_set_memflags (seq, operands[1]);
2363 emit_insn (seq);
2365 if (copyout)
2366 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2368 return true;
2371 if (any_memory_operand (operands[0], mode))
2373 if (aligned_memory_operand (operands[0], mode))
2375 rtx aligned_mem, bitnum;
2376 rtx temp1 = gen_reg_rtx (SImode);
2377 rtx temp2 = gen_reg_rtx (SImode);
2379 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2381 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2382 temp1, temp2));
2384 else
2386 rtx temp1 = gen_reg_rtx (DImode);
2387 rtx temp2 = gen_reg_rtx (DImode);
2388 rtx temp3 = gen_reg_rtx (DImode);
2389 rtx ua = get_unaligned_address (operands[0]);
2391 if (mode == QImode)
2392 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2393 else
2394 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2396 alpha_set_memflags (seq, operands[0]);
2397 emit_insn (seq);
2399 return true;
2402 return false;
2405 /* Implement the movmisalign patterns. One of the operands is a memory
2406 that is not naturally aligned. Emit instructions to load or store it. */
2408 void
2409 alpha_expand_movmisalign (machine_mode mode, rtx *operands)
2411 /* Honor misaligned loads for which we promised to do so. */
2412 if (MEM_P (operands[1]))
2414 rtx tmp;
2416 if (register_operand (operands[0], mode))
2417 tmp = operands[0];
2418 else
2419 tmp = gen_reg_rtx (mode);
2421 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2422 if (tmp != operands[0])
2423 emit_move_insn (operands[0], tmp);
2425 else if (MEM_P (operands[0]))
2427 if (!reg_or_0_operand (operands[1], mode))
2428 operands[1] = force_reg (mode, operands[1]);
2429 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2431 else
2432 gcc_unreachable ();
2435 /* Generate an unsigned DImode to FP conversion. This is the same code
2436 optabs would emit if we didn't have TFmode patterns.
2438 For SFmode, this is the only construction I've found that can pass
2439 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2440 intermediates will work, because you'll get intermediate rounding
2441 that ruins the end result. Some of this could be fixed by turning
2442 on round-to-positive-infinity, but that requires diddling the fpsr,
2443 which kills performance. I tried turning this around and converting
2444 to a negative number, so that I could turn on /m, but either I did
2445 it wrong or there's something else, because I wound up with the exact
2446 same single-bit error. There is a branch-less form of this same code:
2448 srl $16,1,$1
2449 and $16,1,$2
2450 cmplt $16,0,$3
2451 or $1,$2,$2
2452 cmovge $16,$16,$2
2453 itoft $3,$f10
2454 itoft $2,$f11
2455 cvtqs $f11,$f11
2456 adds $f11,$f11,$f0
2457 fcmoveq $f10,$f11,$f0
2459 I'm not using it because it's the same number of instructions as
2460 this branch-full form, and it has more serialized long latency
2461 instructions on the critical path.
2463 For DFmode, we can avoid rounding errors by breaking up the word
2464 into two pieces, converting them separately, and adding them back:
2466 LC0: .long 0,0x5f800000
2468 itoft $16,$f11
2469 lda $2,LC0
2470 cmplt $16,0,$1
2471 cpyse $f11,$f31,$f10
2472 cpyse $f31,$f11,$f11
2473 s4addq $1,$2,$1
2474 lds $f12,0($1)
2475 cvtqt $f10,$f10
2476 cvtqt $f11,$f11
2477 addt $f12,$f10,$f0
2478 addt $f0,$f11,$f0
2480 This doesn't seem to be a clear-cut win over the optabs form.
2481 It probably all depends on the distribution of numbers being
2482 converted -- in the optabs form, all but high-bit-set has a
2483 much lower minimum execution time. */
2485 void
2486 alpha_emit_floatuns (rtx operands[2])
2488 rtx neglab, donelab, i0, i1, f0, in, out;
2489 machine_mode mode;
2491 out = operands[0];
2492 in = force_reg (DImode, operands[1]);
2493 mode = GET_MODE (out);
2494 neglab = gen_label_rtx ();
2495 donelab = gen_label_rtx ();
2496 i0 = gen_reg_rtx (DImode);
2497 i1 = gen_reg_rtx (DImode);
2498 f0 = gen_reg_rtx (mode);
2500 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2502 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
2503 emit_jump_insn (gen_jump (donelab));
2504 emit_barrier ();
2506 emit_label (neglab);
2508 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2509 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2510 emit_insn (gen_iordi3 (i0, i0, i1));
2511 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
2512 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
2514 emit_label (donelab);
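/* A scalar model of the branch-full sequence just emitted, using plain C
   doubles (an illustration only).  It shows why halving the value while
   folding the low bit back in keeps round-to-nearest-even correct before
   the final doubling.  */

#include <stdint.h>

static double
floatuns_sketch (uint64_t x)
{
  uint64_t halved;
  double f;

  if ((int64_t) x >= 0)
    return (double) (int64_t) x;        /* plain signed conversion */

  /* Sign bit set: convert x/2 with bit 0 preserved, so the conversion's
     rounding decision still sees it, then double the result.  */
  halved = (x >> 1) | (x & 1);
  f = (double) (int64_t) halved;
  return f + f;
}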
2517 /* Generate the comparison for a conditional branch. */
2519 void
2520 alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
2522 enum rtx_code cmp_code, branch_code;
2523 machine_mode branch_mode = VOIDmode;
2524 enum rtx_code code = GET_CODE (operands[0]);
2525 rtx op0 = operands[1], op1 = operands[2];
2526 rtx tem;
2528 if (cmp_mode == TFmode)
2530 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2531 op1 = const0_rtx;
2532 cmp_mode = DImode;
2535 /* The general case: fold the comparison code to the types of compares
2536 that we have, choosing the branch as necessary. */
2537 switch (code)
2539 case EQ: case LE: case LT: case LEU: case LTU:
2540 case UNORDERED:
2541 /* We have these compares. */
2542 cmp_code = code, branch_code = NE;
2543 break;
2545 case NE:
2546 case ORDERED:
2547 /* These must be reversed. */
2548 cmp_code = reverse_condition (code), branch_code = EQ;
2549 break;
2551 case GE: case GT: case GEU: case GTU:
2552 /* For FP, we swap them; for INT, we reverse them. */
2553 if (cmp_mode == DFmode)
2555 cmp_code = swap_condition (code);
2556 branch_code = NE;
2557 std::swap (op0, op1);
2559 else
2561 cmp_code = reverse_condition (code);
2562 branch_code = EQ;
2564 break;
2566 default:
2567 gcc_unreachable ();
2570 if (cmp_mode == DFmode)
2572 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2574 /* When we are not as concerned about non-finite values, and we
2575 are comparing against zero, we can branch directly. */
2576 if (op1 == CONST0_RTX (DFmode))
2577 cmp_code = UNKNOWN, branch_code = code;
2578 else if (op0 == CONST0_RTX (DFmode))
2580 /* Undo the swap we probably did just above. */
2581 std::swap (op0, op1);
2582 branch_code = swap_condition (cmp_code);
2583 cmp_code = UNKNOWN;
2586 else
2588 /* ??? We mark the branch mode to be CCmode to prevent the
2589 compare and branch from being combined, since the compare
2590 insn follows IEEE rules that the branch does not. */
2591 branch_mode = CCmode;
2594 else
2596 /* The following optimizations are only for signed compares. */
2597 if (code != LEU && code != LTU && code != GEU && code != GTU)
2599 /* Whee. Compare and branch against 0 directly. */
2600 if (op1 == const0_rtx)
2601 cmp_code = UNKNOWN, branch_code = code;
2603 /* If the constant doesn't fit into an immediate, but can
2604 be generated by lda/ldah, we adjust the argument and
2605 compare against zero, so we can use beq/bne directly. */
2606 /* ??? Don't do this when comparing against symbols, otherwise
2607 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2608 be declared false out of hand (at least for non-weak). */
2609 else if (CONST_INT_P (op1)
2610 && (code == EQ || code == NE)
2611 && !(symbolic_operand (op0, VOIDmode)
2612 || (REG_P (op0) && REG_POINTER (op0))))
2614 rtx n_op1 = GEN_INT (-INTVAL (op1));
2616 if (! satisfies_constraint_I (op1)
2617 && (satisfies_constraint_K (n_op1)
2618 || satisfies_constraint_L (n_op1)))
2619 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2623 if (!reg_or_0_operand (op0, DImode))
2624 op0 = force_reg (DImode, op0);
2625 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2626 op1 = force_reg (DImode, op1);
2629 /* Emit an initial compare instruction, if necessary. */
2630 tem = op0;
2631 if (cmp_code != UNKNOWN)
2633 tem = gen_reg_rtx (cmp_mode);
2634 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2637 /* Emit the branch instruction. */
2638 tem = gen_rtx_SET (pc_rtx,
2639 gen_rtx_IF_THEN_ELSE (VOIDmode,
2640 gen_rtx_fmt_ee (branch_code,
2641 branch_mode, tem,
2642 CONST0_RTX (cmp_mode)),
2643 gen_rtx_LABEL_REF (VOIDmode,
2644 operands[3]),
2645 pc_rtx));
2646 emit_jump_insn (tem);
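/* A scalar sketch of the integer folding above (illustrative only): the
   Alpha has eq/lt/le compares but no gt/ge, so a "greater" branch is
   rewritten before anything is emitted.  For DFmode the operands are
   swapped instead of the condition being reversed.  */

static int
branch_if_gt_sketch (long a, long b)
{
  long t = (a <= b);    /* reversed compare: roughly "cmple a,b,t" */
  return t == 0;        /* branch when the reversed test fails: "beq t,label" */
}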
2649 /* Certain simplifications can be done to make invalid setcc operations
2650 valid. Return true if the setcc has been emitted, false if we can't. */
2652 bool
2653 alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
2655 enum rtx_code cmp_code;
2656 enum rtx_code code = GET_CODE (operands[1]);
2657 rtx op0 = operands[2], op1 = operands[3];
2658 rtx tmp;
2660 if (cmp_mode == TFmode)
2662 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2663 op1 = const0_rtx;
2664 cmp_mode = DImode;
2667 if (cmp_mode == DFmode && !TARGET_FIX)
2668 return 0;
2670 /* The general case: fold the comparison code to the types of compares
2671 that we have, choosing the branch as necessary. */
2673 cmp_code = UNKNOWN;
2674 switch (code)
2676 case EQ: case LE: case LT: case LEU: case LTU:
2677 case UNORDERED:
2678 /* We have these compares. */
2679 if (cmp_mode == DFmode)
2680 cmp_code = code, code = NE;
2681 break;
2683 case NE:
2684 if (cmp_mode == DImode && op1 == const0_rtx)
2685 break;
2686 /* FALLTHRU */
2688 case ORDERED:
2689 cmp_code = reverse_condition (code);
2690 code = EQ;
2691 break;
2693 case GE: case GT: case GEU: case GTU:
2694 /* These normally need swapping, but for integer zero we have
2695 special patterns that recognize swapped operands. */
2696 if (cmp_mode == DImode && op1 == const0_rtx)
2697 break;
2698 code = swap_condition (code);
2699 if (cmp_mode == DFmode)
2700 cmp_code = code, code = NE;
2701 std::swap (op0, op1);
2702 break;
2704 default:
2705 gcc_unreachable ();
2708 if (cmp_mode == DImode)
2710 if (!register_operand (op0, DImode))
2711 op0 = force_reg (DImode, op0);
2712 if (!reg_or_8bit_operand (op1, DImode))
2713 op1 = force_reg (DImode, op1);
2716 /* Emit an initial compare instruction, if necessary. */
2717 if (cmp_code != UNKNOWN)
2719 tmp = gen_reg_rtx (cmp_mode);
2720 emit_insn (gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode,
2721 op0, op1)));
2723 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2724 op1 = const0_rtx;
2727 /* Emit the setcc instruction. */
2728 emit_insn (gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode,
2729 op0, op1)));
2730 return true;
2734 /* Rewrite a comparison against zero CMP of the form
2735 (CODE (cc0) (const_int 0)) so it can be written validly in
2736 a conditional move (if_then_else CMP ...).
2737 If both of the operands that set cc0 are nonzero we must emit
2738 an insn to perform the compare (it can't be done within
2739 the conditional move). */
2741 rtx
2742 alpha_emit_conditional_move (rtx cmp, machine_mode mode)
2744 enum rtx_code code = GET_CODE (cmp);
2745 enum rtx_code cmov_code = NE;
2746 rtx op0 = XEXP (cmp, 0);
2747 rtx op1 = XEXP (cmp, 1);
2748 machine_mode cmp_mode
2749 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2750 machine_mode cmov_mode = VOIDmode;
2751 int local_fast_math = flag_unsafe_math_optimizations;
2752 rtx tem;
2754 if (cmp_mode == TFmode)
2756 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2757 op1 = const0_rtx;
2758 cmp_mode = DImode;
2761 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2763 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2765 enum rtx_code cmp_code;
2767 if (! TARGET_FIX)
2768 return 0;
2770 /* If we have fp<->int register move instructions, do a cmov by
2771 performing the comparison in fp registers, and move the
2772 zero/nonzero value to integer registers, where we can then
2773 use a normal cmov, or vice-versa. */
2775 switch (code)
2777 case EQ: case LE: case LT: case LEU: case LTU:
2778 case UNORDERED:
2779 /* We have these compares. */
2780 cmp_code = code, code = NE;
2781 break;
2783 case NE:
2784 case ORDERED:
2785 /* These must be reversed. */
2786 cmp_code = reverse_condition (code), code = EQ;
2787 break;
2789 case GE: case GT: case GEU: case GTU:
2790 /* These normally need swapping, but for integer zero we have
2791 special patterns that recognize swapped operands. */
2792 if (cmp_mode == DImode && op1 == const0_rtx)
2793 cmp_code = code, code = NE;
2794 else
2796 cmp_code = swap_condition (code);
2797 code = NE;
2798 std::swap (op0, op1);
2800 break;
2802 default:
2803 gcc_unreachable ();
2806 if (cmp_mode == DImode)
2808 if (!reg_or_0_operand (op0, DImode))
2809 op0 = force_reg (DImode, op0);
2810 if (!reg_or_8bit_operand (op1, DImode))
2811 op1 = force_reg (DImode, op1);
2814 tem = gen_reg_rtx (cmp_mode);
2815 emit_insn (gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode,
2816 op0, op1)));
2818 cmp_mode = cmp_mode == DImode ? E_DFmode : E_DImode;
2819 op0 = gen_lowpart (cmp_mode, tem);
2820 op1 = CONST0_RTX (cmp_mode);
2821 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2822 local_fast_math = 1;
2825 if (cmp_mode == DImode)
2827 if (!reg_or_0_operand (op0, DImode))
2828 op0 = force_reg (DImode, op0);
2829 if (!reg_or_8bit_operand (op1, DImode))
2830 op1 = force_reg (DImode, op1);
2833 /* We may be able to use a conditional move directly.
2834 This avoids emitting spurious compares. */
2835 if (signed_comparison_operator (cmp, VOIDmode)
2836 && (cmp_mode == DImode || local_fast_math)
2837 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2838 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2840 /* We can't put the comparison inside the conditional move;
2841 emit a compare instruction and put that inside the
2842 conditional move. Make sure we emit only comparisons we have;
2843 swap or reverse as necessary. */
2845 if (!can_create_pseudo_p ())
2846 return NULL_RTX;
2848 switch (code)
2850 case EQ: case LE: case LT: case LEU: case LTU:
2851 case UNORDERED:
2852 /* We have these compares: */
2853 break;
2855 case NE:
2856 case ORDERED:
2857 /* These must be reversed. */
2858 code = reverse_condition (code);
2859 cmov_code = EQ;
2860 break;
2862 case GE: case GT: case GEU: case GTU:
2863 /* These normally need swapping, but for integer zero we have
2864 special patterns that recognize swapped operands. */
2865 if (cmp_mode == DImode && op1 == const0_rtx)
2866 break;
2867 code = swap_condition (code);
2868 std::swap (op0, op1);
2869 break;
2871 default:
2872 gcc_unreachable ();
2875 if (cmp_mode == DImode)
2877 if (!reg_or_0_operand (op0, DImode))
2878 op0 = force_reg (DImode, op0);
2879 if (!reg_or_8bit_operand (op1, DImode))
2880 op1 = force_reg (DImode, op1);
2883 /* ??? We mark the branch mode to be CCmode to prevent the compare
2884 and cmov from being combined, since the compare insn follows IEEE
2885 rules that the cmov does not. */
2886 if (cmp_mode == DFmode && !local_fast_math)
2887 cmov_mode = CCmode;
2889 tem = gen_reg_rtx (cmp_mode);
2890 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2891 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2894 /* Simplify a conditional move of two constants into a setcc with
2895 arithmetic. This is done with a splitter since combine would
2896 just undo the work if done during code generation. It also catches
2897 cases we wouldn't have before cse. */
2899 int
2900 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2901 rtx t_rtx, rtx f_rtx)
2903 HOST_WIDE_INT t, f, diff;
2904 machine_mode mode;
2905 rtx target, subtarget, tmp;
2907 mode = GET_MODE (dest);
2908 t = INTVAL (t_rtx);
2909 f = INTVAL (f_rtx);
2910 diff = t - f;
2912 if (((code == NE || code == EQ) && diff < 0)
2913 || (code == GE || code == GT))
2915 code = reverse_condition (code);
2916 std::swap (t, f);
2917 diff = -diff;
2920 subtarget = target = dest;
2921 if (mode != DImode)
2923 target = gen_lowpart (DImode, dest);
2924 if (can_create_pseudo_p ())
2925 subtarget = gen_reg_rtx (DImode);
2926 else
2927 subtarget = target;
2929 /* Below, we must be careful to use copy_rtx on target and subtarget
2930 in intermediate insns, as they may be a subreg rtx, which may not
2931 be shared. */
2933 if (f == 0 && exact_log2 (diff) > 0
2934 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2935 viable over a longer latency cmove. On EV5, the E0 slot is a
2936 scarce resource, and on EV4 shift has the same latency as a cmove. */
2937 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2939 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2940 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
2942 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2943 GEN_INT (exact_log2 (t)));
2944 emit_insn (gen_rtx_SET (target, tmp));
2946 else if (f == 0 && t == -1)
2948 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2949 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
2951 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2953 else if (diff == 1 || diff == 4 || diff == 8)
2955 rtx add_op;
2957 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2958 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
2960 if (diff == 1)
2961 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2962 else
2964 add_op = GEN_INT (f);
2965 if (sext_add_operand (add_op, mode))
2967 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2968 GEN_INT (exact_log2 (diff)));
2969 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2970 emit_insn (gen_rtx_SET (target, tmp));
2972 else
2973 return 0;
2976 else
2977 return 0;
2979 return 1;
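/* Scalar sketches of the constant/constant cmov splits above.  COND stands
   for the already-materialized comparison operand with code NE (an
   assumption made only to keep the examples readable).  */

static long
cmov_0_or_8_sketch (long cond)          /* dest = cond ? 8 : 0 */
{
  long t = (cond != 0);                 /* setcc */
  return t << 3;                        /* shift by exact_log2 (8) */
}

static long
cmov_0_or_m1_sketch (long cond)         /* dest = cond ? -1 : 0 */
{
  long t = (cond != 0);
  return -t;                            /* negate */
}

static long
cmov_4_or_5_sketch (long cond)          /* dest = cond ? 5 : 4  (diff == 1) */
{
  long t = (cond != 0);
  return t + 4;                         /* add the "false" value */
}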
2982 /* Look up the X_floating library function name for the
2983 given operation. */
2985 struct GTY(()) xfloating_op
2987 const enum rtx_code code;
2988 const char *const GTY((skip)) osf_func;
2989 const char *const GTY((skip)) vms_func;
2990 rtx libcall;
2993 static GTY(()) struct xfloating_op xfloating_ops[] =
2995 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2996 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2997 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2998 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2999 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
3000 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
3001 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
3002 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
3003 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
3004 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
3005 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
3006 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
3007 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
3008 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
3009 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
3012 static GTY(()) struct xfloating_op vax_cvt_ops[] =
3014 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
3015 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
3018 static rtx
3019 alpha_lookup_xfloating_lib_func (enum rtx_code code)
3021 struct xfloating_op *ops = xfloating_ops;
3022 long n = ARRAY_SIZE (xfloating_ops);
3023 long i;
3025 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
3027 /* How irritating. Nothing to key off for the main table. */
3028 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3030 ops = vax_cvt_ops;
3031 n = ARRAY_SIZE (vax_cvt_ops);
3034 for (i = 0; i < n; ++i, ++ops)
3035 if (ops->code == code)
3037 rtx func = ops->libcall;
3038 if (!func)
3040 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3041 ? ops->vms_func : ops->osf_func);
3042 ops->libcall = func;
3044 return func;
3047 gcc_unreachable ();
3050 /* Most X_floating operations take the rounding mode as an argument.
3051 Compute that here. */
3053 static int
3054 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3055 enum alpha_fp_rounding_mode round)
3057 int mode;
3059 switch (round)
3061 case ALPHA_FPRM_NORM:
3062 mode = 2;
3063 break;
3064 case ALPHA_FPRM_MINF:
3065 mode = 1;
3066 break;
3067 case ALPHA_FPRM_CHOP:
3068 mode = 0;
3069 break;
3070 case ALPHA_FPRM_DYN:
3071 mode = 4;
3072 break;
3073 default:
3074 gcc_unreachable ();
3076 /* XXX For reference, round to +inf is mode = 3. */
3079 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3080 mode |= 0x10000;
3082 return mode;
3085 /* Emit an X_floating library function call.
3087 Note that these functions do not follow normal calling conventions:
3088 TFmode arguments are passed in two integer registers (as opposed to
3089 indirect); TFmode return values appear in R16+R17.
3091 FUNC is the function to call.
3092 TARGET is where the output belongs.
3093 OPERANDS are the inputs.
3094 NOPERANDS is the count of inputs.
3095 EQUIV is the expression equivalent for the function.
3098 static void
3099 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3100 int noperands, rtx equiv)
3102 rtx usage = NULL_RTX, reg;
3103 int regno = 16, i;
3105 start_sequence ();
3107 for (i = 0; i < noperands; ++i)
3109 switch (GET_MODE (operands[i]))
3111 case E_TFmode:
3112 reg = gen_rtx_REG (TFmode, regno);
3113 regno += 2;
3114 break;
3116 case E_DFmode:
3117 reg = gen_rtx_REG (DFmode, regno + 32);
3118 regno += 1;
3119 break;
3121 case E_VOIDmode:
3122 gcc_assert (CONST_INT_P (operands[i]));
3123 /* FALLTHRU */
3124 case E_DImode:
3125 reg = gen_rtx_REG (DImode, regno);
3126 regno += 1;
3127 break;
3129 default:
3130 gcc_unreachable ();
3133 emit_move_insn (reg, operands[i]);
3134 use_reg (&usage, reg);
3137 switch (GET_MODE (target))
3139 case E_TFmode:
3140 reg = gen_rtx_REG (TFmode, 16);
3141 break;
3142 case E_DFmode:
3143 reg = gen_rtx_REG (DFmode, 32);
3144 break;
3145 case E_DImode:
3146 reg = gen_rtx_REG (DImode, 0);
3147 break;
3148 default:
3149 gcc_unreachable ();
3152 rtx mem = gen_rtx_MEM (QImode, func);
3153 rtx_insn *tmp = emit_call_insn (gen_call_value (reg, mem, const0_rtx,
3154 const0_rtx, const0_rtx));
3155 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3156 RTL_CONST_CALL_P (tmp) = 1;
3158 tmp = get_insns ();
3159 end_sequence ();
3161 emit_libcall_block (tmp, target, reg, equiv);
3164 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3166 void
3167 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3169 rtx func;
3170 int mode;
3171 rtx out_operands[3];
3173 func = alpha_lookup_xfloating_lib_func (code);
3174 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3176 out_operands[0] = operands[1];
3177 out_operands[1] = operands[2];
3178 out_operands[2] = GEN_INT (mode);
3179 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3180 gen_rtx_fmt_ee (code, TFmode, operands[1],
3181 operands[2]));
3184 /* Emit an X_floating library function call for a comparison. */
3186 static rtx
3187 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3189 enum rtx_code cmp_code, res_code;
3190 rtx func, out, operands[2], note;
3192 /* X_floating library comparison functions return
3193 -1 unordered
3194 0 false
3195 1 true
3196 Convert the compare against the raw return value. */
3198 cmp_code = *pcode;
3199 switch (cmp_code)
3201 case UNORDERED:
3202 cmp_code = EQ;
3203 res_code = LT;
3204 break;
3205 case ORDERED:
3206 cmp_code = EQ;
3207 res_code = GE;
3208 break;
3209 case NE:
3210 res_code = NE;
3211 break;
3212 case EQ:
3213 case LT:
3214 case GT:
3215 case LE:
3216 case GE:
3217 res_code = GT;
3218 break;
3219 default:
3220 gcc_unreachable ();
3222 *pcode = res_code;
3224 func = alpha_lookup_xfloating_lib_func (cmp_code);
3226 operands[0] = op0;
3227 operands[1] = op1;
3228 out = gen_reg_rtx (DImode);
3230 /* What's actually returned is -1,0,1, not a proper boolean value. */
3231 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3232 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
3233 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3235 return out;
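/* A host-side sketch of the return-value translation above.  The
   parameters stand for the raw -1/0/1 result of whichever library
   routine was selected (hypothetical names, for illustration only).  */

static int
xfloat_le_sketch (long ots_leq_result)
{
  return ots_leq_result > 0;    /* EQ/LT/GT/LE/GE all test "result GT 0" */
}

static int
xfloat_unordered_sketch (long ots_eql_result)
{
  return ots_eql_result < 0;    /* UNORDERED calls the EQL routine and
                                   tests "result LT 0" */
}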
3238 /* Emit an X_floating library function call for a conversion. */
3240 void
3241 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3243 int noperands = 1, mode;
3244 rtx out_operands[2];
3245 rtx func;
3246 enum rtx_code code = orig_code;
3248 if (code == UNSIGNED_FIX)
3249 code = FIX;
3251 func = alpha_lookup_xfloating_lib_func (code);
3253 out_operands[0] = operands[1];
3255 switch (code)
3257 case FIX:
3258 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3259 out_operands[1] = GEN_INT (mode);
3260 noperands = 2;
3261 break;
3262 case FLOAT_TRUNCATE:
3263 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3264 out_operands[1] = GEN_INT (mode);
3265 noperands = 2;
3266 break;
3267 default:
3268 break;
3271 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3272 gen_rtx_fmt_e (orig_code,
3273 GET_MODE (operands[0]),
3274 operands[1]));
3277 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3278 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3279 guarantee that the sequence
3280 set (OP[0] OP[2])
3281 set (OP[1] OP[3])
3282 is valid. Naturally, output operand ordering is little-endian.
3283 This is used by *movtf_internal and *movti_internal. */
3285 void
3286 alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
3287 bool fixup_overlap)
3289 switch (GET_CODE (operands[1]))
3291 case REG:
3292 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3293 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3294 break;
3296 case MEM:
3297 operands[3] = adjust_address (operands[1], DImode, 8);
3298 operands[2] = adjust_address (operands[1], DImode, 0);
3299 break;
3301 CASE_CONST_SCALAR_INT:
3302 case CONST_DOUBLE:
3303 gcc_assert (operands[1] == CONST0_RTX (mode));
3304 operands[2] = operands[3] = const0_rtx;
3305 break;
3307 default:
3308 gcc_unreachable ();
3311 switch (GET_CODE (operands[0]))
3313 case REG:
3314 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3315 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3316 break;
3318 case MEM:
3319 operands[1] = adjust_address (operands[0], DImode, 8);
3320 operands[0] = adjust_address (operands[0], DImode, 0);
3321 break;
3323 default:
3324 gcc_unreachable ();
3327 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3329 std::swap (operands[0], operands[1]);
3330 std::swap (operands[2], operands[3]);
3334 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3335 op2 is a register containing the sign bit, operation is the
3336 logical operation to be performed. */
3338 void
3339 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3341 rtx high_bit = operands[2];
3342 rtx scratch;
3343 int move;
3345 alpha_split_tmode_pair (operands, TFmode, false);
3347 /* Detect three flavors of operand overlap. */
3348 move = 1;
3349 if (rtx_equal_p (operands[0], operands[2]))
3350 move = 0;
3351 else if (rtx_equal_p (operands[1], operands[2]))
3353 if (rtx_equal_p (operands[0], high_bit))
3354 move = 2;
3355 else
3356 move = -1;
3359 if (move < 0)
3360 emit_move_insn (operands[0], operands[2]);
3362 /* ??? If the destination overlaps both source tf and high_bit, then
3363 assume source tf is dead in its entirety and use the other half
3364 for a scratch register. Otherwise "scratch" is just the proper
3365 destination register. */
3366 scratch = operands[move < 2 ? 1 : 3];
3368 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3370 if (move > 0)
3372 emit_move_insn (operands[0], operands[2]);
3373 if (move > 1)
3374 emit_move_insn (operands[1], scratch);
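/* A bit-level sketch of what the splitter above produces for negtf2 and
   abstf2, treating the TFmode value as two little-endian 64-bit halves
   (an illustration; the real code also has to cope with operand overlap).  */

#include <stdint.h>

struct tf_bits_sketch { uint64_t lo, hi; };

static struct tf_bits_sketch
tf_neg_sketch (struct tf_bits_sketch x)
{
  x.hi ^= (uint64_t) 1 << 63;           /* flip the sign bit of the high half */
  return x;
}

static struct tf_bits_sketch
tf_abs_sketch (struct tf_bits_sketch x)
{
  x.hi &= ~((uint64_t) 1 << 63);        /* clear the sign bit */
  return x;
}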
3378 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3379 unaligned data:
3381 unsigned: signed:
3382 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3383 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3384 lda r3,X(r11) lda r3,X+2(r11)
3385 extwl r1,r3,r1 extql r1,r3,r1
3386 extwh r2,r3,r2 extqh r2,r3,r2
3387 or r1,r2,r1 or r1,r2,r1
3388 sra r1,48,r1
3390 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3391 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3392 lda r3,X(r11) lda r3,X(r11)
3393 extll r1,r3,r1 extll r1,r3,r1
3394 extlh r2,r3,r2 extlh r2,r3,r2
3395 or r1,r2,r1 addl r1,r2,r1
3397 quad: ldq_u r1,X(r11)
3398 ldq_u r2,X+7(r11)
3399 lda r3,X(r11)
3400 extql r1,r3,r1
3401 extqh r2,r3,r2
3402 or r1,r2,r1
3405 void
3406 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3407 HOST_WIDE_INT ofs, int sign)
3409 rtx meml, memh, addr, extl, exth, tmp, mema;
3410 machine_mode mode;
3412 if (TARGET_BWX && size == 2)
3414 meml = adjust_address (mem, QImode, ofs);
3415 memh = adjust_address (mem, QImode, ofs+1);
3416 extl = gen_reg_rtx (DImode);
3417 exth = gen_reg_rtx (DImode);
3418 emit_insn (gen_zero_extendqidi2 (extl, meml));
3419 emit_insn (gen_zero_extendqidi2 (exth, memh));
3420 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3421 NULL, 1, OPTAB_LIB_WIDEN);
3422 addr = expand_simple_binop (DImode, IOR, extl, exth,
3423 NULL, 1, OPTAB_LIB_WIDEN);
3425 if (sign && GET_MODE (tgt) != HImode)
3427 addr = gen_lowpart (HImode, addr);
3428 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3430 else
3432 if (GET_MODE (tgt) != DImode)
3433 addr = gen_lowpart (GET_MODE (tgt), addr);
3434 emit_move_insn (tgt, addr);
3436 return;
3439 meml = gen_reg_rtx (DImode);
3440 memh = gen_reg_rtx (DImode);
3441 addr = gen_reg_rtx (DImode);
3442 extl = gen_reg_rtx (DImode);
3443 exth = gen_reg_rtx (DImode);
3445 mema = XEXP (mem, 0);
3446 if (GET_CODE (mema) == LO_SUM)
3447 mema = force_reg (Pmode, mema);
3449 /* AND addresses cannot be in any alias set, since they may implicitly
3450 alias surrounding code. Ideally we'd have some alias set that
3451 covered all types except those with alignment 8 or higher. */
3453 tmp = change_address (mem, DImode,
3454 gen_rtx_AND (DImode,
3455 plus_constant (DImode, mema, ofs),
3456 GEN_INT (-8)));
3457 set_mem_alias_set (tmp, 0);
3458 emit_move_insn (meml, tmp);
3460 tmp = change_address (mem, DImode,
3461 gen_rtx_AND (DImode,
3462 plus_constant (DImode, mema,
3463 ofs + size - 1),
3464 GEN_INT (-8)));
3465 set_mem_alias_set (tmp, 0);
3466 emit_move_insn (memh, tmp);
3468 if (sign && size == 2)
3470 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3472 emit_insn (gen_extql (extl, meml, addr));
3473 emit_insn (gen_extqh (exth, memh, addr));
3475 /* We must use tgt here for the target. Alpha-vms port fails if we use
3476 addr for the target, because addr is marked as a pointer and combine
3477 knows that pointers are always sign-extended 32-bit values. */
3478 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3479 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3480 addr, 1, OPTAB_WIDEN);
3482 else
3484 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3485 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3486 switch ((int) size)
3488 case 2:
3489 emit_insn (gen_extwh (exth, memh, addr));
3490 mode = HImode;
3491 break;
3492 case 4:
3493 emit_insn (gen_extlh (exth, memh, addr));
3494 mode = SImode;
3495 break;
3496 case 8:
3497 emit_insn (gen_extqh (exth, memh, addr));
3498 mode = DImode;
3499 break;
3500 default:
3501 gcc_unreachable ();
3504 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3505 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3506 sign, OPTAB_WIDEN);
3509 if (addr != tgt)
3510 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
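/* A conceptual C model of the non-BWX quadword case above, assuming a
   little-endian flat address space and the standard <stdint.h>/<string.h>
   names: two aligned loads cover the unaligned field and the pieces are
   shifted into place and merged, much like ldq_u/extql/extqh/or.  The
   shift == 0 case corresponds to the extxh-at-offset-zero hazard that the
   multi-word copy code below handles with a cmov.  */

#include <stdint.h>
#include <string.h>

static uint64_t
unaligned_load_sketch (const unsigned char *p)
{
  uintptr_t a = (uintptr_t) p;
  unsigned int shift = (a & 7) * 8;
  uint64_t lo, hi;

  memcpy (&lo, (const void *) (a & ~(uintptr_t) 7), 8);        /* ldq_u 0(p) */
  memcpy (&hi, (const void *) ((a + 7) & ~(uintptr_t) 7), 8);  /* ldq_u 7(p) */

  if (shift == 0)
    return lo;                                  /* already aligned */
  return (lo >> shift) | (hi << (64 - shift));  /* extql | extqh */
}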
3513 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3515 void
3516 alpha_expand_unaligned_store (rtx dst, rtx src,
3517 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3519 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3521 if (TARGET_BWX && size == 2)
3523 if (src != const0_rtx)
3525 dstl = gen_lowpart (QImode, src);
3526 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3527 NULL, 1, OPTAB_LIB_WIDEN);
3528 dsth = gen_lowpart (QImode, dsth);
3530 else
3531 dstl = dsth = const0_rtx;
3533 meml = adjust_address (dst, QImode, ofs);
3534 memh = adjust_address (dst, QImode, ofs+1);
3536 emit_move_insn (meml, dstl);
3537 emit_move_insn (memh, dsth);
3538 return;
3541 dstl = gen_reg_rtx (DImode);
3542 dsth = gen_reg_rtx (DImode);
3543 insl = gen_reg_rtx (DImode);
3544 insh = gen_reg_rtx (DImode);
3546 dsta = XEXP (dst, 0);
3547 if (GET_CODE (dsta) == LO_SUM)
3548 dsta = force_reg (Pmode, dsta);
3550 /* AND addresses cannot be in any alias set, since they may implicitly
3551 alias surrounding code. Ideally we'd have some alias set that
3552 covered all types except those with alignment 8 or higher. */
3554 meml = change_address (dst, DImode,
3555 gen_rtx_AND (DImode,
3556 plus_constant (DImode, dsta, ofs),
3557 GEN_INT (-8)));
3558 set_mem_alias_set (meml, 0);
3560 memh = change_address (dst, DImode,
3561 gen_rtx_AND (DImode,
3562 plus_constant (DImode, dsta,
3563 ofs + size - 1),
3564 GEN_INT (-8)));
3565 set_mem_alias_set (memh, 0);
3567 emit_move_insn (dsth, memh);
3568 emit_move_insn (dstl, meml);
3570 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3572 if (src != CONST0_RTX (GET_MODE (src)))
3574 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3575 GEN_INT (size*8), addr));
3577 switch ((int) size)
3579 case 2:
3580 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3581 break;
3582 case 4:
3583 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3584 break;
3585 case 8:
3586 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3587 break;
3588 default:
3589 gcc_unreachable ();
3593 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3595 switch ((int) size)
3597 case 2:
3598 emit_insn (gen_mskwl (dstl, dstl, addr));
3599 break;
3600 case 4:
3601 emit_insn (gen_mskll (dstl, dstl, addr));
3602 break;
3603 case 8:
3604 emit_insn (gen_mskql (dstl, dstl, addr));
3605 break;
3606 default:
3607 gcc_unreachable ();
3610 if (src != CONST0_RTX (GET_MODE (src)))
3612 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3613 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3616 /* Must store high before low for degenerate case of aligned. */
3617 emit_move_insn (memh, dsth);
3618 emit_move_insn (meml, dstl);
3621 /* The block move code tries to maximize speed by separating loads and
3622 stores at the expense of register pressure: we load all of the data
3623 before we store it back out. Two secondary effects are worth
3624 mentioning: this speeds copying to/from aligned and unaligned buffers,
3625 and it makes the code significantly easier to write. */
3627 #define MAX_MOVE_WORDS 8
3629 /* Load an integral number of consecutive unaligned quadwords. */
3631 static void
3632 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3633 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3635 rtx const im8 = GEN_INT (-8);
3636 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3637 rtx sreg, areg, tmp, smema;
3638 HOST_WIDE_INT i;
3640 smema = XEXP (smem, 0);
3641 if (GET_CODE (smema) == LO_SUM)
3642 smema = force_reg (Pmode, smema);
3644 /* Generate all the tmp registers we need. */
3645 for (i = 0; i < words; ++i)
3647 data_regs[i] = out_regs[i];
3648 ext_tmps[i] = gen_reg_rtx (DImode);
3650 data_regs[words] = gen_reg_rtx (DImode);
3652 if (ofs != 0)
3653 smem = adjust_address (smem, GET_MODE (smem), ofs);
3655 /* Load up all of the source data. */
3656 for (i = 0; i < words; ++i)
3658 tmp = change_address (smem, DImode,
3659 gen_rtx_AND (DImode,
3660 plus_constant (DImode, smema, 8*i),
3661 im8));
3662 set_mem_alias_set (tmp, 0);
3663 emit_move_insn (data_regs[i], tmp);
3666 tmp = change_address (smem, DImode,
3667 gen_rtx_AND (DImode,
3668 plus_constant (DImode, smema,
3669 8*words - 1),
3670 im8));
3671 set_mem_alias_set (tmp, 0);
3672 emit_move_insn (data_regs[words], tmp);
3674 /* Extract the half-word fragments. Unfortunately DEC decided to make
3675 extxh with offset zero a noop instead of zeroing the register, so
3676 we must take care of that edge condition ourselves with cmov. */
3678 sreg = copy_addr_to_reg (smema);
3679 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3680 1, OPTAB_WIDEN);
3681 for (i = 0; i < words; ++i)
3683 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3684 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3685 emit_insn (gen_rtx_SET (ext_tmps[i],
3686 gen_rtx_IF_THEN_ELSE (DImode,
3687 gen_rtx_EQ (DImode, areg,
3688 const0_rtx),
3689 const0_rtx, ext_tmps[i])));
3692 /* Merge the half-words into whole words. */
3693 for (i = 0; i < words; ++i)
3695 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3696 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3700 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3701 may be NULL to store zeros. */
3703 static void
3704 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3705 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3707 rtx const im8 = GEN_INT (-8);
3708 rtx ins_tmps[MAX_MOVE_WORDS];
3709 rtx st_tmp_1, st_tmp_2, dreg;
3710 rtx st_addr_1, st_addr_2, dmema;
3711 HOST_WIDE_INT i;
3713 dmema = XEXP (dmem, 0);
3714 if (GET_CODE (dmema) == LO_SUM)
3715 dmema = force_reg (Pmode, dmema);
3717 /* Generate all the tmp registers we need. */
3718 if (data_regs != NULL)
3719 for (i = 0; i < words; ++i)
3720 ins_tmps[i] = gen_reg_rtx(DImode);
3721 st_tmp_1 = gen_reg_rtx(DImode);
3722 st_tmp_2 = gen_reg_rtx(DImode);
3724 if (ofs != 0)
3725 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3727 st_addr_2 = change_address (dmem, DImode,
3728 gen_rtx_AND (DImode,
3729 plus_constant (DImode, dmema,
3730 words*8 - 1),
3731 im8));
3732 set_mem_alias_set (st_addr_2, 0);
3734 st_addr_1 = change_address (dmem, DImode,
3735 gen_rtx_AND (DImode, dmema, im8));
3736 set_mem_alias_set (st_addr_1, 0);
3738 /* Load up the destination end bits. */
3739 emit_move_insn (st_tmp_2, st_addr_2);
3740 emit_move_insn (st_tmp_1, st_addr_1);
3742 /* Shift the input data into place. */
3743 dreg = copy_addr_to_reg (dmema);
3744 if (data_regs != NULL)
3746 for (i = words-1; i >= 0; --i)
3748 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3749 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3751 for (i = words-1; i > 0; --i)
3753 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3754 ins_tmps[i-1], ins_tmps[i-1], 1,
3755 OPTAB_WIDEN);
3759 /* Split and merge the ends with the destination data. */
3760 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3761 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3763 if (data_regs != NULL)
3765 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3766 st_tmp_2, 1, OPTAB_WIDEN);
3767 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3768 st_tmp_1, 1, OPTAB_WIDEN);
3771 /* Store it all. */
3772 emit_move_insn (st_addr_2, st_tmp_2);
3773 for (i = words-1; i > 0; --i)
3775 rtx tmp = change_address (dmem, DImode,
3776 gen_rtx_AND (DImode,
3777 plus_constant (DImode,
3778 dmema, i*8),
3779 im8));
3780 set_mem_alias_set (tmp, 0);
3781 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3783 emit_move_insn (st_addr_1, st_tmp_1);
3787 /* Expand string/block move operations.
3789 operands[0] is the pointer to the destination.
3790 operands[1] is the pointer to the source.
3791 operands[2] is the number of bytes to move.
3792 operands[3] is the alignment. */
3794 int
3795 alpha_expand_block_move (rtx operands[])
3797 rtx bytes_rtx = operands[2];
3798 rtx align_rtx = operands[3];
3799 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3800 HOST_WIDE_INT bytes = orig_bytes;
3801 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3802 HOST_WIDE_INT dst_align = src_align;
3803 rtx orig_src = operands[1];
3804 rtx orig_dst = operands[0];
3805 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3806 rtx tmp;
3807 unsigned int i, words, ofs, nregs = 0;
3809 if (orig_bytes <= 0)
3810 return 1;
3811 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3812 return 0;
3814 /* Look for additional alignment information from recorded register info. */
3816 tmp = XEXP (orig_src, 0);
3817 if (REG_P (tmp))
3818 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3819 else if (GET_CODE (tmp) == PLUS
3820 && REG_P (XEXP (tmp, 0))
3821 && CONST_INT_P (XEXP (tmp, 1)))
3823 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3824 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3826 if (a > src_align)
3828 if (a >= 64 && c % 8 == 0)
3829 src_align = 64;
3830 else if (a >= 32 && c % 4 == 0)
3831 src_align = 32;
3832 else if (a >= 16 && c % 2 == 0)
3833 src_align = 16;
3837 tmp = XEXP (orig_dst, 0);
3838 if (REG_P (tmp))
3839 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3840 else if (GET_CODE (tmp) == PLUS
3841 && REG_P (XEXP (tmp, 0))
3842 && CONST_INT_P (XEXP (tmp, 1)))
3844 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3845 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3847 if (a > dst_align)
3849 if (a >= 64 && c % 8 == 0)
3850 dst_align = 64;
3851 else if (a >= 32 && c % 4 == 0)
3852 dst_align = 32;
3853 else if (a >= 16 && c % 2 == 0)
3854 dst_align = 16;
3858 ofs = 0;
3859 if (src_align >= 64 && bytes >= 8)
3861 words = bytes / 8;
3863 for (i = 0; i < words; ++i)
3864 data_regs[nregs + i] = gen_reg_rtx (DImode);
3866 for (i = 0; i < words; ++i)
3867 emit_move_insn (data_regs[nregs + i],
3868 adjust_address (orig_src, DImode, ofs + i * 8));
3870 nregs += words;
3871 bytes -= words * 8;
3872 ofs += words * 8;
3875 if (src_align >= 32 && bytes >= 4)
3877 words = bytes / 4;
3879 for (i = 0; i < words; ++i)
3880 data_regs[nregs + i] = gen_reg_rtx (SImode);
3882 for (i = 0; i < words; ++i)
3883 emit_move_insn (data_regs[nregs + i],
3884 adjust_address (orig_src, SImode, ofs + i * 4));
3886 nregs += words;
3887 bytes -= words * 4;
3888 ofs += words * 4;
3891 if (bytes >= 8)
3893 words = bytes / 8;
3895 for (i = 0; i < words+1; ++i)
3896 data_regs[nregs + i] = gen_reg_rtx (DImode);
3898 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3899 words, ofs);
3901 nregs += words;
3902 bytes -= words * 8;
3903 ofs += words * 8;
3906 if (! TARGET_BWX && bytes >= 4)
3908 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3909 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3910 bytes -= 4;
3911 ofs += 4;
3914 if (bytes >= 2)
3916 if (src_align >= 16)
3918 do {
3919 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3921 bytes -= 2;
3922 ofs += 2;
3923 } while (bytes >= 2);
3925 else if (! TARGET_BWX)
3927 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3928 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3929 bytes -= 2;
3930 ofs += 2;
3934 while (bytes > 0)
3936 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3937 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3938 bytes -= 1;
3939 ofs += 1;
3942 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3944 /* Now save it back out again. */
3946 i = 0, ofs = 0;
3948 /* Write out the data in whatever chunks reading the source allowed. */
3949 if (dst_align >= 64)
3951 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3953 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3954 data_regs[i]);
3955 ofs += 8;
3956 i++;
3960 if (dst_align >= 32)
3962 /* If the source has remaining DImode regs, write them out in
3963 two pieces. */
3964 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3966 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3967 NULL_RTX, 1, OPTAB_WIDEN);
3969 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3970 gen_lowpart (SImode, data_regs[i]));
3971 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3972 gen_lowpart (SImode, tmp));
3973 ofs += 8;
3974 i++;
3977 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3979 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3980 data_regs[i]);
3981 ofs += 4;
3982 i++;
3986 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3988 /* Write out a remaining block of words using unaligned methods. */
3990 for (words = 1; i + words < nregs; words++)
3991 if (GET_MODE (data_regs[i + words]) != DImode)
3992 break;
3994 if (words == 1)
3995 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3996 else
3997 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3998 words, ofs);
4000 i += words;
4001 ofs += words * 8;
4004 /* Due to the above, this won't be aligned. */
4005 /* ??? If we have more than one of these, consider constructing full
4006 words in registers and using alpha_expand_unaligned_store_words. */
4007 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4009 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4010 ofs += 4;
4011 i++;
4014 if (dst_align >= 16)
4015 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4017 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4018 i++;
4019 ofs += 2;
4021 else
4022 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4024 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4025 i++;
4026 ofs += 2;
4029 /* The remainder must be byte copies. */
4030 while (i < nregs)
4032 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4033 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4034 i++;
4035 ofs += 1;
4038 return 1;
4041 int
4042 alpha_expand_block_clear (rtx operands[])
4044 rtx bytes_rtx = operands[1];
4045 rtx align_rtx = operands[3];
4046 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4047 HOST_WIDE_INT bytes = orig_bytes;
4048 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4049 HOST_WIDE_INT alignofs = 0;
4050 rtx orig_dst = operands[0];
4051 rtx tmp;
4052 int i, words, ofs = 0;
4054 if (orig_bytes <= 0)
4055 return 1;
4056 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4057 return 0;
4059 /* Look for stricter alignment. */
4060 tmp = XEXP (orig_dst, 0);
4061 if (REG_P (tmp))
4062 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4063 else if (GET_CODE (tmp) == PLUS
4064 && REG_P (XEXP (tmp, 0))
4065 && CONST_INT_P (XEXP (tmp, 1)))
4067 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4068 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4070 if (a > align)
4072 if (a >= 64)
4073 align = a, alignofs = 8 - c % 8;
4074 else if (a >= 32)
4075 align = a, alignofs = 4 - c % 4;
4076 else if (a >= 16)
4077 align = a, alignofs = 2 - c % 2;
4081 /* Handle an unaligned prefix first. */
4083 if (alignofs > 0)
4085 /* Given that alignofs is bounded by align, the only time BWX could
4086 generate three stores is for a 7 byte fill. Prefer two individual
4087 stores over a load/mask/store sequence. */
4088 if ((!TARGET_BWX || alignofs == 7)
4089 && align >= 32
4090 && !(alignofs == 4 && bytes >= 4))
4092 machine_mode mode = (align >= 64 ? DImode : SImode);
4093 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4094 rtx mem, tmp;
4095 HOST_WIDE_INT mask;
4097 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4098 set_mem_alias_set (mem, 0);
4100 mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8));
4101 if (bytes < alignofs)
4103 mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8);
4104 ofs += bytes;
4105 bytes = 0;
4107 else
4109 bytes -= alignofs;
4110 ofs += alignofs;
4112 alignofs = 0;
4114 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4115 NULL_RTX, 1, OPTAB_WIDEN);
4117 emit_move_insn (mem, tmp);
4120 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4122 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4123 bytes -= 1;
4124 ofs += 1;
4125 alignofs -= 1;
4127 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4129 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4130 bytes -= 2;
4131 ofs += 2;
4132 alignofs -= 2;
4134 if (alignofs == 4 && bytes >= 4)
4136 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4137 bytes -= 4;
4138 ofs += 4;
4139 alignofs = 0;
4142 /* If we've not used the extra lead alignment information by now,
4143 we won't be able to. Downgrade align to match what's left over. */
4144 if (alignofs > 0)
4146 alignofs = alignofs & -alignofs;
4147 align = MIN (align, alignofs * BITS_PER_UNIT);
4151 /* Handle a block of contiguous long-words. */
4153 if (align >= 64 && bytes >= 8)
4155 words = bytes / 8;
4157 for (i = 0; i < words; ++i)
4158 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4159 const0_rtx);
4161 bytes -= words * 8;
4162 ofs += words * 8;
4165 /* If the block is large and appropriately aligned, emit a single
4166 store followed by a sequence of stq_u insns. */
4168 if (align >= 32 && bytes > 16)
4170 rtx orig_dsta;
4172 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4173 bytes -= 4;
4174 ofs += 4;
4176 orig_dsta = XEXP (orig_dst, 0);
4177 if (GET_CODE (orig_dsta) == LO_SUM)
4178 orig_dsta = force_reg (Pmode, orig_dsta);
4180 words = bytes / 8;
4181 for (i = 0; i < words; ++i)
4183 rtx mem
4184 = change_address (orig_dst, DImode,
4185 gen_rtx_AND (DImode,
4186 plus_constant (DImode, orig_dsta,
4187 ofs + i*8),
4188 GEN_INT (-8)));
4189 set_mem_alias_set (mem, 0);
4190 emit_move_insn (mem, const0_rtx);
4193 /* Depending on the alignment, the first stq_u may have overlapped
4194 with the initial stl, which means that the last stq_u didn't
4195 write as much as it would appear. Leave those questionable bytes
4196 unaccounted for. */
4197 bytes -= words * 8 - 4;
4198 ofs += words * 8 - 4;
4201 /* Handle a smaller block of aligned words. */
4203 if ((align >= 64 && bytes == 4)
4204 || (align == 32 && bytes >= 4))
4206 words = bytes / 4;
4208 for (i = 0; i < words; ++i)
4209 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4210 const0_rtx);
4212 bytes -= words * 4;
4213 ofs += words * 4;
4216   /* An unaligned block uses stq_u stores for as many quadwords as possible.  */
4218 if (bytes >= 8)
4220 words = bytes / 8;
4222 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4224 bytes -= words * 8;
4225 ofs += words * 8;
4228 /* Next clean up any trailing pieces. */
4230 /* Count the number of bits in BYTES for which aligned stores could
4231 be emitted. */
4232 words = 0;
4233 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4234 if (bytes & i)
4235 words += 1;
4237 /* If we have appropriate alignment (and it wouldn't take too many
4238 instructions otherwise), mask out the bytes we need. */
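  /* Editorial example, not part of the original source: with TARGET_BWX,
     align >= 64 and bytes == 7, the loop above counts the 1-, 2- and 4-byte
     pieces and sets WORDS to 3, so the test below prefers one quadword
     load/and/store over three separate byte, word and longword stores.  */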
4239 if (TARGET_BWX ? words > 2 : bytes > 0)
4241 if (align >= 64)
4243 rtx mem, tmp;
4244 HOST_WIDE_INT mask;
4246 mem = adjust_address (orig_dst, DImode, ofs);
4247 set_mem_alias_set (mem, 0);
4249 mask = HOST_WIDE_INT_M1U << (bytes * 8);
4251 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4252 NULL_RTX, 1, OPTAB_WIDEN);
4254 emit_move_insn (mem, tmp);
4255 return 1;
4257 else if (align >= 32 && bytes < 4)
4259 rtx mem, tmp;
4260 HOST_WIDE_INT mask;
4262 mem = adjust_address (orig_dst, SImode, ofs);
4263 set_mem_alias_set (mem, 0);
4265 mask = HOST_WIDE_INT_M1U << (bytes * 8);
4267 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4268 NULL_RTX, 1, OPTAB_WIDEN);
4270 emit_move_insn (mem, tmp);
4271 return 1;
4275 if (!TARGET_BWX && bytes >= 4)
4277 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4278 bytes -= 4;
4279 ofs += 4;
4282 if (bytes >= 2)
4284 if (align >= 16)
4286 do {
4287 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4288 const0_rtx);
4289 bytes -= 2;
4290 ofs += 2;
4291 } while (bytes >= 2);
4293 else if (! TARGET_BWX)
4295 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4296 bytes -= 2;
4297 ofs += 2;
4301 while (bytes > 0)
4303 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4304 bytes -= 1;
4305 ofs += 1;
4308 return 1;
4311 /* Returns a mask so that zap(x, value) == x & mask. */
4314 alpha_expand_zap_mask (HOST_WIDE_INT value)
4316 rtx result;
4317 int i;
4318 HOST_WIDE_INT mask = 0;
4320 for (i = 7; i >= 0; --i)
4322 mask <<= 8;
4323 if (!((value >> i) & 1))
4324 mask |= 0xff;
4327 result = gen_int_mode (mask, DImode);
4328 return result;
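/* Worked example (editorial, for illustration only): for VALUE == 0x81 the
   loop above clears mask bytes 0 and 7 and fills the remaining six bytes
   with 0xff, so the returned constant is 0x00ffffffffffff00 and indeed
   zap (x, 0x81) == x & 0x00ffffffffffff00.  */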
4331 void
4332 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4333 machine_mode mode,
4334 rtx op0, rtx op1, rtx op2)
4336 op0 = gen_lowpart (mode, op0);
4338 if (op1 == const0_rtx)
4339 op1 = CONST0_RTX (mode);
4340 else
4341 op1 = gen_lowpart (mode, op1);
4343 if (op2 == const0_rtx)
4344 op2 = CONST0_RTX (mode);
4345 else
4346 op2 = gen_lowpart (mode, op2);
4348 emit_insn ((*gen) (op0, op1, op2));
4351 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4352 COND is true. Mark the jump as unlikely to be taken. */
4354 static void
4355 emit_unlikely_jump (rtx cond, rtx label)
4357 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4358 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
4359 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
4362 /* A subroutine of the atomic operation splitters. Emit a load-locked
4363 instruction in MODE. */
4365 static void
4366 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
4368 rtx (*fn) (rtx, rtx) = NULL;
4369 if (mode == SImode)
4370 fn = gen_load_locked_si;
4371 else if (mode == DImode)
4372 fn = gen_load_locked_di;
4373 emit_insn (fn (reg, mem));
4376 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4377 instruction in MODE. */
4379 static void
4380 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
4382 rtx (*fn) (rtx, rtx, rtx) = NULL;
4383 if (mode == SImode)
4384 fn = gen_store_conditional_si;
4385 else if (mode == DImode)
4386 fn = gen_store_conditional_di;
4387 emit_insn (fn (res, mem, val));
4390 /* Subroutines of the atomic operation splitters. Emit barriers
4391 as needed for the memory MODEL. */
4393 static void
4394 alpha_pre_atomic_barrier (enum memmodel model)
4396 if (need_atomic_barrier_p (model, true))
4397 emit_insn (gen_memory_barrier ());
4400 static void
4401 alpha_post_atomic_barrier (enum memmodel model)
4403 if (need_atomic_barrier_p (model, false))
4404 emit_insn (gen_memory_barrier ());
4407 /* A subroutine of the atomic operation splitters. Emit an insxl
4408 instruction in MODE. */
4410 static rtx
4411 emit_insxl (machine_mode mode, rtx op1, rtx op2)
4413 rtx ret = gen_reg_rtx (DImode);
4414 rtx (*fn) (rtx, rtx, rtx);
4416 switch (mode)
4418 case E_QImode:
4419 fn = gen_insbl;
4420 break;
4421 case E_HImode:
4422 fn = gen_inswl;
4423 break;
4424 case E_SImode:
4425 fn = gen_insll;
4426 break;
4427 case E_DImode:
4428 fn = gen_insql;
4429 break;
4430 default:
4431 gcc_unreachable ();
4434 op1 = force_reg (mode, op1);
4435 emit_insn (fn (ret, op1, op2));
4437 return ret;
4440 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4441 to perform. MEM is the memory on which to operate. VAL is the second
4442 operand of the binary operator. BEFORE and AFTER are optional locations to
4443    return the value of MEM either before or after the operation.  SCRATCH is
4444 a scratch register. */
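/* Editorial sketch of the code this splitter emits for a DImode
   fetch-and-add, with the optional barriers omitted and register names
   purely illustrative:

     retry:  ldq_l   scratch,0(mem)      # load-locked, the BEFORE value
             addq    scratch,val,scratch
             stq_c   scratch,0(mem)      # store-conditional
             beq     scratch,retry       # reservation lost -> try again

   When BEFORE or AFTER is supplied it receives a copy of the loaded or
   the computed value respectively.  */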
4446 void
4447 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4448 rtx after, rtx scratch, enum memmodel model)
4450 machine_mode mode = GET_MODE (mem);
4451 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4453 alpha_pre_atomic_barrier (model);
4455 label = gen_label_rtx ();
4456 emit_label (label);
4457 label = gen_rtx_LABEL_REF (DImode, label);
4459 if (before == NULL)
4460 before = scratch;
4461 emit_load_locked (mode, before, mem);
4463 if (code == NOT)
4465 x = gen_rtx_AND (mode, before, val);
4466 emit_insn (gen_rtx_SET (val, x));
4468 x = gen_rtx_NOT (mode, val);
4470 else
4471 x = gen_rtx_fmt_ee (code, mode, before, val);
4472 if (after)
4473 emit_insn (gen_rtx_SET (after, copy_rtx (x)));
4474 emit_insn (gen_rtx_SET (scratch, x));
4476 emit_store_conditional (mode, cond, mem, scratch);
4478 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4479 emit_unlikely_jump (x, label);
4481 alpha_post_atomic_barrier (model);
4484 /* Expand a compare and swap operation. */
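/* Editorial sketch (illustrative only) of the DImode sequence produced
   below for a strong compare-and-swap, barriers omitted:

     retry:  ldq_l   retval,0(mem)
             cmpeq   retval,oldval,cond
             beq     cond,done           # values differ -> give up
             mov     newval,cond
             stq_c   cond,0(mem)
             beq     cond,retry          # reservation lost -> try again
     done:

   For a weak compare-and-swap the backward branch to RETRY is not
   emitted.  */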
4486 void
4487 alpha_split_compare_and_swap (rtx operands[])
4489 rtx cond, retval, mem, oldval, newval;
4490 bool is_weak;
4491 enum memmodel mod_s, mod_f;
4492 machine_mode mode;
4493 rtx label1, label2, x;
4495 cond = operands[0];
4496 retval = operands[1];
4497 mem = operands[2];
4498 oldval = operands[3];
4499 newval = operands[4];
4500 is_weak = (operands[5] != const0_rtx);
4501 mod_s = memmodel_from_int (INTVAL (operands[6]));
4502 mod_f = memmodel_from_int (INTVAL (operands[7]));
4503 mode = GET_MODE (mem);
4505 alpha_pre_atomic_barrier (mod_s);
4507 label1 = NULL_RTX;
4508 if (!is_weak)
4510 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4511 emit_label (XEXP (label1, 0));
4513 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4515 emit_load_locked (mode, retval, mem);
4517 x = gen_lowpart (DImode, retval);
4518 if (oldval == const0_rtx)
4520 emit_move_insn (cond, const0_rtx);
4521 x = gen_rtx_NE (DImode, x, const0_rtx);
4523 else
4525 x = gen_rtx_EQ (DImode, x, oldval);
4526 emit_insn (gen_rtx_SET (cond, x));
4527 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4529 emit_unlikely_jump (x, label2);
4531 emit_move_insn (cond, newval);
4532 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4534 if (!is_weak)
4536 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4537 emit_unlikely_jump (x, label1);
4540 if (!is_mm_relaxed (mod_f))
4541 emit_label (XEXP (label2, 0));
4543 alpha_post_atomic_barrier (mod_s);
4545 if (is_mm_relaxed (mod_f))
4546 emit_label (XEXP (label2, 0));
4549 void
4550 alpha_expand_compare_and_swap_12 (rtx operands[])
4552 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4553 machine_mode mode;
4554 rtx addr, align, wdst;
4555 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4557 cond = operands[0];
4558 dst = operands[1];
4559 mem = operands[2];
4560 oldval = operands[3];
4561 newval = operands[4];
4562 is_weak = operands[5];
4563 mod_s = operands[6];
4564 mod_f = operands[7];
4565 mode = GET_MODE (mem);
4567 /* We forced the address into a register via mem_noofs_operand. */
4568 addr = XEXP (mem, 0);
4569 gcc_assert (register_operand (addr, DImode));
4571 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4572 NULL_RTX, 1, OPTAB_DIRECT);
4574 oldval = convert_modes (DImode, mode, oldval, 1);
4576 if (newval != const0_rtx)
4577 newval = emit_insxl (mode, newval, addr);
4579 wdst = gen_reg_rtx (DImode);
4580 if (mode == QImode)
4581 gen = gen_atomic_compare_and_swapqi_1;
4582 else
4583 gen = gen_atomic_compare_and_swaphi_1;
4584 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4585 is_weak, mod_s, mod_f));
4587 emit_move_insn (dst, gen_lowpart (mode, wdst));
4590 void
4591 alpha_split_compare_and_swap_12 (rtx operands[])
4593 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4594 machine_mode mode;
4595 bool is_weak;
4596 enum memmodel mod_s, mod_f;
4597 rtx label1, label2, mem, addr, width, mask, x;
4599 cond = operands[0];
4600 dest = operands[1];
4601 orig_mem = operands[2];
4602 oldval = operands[3];
4603 newval = operands[4];
4604 align = operands[5];
4605 is_weak = (operands[6] != const0_rtx);
4606 mod_s = memmodel_from_int (INTVAL (operands[7]));
4607 mod_f = memmodel_from_int (INTVAL (operands[8]));
4608 scratch = operands[9];
4609 mode = GET_MODE (orig_mem);
4610 addr = XEXP (orig_mem, 0);
4612 mem = gen_rtx_MEM (DImode, align);
4613 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4614 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4615 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4617 alpha_pre_atomic_barrier (mod_s);
4619 label1 = NULL_RTX;
4620 if (!is_weak)
4622 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4623 emit_label (XEXP (label1, 0));
4625 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4627 emit_load_locked (DImode, scratch, mem);
4629 width = GEN_INT (GET_MODE_BITSIZE (mode));
4630 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4631 emit_insn (gen_extxl (dest, scratch, width, addr));
4633 if (oldval == const0_rtx)
4635 emit_move_insn (cond, const0_rtx);
4636 x = gen_rtx_NE (DImode, dest, const0_rtx);
4638 else
4640 x = gen_rtx_EQ (DImode, dest, oldval);
4641 emit_insn (gen_rtx_SET (cond, x));
4642 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4644 emit_unlikely_jump (x, label2);
4646 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4648 if (newval != const0_rtx)
4649 emit_insn (gen_iordi3 (cond, cond, newval));
4651 emit_store_conditional (DImode, cond, mem, cond);
4653 if (!is_weak)
4655 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4656 emit_unlikely_jump (x, label1);
4659 if (!is_mm_relaxed (mod_f))
4660 emit_label (XEXP (label2, 0));
4662 alpha_post_atomic_barrier (mod_s);
4664 if (is_mm_relaxed (mod_f))
4665 emit_label (XEXP (label2, 0));
4668 /* Expand an atomic exchange operation. */
4670 void
4671 alpha_split_atomic_exchange (rtx operands[])
4673 rtx retval, mem, val, scratch;
4674 enum memmodel model;
4675 machine_mode mode;
4676 rtx label, x, cond;
4678 retval = operands[0];
4679 mem = operands[1];
4680 val = operands[2];
4681 model = (enum memmodel) INTVAL (operands[3]);
4682 scratch = operands[4];
4683 mode = GET_MODE (mem);
4684 cond = gen_lowpart (DImode, scratch);
4686 alpha_pre_atomic_barrier (model);
4688 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4689 emit_label (XEXP (label, 0));
4691 emit_load_locked (mode, retval, mem);
4692 emit_move_insn (scratch, val);
4693 emit_store_conditional (mode, cond, mem, scratch);
4695 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4696 emit_unlikely_jump (x, label);
4698 alpha_post_atomic_barrier (model);
4701 void
4702 alpha_expand_atomic_exchange_12 (rtx operands[])
4704 rtx dst, mem, val, model;
4705 machine_mode mode;
4706 rtx addr, align, wdst;
4707 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4709 dst = operands[0];
4710 mem = operands[1];
4711 val = operands[2];
4712 model = operands[3];
4713 mode = GET_MODE (mem);
4715 /* We forced the address into a register via mem_noofs_operand. */
4716 addr = XEXP (mem, 0);
4717 gcc_assert (register_operand (addr, DImode));
4719 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4720 NULL_RTX, 1, OPTAB_DIRECT);
4722 /* Insert val into the correct byte location within the word. */
4723 if (val != const0_rtx)
4724 val = emit_insxl (mode, val, addr);
4726 wdst = gen_reg_rtx (DImode);
4727 if (mode == QImode)
4728 gen = gen_atomic_exchangeqi_1;
4729 else
4730 gen = gen_atomic_exchangehi_1;
4731 emit_insn (gen (wdst, mem, val, align, model));
4733 emit_move_insn (dst, gen_lowpart (mode, wdst));
4736 void
4737 alpha_split_atomic_exchange_12 (rtx operands[])
4739 rtx dest, orig_mem, addr, val, align, scratch;
4740 rtx label, mem, width, mask, x;
4741 machine_mode mode;
4742 enum memmodel model;
4744 dest = operands[0];
4745 orig_mem = operands[1];
4746 val = operands[2];
4747 align = operands[3];
4748 model = (enum memmodel) INTVAL (operands[4]);
4749 scratch = operands[5];
4750 mode = GET_MODE (orig_mem);
4751 addr = XEXP (orig_mem, 0);
4753 mem = gen_rtx_MEM (DImode, align);
4754 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4755 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4756 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4758 alpha_pre_atomic_barrier (model);
4760 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4761 emit_label (XEXP (label, 0));
4763 emit_load_locked (DImode, scratch, mem);
4765 width = GEN_INT (GET_MODE_BITSIZE (mode));
4766 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4767 emit_insn (gen_extxl (dest, scratch, width, addr));
4768 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4769 if (val != const0_rtx)
4770 emit_insn (gen_iordi3 (scratch, scratch, val));
4772 emit_store_conditional (DImode, scratch, mem, scratch);
4774 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4775 emit_unlikely_jump (x, label);
4777 alpha_post_atomic_barrier (model);
4780 /* Adjust the cost of a scheduling dependency. Return the new cost of
4781 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4783 static int
4784 alpha_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4785 unsigned int)
4787 enum attr_type dep_insn_type;
4789 /* If the dependence is an anti-dependence, there is no cost. For an
4790 output dependence, there is sometimes a cost, but it doesn't seem
4791 worth handling those few cases. */
4792 if (dep_type != 0)
4793 return cost;
4795 /* If we can't recognize the insns, we can't really do anything. */
4796 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4797 return cost;
4799 dep_insn_type = get_attr_type (dep_insn);
4801 /* Bring in the user-defined memory latency. */
4802 if (dep_insn_type == TYPE_ILD
4803 || dep_insn_type == TYPE_FLD
4804 || dep_insn_type == TYPE_LDSYM)
4805 cost += alpha_memory_latency-1;
4807 /* Everything else handled in DFA bypasses now. */
4809 return cost;
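/* Editorial note: with the default alpha_memory_latency of 3, the
   adjustment above turns, e.g., a cost-1 true dependence on an integer
   load (TYPE_ILD) into a cost of 3, modelling the extra load-to-use
   latency.  */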
4812 /* The number of instructions that can be issued per cycle. */
4814 static int
4815 alpha_issue_rate (void)
4817 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4820 /* How many alternative schedules to try. This should be as wide as the
4821 scheduling freedom in the DFA, but no wider. Making this value too
4822    large results in extra work for the scheduler.
4824 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4825 alternative schedules. For EV5, we can choose between E0/E1 and
4826 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4828 static int
4829 alpha_multipass_dfa_lookahead (void)
4831 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4834 /* Machine-specific function data. */
4836 struct GTY(()) alpha_links;
4838 struct GTY(()) machine_function
4840 /* For flag_reorder_blocks_and_partition. */
4841 rtx gp_save_rtx;
4843 /* For VMS condition handlers. */
4844 bool uses_condition_handler;
4846 /* Linkage entries. */
4847 hash_map<nofree_string_hash, alpha_links *> *links;
4850 /* How to allocate a 'struct machine_function'. */
4852 static struct machine_function *
4853 alpha_init_machine_status (void)
4855 return ggc_cleared_alloc<machine_function> ();
4858 /* Support for frame based VMS condition handlers. */
4860 /* A VMS condition handler may be established for a function with a call to
4861 __builtin_establish_vms_condition_handler, and cancelled with a call to
4862 __builtin_revert_vms_condition_handler.
4864 The VMS Condition Handling Facility knows about the existence of a handler
4865    from the procedure descriptor .handler field.  As with the VMS native compilers,
4866    we store the user-specified handler's address at a fixed location in the
4867 stack frame and point the procedure descriptor at a common wrapper which
4868 fetches the real handler's address and issues an indirect call.
4870 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4872 We force the procedure kind to PT_STACK, and the fixed frame location is
4873 fp+8, just before the register save area. We use the handler_data field in
4874 the procedure descriptor to state the fp offset at which the installed
4875 handler address can be found. */
4877 #define VMS_COND_HANDLER_FP_OFFSET 8
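/* Editorial usage sketch (hypothetical user code, OpenVMS only; the exact
   builtin prototypes are not shown in this file):

     old = __builtin_establish_vms_condition_handler (my_handler);
     ...
     __builtin_revert_vms_condition_handler ();

   The first call returns the previously installed handler and stores
   MY_HANDLER at fp + VMS_COND_HANDLER_FP_OFFSET; the second installs a
   null handler through the same mechanism.  */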
4879 /* Expand code to store the currently installed user VMS condition handler
4880 into TARGET and install HANDLER as the new condition handler. */
4882 void
4883 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4885 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4886 VMS_COND_HANDLER_FP_OFFSET);
4888 rtx handler_slot
4889 = gen_rtx_MEM (DImode, handler_slot_address);
4891 emit_move_insn (target, handler_slot);
4892 emit_move_insn (handler_slot, handler);
4894 /* Notify the start/prologue/epilogue emitters that the condition handler
4895 slot is needed. In addition to reserving the slot space, this will force
4896    the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4897 use above is correct. */
4898 cfun->machine->uses_condition_handler = true;
4901 /* Expand code to store the current VMS condition handler into TARGET and
4902 nullify it. */
4904 void
4905 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4907 /* We implement this by establishing a null condition handler, with the tiny
4908 side effect of setting uses_condition_handler. This is a little bit
4909 pessimistic if no actual builtin_establish call is ever issued, which is
4910 not a real problem and expected never to happen anyway. */
4912 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4915 /* Functions to save and restore alpha_return_addr_rtx. */
4917 /* Start the ball rolling with RETURN_ADDR_RTX. */
4920 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4922 if (count != 0)
4923 return const0_rtx;
4925 return get_hard_reg_initial_val (Pmode, REG_RA);
4928 /* Return or create a memory slot containing the gp value for the current
4929 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4932 alpha_gp_save_rtx (void)
4934 rtx_insn *seq;
4935 rtx m = cfun->machine->gp_save_rtx;
4937 if (m == NULL)
4939 start_sequence ();
4941 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4942 m = validize_mem (m);
4943 emit_move_insn (m, pic_offset_table_rtx);
4945 seq = get_insns ();
4946 end_sequence ();
4948 /* We used to simply emit the sequence after entry_of_function.
4949     However, this breaks the CFG if the first instruction in the
4950 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4951 label. Emit the sequence properly on the edge. We are only
4952 invoked from dw2_build_landing_pads and finish_eh_generation
4953 will call commit_edge_insertions thanks to a kludge. */
4954 insert_insn_on_edge (seq,
4955 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4957 cfun->machine->gp_save_rtx = m;
4960 return m;
4963 static void
4964 alpha_instantiate_decls (void)
4966 if (cfun->machine->gp_save_rtx != NULL_RTX)
4967 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4970 static int
4971 alpha_ra_ever_killed (void)
4973 rtx_insn *top;
4975 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4976 return (int)df_regs_ever_live_p (REG_RA);
4978 push_topmost_sequence ();
4979 top = get_insns ();
4980 pop_topmost_sequence ();
4982 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
4986 /* Return the trap mode suffix applicable to the current
4987 instruction, or NULL. */
4989 static const char *
4990 get_trap_mode_suffix (void)
4992 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4994 switch (s)
4996 case TRAP_SUFFIX_NONE:
4997 return NULL;
4999 case TRAP_SUFFIX_SU:
5000 if (alpha_fptm >= ALPHA_FPTM_SU)
5001 return "su";
5002 return NULL;
5004 case TRAP_SUFFIX_SUI:
5005 if (alpha_fptm >= ALPHA_FPTM_SUI)
5006 return "sui";
5007 return NULL;
5009 case TRAP_SUFFIX_V_SV:
5010 switch (alpha_fptm)
5012 case ALPHA_FPTM_N:
5013 return NULL;
5014 case ALPHA_FPTM_U:
5015 return "v";
5016 case ALPHA_FPTM_SU:
5017 case ALPHA_FPTM_SUI:
5018 return "sv";
5019 default:
5020 gcc_unreachable ();
5023 case TRAP_SUFFIX_V_SV_SVI:
5024 switch (alpha_fptm)
5026 case ALPHA_FPTM_N:
5027 return NULL;
5028 case ALPHA_FPTM_U:
5029 return "v";
5030 case ALPHA_FPTM_SU:
5031 return "sv";
5032 case ALPHA_FPTM_SUI:
5033 return "svi";
5034 default:
5035 gcc_unreachable ();
5037 break;
5039 case TRAP_SUFFIX_U_SU_SUI:
5040 switch (alpha_fptm)
5042 case ALPHA_FPTM_N:
5043 return NULL;
5044 case ALPHA_FPTM_U:
5045 return "u";
5046 case ALPHA_FPTM_SU:
5047 return "su";
5048 case ALPHA_FPTM_SUI:
5049 return "sui";
5050 default:
5051 gcc_unreachable ();
5053 break;
5055 default:
5056 gcc_unreachable ();
5058 gcc_unreachable ();
5061 /* Return the rounding mode suffix applicable to the current
5062 instruction, or NULL. */
5064 static const char *
5065 get_round_mode_suffix (void)
5067 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5069 switch (s)
5071 case ROUND_SUFFIX_NONE:
5072 return NULL;
5073 case ROUND_SUFFIX_NORMAL:
5074 switch (alpha_fprm)
5076 case ALPHA_FPRM_NORM:
5077 return NULL;
5078 case ALPHA_FPRM_MINF:
5079 return "m";
5080 case ALPHA_FPRM_CHOP:
5081 return "c";
5082 case ALPHA_FPRM_DYN:
5083 return "d";
5084 default:
5085 gcc_unreachable ();
5087 break;
5089 case ROUND_SUFFIX_C:
5090 return "c";
5092 default:
5093 gcc_unreachable ();
5095 gcc_unreachable ();
5098 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5100 static bool
5101 alpha_print_operand_punct_valid_p (unsigned char code)
5103 return (code == '/' || code == ',' || code == '-' || code == '~'
5104 || code == '#' || code == '*' || code == '&');
5107 /* Implement TARGET_PRINT_OPERAND. The alpha-specific
5108 operand codes are documented below. */
5110 static void
5111 alpha_print_operand (FILE *file, rtx x, int code)
5113 int i;
5115 switch (code)
5117 case '~':
5118 /* Print the assembler name of the current function. */
5119 assemble_name (file, alpha_fnname);
5120 break;
5122 case '&':
5123 if (const char *name = get_some_local_dynamic_name ())
5124 assemble_name (file, name);
5125 else
5126 output_operand_lossage ("'%%&' used without any "
5127 "local dynamic TLS references");
5128 break;
5130 case '/':
5131 /* Generates the instruction suffix. The TRAP_SUFFIX and ROUND_SUFFIX
5132 attributes are examined to determine what is appropriate. */
5134 const char *trap = get_trap_mode_suffix ();
5135 const char *round = get_round_mode_suffix ();
5137 if (trap || round)
5138 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5139 break;
5142 case ',':
5143 /* Generates single precision suffix for floating point
5144 instructions (s for IEEE, f for VAX). */
5145 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5146 break;
5148 case '-':
5149 /* Generates double precision suffix for floating point
5150 instructions (t for IEEE, g for VAX). */
5151 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5152 break;
5154 case '#':
5155 if (alpha_this_literal_sequence_number == 0)
5156 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5157 fprintf (file, "%d", alpha_this_literal_sequence_number);
5158 break;
5160 case '*':
5161 if (alpha_this_gpdisp_sequence_number == 0)
5162 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5163 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5164 break;
5166 case 'J':
5168 const char *lituse;
5170 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5172 x = XVECEXP (x, 0, 0);
5173 lituse = "lituse_tlsgd";
5175 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5177 x = XVECEXP (x, 0, 0);
5178 lituse = "lituse_tlsldm";
5180 else if (CONST_INT_P (x))
5181 lituse = "lituse_jsr";
5182 else
5184 output_operand_lossage ("invalid %%J value");
5185 break;
5188 if (x != const0_rtx)
5189 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5191 break;
5193 case 'j':
5195 const char *lituse;
5197 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5198 lituse = "lituse_jsrdirect";
5199 #else
5200 lituse = "lituse_jsr";
5201 #endif
5203 gcc_assert (INTVAL (x) != 0);
5204 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5206 break;
5207 case 'r':
5208 /* If this operand is the constant zero, write it as "$31". */
5209 if (REG_P (x))
5210 fprintf (file, "%s", reg_names[REGNO (x)]);
5211 else if (x == CONST0_RTX (GET_MODE (x)))
5212 fprintf (file, "$31");
5213 else
5214 output_operand_lossage ("invalid %%r value");
5215 break;
5217 case 'R':
5218 /* Similar, but for floating-point. */
5219 if (REG_P (x))
5220 fprintf (file, "%s", reg_names[REGNO (x)]);
5221 else if (x == CONST0_RTX (GET_MODE (x)))
5222 fprintf (file, "$f31");
5223 else
5224 output_operand_lossage ("invalid %%R value");
5225 break;
5227 case 'N':
5228 /* Write the 1's complement of a constant. */
5229 if (!CONST_INT_P (x))
5230 output_operand_lossage ("invalid %%N value");
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5233 break;
5235 case 'P':
5236 /* Write 1 << C, for a constant C. */
5237 if (!CONST_INT_P (x))
5238 output_operand_lossage ("invalid %%P value");
5240 fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x));
5241 break;
5243 case 'h':
5244 /* Write the high-order 16 bits of a constant, sign-extended. */
5245 if (!CONST_INT_P (x))
5246 output_operand_lossage ("invalid %%h value");
5248 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5249 break;
5251 case 'L':
5252 /* Write the low-order 16 bits of a constant, sign-extended. */
5253 if (!CONST_INT_P (x))
5254 output_operand_lossage ("invalid %%L value");
5256 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5257 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5258 break;
5260 case 'm':
5261 /* Write mask for ZAP insn. */
5262 if (CONST_INT_P (x))
5264 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5266 for (i = 0; i < 8; i++, value >>= 8)
5267 if (value & 0xff)
5268 mask |= (1 << i);
5270 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5272 else
5273 output_operand_lossage ("invalid %%m value");
5274 break;
5276 case 'M':
5277 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5278 if (!mode_width_operand (x, VOIDmode))
5279 output_operand_lossage ("invalid %%M value");
5281 fprintf (file, "%s",
5282 (INTVAL (x) == 8 ? "b"
5283 : INTVAL (x) == 16 ? "w"
5284 : INTVAL (x) == 32 ? "l"
5285 : "q"));
5286 break;
5288 case 'U':
5289 /* Similar, except do it from the mask. */
5290 if (CONST_INT_P (x))
5292 HOST_WIDE_INT value = INTVAL (x);
5294 if (value == 0xff)
5296 fputc ('b', file);
5297 break;
5299 if (value == 0xffff)
5301 fputc ('w', file);
5302 break;
5304 if (value == 0xffffffff)
5306 fputc ('l', file);
5307 break;
5309 if (value == -1)
5311 fputc ('q', file);
5312 break;
5316 output_operand_lossage ("invalid %%U value");
5317 break;
5319 case 's':
5320 /* Write the constant value divided by 8. */
5321 if (!CONST_INT_P (x)
5322 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5323 || (INTVAL (x) & 7) != 0)
5324 output_operand_lossage ("invalid %%s value");
5326 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5327 break;
5329 case 'C': case 'D': case 'c': case 'd':
5330 /* Write out comparison name. */
5332 enum rtx_code c = GET_CODE (x);
5334 if (!COMPARISON_P (x))
5335 output_operand_lossage ("invalid %%C value");
5337 else if (code == 'D')
5338 c = reverse_condition (c);
5339 else if (code == 'c')
5340 c = swap_condition (c);
5341 else if (code == 'd')
5342 c = swap_condition (reverse_condition (c));
5344 if (c == LEU)
5345 fprintf (file, "ule");
5346 else if (c == LTU)
5347 fprintf (file, "ult");
5348 else if (c == UNORDERED)
5349 fprintf (file, "un");
5350 else
5351 fprintf (file, "%s", GET_RTX_NAME (c));
5353 break;
5355 case 'E':
5356 /* Write the divide or modulus operator. */
5357 switch (GET_CODE (x))
5359 case DIV:
5360 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5361 break;
5362 case UDIV:
5363 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5364 break;
5365 case MOD:
5366 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5367 break;
5368 case UMOD:
5369 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5370 break;
5371 default:
5372 output_operand_lossage ("invalid %%E value");
5373 break;
5375 break;
5377 case 'A':
5378 /* Write "_u" for unaligned access. */
5379 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5380 fprintf (file, "_u");
5381 break;
5383 case 0:
5384 if (REG_P (x))
5385 fprintf (file, "%s", reg_names[REGNO (x)]);
5386 else if (MEM_P (x))
5387 output_address (GET_MODE (x), XEXP (x, 0));
5388 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5390 switch (XINT (XEXP (x, 0), 1))
5392 case UNSPEC_DTPREL:
5393 case UNSPEC_TPREL:
5394 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5395 break;
5396 default:
5397 output_operand_lossage ("unknown relocation unspec");
5398 break;
5401 else
5402 output_addr_const (file, x);
5403 break;
5405 default:
5406 output_operand_lossage ("invalid %%xn code");
5410 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
5412 static void
5413 alpha_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr)
5415 int basereg = 31;
5416 HOST_WIDE_INT offset = 0;
5418 if (GET_CODE (addr) == AND)
5419 addr = XEXP (addr, 0);
5421 if (GET_CODE (addr) == PLUS
5422 && CONST_INT_P (XEXP (addr, 1)))
5424 offset = INTVAL (XEXP (addr, 1));
5425 addr = XEXP (addr, 0);
5428 if (GET_CODE (addr) == LO_SUM)
5430 const char *reloc16, *reloclo;
5431 rtx op1 = XEXP (addr, 1);
5433 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5435 op1 = XEXP (op1, 0);
5436 switch (XINT (op1, 1))
5438 case UNSPEC_DTPREL:
5439 reloc16 = NULL;
5440 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5441 break;
5442 case UNSPEC_TPREL:
5443 reloc16 = NULL;
5444 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5445 break;
5446 default:
5447 output_operand_lossage ("unknown relocation unspec");
5448 return;
5451 output_addr_const (file, XVECEXP (op1, 0, 0));
5453 else
5455 reloc16 = "gprel";
5456 reloclo = "gprellow";
5457 output_addr_const (file, op1);
5460 if (offset)
5461 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5463 addr = XEXP (addr, 0);
5464 switch (GET_CODE (addr))
5466 case REG:
5467 basereg = REGNO (addr);
5468 break;
5470 case SUBREG:
5471 basereg = subreg_regno (addr);
5472 break;
5474 default:
5475 gcc_unreachable ();
5478 fprintf (file, "($%d)\t\t!%s", basereg,
5479 (basereg == 29 ? reloc16 : reloclo));
5480 return;
5483 switch (GET_CODE (addr))
5485 case REG:
5486 basereg = REGNO (addr);
5487 break;
5489 case SUBREG:
5490 basereg = subreg_regno (addr);
5491 break;
5493 case CONST_INT:
5494 offset = INTVAL (addr);
5495 break;
5497 case SYMBOL_REF:
5498 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5499 fprintf (file, "%s", XSTR (addr, 0));
5500 return;
5502 case CONST:
5503 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5504 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5505 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5506 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5507 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5508 INTVAL (XEXP (XEXP (addr, 0), 1)));
5509 return;
5511 default:
5512 output_operand_lossage ("invalid operand address");
5513 return;
5516 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5519 /* Emit RTL insns to initialize the variable parts of a trampoline at
5520 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5521 for the static chain value for the function. */
5523 static void
5524 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5526 rtx fnaddr, mem, word1, word2;
5528 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5530 #ifdef POINTERS_EXTEND_UNSIGNED
5531 fnaddr = convert_memory_address (Pmode, fnaddr);
5532 chain_value = convert_memory_address (Pmode, chain_value);
5533 #endif
5535 if (TARGET_ABI_OPEN_VMS)
5537 const char *fnname;
5538 char *trname;
5540 /* Construct the name of the trampoline entry point. */
5541 fnname = XSTR (fnaddr, 0);
5542 trname = (char *) alloca (strlen (fnname) + 5);
5543 strcpy (trname, fnname);
5544 strcat (trname, "..tr");
5545 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5546 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5548       /* The trampoline (or "bounded") procedure descriptor is constructed from
5549    the function's procedure descriptor, with certain fields zeroed per
5550 the VMS calling standard. This is stored in the first quadword. */
5551 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5552 word1 = expand_and (DImode, word1,
5553 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5554 NULL);
5556 else
5558 /* These 4 instructions are:
5559 ldq $1,24($27)
5560 ldq $27,16($27)
5561     jmp $31,($27),0
5562     nop
5563 We don't bother setting the HINT field of the jump; the nop
5564 is merely there for padding. */
5565 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5566 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5569 /* Store the first two words, as computed above. */
5570 mem = adjust_address (m_tramp, DImode, 0);
5571 emit_move_insn (mem, word1);
5572 mem = adjust_address (m_tramp, DImode, 8);
5573 emit_move_insn (mem, word2);
5575 /* Store function address and static chain value. */
5576 mem = adjust_address (m_tramp, Pmode, 16);
5577 emit_move_insn (mem, fnaddr);
5578 mem = adjust_address (m_tramp, Pmode, 24);
5579 emit_move_insn (mem, chain_value);
5581 if (TARGET_ABI_OSF)
5583 emit_insn (gen_imb ());
5584 #ifdef HAVE_ENABLE_EXECUTE_STACK
5585 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5586 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
5587 #endif
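/* Editorial summary of the 32-byte OSF/1 trampoline laid out above
   (byte offsets within M_TRAMP):

      0: ldq $1,24($27) ; ldq $27,16($27)   -- first code quadword
      8: jmp $31,($27),0 ; nop              -- second code quadword
     16: address of the target function
     24: static chain value

   The target function then finds its static chain in $1 and its own
   address in $27, the procedure value register.  */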
5591 /* Determine where to put an argument to a function.
5592 Value is zero to push the argument on the stack,
5593 or a hard register in which to store the argument.
5595 MODE is the argument's machine mode.
5596 TYPE is the data type of the argument (as a tree).
5597 This is null for libcalls where that information may
5598 not be available.
5599 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5600 the preceding args and about the function being called.
5601 NAMED is nonzero if this argument is a named parameter
5602 (otherwise it is an extra parameter matching an ellipsis).
5604 On Alpha the first 6 words of args are normally in registers
5605 and the rest are pushed. */
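/* Editorial example (OSF/1, illustrative only): for a call such as
   f (int a, double b, int c), argument slots 0..2 are used in order, so
   A is passed in $16, B in $f17 (the FP register for slot 1, assuming
   TARGET_FPREGS) and C in $18; a seventh argument word would go on the
   stack.  */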
5607 static rtx
5608 alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
5609 const_tree type, bool named ATTRIBUTE_UNUSED)
5611 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5612 int basereg;
5613 int num_args;
5615 /* Don't get confused and pass small structures in FP registers. */
5616 if (type && AGGREGATE_TYPE_P (type))
5617 basereg = 16;
5618 else
5620 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5621 values here. */
5622 gcc_checking_assert (!COMPLEX_MODE_P (mode));
5624 /* Set up defaults for FP operands passed in FP registers, and
5625 integral operands passed in integer registers. */
5626 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5627 basereg = 32 + 16;
5628 else
5629 basereg = 16;
5632 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5633 the two platforms, so we can't avoid conditional compilation. */
5634 #if TARGET_ABI_OPEN_VMS
5636 if (mode == VOIDmode)
5637 return alpha_arg_info_reg_val (*cum);
5639 num_args = cum->num_args;
5640 if (num_args >= 6
5641 || targetm.calls.must_pass_in_stack (mode, type))
5642 return NULL_RTX;
5644 #elif TARGET_ABI_OSF
5646 if (*cum >= 6)
5647 return NULL_RTX;
5648 num_args = *cum;
5650 /* VOID is passed as a special flag for "last argument". */
5651 if (type == void_type_node)
5652 basereg = 16;
5653 else if (targetm.calls.must_pass_in_stack (mode, type))
5654 return NULL_RTX;
5656 #else
5657 #error Unhandled ABI
5658 #endif
5660 return gen_rtx_REG (mode, num_args + basereg);
5663 /* Update the data in CUM to advance over an argument
5664 of mode MODE and data type TYPE.
5665 (TYPE is null for libcalls where that information may not be available.) */
5667 static void
5668 alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
5669 const_tree type, bool named ATTRIBUTE_UNUSED)
5671 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5672 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5673 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type);
5675 #if TARGET_ABI_OSF
5676 *cum += increment;
5677 #else
5678 if (!onstack && cum->num_args < 6)
5679 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5680 cum->num_args += increment;
5681 #endif
5684 static int
5685 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5686 machine_mode mode ATTRIBUTE_UNUSED,
5687 tree type ATTRIBUTE_UNUSED,
5688 bool named ATTRIBUTE_UNUSED)
5690 int words = 0;
5691 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5693 #if TARGET_ABI_OPEN_VMS
5694 if (cum->num_args < 6
5695 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type))
5696 words = 6 - cum->num_args;
5697 #elif TARGET_ABI_OSF
5698 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type))
5699 words = 6 - *cum;
5700 #else
5701 #error Unhandled ABI
5702 #endif
5704 return words * UNITS_PER_WORD;
5708 /* Return true if TYPE must be returned in memory, instead of in registers. */
5710 static bool
5711 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5713 machine_mode mode = VOIDmode;
5714 int size;
5716 if (type)
5718 mode = TYPE_MODE (type);
5720 /* All aggregates are returned in memory, except on OpenVMS where
5721 records that fit 64 bits should be returned by immediate value
5722 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5723 if (TARGET_ABI_OPEN_VMS
5724 && TREE_CODE (type) != ARRAY_TYPE
5725 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5726 return false;
5728 if (AGGREGATE_TYPE_P (type))
5729 return true;
5732 size = GET_MODE_SIZE (mode);
5733 switch (GET_MODE_CLASS (mode))
5735 case MODE_VECTOR_FLOAT:
5736 /* Pass all float vectors in memory, like an aggregate. */
5737 return true;
5739 case MODE_COMPLEX_FLOAT:
5740 /* We judge complex floats on the size of their element,
5741 not the size of the whole type. */
5742 size = GET_MODE_UNIT_SIZE (mode);
5743 break;
5745 case MODE_INT:
5746 case MODE_FLOAT:
5747 case MODE_COMPLEX_INT:
5748 case MODE_VECTOR_INT:
5749 break;
5751 default:
5752 /* ??? We get called on all sorts of random stuff from
5753 aggregate_value_p. We must return something, but it's not
5754 clear what's safe to return. Pretend it's a struct I
5755 guess. */
5756 return true;
5759 /* Otherwise types must fit in one register. */
5760 return size > UNITS_PER_WORD;
5763 /* Return true if TYPE should be passed by invisible reference. */
5765 static bool
5766 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5767 machine_mode mode,
5768 const_tree type ATTRIBUTE_UNUSED,
5769 bool named)
5771 /* Pass float and _Complex float variable arguments by reference.
5772 This avoids 64-bit store from a FP register to a pretend args save area
5773 and subsequent 32-bit load from the saved location to a FP register.
5775 Note that 32-bit loads and stores to/from a FP register on alpha reorder
5776 bits to form a canonical 64-bit value in the FP register. This fact
5777 invalidates compiler assumption that 32-bit FP value lives in the lower
5778 32-bits of the passed 64-bit FP value, so loading the 32-bit value from
5779 the stored 64-bit location using 32-bit FP load is invalid on alpha.
5781    This introduces a sort of ABI incompatibility, but until _Float32 was
5782    introduced, C-family languages promoted a 32-bit float variable arg to
5783    a 64-bit double, and it was not allowed to pass float as a variable
5784 argument. Passing _Complex float as a variable argument never
5785 worked on alpha. Thus, we have no backward compatibility issues
5786 to worry about, and passing unpromoted _Float32 and _Complex float
5787 as a variable argument will actually work in the future. */
5789 if (mode == SFmode || mode == SCmode)
5790 return !named;
5792 return mode == TFmode || mode == TCmode;
5795 /* Define how to find the value returned by a function. VALTYPE is the
5796 data type of the value (as a tree). If the precise function being
5797 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5798 MODE is set instead of VALTYPE for libcalls.
5800 On Alpha the value is found in $0 for integer functions and
5801 $f0 for floating-point functions. */
5803 static rtx
5804 alpha_function_value_1 (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5805 machine_mode mode)
5807 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5808 enum mode_class mclass;
5810 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5812 if (valtype)
5813 mode = TYPE_MODE (valtype);
5815 mclass = GET_MODE_CLASS (mode);
5816 switch (mclass)
5818 case MODE_INT:
5819 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5820 where we have them returning both SImode and DImode. */
5821 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5822 PROMOTE_MODE (mode, dummy, valtype);
5823 /* FALLTHRU */
5825 case MODE_COMPLEX_INT:
5826 case MODE_VECTOR_INT:
5827 regnum = 0;
5828 break;
5830 case MODE_FLOAT:
5831 regnum = 32;
5832 break;
5834 case MODE_COMPLEX_FLOAT:
5836 machine_mode cmode = GET_MODE_INNER (mode);
5838 return gen_rtx_PARALLEL
5839 (VOIDmode,
5840 gen_rtvec (2,
5841 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5842 const0_rtx),
5843 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5844 GEN_INT (GET_MODE_SIZE (cmode)))));
5847 case MODE_RANDOM:
5848 /* We should only reach here for BLKmode on VMS. */
5849 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5850 regnum = 0;
5851 break;
5853 default:
5854 gcc_unreachable ();
5857 return gen_rtx_REG (mode, regnum);
5860 /* Implement TARGET_FUNCTION_VALUE. */
5862 static rtx
5863 alpha_function_value (const_tree valtype, const_tree fn_decl_or_type,
5864 bool /*outgoing*/)
5866 return alpha_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
5869 /* Implement TARGET_LIBCALL_VALUE. */
5871 static rtx
5872 alpha_libcall_value (machine_mode mode, const_rtx /*fun*/)
5874 return alpha_function_value_1 (NULL_TREE, NULL_TREE, mode);
5877 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5879    On the Alpha, $0 $1 and $f0 $f1 are the only registers thus used.  */
5881 static bool
5882 alpha_function_value_regno_p (const unsigned int regno)
5884 return (regno == 0 || regno == 1 || regno == 32 || regno == 33);
5887 /* TCmode complex values are passed by invisible reference. We
5888 should not split these values. */
5890 static bool
5891 alpha_split_complex_arg (const_tree type)
5893 return TYPE_MODE (type) != TCmode;
5896 static tree
5897 alpha_build_builtin_va_list (void)
5899 tree base, ofs, space, record, type_decl;
5901 if (TARGET_ABI_OPEN_VMS)
5902 return ptr_type_node;
5904 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5905 type_decl = build_decl (BUILTINS_LOCATION,
5906 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5907 TYPE_STUB_DECL (record) = type_decl;
5908 TYPE_NAME (record) = type_decl;
5910 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5912 /* Dummy field to prevent alignment warnings. */
5913 space = build_decl (BUILTINS_LOCATION,
5914 FIELD_DECL, NULL_TREE, integer_type_node);
5915 DECL_FIELD_CONTEXT (space) = record;
5916 DECL_ARTIFICIAL (space) = 1;
5917 DECL_IGNORED_P (space) = 1;
5919 ofs = build_decl (BUILTINS_LOCATION,
5920 FIELD_DECL, get_identifier ("__offset"),
5921 integer_type_node);
5922 DECL_FIELD_CONTEXT (ofs) = record;
5923 DECL_CHAIN (ofs) = space;
5925 base = build_decl (BUILTINS_LOCATION,
5926 FIELD_DECL, get_identifier ("__base"),
5927 ptr_type_node);
5928 DECL_FIELD_CONTEXT (base) = record;
5929 DECL_CHAIN (base) = ofs;
5931 TYPE_FIELDS (record) = base;
5932 layout_type (record);
5934 va_list_gpr_counter_field = ofs;
5935 return record;
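/* Editorial sketch (illustrative): on OSF/1 the record built above is
   roughly equivalent to

     struct __va_list_tag {
       void *__base;
       int __offset;
       int __dummy;
     };

   where __base points at the register save / overflow area, __offset is
   the byte offset of the next argument within it, and __dummy stands for
   the unnamed padding field declared above (the name is invented here).
   On OpenVMS a plain pointer is used instead, as returned above.  */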
5938 #if TARGET_ABI_OSF
5939 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5940 and constant additions. */
5942 static gimple *
5943 va_list_skip_additions (tree lhs)
5945 gimple *stmt;
5947 for (;;)
5949 enum tree_code code;
5951 stmt = SSA_NAME_DEF_STMT (lhs);
5953 if (gimple_code (stmt) == GIMPLE_PHI)
5954 return stmt;
5956 if (!is_gimple_assign (stmt)
5957 || gimple_assign_lhs (stmt) != lhs)
5958 return NULL;
5960 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5961 return stmt;
5962 code = gimple_assign_rhs_code (stmt);
5963 if (!CONVERT_EXPR_CODE_P (code)
5964 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5965 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5966 || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
5967 return stmt;
5969 lhs = gimple_assign_rhs1 (stmt);
5973 /* Check if LHS = RHS statement is
5974 LHS = *(ap.__base + ap.__offset + cst)
5976 LHS = *(ap.__base
5977 + ((ap.__offset + cst <= 47)
5978 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5979 If the former, indicate that GPR registers are needed,
5980 if the latter, indicate that FPR registers are needed.
5982 Also look for LHS = (*ptr).field, where ptr is one of the forms
5983 listed above.
5985    On alpha, cfun->va_list_gpr_size is used as the size of the needed
5986 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5987 registers are needed and bit 1 set if FPR registers are needed.
5988 Return true if va_list references should not be scanned for the
5989 current statement. */
5991 static bool
5992 alpha_stdarg_optimize_hook (struct stdarg_info *si, const gimple *stmt)
5994 tree base, offset, rhs;
5995 int offset_arg = 1;
5996 gimple *base_stmt;
5998 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5999 != GIMPLE_SINGLE_RHS)
6000 return false;
6002 rhs = gimple_assign_rhs1 (stmt);
6003 while (handled_component_p (rhs))
6004 rhs = TREE_OPERAND (rhs, 0);
6005 if (TREE_CODE (rhs) != MEM_REF
6006 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6007 return false;
6009 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6010 if (stmt == NULL
6011 || !is_gimple_assign (stmt)
6012 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6013 return false;
6015 base = gimple_assign_rhs1 (stmt);
6016 if (TREE_CODE (base) == SSA_NAME)
6018 base_stmt = va_list_skip_additions (base);
6019 if (base_stmt
6020 && is_gimple_assign (base_stmt)
6021 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6022 base = gimple_assign_rhs1 (base_stmt);
6025 if (TREE_CODE (base) != COMPONENT_REF
6026 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6028 base = gimple_assign_rhs2 (stmt);
6029 if (TREE_CODE (base) == SSA_NAME)
6031 base_stmt = va_list_skip_additions (base);
6032 if (base_stmt
6033 && is_gimple_assign (base_stmt)
6034 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6035 base = gimple_assign_rhs1 (base_stmt);
6038 if (TREE_CODE (base) != COMPONENT_REF
6039 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6040 return false;
6042 offset_arg = 0;
6045 base = get_base_address (base);
6046 if (TREE_CODE (base) != VAR_DECL
6047 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
6048 return false;
6050 offset = gimple_op (stmt, 1 + offset_arg);
6051 if (TREE_CODE (offset) == SSA_NAME)
6053 gimple *offset_stmt = va_list_skip_additions (offset);
6055 if (offset_stmt
6056 && gimple_code (offset_stmt) == GIMPLE_PHI)
6058 HOST_WIDE_INT sub;
6059 gimple *arg1_stmt, *arg2_stmt;
6060 tree arg1, arg2;
6061 enum tree_code code1, code2;
6063 if (gimple_phi_num_args (offset_stmt) != 2)
6064 goto escapes;
6066 arg1_stmt
6067 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6068 arg2_stmt
6069 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6070 if (arg1_stmt == NULL
6071 || !is_gimple_assign (arg1_stmt)
6072 || arg2_stmt == NULL
6073 || !is_gimple_assign (arg2_stmt))
6074 goto escapes;
6076 code1 = gimple_assign_rhs_code (arg1_stmt);
6077 code2 = gimple_assign_rhs_code (arg2_stmt);
6078 if (code1 == COMPONENT_REF
6079 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6080 /* Do nothing. */;
6081 else if (code2 == COMPONENT_REF
6082 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6084 std::swap (arg1_stmt, arg2_stmt);
6085 code2 = code1;
6087 else
6088 goto escapes;
6090 if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
6091 goto escapes;
6093 sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
6094 if (code2 == MINUS_EXPR)
6095 sub = -sub;
6096 if (sub < -48 || sub > -32)
6097 goto escapes;
6099 arg1 = gimple_assign_rhs1 (arg1_stmt);
6100 arg2 = gimple_assign_rhs1 (arg2_stmt);
6101 if (TREE_CODE (arg2) == SSA_NAME)
6103 arg2_stmt = va_list_skip_additions (arg2);
6104 if (arg2_stmt == NULL
6105 || !is_gimple_assign (arg2_stmt)
6106 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6107 goto escapes;
6108 arg2 = gimple_assign_rhs1 (arg2_stmt);
6110 if (arg1 != arg2)
6111 goto escapes;
6113 if (TREE_CODE (arg1) != COMPONENT_REF
6114 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6115 || get_base_address (arg1) != base)
6116 goto escapes;
6118 /* Need floating point regs. */
6119 cfun->va_list_fpr_size |= 2;
6120 return false;
6122 if (offset_stmt
6123 && is_gimple_assign (offset_stmt)
6124 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6125 offset = gimple_assign_rhs1 (offset_stmt);
6127 if (TREE_CODE (offset) != COMPONENT_REF
6128 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6129 || get_base_address (offset) != base)
6130 goto escapes;
6131 else
6132 /* Need general regs. */
6133 cfun->va_list_fpr_size |= 1;
6134 return false;
6136 escapes:
6137 si->va_list_escapes = true;
6138 return false;
6140 #endif
6142 /* Perform any actions needed for a function that is receiving a
6143 variable number of arguments. */
6145 static void
6146 alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
6147 tree type, int *pretend_size, int no_rtl)
6149 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6151 /* Skip the current argument. */
6152 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6153 true);
6155 #if TARGET_ABI_OPEN_VMS
6156 /* For VMS, we allocate space for all 6 arg registers plus a count.
6158 However, if NO registers need to be saved, don't allocate any space.
6159 This is not only because we won't need the space, but because AP
6160 includes the current_pretend_args_size and we don't want to mess up
6161 any ap-relative addresses already made. */
6162 if (cum.num_args < 6)
6164 if (!no_rtl)
6166 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6167 emit_insn (gen_arg_home ());
6169 *pretend_size = 7 * UNITS_PER_WORD;
6171 #else
6172 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6173 only push those that are remaining. However, if NO registers need to
6174 be saved, don't allocate any space. This is not only because we won't
6175 need the space, but because AP includes the current_pretend_args_size
6176 and we don't want to mess up any ap-relative addresses already made.
6178 If we are not to use the floating-point registers, save the integer
6179 registers where we would put the floating-point registers. This is
6180 not the most efficient way to implement varargs with just one register
6181 class, but it isn't worth doing anything more efficient in this rare
6182 case. */
6183 if (cum >= 6)
6184 return;
6186 if (!no_rtl)
6188 int count;
6189 alias_set_type set = get_varargs_alias_set ();
6190 rtx tmp;
6192 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6193 if (count > 6 - cum)
6194 count = 6 - cum;
6196 /* Detect whether integer registers or floating-point registers
6197 are needed by the detected va_arg statements. See above for
6198 how these values are computed. Note that the "escape" value
6199 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6200 these bits set. */
6201 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6203 if (cfun->va_list_fpr_size & 1)
6205 tmp = gen_rtx_MEM (BLKmode,
6206 plus_constant (Pmode, virtual_incoming_args_rtx,
6207 (cum + 6) * UNITS_PER_WORD));
6208 MEM_NOTRAP_P (tmp) = 1;
6209 set_mem_alias_set (tmp, set);
6210 move_block_from_reg (16 + cum, tmp, count);
6213 if (cfun->va_list_fpr_size & 2)
6215 tmp = gen_rtx_MEM (BLKmode,
6216 plus_constant (Pmode, virtual_incoming_args_rtx,
6217 cum * UNITS_PER_WORD));
6218 MEM_NOTRAP_P (tmp) = 1;
6219 set_mem_alias_set (tmp, set);
6220 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6223 *pretend_size = 12 * UNITS_PER_WORD;
6224 #endif
6227 static void
6228 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6230 HOST_WIDE_INT offset;
6231 tree t, offset_field, base_field;
6233 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6234 return;
6236 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6237 up by 48, storing fp arg registers in the first 48 bytes, and the
6238 integer arg registers in the next 48 bytes. This is only done,
6239 however, if any integer registers need to be stored.
6241 If no integer registers need be stored, then we must subtract 48
6242 in order to account for the integer arg registers which are counted
6243 in argsize above, but which are not actually stored on the stack.
6244 Must further be careful here about structures straddling the last
6245 integer argument register; that futzes with pretend_args_size,
6246 which changes the meaning of AP. */
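/* Editorial picture (illustrative) of the resulting layout relative to
   the __base field set below, assuming the register save area was
   allocated by TARGET_SETUP_INCOMING_VARARGS:

     __base - 48 ... __base - 1    saved FP argument registers
     __base +  0 ... __base + 47   saved integer argument registers
     __base + 48 ...               arguments passed on the stack

   alpha_gimplify_va_arg_1 selects the FP copy of an argument by
   subtracting 48 from __offset while __offset is still below 48.  */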
6248 if (NUM_ARGS < 6)
6249 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6250 else
6251 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6253 if (TARGET_ABI_OPEN_VMS)
6255 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6256 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6257 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6258 TREE_SIDE_EFFECTS (t) = 1;
6259 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6261 else
6263 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6264 offset_field = DECL_CHAIN (base_field);
6266 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6267 valist, base_field, NULL_TREE);
6268 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6269 valist, offset_field, NULL_TREE);
6271 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6272 t = fold_build_pointer_plus_hwi (t, offset);
6273 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6274 TREE_SIDE_EFFECTS (t) = 1;
6275 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6277 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6278 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6279 TREE_SIDE_EFFECTS (t) = 1;
6280 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
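/* For example, on OSF a function with one named argument and at least one
   anonymous register argument gets NUM_ARGS == 1, so the code above sets the
   base field to the incoming-arg pointer plus 48 (past the FP register save
   area) and the offset field to 8, just beyond the named argument.  */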
6284 static tree
6285 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6286 gimple_seq *pre_p)
6288 tree type_size, ptr_type, addend, t, addr;
6289 gimple_seq internal_post;
6291 /* If the type could not be passed in registers, skip the block
6292 reserved for the registers. */
6293 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6295 t = build_int_cst (TREE_TYPE (offset), 6*8);
6296 gimplify_assign (offset,
6297 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6298 pre_p);
6301 addend = offset;
6302 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6304 if (TREE_CODE (type) == COMPLEX_TYPE)
6306 tree real_part, imag_part, real_temp;
6308 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6309 offset, pre_p);
6311 /* Copy the value into a new temporary, lest the formal temporary
6312 be reused out from under us. */
6313 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6315 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6316 offset, pre_p);
6318 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6320 else if (TREE_CODE (type) == REAL_TYPE)
6322 tree fpaddend, cond, fourtyeight;
6324 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6325 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6326 addend, fourtyeight);
6327 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6328 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6329 fpaddend, addend);
6332 /* Build the final address and force that value into a temporary. */
6333 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6334 internal_post = NULL;
6335 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6336 gimple_seq_add_seq (pre_p, internal_post);
6338 /* Update the offset field. */
6339 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6340 if (type_size == NULL || TREE_OVERFLOW (type_size))
6341 t = size_zero_node;
6342 else
6344 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6345 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6346 t = size_binop (MULT_EXPR, t, size_int (8));
6348 t = fold_convert (TREE_TYPE (offset), t);
6349 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6350 pre_p);
6352 return build_va_arg_indirect_ref (addr);
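/* For example, when TYPE is a double and the current offset is still below
   48, the COND_EXPR above subtracts 48 so the value is read from the FP
   register save area; offsets of 48 and above are used unchanged and index
   the integer save area or the stack overflow area.  */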
6355 static tree
6356 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6357 gimple_seq *post_p)
6359 tree offset_field, base_field, offset, base, t, r;
6360 bool indirect;
6362 if (TARGET_ABI_OPEN_VMS)
6363 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6365 base_field = TYPE_FIELDS (va_list_type_node);
6366 offset_field = DECL_CHAIN (base_field);
6367 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6368 valist, base_field, NULL_TREE);
6369 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6370 valist, offset_field, NULL_TREE);
6372 /* Pull the fields of the structure out into temporaries. Since we never
6373 modify the base field, we can use a formal temporary. Sign-extend the
6374 offset field so that it's the proper width for pointer arithmetic. */
6375 base = get_formal_tmp_var (base_field, pre_p);
6377 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6378 offset = get_initialized_tmp_var (t, pre_p, NULL);
6380 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6381 if (indirect)
6382 type = build_pointer_type_for_mode (type, ptr_mode, true);
6384 /* Find the value. Note that this will be a stable indirection, or
6385 a composite of stable indirections in the case of complex. */
6386 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6388 /* Stuff the offset temporary back into its field. */
6389 gimplify_assign (unshare_expr (offset_field),
6390 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6392 if (indirect)
6393 r = build_va_arg_indirect_ref (r);
6395 return r;
6398 /* Builtins. */
6400 enum alpha_builtin
6402 ALPHA_BUILTIN_CMPBGE,
6403 ALPHA_BUILTIN_EXTBL,
6404 ALPHA_BUILTIN_EXTWL,
6405 ALPHA_BUILTIN_EXTLL,
6406 ALPHA_BUILTIN_EXTQL,
6407 ALPHA_BUILTIN_EXTWH,
6408 ALPHA_BUILTIN_EXTLH,
6409 ALPHA_BUILTIN_EXTQH,
6410 ALPHA_BUILTIN_INSBL,
6411 ALPHA_BUILTIN_INSWL,
6412 ALPHA_BUILTIN_INSLL,
6413 ALPHA_BUILTIN_INSQL,
6414 ALPHA_BUILTIN_INSWH,
6415 ALPHA_BUILTIN_INSLH,
6416 ALPHA_BUILTIN_INSQH,
6417 ALPHA_BUILTIN_MSKBL,
6418 ALPHA_BUILTIN_MSKWL,
6419 ALPHA_BUILTIN_MSKLL,
6420 ALPHA_BUILTIN_MSKQL,
6421 ALPHA_BUILTIN_MSKWH,
6422 ALPHA_BUILTIN_MSKLH,
6423 ALPHA_BUILTIN_MSKQH,
6424 ALPHA_BUILTIN_UMULH,
6425 ALPHA_BUILTIN_ZAP,
6426 ALPHA_BUILTIN_ZAPNOT,
6427 ALPHA_BUILTIN_AMASK,
6428 ALPHA_BUILTIN_IMPLVER,
6429 ALPHA_BUILTIN_RPCC,
6430 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6431 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6433 /* TARGET_MAX */
6434 ALPHA_BUILTIN_MINUB8,
6435 ALPHA_BUILTIN_MINSB8,
6436 ALPHA_BUILTIN_MINUW4,
6437 ALPHA_BUILTIN_MINSW4,
6438 ALPHA_BUILTIN_MAXUB8,
6439 ALPHA_BUILTIN_MAXSB8,
6440 ALPHA_BUILTIN_MAXUW4,
6441 ALPHA_BUILTIN_MAXSW4,
6442 ALPHA_BUILTIN_PERR,
6443 ALPHA_BUILTIN_PKLB,
6444 ALPHA_BUILTIN_PKWB,
6445 ALPHA_BUILTIN_UNPKBL,
6446 ALPHA_BUILTIN_UNPKBW,
6448 /* TARGET_CIX */
6449 ALPHA_BUILTIN_CTTZ,
6450 ALPHA_BUILTIN_CTLZ,
6451 ALPHA_BUILTIN_CTPOP,
6453 ALPHA_BUILTIN_max
6456 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6457 CODE_FOR_builtin_cmpbge,
6458 CODE_FOR_extbl,
6459 CODE_FOR_extwl,
6460 CODE_FOR_extll,
6461 CODE_FOR_extql,
6462 CODE_FOR_extwh,
6463 CODE_FOR_extlh,
6464 CODE_FOR_extqh,
6465 CODE_FOR_builtin_insbl,
6466 CODE_FOR_builtin_inswl,
6467 CODE_FOR_builtin_insll,
6468 CODE_FOR_insql,
6469 CODE_FOR_inswh,
6470 CODE_FOR_inslh,
6471 CODE_FOR_insqh,
6472 CODE_FOR_mskbl,
6473 CODE_FOR_mskwl,
6474 CODE_FOR_mskll,
6475 CODE_FOR_mskql,
6476 CODE_FOR_mskwh,
6477 CODE_FOR_msklh,
6478 CODE_FOR_mskqh,
6479 CODE_FOR_umuldi3_highpart,
6480 CODE_FOR_builtin_zap,
6481 CODE_FOR_builtin_zapnot,
6482 CODE_FOR_builtin_amask,
6483 CODE_FOR_builtin_implver,
6484 CODE_FOR_builtin_rpcc,
6485 CODE_FOR_builtin_establish_vms_condition_handler,
6486 CODE_FOR_builtin_revert_vms_condition_handler,
6488 /* TARGET_MAX */
6489 CODE_FOR_builtin_minub8,
6490 CODE_FOR_builtin_minsb8,
6491 CODE_FOR_builtin_minuw4,
6492 CODE_FOR_builtin_minsw4,
6493 CODE_FOR_builtin_maxub8,
6494 CODE_FOR_builtin_maxsb8,
6495 CODE_FOR_builtin_maxuw4,
6496 CODE_FOR_builtin_maxsw4,
6497 CODE_FOR_builtin_perr,
6498 CODE_FOR_builtin_pklb,
6499 CODE_FOR_builtin_pkwb,
6500 CODE_FOR_builtin_unpkbl,
6501 CODE_FOR_builtin_unpkbw,
6503 /* TARGET_CIX */
6504 CODE_FOR_ctzdi2,
6505 CODE_FOR_clzdi2,
6506 CODE_FOR_popcountdi2
6509 struct alpha_builtin_def
6511 const char *name;
6512 enum alpha_builtin code;
6513 unsigned int target_mask;
6514 bool is_const;
6517 static struct alpha_builtin_def const zero_arg_builtins[] = {
6518 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6519 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6522 static struct alpha_builtin_def const one_arg_builtins[] = {
6523 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6524 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6525 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6526 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6527 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6528 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6529 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6530 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6533 static struct alpha_builtin_def const two_arg_builtins[] = {
6534 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6535 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6536 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6537 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6538 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6539 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6540 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6541 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6542 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6543 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6544 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6545 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6546 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6547 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6548 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6549 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6550 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6551 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6552 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6553 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6554 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6555 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6556 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6557 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6558 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6559 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6560 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6561 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6562 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6563 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6564 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6565 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6566 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6567 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6570 static GTY(()) tree alpha_dimode_u;
6571 static GTY(()) tree alpha_v8qi_u;
6572 static GTY(()) tree alpha_v8qi_s;
6573 static GTY(()) tree alpha_v4hi_u;
6574 static GTY(()) tree alpha_v4hi_s;
6576 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6578 /* Return the alpha builtin for CODE. */
6580 static tree
6581 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6583 if (code >= ALPHA_BUILTIN_max)
6584 return error_mark_node;
6585 return alpha_builtins[code];
6588 /* Helper function of alpha_init_builtins. Add the built-in specified
6589 by NAME, TYPE, CODE, and ECF. */
6591 static void
6592 alpha_builtin_function (const char *name, tree ftype,
6593 enum alpha_builtin code, unsigned ecf)
6595 tree decl = add_builtin_function (name, ftype, (int) code,
6596 BUILT_IN_MD, NULL, NULL_TREE);
6598 if (ecf & ECF_CONST)
6599 TREE_READONLY (decl) = 1;
6600 if (ecf & ECF_NOTHROW)
6601 TREE_NOTHROW (decl) = 1;
6603 alpha_builtins [(int) code] = decl;
6606 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6607 functions pointed to by P, with function type FTYPE. */
6609 static void
6610 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6611 tree ftype)
6613 size_t i;
6615 for (i = 0; i < count; ++i, ++p)
6616 if ((target_flags & p->target_mask) == p->target_mask)
6617 alpha_builtin_function (p->name, ftype, p->code,
6618 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6621 static void
6622 alpha_init_builtins (void)
6624 tree ftype;
6626 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6627 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6628 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6629 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6630 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6632 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6633 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6635 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6636 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6638 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6639 alpha_dimode_u, NULL_TREE);
6640 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6642 if (TARGET_ABI_OPEN_VMS)
6644 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6645 NULL_TREE);
6646 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6647 ftype,
6648 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6651 ftype = build_function_type_list (ptr_type_node, void_type_node,
6652 NULL_TREE);
6653 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6654 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6656 vms_patch_builtins ();
6660 /* Expand an expression EXP that calls a built-in function,
6661 with result going to TARGET if that's convenient
6662 (and in mode MODE if that's convenient).
6663 SUBTARGET may be used as the target for computing one of EXP's operands.
6664 IGNORE is nonzero if the value is to be ignored. */
6666 static rtx
6667 alpha_expand_builtin (tree exp, rtx target,
6668 rtx subtarget ATTRIBUTE_UNUSED,
6669 machine_mode mode ATTRIBUTE_UNUSED,
6670 int ignore ATTRIBUTE_UNUSED)
6672 #define MAX_ARGS 2
6674 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6675 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6676 tree arg;
6677 call_expr_arg_iterator iter;
6678 enum insn_code icode;
6679 rtx op[MAX_ARGS], pat;
6680 int arity;
6681 bool nonvoid;
6683 if (fcode >= ALPHA_BUILTIN_max)
6684 internal_error ("bad builtin fcode");
6685 icode = code_for_builtin[fcode];
6686 if (icode == 0)
6687 internal_error ("bad builtin fcode");
6689 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6691 arity = 0;
6692 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6694 const struct insn_operand_data *insn_op;
6696 if (arg == error_mark_node)
6697 return NULL_RTX;
6698 if (arity > MAX_ARGS)
6699 return NULL_RTX;
6701 insn_op = &insn_data[icode].operand[arity + nonvoid];
6703 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6705 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6706 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6707 arity++;
6710 if (nonvoid)
6712 machine_mode tmode = insn_data[icode].operand[0].mode;
6713 if (!target
6714 || GET_MODE (target) != tmode
6715 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6716 target = gen_reg_rtx (tmode);
6719 switch (arity)
6721 case 0:
6722 pat = GEN_FCN (icode) (target);
6723 break;
6724 case 1:
6725 if (nonvoid)
6726 pat = GEN_FCN (icode) (target, op[0]);
6727 else
6728 pat = GEN_FCN (icode) (op[0]);
6729 break;
6730 case 2:
6731 pat = GEN_FCN (icode) (target, op[0], op[1]);
6732 break;
6733 default:
6734 gcc_unreachable ();
6736 if (!pat)
6737 return NULL_RTX;
6738 emit_insn (pat);
6740 if (nonvoid)
6741 return target;
6742 else
6743 return const0_rtx;
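/* Note that operand 0 of a value-returning pattern is the destination, which
   is why the loop above indexes insn_data operands with ARITY + NONVOID.  */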
6746 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6747 with an 8-bit output vector. OPINT contains the integer operands; bit N
6748 of OP_CONST is set if OPINT[N] is valid. */
6750 static tree
6751 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6753 if (op_const == 3)
6755 int i, val;
6756 for (i = 0, val = 0; i < 8; ++i)
6758 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6759 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6760 if (c0 >= c1)
6761 val |= 1 << i;
6763 return build_int_cst (alpha_dimode_u, val);
6765 else if (op_const == 2 && opint[1] == 0)
6766 return build_int_cst (alpha_dimode_u, 0xff);
6767 return NULL;
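/* For example, two equal constant operands fold to 0xff, since every byte
   satisfies the unsigned >= test; a constant zero second operand likewise
   folds to 0xff even when the first operand is unknown.  */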
6770 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6771 specialized form of an AND operation. Other byte manipulation instructions
6772 are defined in terms of this instruction, so this is also used as a
6773 subroutine for other builtins.
6775 OP contains the tree operands; OPINT contains the extracted integer values.
6776 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6777 OPINT may be considered. */
6779 static tree
6780 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6781 long op_const)
6783 if (op_const & 2)
6785 unsigned HOST_WIDE_INT mask = 0;
6786 int i;
6788 for (i = 0; i < 8; ++i)
6789 if ((opint[1] >> i) & 1)
6790 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6792 if (op_const & 1)
6793 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6795 if (op)
6796 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6797 build_int_cst (alpha_dimode_u, mask));
6799 else if ((op_const & 1) && opint[0] == 0)
6800 return build_int_cst (alpha_dimode_u, 0);
6801 return NULL;
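/* For example, a constant mask of 0x0f builds the constant 0xffffffff, so
   __builtin_alpha_zapnot (x, 0x0f) folds to x & 0xffffffff.  ZAP reaches this
   routine with its mask complemented first (see alpha_fold_builtin).  */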
6804 /* Fold the builtins for the EXT family of instructions. */
6806 static tree
6807 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6808 long op_const, unsigned HOST_WIDE_INT bytemask,
6809 bool is_high)
6811 long zap_const = 2;
6812 tree *zap_op = NULL;
6814 if (op_const & 2)
6816 unsigned HOST_WIDE_INT loc;
6818 loc = opint[1] & 7;
6819 loc *= BITS_PER_UNIT;
6821 if (loc != 0)
6823 if (op_const & 1)
6825 unsigned HOST_WIDE_INT temp = opint[0];
6826 if (is_high)
6827 temp <<= loc;
6828 else
6829 temp >>= loc;
6830 opint[0] = temp;
6831 zap_const = 3;
6834 else
6835 zap_op = op;
6838 opint[1] = bytemask;
6839 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
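/* For example, __builtin_alpha_extwl (x, 0) needs no shift (LOC is 0), so the
   word bytemask 0x03 lets alpha_fold_builtin_zapnot fold it to x & 0xffff.  */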
6842 /* Fold the builtins for the INS family of instructions. */
6844 static tree
6845 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6846 long op_const, unsigned HOST_WIDE_INT bytemask,
6847 bool is_high)
6849 if ((op_const & 1) && opint[0] == 0)
6850 return build_int_cst (alpha_dimode_u, 0);
6852 if (op_const & 2)
6854 unsigned HOST_WIDE_INT temp, loc, byteloc;
6855 tree *zap_op = NULL;
6857 loc = opint[1] & 7;
6858 bytemask <<= loc;
6860 temp = opint[0];
6861 if (is_high)
6863 byteloc = (64 - (loc * 8)) & 0x3f;
6864 if (byteloc == 0)
6865 zap_op = op;
6866 else
6867 temp >>= byteloc;
6868 bytemask >>= 8;
6870 else
6872 byteloc = loc * 8;
6873 if (byteloc == 0)
6874 zap_op = op;
6875 else
6876 temp <<= byteloc;
6879 opint[0] = temp;
6880 opint[1] = bytemask;
6881 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6884 return NULL;
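/* For example, with both operands constant, __builtin_alpha_insbl (0xab, 3)
   shifts the byte up by 24 bits and masks with byte 3, folding to
   0xab000000.  */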
6887 static tree
6888 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6889 long op_const, unsigned HOST_WIDE_INT bytemask,
6890 bool is_high)
6892 if (op_const & 2)
6894 unsigned HOST_WIDE_INT loc;
6896 loc = opint[1] & 7;
6897 bytemask <<= loc;
6899 if (is_high)
6900 bytemask >>= 8;
6902 opint[1] = bytemask ^ 0xff;
6905 return alpha_fold_builtin_zapnot (op, opint, op_const);
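/* For example, __builtin_alpha_mskbl (x, 0) complements the single-byte mask
   (0x01 becomes 0xfe), so the call folds to x with its low byte cleared.  */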
6908 static tree
6909 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6911 tree op0 = fold_convert (vtype, op[0]);
6912 tree op1 = fold_convert (vtype, op[1]);
6913 tree val = fold_build2 (code, vtype, op0, op1);
6914 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
6917 static tree
6918 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6920 unsigned HOST_WIDE_INT temp = 0;
6921 int i;
6923 if (op_const != 3)
6924 return NULL;
6926 for (i = 0; i < 8; ++i)
6928 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6929 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6930 if (a >= b)
6931 temp += a - b;
6932 else
6933 temp += b - a;
6936 return build_int_cst (alpha_dimode_u, temp);
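/* For example, identical constant operands fold to 0, since every byte
   difference is zero.  */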
6939 static tree
6940 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6942 unsigned HOST_WIDE_INT temp;
6944 if (op_const == 0)
6945 return NULL;
6947 temp = opint[0] & 0xff;
6948 temp |= (opint[0] >> 24) & 0xff00;
6950 return build_int_cst (alpha_dimode_u, temp);
6953 static tree
6954 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6956 unsigned HOST_WIDE_INT temp;
6958 if (op_const == 0)
6959 return NULL;
6961 temp = opint[0] & 0xff;
6962 temp |= (opint[0] >> 8) & 0xff00;
6963 temp |= (opint[0] >> 16) & 0xff0000;
6964 temp |= (opint[0] >> 24) & 0xff000000;
6966 return build_int_cst (alpha_dimode_u, temp);
6969 static tree
6970 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6972 unsigned HOST_WIDE_INT temp;
6974 if (op_const == 0)
6975 return NULL;
6977 temp = opint[0] & 0xff;
6978 temp |= (opint[0] & 0xff00) << 24;
6980 return build_int_cst (alpha_dimode_u, temp);
6983 static tree
6984 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6986 unsigned HOST_WIDE_INT temp;
6988 if (op_const == 0)
6989 return NULL;
6991 temp = opint[0] & 0xff;
6992 temp |= (opint[0] & 0x0000ff00) << 8;
6993 temp |= (opint[0] & 0x00ff0000) << 16;
6994 temp |= (opint[0] & 0xff000000) << 24;
6996 return build_int_cst (alpha_dimode_u, temp);
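/* For example, __builtin_alpha_unpkbw (0x44332211) folds to
   0x0044003300220011: each of the four low bytes is spread into the low byte
   of one 16-bit lane.  */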
6999 static tree
7000 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7002 unsigned HOST_WIDE_INT temp;
7004 if (op_const == 0)
7005 return NULL;
7007 if (opint[0] == 0)
7008 temp = 64;
7009 else
7010 temp = exact_log2 (opint[0] & -opint[0]);
7012 return build_int_cst (alpha_dimode_u, temp);
7015 static tree
7016 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7018 unsigned HOST_WIDE_INT temp;
7020 if (op_const == 0)
7021 return NULL;
7023 if (opint[0] == 0)
7024 temp = 64;
7025 else
7026 temp = 64 - floor_log2 (opint[0]) - 1;
7028 return build_int_cst (alpha_dimode_u, temp);
7031 static tree
7032 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7034 unsigned HOST_WIDE_INT temp, op;
7036 if (op_const == 0)
7037 return NULL;
7039 op = opint[0];
7040 temp = 0;
7041 while (op)
7042 temp++, op &= op - 1;
7044 return build_int_cst (alpha_dimode_u, temp);
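/* For example, __builtin_alpha_cttz (0) and __builtin_alpha_ctlz (0) fold to
   64 as coded above, and __builtin_alpha_ctpop (0xff) folds to 8.  */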
7047 /* Fold one of our builtin functions. */
7049 static tree
7050 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7051 bool ignore ATTRIBUTE_UNUSED)
7053 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7054 long op_const = 0;
7055 int i;
7057 if (n_args > MAX_ARGS)
7058 return NULL;
7060 for (i = 0; i < n_args; i++)
7062 tree arg = op[i];
7063 if (arg == error_mark_node)
7064 return NULL;
7066 opint[i] = 0;
7067 if (TREE_CODE (arg) == INTEGER_CST)
7069 op_const |= 1L << i;
7070 opint[i] = int_cst_value (arg);
7074 switch (DECL_FUNCTION_CODE (fndecl))
7076 case ALPHA_BUILTIN_CMPBGE:
7077 return alpha_fold_builtin_cmpbge (opint, op_const);
7079 case ALPHA_BUILTIN_EXTBL:
7080 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7081 case ALPHA_BUILTIN_EXTWL:
7082 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7083 case ALPHA_BUILTIN_EXTLL:
7084 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7085 case ALPHA_BUILTIN_EXTQL:
7086 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7087 case ALPHA_BUILTIN_EXTWH:
7088 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7089 case ALPHA_BUILTIN_EXTLH:
7090 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7091 case ALPHA_BUILTIN_EXTQH:
7092 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7094 case ALPHA_BUILTIN_INSBL:
7095 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7096 case ALPHA_BUILTIN_INSWL:
7097 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7098 case ALPHA_BUILTIN_INSLL:
7099 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7100 case ALPHA_BUILTIN_INSQL:
7101 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7102 case ALPHA_BUILTIN_INSWH:
7103 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7104 case ALPHA_BUILTIN_INSLH:
7105 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7106 case ALPHA_BUILTIN_INSQH:
7107 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7109 case ALPHA_BUILTIN_MSKBL:
7110 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7111 case ALPHA_BUILTIN_MSKWL:
7112 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7113 case ALPHA_BUILTIN_MSKLL:
7114 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7115 case ALPHA_BUILTIN_MSKQL:
7116 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7117 case ALPHA_BUILTIN_MSKWH:
7118 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7119 case ALPHA_BUILTIN_MSKLH:
7120 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7121 case ALPHA_BUILTIN_MSKQH:
7122 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7124 case ALPHA_BUILTIN_ZAP:
7125 opint[1] ^= 0xff;
7126 /* FALLTHRU */
7127 case ALPHA_BUILTIN_ZAPNOT:
7128 return alpha_fold_builtin_zapnot (op, opint, op_const);
7130 case ALPHA_BUILTIN_MINUB8:
7131 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7132 case ALPHA_BUILTIN_MINSB8:
7133 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7134 case ALPHA_BUILTIN_MINUW4:
7135 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7136 case ALPHA_BUILTIN_MINSW4:
7137 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7138 case ALPHA_BUILTIN_MAXUB8:
7139 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7140 case ALPHA_BUILTIN_MAXSB8:
7141 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7142 case ALPHA_BUILTIN_MAXUW4:
7143 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7144 case ALPHA_BUILTIN_MAXSW4:
7145 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7147 case ALPHA_BUILTIN_PERR:
7148 return alpha_fold_builtin_perr (opint, op_const);
7149 case ALPHA_BUILTIN_PKLB:
7150 return alpha_fold_builtin_pklb (opint, op_const);
7151 case ALPHA_BUILTIN_PKWB:
7152 return alpha_fold_builtin_pkwb (opint, op_const);
7153 case ALPHA_BUILTIN_UNPKBL:
7154 return alpha_fold_builtin_unpkbl (opint, op_const);
7155 case ALPHA_BUILTIN_UNPKBW:
7156 return alpha_fold_builtin_unpkbw (opint, op_const);
7158 case ALPHA_BUILTIN_CTTZ:
7159 return alpha_fold_builtin_cttz (opint, op_const);
7160 case ALPHA_BUILTIN_CTLZ:
7161 return alpha_fold_builtin_ctlz (opint, op_const);
7162 case ALPHA_BUILTIN_CTPOP:
7163 return alpha_fold_builtin_ctpop (opint, op_const);
7165 case ALPHA_BUILTIN_AMASK:
7166 case ALPHA_BUILTIN_IMPLVER:
7167 case ALPHA_BUILTIN_RPCC:
7168 /* None of these are foldable at compile-time. */
7169 default:
7170 return NULL;
7174 bool
7175 alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7177 bool changed = false;
7178 gimple *stmt = gsi_stmt (*gsi);
7179 tree call = gimple_call_fn (stmt);
7180 gimple *new_stmt = NULL;
7182 if (call)
7184 tree fndecl = gimple_call_fndecl (stmt);
7186 if (fndecl)
7188 tree arg0, arg1;
7190 switch (DECL_FUNCTION_CODE (fndecl))
7192 case ALPHA_BUILTIN_UMULH:
7193 arg0 = gimple_call_arg (stmt, 0);
7194 arg1 = gimple_call_arg (stmt, 1);
7196 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
7197 MULT_HIGHPART_EXPR, arg0, arg1);
7198 break;
7199 default:
7200 break;
7205 if (new_stmt)
7207 gsi_replace (gsi, new_stmt, true);
7208 changed = true;
7211 return changed;
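/* Rewriting __builtin_alpha_umulh as a MULT_HIGHPART_EXPR assignment lets the
   middle end optimize it like any other high-part multiply.  */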
7214 /* This page contains routines that are used to determine what the function
7215 prologue and epilogue code will do and write them out. */
7217 /* Compute the size of the save area in the stack. */
7219 /* These variables are used for communication between the following functions.
7220 They indicate various things about the current function being compiled
7221 that are used to tell what kind of prologue, epilogue and procedure
7222 descriptor to generate. */
7224 /* Nonzero if we need a stack procedure. */
7225 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7226 static enum alpha_procedure_types alpha_procedure_type;
7228 /* Register number (either FP or SP) that is used to unwind the frame. */
7229 static int vms_unwind_regno;
7231 /* Register number used to save FP. We need not have one for RA since
7232 we don't modify it for register procedures. This is only defined
7233 for register frame procedures. */
7234 static int vms_save_fp_regno;
7236 /* Register number used to reference objects off our PV. */
7237 static int vms_base_regno;
7239 /* Compute register masks for saved registers. */
7241 static void
7242 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7244 unsigned long imask = 0;
7245 unsigned long fmask = 0;
7246 unsigned int i;
7248 /* When outputting a thunk, we don't have valid register life info,
7249 but assemble_start_function wants to output .frame and .mask
7250 directives. */
7251 if (cfun->is_thunk)
7253 *imaskP = 0;
7254 *fmaskP = 0;
7255 return;
7258 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7259 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7261 /* One for every register we have to save. */
7262 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7263 if (! fixed_regs[i] && ! call_used_regs[i]
7264 && df_regs_ever_live_p (i) && i != REG_RA)
7266 if (i < 32)
7267 imask |= (1UL << i);
7268 else
7269 fmask |= (1UL << (i - 32));
7272 /* We need to restore these for the handler. */
7273 if (crtl->calls_eh_return)
7275 for (i = 0; ; ++i)
7277 unsigned regno = EH_RETURN_DATA_REGNO (i);
7278 if (regno == INVALID_REGNUM)
7279 break;
7280 imask |= 1UL << regno;
7284 /* If any register spilled, then spill the return address also. */
7285 /* ??? This is required by the Digital stack unwind specification
7286 and isn't needed if we're doing Dwarf2 unwinding. */
7287 if (imask || fmask || alpha_ra_ever_killed ())
7288 imask |= (1UL << REG_RA);
7290 *imaskP = imask;
7291 *fmaskP = fmask;
7295 alpha_sa_size (void)
7297 unsigned long mask[2];
7298 int sa_size = 0;
7299 int i, j;
7301 alpha_sa_mask (&mask[0], &mask[1]);
7303 for (j = 0; j < 2; ++j)
7304 for (i = 0; i < 32; ++i)
7305 if ((mask[j] >> i) & 1)
7306 sa_size++;
7308 if (TARGET_ABI_OPEN_VMS)
7310 /* Use a stack procedure if we make any calls (REG_RA used) or need a
7311 frame pointer; a register procedure if we otherwise need at least a
7312 slot; and a null procedure in other cases. */
7313 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7314 alpha_procedure_type = PT_STACK;
7315 else if (get_frame_size() != 0)
7316 alpha_procedure_type = PT_REGISTER;
7317 else
7318 alpha_procedure_type = PT_NULL;
7320 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7321 made the final decision on stack procedure vs register procedure. */
7322 if (alpha_procedure_type == PT_STACK)
7323 sa_size -= 2;
7325 /* Decide whether to refer to objects off our PV via FP or PV.
7326 If we need FP for something else or if we receive a nonlocal
7327 goto (which expects PV to contain the value), we must use PV.
7328 Otherwise, start by assuming we can use FP. */
7330 vms_base_regno
7331 = (frame_pointer_needed
7332 || cfun->has_nonlocal_label
7333 || alpha_procedure_type == PT_STACK
7334 || crtl->outgoing_args_size)
7335 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7337 /* If we want to copy PV into FP, we need to find some register
7338 in which to save FP. */
7340 vms_save_fp_regno = -1;
7341 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7342 for (i = 0; i < 32; i++)
7343 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7344 vms_save_fp_regno = i;
7346 /* A VMS condition handler requires a stack procedure in our
7347 implementation (this is not required by the calling standard). */
7348 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7349 || cfun->machine->uses_condition_handler)
7350 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7351 else if (alpha_procedure_type == PT_NULL)
7352 vms_base_regno = REG_PV;
7354 /* Stack unwinding should be done via FP unless we use it for PV. */
7355 vms_unwind_regno = (vms_base_regno == REG_PV
7356 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7358 /* If this is a stack procedure, allow space for saving FP, RA and
7359 a condition handler slot if needed. */
7360 if (alpha_procedure_type == PT_STACK)
7361 sa_size += 2 + cfun->machine->uses_condition_handler;
7363 else
7365 /* Our size must be even (multiple of 16 bytes). */
7366 if (sa_size & 1)
7367 sa_size++;
7370 return sa_size * 8;
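/* Each saved register occupies one 8-byte slot; on non-VMS targets an odd
   slot count is padded to even above, so e.g. seven saved registers yield
   eight slots and a 64-byte save area.  */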
7373 /* Define the offset between two registers, one to be eliminated,
7374 and the other its replacement, at the start of a routine. */
7376 HOST_WIDE_INT
7377 alpha_initial_elimination_offset (unsigned int from,
7378 unsigned int to ATTRIBUTE_UNUSED)
7380 HOST_WIDE_INT ret;
7382 ret = alpha_sa_size ();
7383 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7385 switch (from)
7387 case FRAME_POINTER_REGNUM:
7388 break;
7390 case ARG_POINTER_REGNUM:
7391 ret += (ALPHA_ROUND (get_frame_size ()
7392 + crtl->args.pretend_args_size)
7393 - crtl->args.pretend_args_size);
7394 break;
7396 default:
7397 gcc_unreachable ();
7400 return ret;
7403 #if TARGET_ABI_OPEN_VMS
7405 /* Worker function for TARGET_CAN_ELIMINATE. */
7407 static bool
7408 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7410 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7411 alpha_sa_size ();
7413 switch (alpha_procedure_type)
7415 case PT_NULL:
7416 /* NULL procedures have no frame of their own and we only
7417 know how to resolve from the current stack pointer. */
7418 return to == STACK_POINTER_REGNUM;
7420 case PT_REGISTER:
7421 case PT_STACK:
7422 /* We always eliminate except to the stack pointer if there is no
7423 usable frame pointer at hand. */
7424 return (to != STACK_POINTER_REGNUM
7425 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7428 gcc_unreachable ();
7431 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7432 designates the same location as FROM. */
7434 HOST_WIDE_INT
7435 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7437 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7438 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7439 on the proper computations and will need the register save area size
7440 in most cases. */
7442 HOST_WIDE_INT sa_size = alpha_sa_size ();
7444 /* PT_NULL procedures have no frame of their own and we only allow
7445 elimination to the stack pointer. This is the argument pointer and we
7446 resolve the soft frame pointer to that as well. */
7448 if (alpha_procedure_type == PT_NULL)
7449 return 0;
7451 /* For a PT_STACK procedure the frame layout looks as follows
7453                       -----> decreasing addresses
7455                <          size rounded up to 16          |   likewise   >
7456 --------------#------------------------------+++--------------+++-------#
7457 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7458 --------------#---------------------------------------------------------#
7459                ^                         ^              ^               ^
7460             ARG_PTR                  FRAME_PTR   HARD_FRAME_PTR     STACK_PTR
7463 PT_REGISTER procedures are similar in that they may have a frame of their
7464 own. They have no regs-sa/pv/outgoing-args area.
7466 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7467 to STACK_PTR if need be. */
7470 HOST_WIDE_INT offset;
7471 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7473 switch (from)
7475 case FRAME_POINTER_REGNUM:
7476 offset = ALPHA_ROUND (sa_size + pv_save_size);
7477 break;
7478 case ARG_POINTER_REGNUM:
7479 offset = (ALPHA_ROUND (sa_size + pv_save_size
7480 + get_frame_size ()
7481 + crtl->args.pretend_args_size)
7482 - crtl->args.pretend_args_size);
7483 break;
7484 default:
7485 gcc_unreachable ();
7488 if (to == STACK_POINTER_REGNUM)
7489 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7491 return offset;
7495 #define COMMON_OBJECT "common_object"
7497 static tree
7498 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7499 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7500 bool *no_add_attrs ATTRIBUTE_UNUSED)
7502 tree decl = *node;
7503 gcc_assert (DECL_P (decl));
7505 DECL_COMMON (decl) = 1;
7506 return NULL_TREE;
7509 static const struct attribute_spec vms_attribute_table[] =
7511 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
7512 affects_type_identity, handler, exclude } */
7513 { COMMON_OBJECT, 0, 1, true, false, false, false, common_object_handler,
7514 NULL },
7515 { NULL, 0, 0, false, false, false, false, NULL, NULL }
7518 void
7519 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7520 unsigned HOST_WIDE_INT size,
7521 unsigned int align)
7523 tree attr = DECL_ATTRIBUTES (decl);
7524 fprintf (file, "%s", COMMON_ASM_OP);
7525 assemble_name (file, name);
7526 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7527 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7528 fprintf (file, ",%u", align / BITS_PER_UNIT);
7529 if (attr)
7531 attr = lookup_attribute (COMMON_OBJECT, attr);
7532 if (attr)
7533 fprintf (file, ",%s",
7534 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7536 fputc ('\n', file);
7539 #undef COMMON_OBJECT
7541 #endif
7543 bool
7544 alpha_find_lo_sum_using_gp (rtx insn)
7546 subrtx_iterator::array_type array;
7547 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7549 const_rtx x = *iter;
7550 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7551 return true;
7553 return false;
7556 static int
7557 alpha_does_function_need_gp (void)
7559 rtx_insn *insn;
7561 /* The GP being variable is an OSF ABI thing. */
7562 if (! TARGET_ABI_OSF)
7563 return 0;
7565 /* We need the gp to load the address of __mcount. */
7566 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7567 return 1;
7569 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7570 if (cfun->is_thunk)
7571 return 1;
7573 /* The nonlocal receiver pattern assumes that the gp is valid for
7574 the nested function. Reasonable because it's almost always set
7575 correctly already. For the cases where that's wrong, make sure
7576 the nested function loads its gp on entry. */
7577 if (crtl->has_nonlocal_goto)
7578 return 1;
7580 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7581 Even if we are a static function, we still need to do this in case
7582 our address is taken and passed to something like qsort. */
7584 push_topmost_sequence ();
7585 insn = get_insns ();
7586 pop_topmost_sequence ();
7588 for (; insn; insn = NEXT_INSN (insn))
7589 if (NONDEBUG_INSN_P (insn)
7590 && GET_CODE (PATTERN (insn)) != USE
7591 && GET_CODE (PATTERN (insn)) != CLOBBER
7592 && get_attr_usegp (insn))
7593 return 1;
7595 return 0;
7599 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7600 sequences. */
7602 static rtx_insn *
7603 set_frame_related_p (void)
7605 rtx_insn *seq = get_insns ();
7606 rtx_insn *insn;
7608 end_sequence ();
7610 if (!seq)
7611 return NULL;
7613 if (INSN_P (seq))
7615 insn = seq;
7616 while (insn != NULL_RTX)
7618 RTX_FRAME_RELATED_P (insn) = 1;
7619 insn = NEXT_INSN (insn);
7621 seq = emit_insn (seq);
7623 else
7625 seq = emit_insn (seq);
7626 RTX_FRAME_RELATED_P (seq) = 1;
7628 return seq;
7631 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7633 /* Generates a store with the proper unwind info attached. VALUE is
7634 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7635 contains SP+FRAME_BIAS, and that is the unwind info that should be
7636 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7637 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7639 static void
7640 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7641 HOST_WIDE_INT base_ofs, rtx frame_reg)
7643 rtx addr, mem;
7644 rtx_insn *insn;
7646 addr = plus_constant (Pmode, base_reg, base_ofs);
7647 mem = gen_frame_mem (DImode, addr);
7649 insn = emit_move_insn (mem, value);
7650 RTX_FRAME_RELATED_P (insn) = 1;
7652 if (frame_bias || value != frame_reg)
7654 if (frame_bias)
7656 addr = plus_constant (Pmode, stack_pointer_rtx,
7657 frame_bias + base_ofs);
7658 mem = gen_rtx_MEM (DImode, addr);
7661 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7662 gen_rtx_SET (mem, frame_reg));
7666 static void
7667 emit_frame_store (unsigned int regno, rtx base_reg,
7668 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7670 rtx reg = gen_rtx_REG (DImode, regno);
7671 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7674 /* Compute the frame size. SIZE is the size of the "naked" frame
7675 and SA_SIZE is the size of the register save area. */
7677 static HOST_WIDE_INT
7678 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7680 if (TARGET_ABI_OPEN_VMS)
7681 return ALPHA_ROUND (sa_size
7682 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7683 + size
7684 + crtl->args.pretend_args_size);
7685 else
7686 return ALPHA_ROUND (crtl->outgoing_args_size)
7687 + sa_size
7688 + ALPHA_ROUND (size
7689 + crtl->args.pretend_args_size);
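/* For illustration, assuming no outgoing or pretend args, an OSF function
   with 40 bytes of locals and a 64-byte save area gets ALPHA_ROUND (0) + 64
   + ALPHA_ROUND (40) = 112 bytes of frame.  */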
7692 /* Write function prologue. */
7694 /* On VMS we have two kinds of functions:
7696 - stack frame (PROC_STACK)
7697 these are 'normal' functions with local variables and which
7698 call other functions
7699 - register frame (PROC_REGISTER)
7700 keeps all data in registers, needs no stack
7702 We must pass this to the assembler so it can generate the
7703 proper pdsc (procedure descriptor).
7704 This is done with the '.pdesc' command.
7706 On non-VMS targets we don't really differentiate between the two, as we
7707 can simply allocate stack without saving registers. */
7709 void
7710 alpha_expand_prologue (void)
7712 /* Registers to save. */
7713 unsigned long imask = 0;
7714 unsigned long fmask = 0;
7715 /* Stack space needed for pushing registers clobbered by us. */
7716 HOST_WIDE_INT sa_size, sa_bias;
7717 /* Complete stack size needed. */
7718 HOST_WIDE_INT frame_size;
7719 /* Probed stack size; it additionally includes the size of
7720 the "reserve region" if any. */
7721 HOST_WIDE_INT probed_size;
7722 /* Offset from base reg to register save area. */
7723 HOST_WIDE_INT reg_offset;
7724 rtx sa_reg;
7725 int i;
7727 sa_size = alpha_sa_size ();
7728 frame_size = compute_frame_size (get_frame_size (), sa_size);
7730 if (flag_stack_usage_info)
7731 current_function_static_stack_size = frame_size;
7733 if (TARGET_ABI_OPEN_VMS)
7734 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7735 else
7736 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7738 alpha_sa_mask (&imask, &fmask);
7740 /* Emit an insn to reload GP, if needed. */
7741 if (TARGET_ABI_OSF)
7743 alpha_function_needs_gp = alpha_does_function_need_gp ();
7744 if (alpha_function_needs_gp)
7745 emit_insn (gen_prologue_ldgp ());
7748 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7749 the call to mcount ourselves, rather than having the linker do it
7750 magically in response to -pg. Since _mcount has special linkage,
7751 don't represent the call as a call. */
7752 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7753 emit_insn (gen_prologue_mcount ());
7755 /* Adjust the stack by the frame size. If the frame size is > 4096
7756 bytes, we need to be sure we probe somewhere in the first and last
7757 4096 bytes (we can probably get away without the latter test) and
7758 every 8192 bytes in between. If the frame size is > 32768, we
7759 do this in a loop. Otherwise, we generate the explicit probe
7760 instructions.
7762 Note that we are only allowed to adjust sp once in the prologue. */
7764 probed_size = frame_size;
7765 if (flag_stack_check || flag_stack_clash_protection)
7766 probed_size += get_stack_check_protect ();
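/* For illustration, a 20000-byte probed size takes the small-frame path
   below: explicit probes at SP-4096 and SP-12288, plus a final probe at
   SP-20000 when no registers are being saved or stack checking is on.  */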
7768 if (probed_size <= 32768)
7770 if (probed_size > 4096)
7772 int probed;
7774 for (probed = 4096; probed < probed_size; probed += 8192)
7775 emit_insn (gen_stack_probe_internal (GEN_INT (-probed)));
7777 /* We only have to do this probe if we aren't saving registers or
7778 if we are probing beyond the frame because of -fstack-check. */
7779 if ((sa_size == 0 && probed_size > probed - 4096)
7780 || flag_stack_check || flag_stack_clash_protection)
7781 emit_insn (gen_stack_probe_internal (GEN_INT (-probed_size)));
7784 if (frame_size != 0)
7785 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7786 GEN_INT (-frame_size))));
7788 else
7790 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7791 number of 8192 byte blocks to probe. We then probe each block
7792 in the loop and then set SP to the proper location. If the
7793 amount remaining is > 4096, we have to do one more probe if we
7794 are not saving any registers or if we are probing beyond the
7795 frame because of -fstack-check. */
7797 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7798 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7799 rtx ptr = gen_rtx_REG (DImode, 22);
7800 rtx count = gen_rtx_REG (DImode, 23);
7801 rtx seq;
7803 emit_move_insn (count, GEN_INT (blocks));
7804 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7806 /* Because of the difficulty in emitting a new basic block this
7807 late in the compilation, generate the loop as a single insn. */
7808 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7810 if ((leftover > 4096 && sa_size == 0)
7811 || flag_stack_check || flag_stack_clash_protection)
7813 rtx last = gen_rtx_MEM (DImode,
7814 plus_constant (Pmode, ptr, -leftover));
7815 MEM_VOLATILE_P (last) = 1;
7816 emit_move_insn (last, const0_rtx);
7819 if (flag_stack_check || flag_stack_clash_protection)
7821 /* If -fstack-check is specified we have to load the entire
7822 constant into a register and subtract from the sp in one go,
7823 because the probed stack size is not equal to the frame size. */
7824 HOST_WIDE_INT lo, hi;
7825 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7826 hi = frame_size - lo;
7828 emit_move_insn (ptr, GEN_INT (hi));
7829 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7830 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7831 ptr));
7833 else
7835 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7836 GEN_INT (-leftover)));
7839 /* This alternative is special, because the DWARF code cannot
7840 possibly intuit through the loop above. So we invent this
7841 note for it to look at instead. */
7842 RTX_FRAME_RELATED_P (seq) = 1;
7843 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7844 gen_rtx_SET (stack_pointer_rtx,
7845 plus_constant (Pmode, stack_pointer_rtx,
7846 -frame_size)));
7849 /* Cope with very large offsets to the register save area. */
7850 sa_bias = 0;
7851 sa_reg = stack_pointer_rtx;
7852 if (reg_offset + sa_size > 0x8000)
7854 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7855 rtx sa_bias_rtx;
7857 if (low + sa_size <= 0x8000)
7858 sa_bias = reg_offset - low, reg_offset = low;
7859 else
7860 sa_bias = reg_offset, reg_offset = 0;
7862 sa_reg = gen_rtx_REG (DImode, 24);
7863 sa_bias_rtx = GEN_INT (sa_bias);
7865 if (add_operand (sa_bias_rtx, DImode))
7866 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7867 else
7869 emit_move_insn (sa_reg, sa_bias_rtx);
7870 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7874 /* Save regs in stack order. Beginning with VMS PV. */
7875 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7876 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7878 /* Save register RA next. */
7879 if (imask & (1UL << REG_RA))
7881 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7882 imask &= ~(1UL << REG_RA);
7883 reg_offset += 8;
7886 /* Now save any other registers required to be saved. */
7887 for (i = 0; i < 31; i++)
7888 if (imask & (1UL << i))
7890 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7891 reg_offset += 8;
7894 for (i = 0; i < 31; i++)
7895 if (fmask & (1UL << i))
7897 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7898 reg_offset += 8;
7901 if (TARGET_ABI_OPEN_VMS)
7903 /* Register frame procedures save the fp. */
7904 if (alpha_procedure_type == PT_REGISTER)
7906 rtx_insn *insn =
7907 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7908 hard_frame_pointer_rtx);
7909 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7910 RTX_FRAME_RELATED_P (insn) = 1;
7913 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7914 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7915 gen_rtx_REG (DImode, REG_PV)));
7917 if (alpha_procedure_type != PT_NULL
7918 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7919 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7921 /* If we have to allocate space for outgoing args, do it now. */
7922 if (crtl->outgoing_args_size != 0)
7924 rtx_insn *seq
7925 = emit_move_insn (stack_pointer_rtx,
7926 plus_constant
7927 (Pmode, hard_frame_pointer_rtx,
7928 - (ALPHA_ROUND
7929 (crtl->outgoing_args_size))));
7931 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7932 if ! frame_pointer_needed. Setting the bit will change the CFA
7933 computation rule to use sp again, which would be wrong if we had
7934 frame_pointer_needed, as this means sp might move unpredictably
7935 later on.
7937 Also, note that
7938 frame_pointer_needed
7939 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7941 crtl->outgoing_args_size != 0
7942 => alpha_procedure_type != PT_NULL,
7944 so when we are not setting the bit here, we are guaranteed to
7945 have emitted an FRP frame pointer update just before. */
7946 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7949 else
7951 /* If we need a frame pointer, set it from the stack pointer. */
7952 if (frame_pointer_needed)
7954 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7955 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7956 else
7957 /* This must always be the last instruction in the
7958 prologue, thus we emit a special move + clobber. */
7959 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7960 stack_pointer_rtx, sa_reg)));
7964 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7965 the prologue, for exception handling reasons, we cannot do this for
7966 any insn that might fault. We could prevent this for mems with a
7967 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7968 have to prevent all such scheduling with a blockage.
7970 Linux, on the other hand, never bothered to implement OSF/1's
7971 exception handling, and so doesn't care about such things. Anyone
7972 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7974 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7975 emit_insn (gen_blockage ());
7978 /* Count the number of .file directives, so that .loc is up to date. */
7979 int num_source_filenames = 0;
7981 /* Output the textual info surrounding the prologue. */
7983 void
7984 alpha_start_function (FILE *file, const char *fnname,
7985 tree decl ATTRIBUTE_UNUSED)
7987 unsigned long imask = 0;
7988 unsigned long fmask = 0;
7989 /* Stack space needed for pushing registers clobbered by us. */
7990 HOST_WIDE_INT sa_size;
7991 /* Complete stack size needed. */
7992 unsigned HOST_WIDE_INT frame_size;
7993 /* The maximum debuggable frame size. */
7994 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
7995 /* Offset from base reg to register save area. */
7996 HOST_WIDE_INT reg_offset;
7997 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7998 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7999 int i;
8001 #if TARGET_ABI_OPEN_VMS
8002 vms_start_function (fnname);
8003 #endif
8005 alpha_fnname = fnname;
8006 sa_size = alpha_sa_size ();
8007 frame_size = compute_frame_size (get_frame_size (), sa_size);
8009 if (TARGET_ABI_OPEN_VMS)
8010 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8011 else
8012 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8014 alpha_sa_mask (&imask, &fmask);
8016 /* Issue function start and label. */
8017 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
8019 fputs ("\t.ent ", file);
8020 assemble_name (file, fnname);
8021 putc ('\n', file);
8023 /* If the function needs GP, we'll write the "..ng" label there.
8024 Otherwise, do it here. */
8025 if (TARGET_ABI_OSF
8026 && ! alpha_function_needs_gp
8027 && ! cfun->is_thunk)
8029 putc ('$', file);
8030 assemble_name (file, fnname);
8031 fputs ("..ng:\n", file);
8034 /* Nested functions on VMS that are potentially called via trampoline
8035 get a special transfer entry point that loads the called functions
8036 procedure descriptor and static chain. */
8037 if (TARGET_ABI_OPEN_VMS
8038 && !TREE_PUBLIC (decl)
8039 && DECL_CONTEXT (decl)
8040 && !TYPE_P (DECL_CONTEXT (decl))
8041 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
8043 strcpy (tramp_label, fnname);
8044 strcat (tramp_label, "..tr");
8045 ASM_OUTPUT_LABEL (file, tramp_label);
8046 fprintf (file, "\tldq $1,24($27)\n");
8047 fprintf (file, "\tldq $27,16($27)\n");
8050 strcpy (entry_label, fnname);
8051 if (TARGET_ABI_OPEN_VMS)
8052 strcat (entry_label, "..en");
8054 ASM_OUTPUT_LABEL (file, entry_label);
8055 inside_function = TRUE;
8057 if (TARGET_ABI_OPEN_VMS)
8058 fprintf (file, "\t.base $%d\n", vms_base_regno);
8060 if (TARGET_ABI_OSF
8061 && TARGET_IEEE_CONFORMANT
8062 && !flag_inhibit_size_directive)
8064 /* Set flags in procedure descriptor to request IEEE-conformant
8065 math-library routines. The value we set it to is PDSC_EXC_IEEE
8066 (/usr/include/pdsc.h). */
8067 fputs ("\t.eflag 48\n", file);
8070 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8071 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8072 alpha_arg_offset = -frame_size + 48;
8074 /* Describe our frame. If the frame size does not fit in a signed
8075 32-bit integer, print it as zero to avoid an assembler error. We won't be
8076 properly describing such a frame, but that's the best we can do. */
8077 if (TARGET_ABI_OPEN_VMS)
8078 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8079 HOST_WIDE_INT_PRINT_DEC "\n",
8080 vms_unwind_regno,
8081 frame_size >= (1UL << 31) ? 0 : frame_size,
8082 reg_offset);
8083 else if (!flag_inhibit_size_directive)
8084 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8085 (frame_pointer_needed
8086 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8087 frame_size >= max_frame_size ? 0 : frame_size,
8088 crtl->args.pretend_args_size);
8090 /* Describe which registers were spilled. */
8091 if (TARGET_ABI_OPEN_VMS)
8093 if (imask)
8094 /* ??? Does VMS care if mask contains ra? The old code didn't
8095 set it, so I don't set it here. */
8096 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8097 if (fmask)
8098 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8099 if (alpha_procedure_type == PT_REGISTER)
8100 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8102 else if (!flag_inhibit_size_directive)
8104 if (imask)
8106 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8107 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8109 for (i = 0; i < 32; ++i)
8110 if (imask & (1UL << i))
8111 reg_offset += 8;
8114 if (fmask)
8115 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8116 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8119 #if TARGET_ABI_OPEN_VMS
8120 /* If a user condition handler has been installed at some point, emit
8121 the procedure descriptor bits to point the Condition Handling Facility
8122 at the indirection wrapper, and state the fp offset at which the user
8123 handler may be found. */
8124 if (cfun->machine->uses_condition_handler)
8126 fprintf (file, "\t.handler __gcc_shell_handler\n");
8127 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8130 #ifdef TARGET_VMS_CRASH_DEBUG
8131 /* Support of minimal traceback info. */
8132 switch_to_section (readonly_data_section);
8133 fprintf (file, "\t.align 3\n");
8134 assemble_name (file, fnname); fputs ("..na:\n", file);
8135 fputs ("\t.ascii \"", file);
8136 assemble_name (file, fnname);
8137 fputs ("\\0\"\n", file);
8138 switch_to_section (text_section);
8139 #endif
8140 #endif /* TARGET_ABI_OPEN_VMS */
8143 /* Emit the .prologue note at the scheduled end of the prologue. */
8145 static void
8146 alpha_output_function_end_prologue (FILE *file)
8148 if (TARGET_ABI_OPEN_VMS)
8149 fputs ("\t.prologue\n", file);
8150 else if (!flag_inhibit_size_directive)
8151 fprintf (file, "\t.prologue %d\n",
8152 alpha_function_needs_gp || cfun->is_thunk);
8155 /* Write function epilogue. */
8157 void
8158 alpha_expand_epilogue (void)
8160 /* Registers to save. */
8161 unsigned long imask = 0;
8162 unsigned long fmask = 0;
8163 /* Stack space needed for pushing registers clobbered by us. */
8164 HOST_WIDE_INT sa_size;
8165 /* Complete stack size needed. */
8166 HOST_WIDE_INT frame_size;
8167 /* Offset from base reg to register save area. */
8168 HOST_WIDE_INT reg_offset;
8169 int fp_is_frame_pointer, fp_offset;
8170 rtx sa_reg, sa_reg_exp = NULL;
8171 rtx sp_adj1, sp_adj2, mem, reg, insn;
8172 rtx eh_ofs;
8173 rtx cfa_restores = NULL_RTX;
8174 int i;
8176 sa_size = alpha_sa_size ();
8177 frame_size = compute_frame_size (get_frame_size (), sa_size);
8179 if (TARGET_ABI_OPEN_VMS)
8181 if (alpha_procedure_type == PT_STACK)
8182 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8183 else
8184 reg_offset = 0;
8186 else
8187 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8189 alpha_sa_mask (&imask, &fmask);
8191 fp_is_frame_pointer
8192 = (TARGET_ABI_OPEN_VMS
8193 ? alpha_procedure_type == PT_STACK
8194 : frame_pointer_needed);
8195 fp_offset = 0;
8196 sa_reg = stack_pointer_rtx;
8198 if (crtl->calls_eh_return)
8199 eh_ofs = EH_RETURN_STACKADJ_RTX;
8200 else
8201 eh_ofs = NULL_RTX;
8203 if (sa_size)
8205 /* If we have a frame pointer, restore SP from it. */
8206 if (TARGET_ABI_OPEN_VMS
8207 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8208 : frame_pointer_needed)
8209 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8211 /* Cope with very large offsets to the register save area. */
8212 if (reg_offset + sa_size > 0x8000)
8214 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8215 HOST_WIDE_INT bias;
8217 if (low + sa_size <= 0x8000)
8218 bias = reg_offset - low, reg_offset = low;
8219 else
8220 bias = reg_offset, reg_offset = 0;
8222 sa_reg = gen_rtx_REG (DImode, 22);
8223 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8225 emit_move_insn (sa_reg, sa_reg_exp);
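/* Editorial example of the bias computation above: for reg_offset = 0x9000
   and a small save area, LOW sign-extends the bottom 16 bits to -0x7000,
   so BIAS = 0x9000 - (-0x7000) = 0x10000 and reg_offset becomes -0x7000.
   $22 then holds SP + 0x10000, and every save-area access below uses a
   displacement that fits the signed 16-bit offset field of ldq/stq.  */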
8228 /* Restore registers in order, excepting a true frame pointer. */
8230 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8231 reg = gen_rtx_REG (DImode, REG_RA);
8232 emit_move_insn (reg, mem);
8233 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8235 reg_offset += 8;
8236 imask &= ~(1UL << REG_RA);
8238 for (i = 0; i < 31; ++i)
8239 if (imask & (1UL << i))
8241 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8242 fp_offset = reg_offset;
8243 else
8245 mem = gen_frame_mem (DImode,
8246 plus_constant (Pmode, sa_reg,
8247 reg_offset));
8248 reg = gen_rtx_REG (DImode, i);
8249 emit_move_insn (reg, mem);
8250 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8251 cfa_restores);
8253 reg_offset += 8;
8256 for (i = 0; i < 31; ++i)
8257 if (fmask & (1UL << i))
8259 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8260 reg_offset));
8261 reg = gen_rtx_REG (DFmode, i+32);
8262 emit_move_insn (reg, mem);
8263 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8264 reg_offset += 8;
8268 if (frame_size || eh_ofs)
8270 sp_adj1 = stack_pointer_rtx;
8272 if (eh_ofs)
8274 sp_adj1 = gen_rtx_REG (DImode, 23);
8275 emit_move_insn (sp_adj1,
8276 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8279 /* If the stack size is large, begin computation into a temporary
8280 register so as not to interfere with a potential fp restore,
8281 which must be consecutive with an SP restore. */
8282 if (frame_size < 32768 && !cfun->calls_alloca)
8283 sp_adj2 = GEN_INT (frame_size);
8284 else if (frame_size < 0x40007fffL)
8286 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8288 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8289 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8290 sp_adj1 = sa_reg;
8291 else
8293 sp_adj1 = gen_rtx_REG (DImode, 23);
8294 emit_move_insn (sp_adj1, sp_adj2);
8296 sp_adj2 = GEN_INT (low);
8298 else
8300 rtx tmp = gen_rtx_REG (DImode, 23);
8301 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8302 if (!sp_adj2)
8304 /* We can't drop new things to memory this late, afaik,
8305 so build it up by pieces. */
8306 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size);
8307 gcc_assert (sp_adj2);
8311 /* From now on, things must be in order. So emit blockages. */
8313 /* Restore the frame pointer. */
8314 if (fp_is_frame_pointer)
8316 emit_insn (gen_blockage ());
8317 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8318 fp_offset));
8319 emit_move_insn (hard_frame_pointer_rtx, mem);
8320 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8321 hard_frame_pointer_rtx, cfa_restores);
8323 else if (TARGET_ABI_OPEN_VMS)
8325 emit_insn (gen_blockage ());
8326 emit_move_insn (hard_frame_pointer_rtx,
8327 gen_rtx_REG (DImode, vms_save_fp_regno));
8328 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8329 hard_frame_pointer_rtx, cfa_restores);
8332 /* Restore the stack pointer. */
8333 emit_insn (gen_blockage ());
8334 if (sp_adj2 == const0_rtx)
8335 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8336 else
8337 insn = emit_move_insn (stack_pointer_rtx,
8338 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8339 REG_NOTES (insn) = cfa_restores;
8340 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8341 RTX_FRAME_RELATED_P (insn) = 1;
8343 else
8345 gcc_assert (cfa_restores == NULL);
8347 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8349 emit_insn (gen_blockage ());
8350 insn = emit_move_insn (hard_frame_pointer_rtx,
8351 gen_rtx_REG (DImode, vms_save_fp_regno));
8352 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8353 RTX_FRAME_RELATED_P (insn) = 1;
8358 /* Output the rest of the textual info surrounding the epilogue. */
8360 void
8361 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8363 rtx_insn *insn;
8365 /* We output a nop after noreturn calls at the very end of the function to
8366 ensure that the return address always remains in the caller's code range,
8367 as not doing so might confuse unwinding engines. */
8368 insn = get_last_insn ();
8369 if (!INSN_P (insn))
8370 insn = prev_active_insn (insn);
8371 if (insn && CALL_P (insn))
8372 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8374 #if TARGET_ABI_OPEN_VMS
8375 /* Write the linkage entries. */
8376 alpha_write_linkage (file, fnname);
8377 #endif
8379 /* End the function. */
8380 if (TARGET_ABI_OPEN_VMS
8381 || !flag_inhibit_size_directive)
8383 fputs ("\t.end ", file);
8384 assemble_name (file, fnname);
8385 putc ('\n', file);
8387 inside_function = FALSE;
8390 #if TARGET_ABI_OSF
8391 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8393 In order to avoid the hordes of differences between generated code
8394 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8395 lots of code loading up large constants, generate rtl and emit it
8396 instead of going straight to text.
8398 Not sure why this idea hasn't been explored before... */
8400 static void
8401 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8402 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8403 tree function)
8405 HOST_WIDE_INT hi, lo;
8406 rtx this_rtx, funexp;
8407 rtx_insn *insn;
8409 /* We always require a valid GP. */
8410 emit_insn (gen_prologue_ldgp ());
8411 emit_note (NOTE_INSN_PROLOGUE_END);
8413 /* Find the "this" pointer. If the function returns a structure,
8414 the structure return pointer is in $16. */
8415 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8416 this_rtx = gen_rtx_REG (Pmode, 17);
8417 else
8418 this_rtx = gen_rtx_REG (Pmode, 16);
8420 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8421 entire constant for the add. */
8422 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8423 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
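/* Editorial example: for delta = 0x1234abcd, LO sign-extends the low 16
   bits to -0x5433 and HI becomes 0x12350000; since HI + LO == delta, the
   adjustment is done with an ldah-style add of HI and an lda-style add of
   LO instead of materializing the full 64-bit constant.  */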
8424 if (hi + lo == delta)
8426 if (hi)
8427 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8428 if (lo)
8429 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8431 else
8433 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta);
8434 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8437 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8438 if (vcall_offset)
8440 rtx tmp, tmp2;
8442 tmp = gen_rtx_REG (Pmode, 0);
8443 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8445 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8446 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8447 if (hi + lo == vcall_offset)
8449 if (hi)
8450 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8452 else
8454 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8455 vcall_offset);
8456 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8457 lo = 0;
8459 if (lo)
8460 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8461 else
8462 tmp2 = tmp;
8463 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8465 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8468 /* Generate a tail call to the target function. */
8469 if (! TREE_USED (function))
8471 assemble_external (function);
8472 TREE_USED (function) = 1;
8474 funexp = XEXP (DECL_RTL (function), 0);
8475 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8476 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8477 SIBLING_CALL_P (insn) = 1;
8479 /* Run just enough of rest_of_compilation to get the insns emitted.
8480 There's not really enough bulk here to make other passes such as
8481 instruction scheduling worthwhile. Note that use_thunk calls
8482 assemble_start_function and assemble_end_function. */
8483 insn = get_insns ();
8484 shorten_branches (insn);
8485 final_start_function (insn, file, 1);
8486 final (insn, file, 1);
8487 final_end_function ();
8489 #endif /* TARGET_ABI_OSF */
8491 /* Debugging support. */
8493 #include "gstab.h"
8495 /* Name of the file containing the current function. */
8497 static const char *current_function_file = "";
8499 /* Offsets to alpha virtual arg/local debugging pointers. */
8501 long alpha_arg_offset;
8502 long alpha_auto_offset;
8504 /* Emit a new filename to a stream. */
8506 void
8507 alpha_output_filename (FILE *stream, const char *name)
8509 static int first_time = TRUE;
8511 if (first_time)
8513 first_time = FALSE;
8514 ++num_source_filenames;
8515 current_function_file = name;
8516 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8517 output_quoted_string (stream, name);
8518 fprintf (stream, "\n");
8521 else if (name != current_function_file
8522 && strcmp (name, current_function_file) != 0)
8524 ++num_source_filenames;
8525 current_function_file = name;
8526 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8528 output_quoted_string (stream, name);
8529 fprintf (stream, "\n");
8533 /* Structure to show the current status of registers and memory. */
8535 struct shadow_summary
8537 struct {
8538 unsigned int i : 31; /* Mask of int regs */
8539 unsigned int fp : 31; /* Mask of fp regs */
8540 unsigned int mem : 1; /* mem == imem | fpmem */
8541 } used, defd;
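/* Editorial note: bit N of the I mask tracks integer register $N and bit N
   of the FP mask tracks $fN -- see the REG case of summarize_insn below,
   which uses 1UL << (regno % 32).  Registers 31 and 63 are the hardwired
   zero registers and are never recorded.  */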
8544 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8545 to the summary structure. SET is nonzero if the insn is setting the
8546 object, otherwise zero. */
8548 static void
8549 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8551 const char *format_ptr;
8552 int i, j;
8554 if (x == 0)
8555 return;
8557 switch (GET_CODE (x))
8559 /* ??? Note that this case would be incorrect if the Alpha had a
8560 ZERO_EXTRACT in SET_DEST. */
8561 case SET:
8562 summarize_insn (SET_SRC (x), sum, 0);
8563 summarize_insn (SET_DEST (x), sum, 1);
8564 break;
8566 case CLOBBER:
8567 summarize_insn (XEXP (x, 0), sum, 1);
8568 break;
8570 case USE:
8571 summarize_insn (XEXP (x, 0), sum, 0);
8572 break;
8574 case ASM_OPERANDS:
8575 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8576 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8577 break;
8579 case PARALLEL:
8580 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8581 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8582 break;
8584 case SUBREG:
8585 summarize_insn (SUBREG_REG (x), sum, 0);
8586 break;
8588 case REG:
8590 int regno = REGNO (x);
8591 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8593 if (regno == 31 || regno == 63)
8594 break;
8596 if (set)
8598 if (regno < 32)
8599 sum->defd.i |= mask;
8600 else
8601 sum->defd.fp |= mask;
8603 else
8605 if (regno < 32)
8606 sum->used.i |= mask;
8607 else
8608 sum->used.fp |= mask;
8611 break;
8613 case MEM:
8614 if (set)
8615 sum->defd.mem = 1;
8616 else
8617 sum->used.mem = 1;
8619 /* Find the regs used in memory address computation: */
8620 summarize_insn (XEXP (x, 0), sum, 0);
8621 break;
8623 case CONST_INT: case CONST_WIDE_INT: case CONST_DOUBLE:
8624 case SYMBOL_REF: case LABEL_REF: case CONST:
8625 case SCRATCH: case ASM_INPUT:
8626 break;
8628 /* Handle common unary and binary ops for efficiency. */
8629 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8630 case MOD: case UDIV: case UMOD: case AND: case IOR:
8631 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8632 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8633 case NE: case EQ: case GE: case GT: case LE:
8634 case LT: case GEU: case GTU: case LEU: case LTU:
8635 summarize_insn (XEXP (x, 0), sum, 0);
8636 summarize_insn (XEXP (x, 1), sum, 0);
8637 break;
8639 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8640 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8641 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8642 case SQRT: case FFS:
8643 summarize_insn (XEXP (x, 0), sum, 0);
8644 break;
8646 default:
8647 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8648 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8649 switch (format_ptr[i])
8651 case 'e':
8652 summarize_insn (XEXP (x, i), sum, 0);
8653 break;
8655 case 'E':
8656 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8657 summarize_insn (XVECEXP (x, i, j), sum, 0);
8658 break;
8660 case 'i':
8661 break;
8663 default:
8664 gcc_unreachable ();
8669 /* Ensure a sufficient number of `trapb' insns are in the code when
8670 the user requests code with a trap precision of functions or
8671 instructions.
8673 In naive mode, when the user requests a trap-precision of
8674 "instruction", a trapb is needed after every instruction that may
8675 generate a trap. This ensures that the code is resumption safe, but
8676 it is also slow.
8678 When optimizations are turned on, we delay issuing a trapb as long
8679 as possible. In this context, a trap shadow is the sequence of
8680 instructions that starts with a (potentially) trap generating
8681 instruction and extends to the next trapb or call_pal instruction
8682 (but GCC never generates call_pal by itself). We can delay (and
8683 therefore sometimes omit) a trapb subject to the following
8684 conditions:
8686 (a) On entry to the trap shadow, if any Alpha register or memory
8687 location contains a value that is used as an operand value by some
8688 instruction in the trap shadow (live on entry), then no instruction
8689 in the trap shadow may modify the register or memory location.
8691 (b) Within the trap shadow, the computation of the base register
8692 for a memory load or store instruction may not involve using the
8693 result of an instruction that might generate an UNPREDICTABLE
8694 result.
8696 (c) Within the trap shadow, no register may be used more than once
8697 as a destination register. (This is to make life easier for the
8698 trap-handler.)
8700 (d) The trap shadow may not include any branch instructions. */
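/* Editorial example: with -mtrap-precision=i and optimization enabled, a
   potentially trapping "addt $f1,$f2,$f3" opens a shadow; later insns that
   do not write $f1 or $f2 (operands live into the shadow, condition (a))
   and do not reuse $f3 as a destination (condition (c)) may stay inside
   it, and the trapb is emitted only when one of (a)-(d) would otherwise
   be violated.  */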
8702 static void
8703 alpha_handle_trap_shadows (void)
8705 struct shadow_summary shadow;
8706 int trap_pending, exception_nesting;
8707 rtx_insn *i, *n;
8709 trap_pending = 0;
8710 exception_nesting = 0;
8711 shadow.used.i = 0;
8712 shadow.used.fp = 0;
8713 shadow.used.mem = 0;
8714 shadow.defd = shadow.used;
8716 for (i = get_insns (); i ; i = NEXT_INSN (i))
8718 if (NOTE_P (i))
8720 switch (NOTE_KIND (i))
8722 case NOTE_INSN_EH_REGION_BEG:
8723 exception_nesting++;
8724 if (trap_pending)
8725 goto close_shadow;
8726 break;
8728 case NOTE_INSN_EH_REGION_END:
8729 exception_nesting--;
8730 if (trap_pending)
8731 goto close_shadow;
8732 break;
8734 case NOTE_INSN_EPILOGUE_BEG:
8735 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8736 goto close_shadow;
8737 break;
8740 else if (trap_pending)
8742 if (alpha_tp == ALPHA_TP_FUNC)
8744 if (JUMP_P (i)
8745 && GET_CODE (PATTERN (i)) == RETURN)
8746 goto close_shadow;
8748 else if (alpha_tp == ALPHA_TP_INSN)
8750 if (optimize > 0)
8752 struct shadow_summary sum;
8754 sum.used.i = 0;
8755 sum.used.fp = 0;
8756 sum.used.mem = 0;
8757 sum.defd = sum.used;
8759 switch (GET_CODE (i))
8761 case INSN:
8762 /* Annoyingly, get_attr_trap will die on these. */
8763 if (GET_CODE (PATTERN (i)) == USE
8764 || GET_CODE (PATTERN (i)) == CLOBBER)
8765 break;
8767 summarize_insn (PATTERN (i), &sum, 0);
8769 if ((sum.defd.i & shadow.defd.i)
8770 || (sum.defd.fp & shadow.defd.fp))
8772 /* (c) would be violated */
8773 goto close_shadow;
8776 /* Combine shadow with summary of current insn: */
8777 shadow.used.i |= sum.used.i;
8778 shadow.used.fp |= sum.used.fp;
8779 shadow.used.mem |= sum.used.mem;
8780 shadow.defd.i |= sum.defd.i;
8781 shadow.defd.fp |= sum.defd.fp;
8782 shadow.defd.mem |= sum.defd.mem;
8784 if ((sum.defd.i & shadow.used.i)
8785 || (sum.defd.fp & shadow.used.fp)
8786 || (sum.defd.mem & shadow.used.mem))
8788 /* (a) would be violated (also takes care of (b)) */
8789 gcc_assert (get_attr_trap (i) != TRAP_YES
8790 || (!(sum.defd.i & sum.used.i)
8791 && !(sum.defd.fp & sum.used.fp)));
8793 goto close_shadow;
8795 break;
8797 case BARRIER:
8798 /* __builtin_unreachable can expand to no code at all,
8799 leaving (barrier) RTXes in the instruction stream. */
8800 goto close_shadow_notrapb;
8802 case JUMP_INSN:
8803 case CALL_INSN:
8804 case CODE_LABEL:
8805 goto close_shadow;
8807 default:
8808 gcc_unreachable ();
8811 else
8813 close_shadow:
8814 n = emit_insn_before (gen_trapb (), i);
8815 PUT_MODE (n, TImode);
8816 PUT_MODE (i, TImode);
8817 close_shadow_notrapb:
8818 trap_pending = 0;
8819 shadow.used.i = 0;
8820 shadow.used.fp = 0;
8821 shadow.used.mem = 0;
8822 shadow.defd = shadow.used;
8827 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8828 && NONJUMP_INSN_P (i)
8829 && GET_CODE (PATTERN (i)) != USE
8830 && GET_CODE (PATTERN (i)) != CLOBBER
8831 && get_attr_trap (i) == TRAP_YES)
8833 if (optimize && !trap_pending)
8834 summarize_insn (PATTERN (i), &shadow, 0);
8835 trap_pending = 1;
8840 /* Alpha can only issue instruction groups simultaneously if they are
8841 suitably aligned. This is very processor-specific. */
8842 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8843 that are marked "fake". These instructions do not exist on that target,
8844 but it is possible to see these insns with deranged combinations of
8845 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8846 choose a result at random. */
8848 enum alphaev4_pipe {
8849 EV4_STOP = 0,
8850 EV4_IB0 = 1,
8851 EV4_IB1 = 2,
8852 EV4_IBX = 4
8855 enum alphaev5_pipe {
8856 EV5_STOP = 0,
8857 EV5_NONE = 1,
8858 EV5_E01 = 2,
8859 EV5_E0 = 4,
8860 EV5_E1 = 8,
8861 EV5_FAM = 16,
8862 EV5_FA = 32,
8863 EV5_FM = 64
8866 static enum alphaev4_pipe
8867 alphaev4_insn_pipe (rtx_insn *insn)
8869 if (recog_memoized (insn) < 0)
8870 return EV4_STOP;
8871 if (get_attr_length (insn) != 4)
8872 return EV4_STOP;
8874 switch (get_attr_type (insn))
8876 case TYPE_ILD:
8877 case TYPE_LDSYM:
8878 case TYPE_FLD:
8879 case TYPE_LD_L:
8880 return EV4_IBX;
8882 case TYPE_IADD:
8883 case TYPE_ILOG:
8884 case TYPE_ICMOV:
8885 case TYPE_ICMP:
8886 case TYPE_FST:
8887 case TYPE_SHIFT:
8888 case TYPE_IMUL:
8889 case TYPE_FBR:
8890 case TYPE_MVI: /* fake */
8891 return EV4_IB0;
8893 case TYPE_IST:
8894 case TYPE_MISC:
8895 case TYPE_IBR:
8896 case TYPE_JSR:
8897 case TYPE_CALLPAL:
8898 case TYPE_FCPYS:
8899 case TYPE_FCMOV:
8900 case TYPE_FADD:
8901 case TYPE_FDIV:
8902 case TYPE_FMUL:
8903 case TYPE_ST_C:
8904 case TYPE_MB:
8905 case TYPE_FSQRT: /* fake */
8906 case TYPE_FTOI: /* fake */
8907 case TYPE_ITOF: /* fake */
8908 return EV4_IB1;
8910 default:
8911 gcc_unreachable ();
8915 static enum alphaev5_pipe
8916 alphaev5_insn_pipe (rtx_insn *insn)
8918 if (recog_memoized (insn) < 0)
8919 return EV5_STOP;
8920 if (get_attr_length (insn) != 4)
8921 return EV5_STOP;
8923 switch (get_attr_type (insn))
8925 case TYPE_ILD:
8926 case TYPE_FLD:
8927 case TYPE_LDSYM:
8928 case TYPE_IADD:
8929 case TYPE_ILOG:
8930 case TYPE_ICMOV:
8931 case TYPE_ICMP:
8932 return EV5_E01;
8934 case TYPE_IST:
8935 case TYPE_FST:
8936 case TYPE_SHIFT:
8937 case TYPE_IMUL:
8938 case TYPE_MISC:
8939 case TYPE_MVI:
8940 case TYPE_LD_L:
8941 case TYPE_ST_C:
8942 case TYPE_MB:
8943 case TYPE_FTOI: /* fake */
8944 case TYPE_ITOF: /* fake */
8945 return EV5_E0;
8947 case TYPE_IBR:
8948 case TYPE_JSR:
8949 case TYPE_CALLPAL:
8950 return EV5_E1;
8952 case TYPE_FCPYS:
8953 return EV5_FAM;
8955 case TYPE_FBR:
8956 case TYPE_FCMOV:
8957 case TYPE_FADD:
8958 case TYPE_FDIV:
8959 case TYPE_FSQRT: /* fake */
8960 return EV5_FA;
8962 case TYPE_FMUL:
8963 return EV5_FM;
8965 default:
8966 gcc_unreachable ();
8970 /* IN_USE is a mask of the slots currently filled within the insn group.
8971 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8972 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8974 LEN is, of course, the length of the group in bytes. */
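/* Editorial note: EV4 is dual-issue, so a full group here is 8 bytes with
   both IB0 and IB1 occupied.  A load-class insn (EV4_IBX) can go down
   either pipe, which is why it claims IB0 first but is treated as movable
   to IB1 when a later IB0-only insn arrives.  */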
8976 static rtx_insn *
8977 alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
8979 int len, in_use;
8981 len = in_use = 0;
8983 if (! INSN_P (insn)
8984 || GET_CODE (PATTERN (insn)) == CLOBBER
8985 || GET_CODE (PATTERN (insn)) == USE)
8986 goto next_and_done;
8988 while (1)
8990 enum alphaev4_pipe pipe;
8992 pipe = alphaev4_insn_pipe (insn);
8993 switch (pipe)
8995 case EV4_STOP:
8996 /* Force complex instructions to start new groups. */
8997 if (in_use)
8998 goto done;
9000 /* If this is a completely unrecognized insn, it's an asm.
9001 We don't know how long it is, so record length as -1 to
9002 signal a needed realignment. */
9003 if (recog_memoized (insn) < 0)
9004 len = -1;
9005 else
9006 len = get_attr_length (insn);
9007 goto next_and_done;
9009 case EV4_IBX:
9010 if (in_use & EV4_IB0)
9012 if (in_use & EV4_IB1)
9013 goto done;
9014 in_use |= EV4_IB1;
9016 else
9017 in_use |= EV4_IB0 | EV4_IBX;
9018 break;
9020 case EV4_IB0:
9021 if (in_use & EV4_IB0)
9023 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9024 goto done;
9025 in_use |= EV4_IB1;
9027 in_use |= EV4_IB0;
9028 break;
9030 case EV4_IB1:
9031 if (in_use & EV4_IB1)
9032 goto done;
9033 in_use |= EV4_IB1;
9034 break;
9036 default:
9037 gcc_unreachable ();
9039 len += 4;
9041 /* Haifa doesn't do well scheduling branches. */
9042 if (JUMP_P (insn))
9043 goto next_and_done;
9045 next:
9046 insn = next_nonnote_insn (insn);
9048 if (!insn || ! INSN_P (insn))
9049 goto done;
9051 /* Let Haifa tell us where it thinks insn group boundaries are. */
9052 if (GET_MODE (insn) == TImode)
9053 goto done;
9055 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9056 goto next;
9059 next_and_done:
9060 insn = next_nonnote_insn (insn);
9062 done:
9063 *plen = len;
9064 *pin_use = in_use;
9065 return insn;
9068 /* IN_USE is a mask of the slots currently filled within the insn group.
9069 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9070 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9072 LEN is, of course, the length of the group in bytes. */
9074 static rtx_insn *
9075 alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
9077 int len, in_use;
9079 len = in_use = 0;
9081 if (! INSN_P (insn)
9082 || GET_CODE (PATTERN (insn)) == CLOBBER
9083 || GET_CODE (PATTERN (insn)) == USE)
9084 goto next_and_done;
9086 while (1)
9088 enum alphaev5_pipe pipe;
9090 pipe = alphaev5_insn_pipe (insn);
9091 switch (pipe)
9093 case EV5_STOP:
9094 /* Force complex instructions to start new groups. */
9095 if (in_use)
9096 goto done;
9098 /* If this is a completely unrecognized insn, it's an asm.
9099 We don't know how long it is, so record length as -1 to
9100 signal a needed realignment. */
9101 if (recog_memoized (insn) < 0)
9102 len = -1;
9103 else
9104 len = get_attr_length (insn);
9105 goto next_and_done;
9107 /* ??? For most of the cases below, we would like to assert that they
9108 never happen, as they would indicate an error either in Haifa or
9109 in the scheduling description. Unfortunately, Haifa never
9110 schedules the last instruction of the BB, so we don't have
9111 an accurate TI bit to go off of. */
9112 case EV5_E01:
9113 if (in_use & EV5_E0)
9115 if (in_use & EV5_E1)
9116 goto done;
9117 in_use |= EV5_E1;
9119 else
9120 in_use |= EV5_E0 | EV5_E01;
9121 break;
9123 case EV5_E0:
9124 if (in_use & EV5_E0)
9126 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9127 goto done;
9128 in_use |= EV5_E1;
9130 in_use |= EV5_E0;
9131 break;
9133 case EV5_E1:
9134 if (in_use & EV5_E1)
9135 goto done;
9136 in_use |= EV5_E1;
9137 break;
9139 case EV5_FAM:
9140 if (in_use & EV5_FA)
9142 if (in_use & EV5_FM)
9143 goto done;
9144 in_use |= EV5_FM;
9146 else
9147 in_use |= EV5_FA | EV5_FAM;
9148 break;
9150 case EV5_FA:
9151 if (in_use & EV5_FA)
9152 goto done;
9153 in_use |= EV5_FA;
9154 break;
9156 case EV5_FM:
9157 if (in_use & EV5_FM)
9158 goto done;
9159 in_use |= EV5_FM;
9160 break;
9162 case EV5_NONE:
9163 break;
9165 default:
9166 gcc_unreachable ();
9168 len += 4;
9170 /* Haifa doesn't do well scheduling branches. */
9171 /* ??? If this is predicted not-taken, slotting continues, except
9172 that no more IBR, FBR, or JSR insns may be slotted. */
9173 if (JUMP_P (insn))
9174 goto next_and_done;
9176 next:
9177 insn = next_nonnote_insn (insn);
9179 if (!insn || ! INSN_P (insn))
9180 goto done;
9182 /* Let Haifa tell us where it thinks insn group boundaries are. */
9183 if (GET_MODE (insn) == TImode)
9184 goto done;
9186 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9187 goto next;
9190 next_and_done:
9191 insn = next_nonnote_insn (insn);
9193 done:
9194 *plen = len;
9195 *pin_use = in_use;
9196 return insn;
9199 static rtx
9200 alphaev4_next_nop (int *pin_use)
9202 int in_use = *pin_use;
9203 rtx nop;
9205 if (!(in_use & EV4_IB0))
9207 in_use |= EV4_IB0;
9208 nop = gen_nop ();
9210 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9212 in_use |= EV4_IB1;
9213 nop = gen_nop ();
9215 else if (TARGET_FP && !(in_use & EV4_IB1))
9217 in_use |= EV4_IB1;
9218 nop = gen_fnop ();
9220 else
9221 nop = gen_unop ();
9223 *pin_use = in_use;
9224 return nop;
9227 static rtx
9228 alphaev5_next_nop (int *pin_use)
9230 int in_use = *pin_use;
9231 rtx nop;
9233 if (!(in_use & EV5_E1))
9235 in_use |= EV5_E1;
9236 nop = gen_nop ();
9238 else if (TARGET_FP && !(in_use & EV5_FA))
9240 in_use |= EV5_FA;
9241 nop = gen_fnop ();
9243 else if (TARGET_FP && !(in_use & EV5_FM))
9245 in_use |= EV5_FM;
9246 nop = gen_fnop ();
9248 else
9249 nop = gen_unop ();
9251 *pin_use = in_use;
9252 return nop;
9255 /* The instruction group alignment main loop. */
9257 static void
9258 alpha_align_insns_1 (unsigned int max_align,
9259 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
9260 rtx (*next_nop) (int *))
9262 /* ALIGN is the known alignment for the insn group. */
9263 unsigned int align;
9264 /* OFS is the offset of the current insn in the insn group. */
9265 int ofs;
9266 int prev_in_use, in_use, len, ldgp;
9267 rtx_insn *i, *next;
9269 /* Let shorten_branches take care of assigning alignments to code labels. */
9270 shorten_branches (get_insns ());
9272 unsigned int option_alignment = align_functions_max_skip + 1;
9273 if (option_alignment < 4)
9274 align = 4;
9275 else if ((unsigned int) option_alignment < max_align)
9276 align = option_alignment;
9277 else
9278 align = max_align;
9280 ofs = prev_in_use = 0;
9281 i = get_insns ();
9282 if (NOTE_P (i))
9283 i = next_nonnote_insn (i);
9285 ldgp = alpha_function_needs_gp ? 8 : 0;
9287 while (i)
9289 next = (*next_group) (i, &in_use, &len);
9291 /* When we see a label, resync alignment etc. */
9292 if (LABEL_P (i))
9294 unsigned int new_align = 1 << label_to_alignment (i);
9296 if (new_align >= align)
9298 align = new_align < max_align ? new_align : max_align;
9299 ofs = 0;
9302 else if (ofs & (new_align-1))
9303 ofs = (ofs | (new_align-1)) + 1;
9304 gcc_assert (!len);
9307 /* Handle complex instructions specially. */
9308 else if (in_use == 0)
9310 /* Asms will have length < 0. This is a signal that we have
9311 lost alignment knowledge. Assume, however, that the asm
9312 will not mis-align instructions. */
9313 if (len < 0)
9315 ofs = 0;
9316 align = 4;
9317 len = 0;
9321 /* If the known alignment is smaller than the recognized insn group,
9322 realign the output. */
9323 else if ((int) align < len)
9325 unsigned int new_log_align = len > 8 ? 4 : 3;
9326 rtx_insn *prev, *where;
9328 where = prev = prev_nonnote_insn (i);
9329 if (!where || !LABEL_P (where))
9330 where = i;
9332 /* Can't realign between a call and its gp reload. */
9333 if (! (TARGET_EXPLICIT_RELOCS
9334 && prev && CALL_P (prev)))
9336 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9337 align = 1 << new_log_align;
9338 ofs = 0;
9342 /* We may not insert padding inside the initial ldgp sequence. */
9343 else if (ldgp > 0)
9344 ldgp -= len;
9346 /* If the group won't fit in the same INT16 as the previous,
9347 we need to add padding to keep the group together. Rather
9348 than simply leaving the insn filling to the assembler, we
9349 can make use of the knowledge of what sorts of instructions
9350 were issued in the previous group to make sure that all of
9351 the added nops are really free. */
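/* Editorial note: the filler nops below are not arbitrary --
   alphaev4_next_nop/alphaev5_next_nop pick a nop, fnop or unop for a pipe
   the previous group left empty (prev_in_use), so the padding can issue
   alongside that group rather than costing extra cycles.  */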
9352 else if (ofs + len > (int) align)
9354 int nop_count = (align - ofs) / 4;
9355 rtx_insn *where;
9357 /* Insert nops before labels, branches, and calls to truly merge
9358 the execution of the nops with the previous instruction group. */
9359 where = prev_nonnote_insn (i);
9360 if (where)
9362 if (LABEL_P (where))
9364 rtx_insn *where2 = prev_nonnote_insn (where);
9365 if (where2 && JUMP_P (where2))
9366 where = where2;
9368 else if (NONJUMP_INSN_P (where))
9369 where = i;
9371 else
9372 where = i;
9375 emit_insn_before ((*next_nop)(&prev_in_use), where);
9376 while (--nop_count);
9377 ofs = 0;
9380 ofs = (ofs + len) & (align - 1);
9381 prev_in_use = in_use;
9382 i = next;
9386 static void
9387 alpha_align_insns (void)
9389 if (alpha_tune == PROCESSOR_EV4)
9390 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9391 else if (alpha_tune == PROCESSOR_EV5)
9392 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9393 else
9394 gcc_unreachable ();
9397 /* Insert an unop between sibcall or noreturn function call and GP load. */
9399 static void
9400 alpha_pad_function_end (void)
9402 rtx_insn *insn, *next;
9404 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9406 if (!CALL_P (insn)
9407 || !(SIBLING_CALL_P (insn)
9408 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9409 continue;
9411 next = next_active_insn (insn);
9412 if (next)
9414 rtx pat = PATTERN (next);
9416 if (GET_CODE (pat) == SET
9417 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9418 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9419 emit_insn_after (gen_unop (), insn);
9424 /* Machine dependent reorg pass. */
9426 static void
9427 alpha_reorg (void)
9429 /* Workaround for a linker error that triggers when an exception
9430 handler immediately follows a sibcall or a noreturn function.
9432 In the sibcall case:
9434 The instruction stream from an object file:
9436 1d8: 00 00 fb 6b jmp (t12)
9437 1dc: 00 00 ba 27 ldah gp,0(ra)
9438 1e0: 00 00 bd 23 lda gp,0(gp)
9439 1e4: 00 00 7d a7 ldq t12,0(gp)
9440 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9442 was converted in the final link pass to:
9444 12003aa88: 67 fa ff c3 br 120039428 <...>
9445 12003aa8c: 00 00 fe 2f unop
9446 12003aa90: 00 00 fe 2f unop
9447 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9448 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9450 And in the noreturn case:
9452 The instruction stream from an object file:
9454 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9455 58: 00 00 ba 27 ldah gp,0(ra)
9456 5c: 00 00 bd 23 lda gp,0(gp)
9457 60: 00 00 7d a7 ldq t12,0(gp)
9458 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9460 was converted in the final link pass to:
9462 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9463 fdb28: 00 00 fe 2f unop
9464 fdb2c: 00 00 fe 2f unop
9465 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9466 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9468 GP load instructions were wrongly cleared by the linker relaxation
9469 pass. This workaround prevents removal of GP loads by inserting
9470 an unop instruction between a sibcall or noreturn function call and
9471 the exception handler prologue. */
9473 if (current_function_has_exception_handlers ())
9474 alpha_pad_function_end ();
9476 /* The CALL_PAL that implements the trap insn updates the program counter
9477 to point after the insn. If the trap is the last insn in the function,
9478 emit a NOP to guarantee that the PC remains inside the function boundaries.
9479 This workaround is needed to get reliable backtraces. */
9481 rtx_insn *insn = prev_active_insn (get_last_insn ());
9483 if (insn && NONJUMP_INSN_P (insn))
9485 rtx pat = PATTERN (insn);
9486 if (GET_CODE (pat) == PARALLEL)
9488 rtx vec = XVECEXP (pat, 0, 0);
9489 if (GET_CODE (vec) == TRAP_IF
9490 && XEXP (vec, 0) == const1_rtx)
9491 emit_insn_after (gen_unop (), insn);
9496 static void
9497 alpha_file_start (void)
9499 default_file_start ();
9501 fputs ("\t.set noreorder\n", asm_out_file);
9502 fputs ("\t.set volatile\n", asm_out_file);
9503 if (TARGET_ABI_OSF)
9504 fputs ("\t.set noat\n", asm_out_file);
9505 if (TARGET_EXPLICIT_RELOCS)
9506 fputs ("\t.set nomacro\n", asm_out_file);
9507 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9509 const char *arch;
9511 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9512 arch = "ev6";
9513 else if (TARGET_MAX)
9514 arch = "pca56";
9515 else if (TARGET_BWX)
9516 arch = "ev56";
9517 else if (alpha_cpu == PROCESSOR_EV5)
9518 arch = "ev5";
9519 else
9520 arch = "ev4";
9522 fprintf (asm_out_file, "\t.arch %s\n", arch);
9526 /* Since we don't have a .dynbss section, we should not allow global
9527 relocations in the .rodata section. */
9529 static int
9530 alpha_elf_reloc_rw_mask (void)
9532 return flag_pic ? 3 : 2;
9535 /* Return a section for X. The only special thing we do here is to
9536 honor small data. */
9538 static section *
9539 alpha_elf_select_rtx_section (machine_mode mode, rtx x,
9540 unsigned HOST_WIDE_INT align)
9542 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9543 /* ??? Consider using mergeable sdata sections. */
9544 return sdata_section;
9545 else
9546 return default_elf_select_rtx_section (mode, x, align);
9549 static unsigned int
9550 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9552 unsigned int flags = 0;
9554 if (strcmp (name, ".sdata") == 0
9555 || strncmp (name, ".sdata.", 7) == 0
9556 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9557 || strcmp (name, ".sbss") == 0
9558 || strncmp (name, ".sbss.", 6) == 0
9559 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9560 flags = SECTION_SMALL;
9562 flags |= default_section_type_flags (decl, name, reloc);
9563 return flags;
9566 /* Structure to collect function names for final output in link section. */
9567 /* Note that items marked with GTY can't be ifdef'ed out. */
9569 enum reloc_kind
9571 KIND_LINKAGE,
9572 KIND_CODEADDR
9575 struct GTY(()) alpha_links
9577 rtx func;
9578 rtx linkage;
9579 enum reloc_kind rkind;
9582 #if TARGET_ABI_OPEN_VMS
9584 /* Return the VMS argument type corresponding to MODE. */
9586 enum avms_arg_type
9587 alpha_arg_type (machine_mode mode)
9589 switch (mode)
9591 case E_SFmode:
9592 return TARGET_FLOAT_VAX ? FF : FS;
9593 case E_DFmode:
9594 return TARGET_FLOAT_VAX ? FD : FT;
9595 default:
9596 return I64;
9600 /* Return an rtx for an integer representing the VMS Argument Information
9601 register value. */
9604 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9606 unsigned HOST_WIDE_INT regval = cum.num_args;
9607 int i;
9609 for (i = 0; i < 6; i++)
9610 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9612 return GEN_INT (regval);
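/* Editorial example: the argument count sits in the low bits and each of
   the six possible register arguments gets a 3-bit type code starting at
   bit 8; e.g. a call passing (double, int) sets bits 8-10 to the code for
   the double and bits 11-13 to I64, with the count 2 in the low bits.  */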
9616 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9617 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9618 this is the reference to the linkage pointer value, 0 if this is the
9619 reference to the function entry value. RFLAG is 1 if this a reduced
9620 reference (code address only), 0 if this is a full reference. */
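/* Editorial note: the linkage symbol built below has the form
   "$<funcdef_no>..<name>..lk" (e.g. "$3..FOO..lk" for a hypothetical FOO);
   LFLAG selects the quadword at offset 8 of the linkage pair, matching the
   "linkage pointer value" described above, while offset 0 holds the
   function entry value.  */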
9623 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9625 struct alpha_links *al = NULL;
9626 const char *name = XSTR (func, 0);
9628 if (cfun->machine->links)
9630 /* Is this name already defined? */
9631 alpha_links **slot = cfun->machine->links->get (name);
9632 if (slot)
9633 al = *slot;
9635 else
9636 cfun->machine->links
9637 = hash_map<nofree_string_hash, alpha_links *>::create_ggc (64);
9639 if (al == NULL)
9641 size_t buf_len;
9642 char *linksym;
9643 tree id;
9645 if (name[0] == '*')
9646 name++;
9648 /* Follow transparent alias, as this is used for CRTL translations. */
9649 id = maybe_get_identifier (name);
9650 if (id)
9652 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9653 id = TREE_CHAIN (id);
9654 name = IDENTIFIER_POINTER (id);
9657 buf_len = strlen (name) + 8 + 9;
9658 linksym = (char *) alloca (buf_len);
9659 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9661 al = ggc_alloc<alpha_links> ();
9662 al->func = func;
9663 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9665 cfun->machine->links->put (ggc_strdup (name), al);
9668 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9670 if (lflag)
9671 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9672 else
9673 return al->linkage;
9676 static int
9677 alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
9679 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9680 if (link->rkind == KIND_CODEADDR)
9682 /* External and used, request code address. */
9683 fprintf (stream, "\t.code_address ");
9685 else
9687 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9688 && SYMBOL_REF_LOCAL_P (link->func))
9690 /* Locally defined, build linkage pair. */
9691 fprintf (stream, "\t.quad %s..en\n", name);
9692 fprintf (stream, "\t.quad ");
9694 else
9696 /* External, request linkage pair. */
9697 fprintf (stream, "\t.linkage ");
9700 assemble_name (stream, name);
9701 fputs ("\n", stream);
9703 return 0;
9706 static void
9707 alpha_write_linkage (FILE *stream, const char *funname)
9709 fprintf (stream, "\t.link\n");
9710 fprintf (stream, "\t.align 3\n");
9711 in_section = NULL;
9713 #ifdef TARGET_VMS_CRASH_DEBUG
9714 fputs ("\t.name ", stream);
9715 assemble_name (stream, funname);
9716 fputs ("..na\n", stream);
9717 #endif
9719 ASM_OUTPUT_LABEL (stream, funname);
9720 fprintf (stream, "\t.pdesc ");
9721 assemble_name (stream, funname);
9722 fprintf (stream, "..en,%s\n",
9723 alpha_procedure_type == PT_STACK ? "stack"
9724 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9726 if (cfun->machine->links)
9728 hash_map<nofree_string_hash, alpha_links *>::iterator iter
9729 = cfun->machine->links->begin ();
9730 for (; iter != cfun->machine->links->end (); ++iter)
9731 alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
9735 /* Switch to an arbitrary section NAME with attributes as specified
9736 by FLAGS. ALIGN specifies any known alignment requirements for
9737 the section; 0 if the default should be used. */
9739 static void
9740 vms_asm_named_section (const char *name, unsigned int flags,
9741 tree decl ATTRIBUTE_UNUSED)
9743 fputc ('\n', asm_out_file);
9744 fprintf (asm_out_file, ".section\t%s", name);
9746 if (flags & SECTION_DEBUG)
9747 fprintf (asm_out_file, ",NOWRT");
9749 fputc ('\n', asm_out_file);
9752 /* Record an element in the table of global constructors. SYMBOL is
9753 a SYMBOL_REF of the function to be called; PRIORITY is a number
9754 between 0 and MAX_INIT_PRIORITY.
9756 Differs from default_ctors_section_asm_out_constructor in that the
9757 width of the .ctors entry is always 64 bits, rather than the 32 bits
9758 used by a normal pointer. */
9760 static void
9761 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9763 switch_to_section (ctors_section);
9764 assemble_align (BITS_PER_WORD);
9765 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9768 static void
9769 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9771 switch_to_section (dtors_section);
9772 assemble_align (BITS_PER_WORD);
9773 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9775 #else
9777 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9778 bool lflag ATTRIBUTE_UNUSED,
9779 bool rflag ATTRIBUTE_UNUSED)
9781 return NULL_RTX;
9784 #endif /* TARGET_ABI_OPEN_VMS */
9786 static void
9787 alpha_init_libfuncs (void)
9789 if (TARGET_ABI_OPEN_VMS)
9791 /* Use the VMS runtime library functions for division and
9792 remainder. */
9793 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9794 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9795 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9796 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9797 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9798 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9799 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9800 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9801 #ifdef MEM_LIBFUNCS_INIT
9802 MEM_LIBFUNCS_INIT;
9803 #endif
9807 /* On the Alpha, we use this to disable the floating-point registers
9808 when they don't exist. */
9810 static void
9811 alpha_conditional_register_usage (void)
9813 int i;
9814 if (! TARGET_FPREGS)
9815 for (i = 32; i < 63; i++)
9816 fixed_regs[i] = call_used_regs[i] = 1;
9819 /* Canonicalize a comparison from one we don't have to one we do have. */
9821 static void
9822 alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9823 bool op0_preserve_value)
9825 if (!op0_preserve_value
9826 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9827 && (REG_P (*op1) || *op1 == const0_rtx))
9829 std::swap (*op0, *op1);
9830 *code = (int)swap_condition ((enum rtx_code)*code);
9833 if ((*code == LT || *code == LTU)
9834 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9836 *code = *code == LT ? LE : LEU;
9837 *op1 = GEN_INT (255);
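/* Editorial note: the rewrite above helps because Alpha operate-format
   instructions take an 8-bit unsigned literal, so "x < 256" cannot use an
   immediate compare while the equivalent "x <= 255" can (cmple/cmpule
   with literal 255).  */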
9841 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
9843 static void
9844 alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
9846 const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);
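/* Editorial note, assuming the OSF ieee_set_fp_control layout: bits 17-22
   of the software FP control word are presumed to hold the sticky status
   (accrued exception) flags, so ANDing with ~SWCR_STATUS_MASK below clears
   only the status bits while leaving the trap-enable bits intact.  */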
9848 tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
9849 tree new_fenv_var, reload_fenv, restore_fnenv;
9850 tree update_call, atomic_feraiseexcept, hold_fnclex;
9852 /* Assume OSF/1 compatible interfaces. */
9853 if (!TARGET_ABI_OSF)
9854 return;
9856 /* Generate the equivalent of :
9857 unsigned long fenv_var;
9858 fenv_var = __ieee_get_fp_control ();
9860 unsigned long masked_fenv;
9861 masked_fenv = fenv_var & mask;
9863 __ieee_set_fp_control (masked_fenv); */
9865 fenv_var = create_tmp_var_raw (long_unsigned_type_node);
9866 get_fpscr
9867 = build_fn_decl ("__ieee_get_fp_control",
9868 build_function_type_list (long_unsigned_type_node, NULL));
9869 set_fpscr
9870 = build_fn_decl ("__ieee_set_fp_control",
9871 build_function_type_list (void_type_node, NULL));
9872 mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
9873 ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
9874 fenv_var, build_call_expr (get_fpscr, 0));
9875 masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
9876 hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
9877 *hold = build2 (COMPOUND_EXPR, void_type_node,
9878 build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
9879 hold_fnclex);
9881 /* Store the value of masked_fenv to clear the exceptions:
9882 __ieee_set_fp_control (masked_fenv); */
9884 *clear = build_call_expr (set_fpscr, 1, masked_fenv);
9886 /* Generate the equivalent of :
9887 unsigned long new_fenv_var;
9888 new_fenv_var = __ieee_get_fp_control ();
9890 __ieee_set_fp_control (fenv_var);
9892 __atomic_feraiseexcept (new_fenv_var); */
9894 new_fenv_var = create_tmp_var_raw (long_unsigned_type_node);
9895 reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
9896 build_call_expr (get_fpscr, 0));
9897 restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
9898 atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
9899 update_call
9900 = build_call_expr (atomic_feraiseexcept, 1,
9901 fold_convert (integer_type_node, new_fenv_var));
9902 *update = build2 (COMPOUND_EXPR, void_type_node,
9903 build2 (COMPOUND_EXPR, void_type_node,
9904 reload_fenv, restore_fnenv), update_call);
9907 /* Implement TARGET_HARD_REGNO_MODE_OK. On Alpha, the integer registers
9908 can hold any mode. The floating-point registers can hold 64-bit
9909 integers as well, but not smaller values. */
9911 static bool
9912 alpha_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9914 if (IN_RANGE (regno, 32, 62))
9915 return (mode == SFmode
9916 || mode == DFmode
9917 || mode == DImode
9918 || mode == SCmode
9919 || mode == DCmode);
9920 return true;
9923 /* Implement TARGET_MODES_TIEABLE_P. This asymmetric test is true when
9924 MODE1 could be put in an FP register but MODE2 could not. */
9926 static bool
9927 alpha_modes_tieable_p (machine_mode mode1, machine_mode mode2)
9929 return (alpha_hard_regno_mode_ok (32, mode1)
9930 ? alpha_hard_regno_mode_ok (32, mode2)
9931 : true);
9934 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
9936 static bool
9937 alpha_can_change_mode_class (machine_mode from, machine_mode to,
9938 reg_class_t rclass)
9940 return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
9941 || !reg_classes_intersect_p (FLOAT_REGS, rclass));
9944 /* Initialize the GCC target structure. */
9945 #if TARGET_ABI_OPEN_VMS
9946 # undef TARGET_ATTRIBUTE_TABLE
9947 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9948 # undef TARGET_CAN_ELIMINATE
9949 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9950 #endif
9952 #undef TARGET_IN_SMALL_DATA_P
9953 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9955 #undef TARGET_ASM_ALIGNED_HI_OP
9956 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9957 #undef TARGET_ASM_ALIGNED_DI_OP
9958 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9960 /* Default unaligned ops are provided for ELF systems. To get unaligned
9961 data for non-ELF systems, we have to turn off auto alignment. */
9962 #if TARGET_ABI_OPEN_VMS
9963 #undef TARGET_ASM_UNALIGNED_HI_OP
9964 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9965 #undef TARGET_ASM_UNALIGNED_SI_OP
9966 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9967 #undef TARGET_ASM_UNALIGNED_DI_OP
9968 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9969 #endif
9971 #undef TARGET_ASM_RELOC_RW_MASK
9972 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9973 #undef TARGET_ASM_SELECT_RTX_SECTION
9974 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9975 #undef TARGET_SECTION_TYPE_FLAGS
9976 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9978 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9979 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9981 #undef TARGET_INIT_LIBFUNCS
9982 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9984 #undef TARGET_LEGITIMIZE_ADDRESS
9985 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9986 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
9987 #define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
9989 #undef TARGET_ASM_FILE_START
9990 #define TARGET_ASM_FILE_START alpha_file_start
9992 #undef TARGET_SCHED_ADJUST_COST
9993 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9994 #undef TARGET_SCHED_ISSUE_RATE
9995 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9996 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9997 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9998 alpha_multipass_dfa_lookahead
10000 #undef TARGET_HAVE_TLS
10001 #define TARGET_HAVE_TLS HAVE_AS_TLS
10003 #undef TARGET_BUILTIN_DECL
10004 #define TARGET_BUILTIN_DECL alpha_builtin_decl
10005 #undef TARGET_INIT_BUILTINS
10006 #define TARGET_INIT_BUILTINS alpha_init_builtins
10007 #undef TARGET_EXPAND_BUILTIN
10008 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10009 #undef TARGET_FOLD_BUILTIN
10010 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10011 #undef TARGET_GIMPLE_FOLD_BUILTIN
10012 #define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin
10014 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10015 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10016 #undef TARGET_CANNOT_COPY_INSN_P
10017 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10018 #undef TARGET_LEGITIMATE_CONSTANT_P
10019 #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
10020 #undef TARGET_CANNOT_FORCE_CONST_MEM
10021 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10023 #if TARGET_ABI_OSF
10024 #undef TARGET_ASM_OUTPUT_MI_THUNK
10025 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10026 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10027 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10028 #undef TARGET_STDARG_OPTIMIZE_HOOK
10029 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10030 #endif
10032 #undef TARGET_PRINT_OPERAND
10033 #define TARGET_PRINT_OPERAND alpha_print_operand
10034 #undef TARGET_PRINT_OPERAND_ADDRESS
10035 #define TARGET_PRINT_OPERAND_ADDRESS alpha_print_operand_address
10036 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
10037 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P alpha_print_operand_punct_valid_p
10039 /* Use 16-bit anchors. */
10040 #undef TARGET_MIN_ANCHOR_OFFSET
10041 #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
10042 #undef TARGET_MAX_ANCHOR_OFFSET
10043 #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
10044 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
10045 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
10047 #undef TARGET_REGISTER_MOVE_COST
10048 #define TARGET_REGISTER_MOVE_COST alpha_register_move_cost
10049 #undef TARGET_MEMORY_MOVE_COST
10050 #define TARGET_MEMORY_MOVE_COST alpha_memory_move_cost
10051 #undef TARGET_RTX_COSTS
10052 #define TARGET_RTX_COSTS alpha_rtx_costs
10053 #undef TARGET_ADDRESS_COST
10054 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
10056 #undef TARGET_MACHINE_DEPENDENT_REORG
10057 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10059 #undef TARGET_PROMOTE_FUNCTION_MODE
10060 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
10061 #undef TARGET_PROMOTE_PROTOTYPES
10062 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10064 #undef TARGET_FUNCTION_VALUE
10065 #define TARGET_FUNCTION_VALUE alpha_function_value
10066 #undef TARGET_LIBCALL_VALUE
10067 #define TARGET_LIBCALL_VALUE alpha_libcall_value
10068 #undef TARGET_FUNCTION_VALUE_REGNO_P
10069 #define TARGET_FUNCTION_VALUE_REGNO_P alpha_function_value_regno_p
10070 #undef TARGET_RETURN_IN_MEMORY
10071 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10072 #undef TARGET_PASS_BY_REFERENCE
10073 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10074 #undef TARGET_SETUP_INCOMING_VARARGS
10075 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10076 #undef TARGET_STRICT_ARGUMENT_NAMING
10077 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10078 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10079 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10080 #undef TARGET_SPLIT_COMPLEX_ARG
10081 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10082 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10083 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10084 #undef TARGET_ARG_PARTIAL_BYTES
10085 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10086 #undef TARGET_FUNCTION_ARG
10087 #define TARGET_FUNCTION_ARG alpha_function_arg
10088 #undef TARGET_FUNCTION_ARG_ADVANCE
10089 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
10090 #undef TARGET_TRAMPOLINE_INIT
10091 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
10093 #undef TARGET_INSTANTIATE_DECLS
10094 #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
10096 #undef TARGET_SECONDARY_RELOAD
10097 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10098 #undef TARGET_SECONDARY_MEMORY_NEEDED
10099 #define TARGET_SECONDARY_MEMORY_NEEDED alpha_secondary_memory_needed
10100 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
10101 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE alpha_secondary_memory_needed_mode
10103 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10104 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10105 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10106 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10108 #undef TARGET_BUILD_BUILTIN_VA_LIST
10109 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10111 #undef TARGET_EXPAND_BUILTIN_VA_START
10112 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10114 #undef TARGET_OPTION_OVERRIDE
10115 #define TARGET_OPTION_OVERRIDE alpha_option_override
10117 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
10118 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
10119 alpha_override_options_after_change
10121 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10122 #undef TARGET_MANGLE_TYPE
10123 #define TARGET_MANGLE_TYPE alpha_mangle_type
10124 #endif
10126 #undef TARGET_LRA_P
10127 #define TARGET_LRA_P hook_bool_void_false
10129 #undef TARGET_LEGITIMATE_ADDRESS_P
10130 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10132 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10133 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
10135 #undef TARGET_CANONICALIZE_COMPARISON
10136 #define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
10138 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
10139 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv
10141 #undef TARGET_HARD_REGNO_MODE_OK
10142 #define TARGET_HARD_REGNO_MODE_OK alpha_hard_regno_mode_ok
10144 #undef TARGET_MODES_TIEABLE_P
10145 #define TARGET_MODES_TIEABLE_P alpha_modes_tieable_p
10147 #undef TARGET_CAN_CHANGE_MODE_CLASS
10148 #define TARGET_CAN_CHANGE_MODE_CLASS alpha_can_change_mode_class
10150 struct gcc_target targetm = TARGET_INITIALIZER;
10153 #include "gt-alpha.h"