1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "calls.h"
30 #include "varasm.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "insn-codes.h"
41 #include "optabs.h"
42 #include "reload.h"
43 #include "obstack.h"
44 #include "except.h"
45 #include "hashtab.h"
46 #include "hash-set.h"
47 #include "vec.h"
48 #include "machmode.h"
49 #include "input.h"
50 #include "function.h"
51 #include "diagnostic-core.h"
52 #include "ggc.h"
53 #include "tm_p.h"
54 #include "target.h"
55 #include "target-def.h"
56 #include "common/common-target.h"
57 #include "debug.h"
58 #include "langhooks.h"
59 #include "hash-map.h"
60 #include "hash-table.h"
61 #include "predict.h"
62 #include "dominance.h"
63 #include "cfg.h"
64 #include "cfgrtl.h"
65 #include "cfganal.h"
66 #include "lcm.h"
67 #include "cfgbuild.h"
68 #include "cfgcleanup.h"
69 #include "basic-block.h"
70 #include "tree-ssa-alias.h"
71 #include "internal-fn.h"
72 #include "gimple-fold.h"
73 #include "tree-eh.h"
74 #include "gimple-expr.h"
75 #include "is-a.h"
76 #include "gimple.h"
77 #include "tree-pass.h"
78 #include "context.h"
79 #include "pass_manager.h"
80 #include "gimple-iterator.h"
81 #include "gimplify.h"
82 #include "gimple-ssa.h"
83 #include "stringpool.h"
84 #include "tree-ssanames.h"
85 #include "tree-stdarg.h"
86 #include "tm-constrs.h"
87 #include "df.h"
88 #include "libfuncs.h"
89 #include "opts.h"
90 #include "params.h"
91 #include "builtins.h"
92 #include "rtl-iter.h"
94 /* Specify which cpu to schedule for. */
95 enum processor_type alpha_tune;
97 /* Which cpu we're generating code for. */
98 enum processor_type alpha_cpu;
100 static const char * const alpha_cpu_name[] =
102 "ev4", "ev5", "ev6"
105 /* Specify how accurate floating-point traps need to be. */
107 enum alpha_trap_precision alpha_tp;
109 /* Specify the floating-point rounding mode. */
111 enum alpha_fp_rounding_mode alpha_fprm;
113 /* Specify which things cause traps. */
115 enum alpha_fp_trap_mode alpha_fptm;
117 /* Nonzero if inside of a function, because the Alpha asm can't
118 handle .files inside of functions. */
120 static int inside_function = FALSE;
122 /* The number of cycles of latency we should assume on memory reads. */
124 int alpha_memory_latency = 3;
126 /* Whether the function needs the GP. */
128 static int alpha_function_needs_gp;
130 /* The assembler name of the current function. */
132 static const char *alpha_fnname;
134 /* The next explicit relocation sequence number. */
135 extern GTY(()) int alpha_next_sequence_number;
136 int alpha_next_sequence_number = 1;
138 /* The literal and gpdisp sequence numbers for this insn, as printed
139 by %# and %* respectively. */
140 extern GTY(()) int alpha_this_literal_sequence_number;
141 extern GTY(()) int alpha_this_gpdisp_sequence_number;
142 int alpha_this_literal_sequence_number;
143 int alpha_this_gpdisp_sequence_number;
145 /* Costs of various operations on the different architectures. */
147 struct alpha_rtx_cost_data
149 unsigned char fp_add;
150 unsigned char fp_mult;
151 unsigned char fp_div_sf;
152 unsigned char fp_div_df;
153 unsigned char int_mult_si;
154 unsigned char int_mult_di;
155 unsigned char int_shift;
156 unsigned char int_cmov;
157 unsigned short int_div;
160 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
162 { /* EV4 */
163 COSTS_N_INSNS (6), /* fp_add */
164 COSTS_N_INSNS (6), /* fp_mult */
165 COSTS_N_INSNS (34), /* fp_div_sf */
166 COSTS_N_INSNS (63), /* fp_div_df */
167 COSTS_N_INSNS (23), /* int_mult_si */
168 COSTS_N_INSNS (23), /* int_mult_di */
169 COSTS_N_INSNS (2), /* int_shift */
170 COSTS_N_INSNS (2), /* int_cmov */
171 COSTS_N_INSNS (97), /* int_div */
173 { /* EV5 */
174 COSTS_N_INSNS (4), /* fp_add */
175 COSTS_N_INSNS (4), /* fp_mult */
176 COSTS_N_INSNS (15), /* fp_div_sf */
177 COSTS_N_INSNS (22), /* fp_div_df */
178 COSTS_N_INSNS (8), /* int_mult_si */
179 COSTS_N_INSNS (12), /* int_mult_di */
180 COSTS_N_INSNS (1) + 1, /* int_shift */
181 COSTS_N_INSNS (1), /* int_cmov */
182 COSTS_N_INSNS (83), /* int_div */
184 { /* EV6 */
185 COSTS_N_INSNS (4), /* fp_add */
186 COSTS_N_INSNS (4), /* fp_mult */
187 COSTS_N_INSNS (12), /* fp_div_sf */
188 COSTS_N_INSNS (15), /* fp_div_df */
189 COSTS_N_INSNS (7), /* int_mult_si */
190 COSTS_N_INSNS (7), /* int_mult_di */
191 COSTS_N_INSNS (1), /* int_shift */
192 COSTS_N_INSNS (2), /* int_cmov */
193 COSTS_N_INSNS (86), /* int_div */
197 /* Similar but tuned for code size instead of execution latency. The
198 extra +N is fractional cost tuning based on latency. It's used to
199 encourage use of cheaper insns like shift, but only if there's just
200 one of them. */
202 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
204 COSTS_N_INSNS (1), /* fp_add */
205 COSTS_N_INSNS (1), /* fp_mult */
206 COSTS_N_INSNS (1), /* fp_div_sf */
207 COSTS_N_INSNS (1) + 1, /* fp_div_df */
208 COSTS_N_INSNS (1) + 1, /* int_mult_si */
209 COSTS_N_INSNS (1) + 2, /* int_mult_di */
210 COSTS_N_INSNS (1), /* int_shift */
211 COSTS_N_INSNS (1), /* int_cmov */
212 COSTS_N_INSNS (6), /* int_div */
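/* Illustrative note (not from the original source): COSTS_N_INSNS (N) is
   N * 4, so e.g. int_mult_di = COSTS_N_INSNS (1) + 2 == 6 sits between one
   insn (4) and two insns (8).  A lone shift (cost 4) is therefore preferred
   over the multiply, but a two-insn shift/add sequence (cost 8) is not,
   which is the "only if there's just one of them" effect described above.  */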
215 /* Get the number of args of a function in one of two ways. */
216 #if TARGET_ABI_OPEN_VMS
217 #define NUM_ARGS crtl->args.info.num_args
218 #else
219 #define NUM_ARGS crtl->args.info
220 #endif
222 #define REG_PV 27
223 #define REG_RA 26
225 /* Declarations of static functions. */
226 static struct machine_function *alpha_init_machine_status (void);
227 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
228 static void alpha_handle_trap_shadows (void);
229 static void alpha_align_insns (void);
231 #if TARGET_ABI_OPEN_VMS
232 static void alpha_write_linkage (FILE *, const char *);
233 static bool vms_valid_pointer_mode (machine_mode);
234 #else
235 #define vms_patch_builtins() gcc_unreachable()
236 #endif
238 static unsigned int
239 rest_of_handle_trap_shadows (void)
241 alpha_handle_trap_shadows ();
242 return 0;
245 namespace {
247 const pass_data pass_data_handle_trap_shadows =
249 RTL_PASS,
250 "trap_shadows", /* name */
251 OPTGROUP_NONE, /* optinfo_flags */
252 TV_NONE, /* tv_id */
253 0, /* properties_required */
254 0, /* properties_provided */
255 0, /* properties_destroyed */
256 0, /* todo_flags_start */
257 TODO_df_finish, /* todo_flags_finish */
260 class pass_handle_trap_shadows : public rtl_opt_pass
262 public:
263 pass_handle_trap_shadows(gcc::context *ctxt)
264 : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
267 /* opt_pass methods: */
268 virtual bool gate (function *)
270 return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
273 virtual unsigned int execute (function *)
275 return rest_of_handle_trap_shadows ();
278 }; // class pass_handle_trap_shadows
280 } // anon namespace
282 rtl_opt_pass *
283 make_pass_handle_trap_shadows (gcc::context *ctxt)
285 return new pass_handle_trap_shadows (ctxt);
288 static unsigned int
289 rest_of_align_insns (void)
291 alpha_align_insns ();
292 return 0;
295 namespace {
297 const pass_data pass_data_align_insns =
299 RTL_PASS,
300 "align_insns", /* name */
301 OPTGROUP_NONE, /* optinfo_flags */
302 TV_NONE, /* tv_id */
303 0, /* properties_required */
304 0, /* properties_provided */
305 0, /* properties_destroyed */
306 0, /* todo_flags_start */
307 TODO_df_finish, /* todo_flags_finish */
310 class pass_align_insns : public rtl_opt_pass
312 public:
313 pass_align_insns(gcc::context *ctxt)
314 : rtl_opt_pass(pass_data_align_insns, ctxt)
317 /* opt_pass methods: */
318 virtual bool gate (function *)
320 /* Due to the number of extra trapb insns, don't bother fixing up
321 alignment when trap precision is instruction. Moreover, we can
322 only do our job when sched2 is run. */
323 return ((alpha_tune == PROCESSOR_EV4
324 || alpha_tune == PROCESSOR_EV5)
325 && optimize && !optimize_size
326 && alpha_tp != ALPHA_TP_INSN
327 && flag_schedule_insns_after_reload);
330 virtual unsigned int execute (function *)
332 return rest_of_align_insns ();
335 }; // class pass_align_insns
337 } // anon namespace
339 rtl_opt_pass *
340 make_pass_align_insns (gcc::context *ctxt)
342 return new pass_align_insns (ctxt);
345 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
346 /* Implement TARGET_MANGLE_TYPE. */
348 static const char *
349 alpha_mangle_type (const_tree type)
351 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
352 && TARGET_LONG_DOUBLE_128)
353 return "g";
355 /* For all other types, use normal C++ mangling. */
356 return NULL;
358 #endif
360 /* Parse target option strings. */
362 static void
363 alpha_option_override (void)
365 static const struct cpu_table {
366 const char *const name;
367 const enum processor_type processor;
368 const int flags;
369 const unsigned short line_size; /* in bytes */
370 const unsigned short l1_size; /* in kb. */
371 const unsigned short l2_size; /* in kb. */
372 } cpu_table[] = {
373 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
374 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
375 had 64k to 8M 8-byte direct Bcache. */
376 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
377 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
378 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
380 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
381 and 1M to 16M 64 byte L3 (not modeled).
382 PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
383 PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
384 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
385 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
386 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
387 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
388 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
389 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
390 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
392 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
393 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
394 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
395 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
396 64, 64, 16*1024 },
397 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
398 64, 64, 16*1024 }
401 opt_pass *pass_handle_trap_shadows = make_pass_handle_trap_shadows (g);
402 struct register_pass_info handle_trap_shadows_info
403 = { pass_handle_trap_shadows, "eh_ranges",
404 1, PASS_POS_INSERT_AFTER
407 opt_pass *pass_align_insns = make_pass_align_insns (g);
408 struct register_pass_info align_insns_info
409 = { pass_align_insns, "shorten",
410 1, PASS_POS_INSERT_BEFORE
413 int const ct_size = ARRAY_SIZE (cpu_table);
414 int line_size = 0, l1_size = 0, l2_size = 0;
415 int i;
417 #ifdef SUBTARGET_OVERRIDE_OPTIONS
418 SUBTARGET_OVERRIDE_OPTIONS;
419 #endif
421 /* Default to full IEEE compliance mode for Go language. */
422 if (strcmp (lang_hooks.name, "GNU Go") == 0
423 && !(target_flags_explicit & MASK_IEEE))
424 target_flags |= MASK_IEEE;
426 alpha_fprm = ALPHA_FPRM_NORM;
427 alpha_tp = ALPHA_TP_PROG;
428 alpha_fptm = ALPHA_FPTM_N;
430 if (TARGET_IEEE)
432 alpha_tp = ALPHA_TP_INSN;
433 alpha_fptm = ALPHA_FPTM_SU;
435 if (TARGET_IEEE_WITH_INEXACT)
437 alpha_tp = ALPHA_TP_INSN;
438 alpha_fptm = ALPHA_FPTM_SUI;
441 if (alpha_tp_string)
443 if (! strcmp (alpha_tp_string, "p"))
444 alpha_tp = ALPHA_TP_PROG;
445 else if (! strcmp (alpha_tp_string, "f"))
446 alpha_tp = ALPHA_TP_FUNC;
447 else if (! strcmp (alpha_tp_string, "i"))
448 alpha_tp = ALPHA_TP_INSN;
449 else
450 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
453 if (alpha_fprm_string)
455 if (! strcmp (alpha_fprm_string, "n"))
456 alpha_fprm = ALPHA_FPRM_NORM;
457 else if (! strcmp (alpha_fprm_string, "m"))
458 alpha_fprm = ALPHA_FPRM_MINF;
459 else if (! strcmp (alpha_fprm_string, "c"))
460 alpha_fprm = ALPHA_FPRM_CHOP;
461 else if (! strcmp (alpha_fprm_string,"d"))
462 alpha_fprm = ALPHA_FPRM_DYN;
463 else
464 error ("bad value %qs for -mfp-rounding-mode switch",
465 alpha_fprm_string);
468 if (alpha_fptm_string)
470 if (strcmp (alpha_fptm_string, "n") == 0)
471 alpha_fptm = ALPHA_FPTM_N;
472 else if (strcmp (alpha_fptm_string, "u") == 0)
473 alpha_fptm = ALPHA_FPTM_U;
474 else if (strcmp (alpha_fptm_string, "su") == 0)
475 alpha_fptm = ALPHA_FPTM_SU;
476 else if (strcmp (alpha_fptm_string, "sui") == 0)
477 alpha_fptm = ALPHA_FPTM_SUI;
478 else
479 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
482 if (alpha_cpu_string)
484 for (i = 0; i < ct_size; i++)
485 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
487 alpha_tune = alpha_cpu = cpu_table[i].processor;
488 line_size = cpu_table[i].line_size;
489 l1_size = cpu_table[i].l1_size;
490 l2_size = cpu_table[i].l2_size;
491 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
492 target_flags |= cpu_table[i].flags;
493 break;
495 if (i == ct_size)
496 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
499 if (alpha_tune_string)
501 for (i = 0; i < ct_size; i++)
502 if (! strcmp (alpha_tune_string, cpu_table [i].name))
504 alpha_tune = cpu_table[i].processor;
505 line_size = cpu_table[i].line_size;
506 l1_size = cpu_table[i].l1_size;
507 l2_size = cpu_table[i].l2_size;
508 break;
510 if (i == ct_size)
511 error ("bad value %qs for -mtune switch", alpha_tune_string);
514 if (line_size)
515 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
516 global_options.x_param_values,
517 global_options_set.x_param_values);
518 if (l1_size)
519 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
520 global_options.x_param_values,
521 global_options_set.x_param_values);
522 if (l2_size)
523 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
524 global_options.x_param_values,
525 global_options_set.x_param_values);
527 /* Do some sanity checks on the above options. */
529 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
530 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
532 warning (0, "fp software completion requires -mtrap-precision=i");
533 alpha_tp = ALPHA_TP_INSN;
536 if (alpha_cpu == PROCESSOR_EV6)
538 /* Except for EV6 pass 1 (not released), we always have precise
 539       arithmetic traps, which means we can do software completion
540 without minding trap shadows. */
541 alpha_tp = ALPHA_TP_PROG;
544 if (TARGET_FLOAT_VAX)
546 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
548 warning (0, "rounding mode not supported for VAX floats");
549 alpha_fprm = ALPHA_FPRM_NORM;
551 if (alpha_fptm == ALPHA_FPTM_SUI)
553 warning (0, "trap mode not supported for VAX floats");
554 alpha_fptm = ALPHA_FPTM_SU;
556 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
557 warning (0, "128-bit long double not supported for VAX floats");
558 target_flags &= ~MASK_LONG_DOUBLE_128;
562 char *end;
563 int lat;
565 if (!alpha_mlat_string)
566 alpha_mlat_string = "L1";
568 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
569 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
571 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
572 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
573 && alpha_mlat_string[2] == '\0')
575 static int const cache_latency[][4] =
577 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
578 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
579 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
582 lat = alpha_mlat_string[1] - '0';
583 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
585 warning (0, "L%d cache latency unknown for %s",
586 lat, alpha_cpu_name[alpha_tune]);
587 lat = 3;
589 else
590 lat = cache_latency[alpha_tune][lat-1];
592 else if (! strcmp (alpha_mlat_string, "main"))
594 /* Most current memories have about 370ns latency. This is
595 a reasonable guess for a fast cpu. */
596 lat = 150;
598 else
600 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
601 lat = 3;
604 alpha_memory_latency = lat;
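/* Illustrative example (not from the original source): with -mtune=ev6 and
   -mmemory-latency=L2, the branch above yields lat = '2' - '0' = 2 and then
   cache_latency[PROCESSOR_EV6][1] == 12, so alpha_memory_latency becomes 12;
   an explicit numeric argument such as -mmemory-latency=5 is used directly.  */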
607 /* Default the definition of "small data" to 8 bytes. */
608 if (!global_options_set.x_g_switch_value)
609 g_switch_value = 8;
611 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
612 if (flag_pic == 1)
613 target_flags |= MASK_SMALL_DATA;
614 else if (flag_pic == 2)
615 target_flags &= ~MASK_SMALL_DATA;
617 /* Align labels and loops for optimal branching. */
618 /* ??? Kludge these by not doing anything if we don't optimize. */
619 if (optimize > 0)
621 if (align_loops <= 0)
622 align_loops = 16;
623 if (align_jumps <= 0)
624 align_jumps = 16;
626 if (align_functions <= 0)
627 align_functions = 16;
629 /* Register variables and functions with the garbage collector. */
631 /* Set up function hooks. */
632 init_machine_status = alpha_init_machine_status;
634 /* Tell the compiler when we're using VAX floating point. */
635 if (TARGET_FLOAT_VAX)
637 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
638 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
639 REAL_MODE_FORMAT (TFmode) = NULL;
642 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
643 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
644 target_flags |= MASK_LONG_DOUBLE_128;
645 #endif
647 /* This needs to be done at start up. It's convenient to do it here. */
648 register_pass (&handle_trap_shadows_info);
649 register_pass (&align_insns_info);
652 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
655 zap_mask (HOST_WIDE_INT value)
657 int i;
659 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
660 i++, value >>= 8)
661 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
662 return 0;
664 return 1;
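/* Illustrative note (not from the original source): zap_mask accepts values
   such as 0xffffffff00000000 or 0x00ff00ff00ff00ff, where every byte is
   either 0x00 or 0xff, and rejects e.g. 0x00000000000000f0.  These are
   exactly the masks a single ZAP/ZAPNOT instruction can apply via its
   8-bit byte-select literal.  */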
667 /* Return true if OP is valid for a particular TLS relocation.
668 We are already guaranteed that OP is a CONST. */
671 tls_symbolic_operand_1 (rtx op, int size, int unspec)
673 op = XEXP (op, 0);
675 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
676 return 0;
677 op = XVECEXP (op, 0, 0);
679 if (GET_CODE (op) != SYMBOL_REF)
680 return 0;
682 switch (SYMBOL_REF_TLS_MODEL (op))
684 case TLS_MODEL_LOCAL_DYNAMIC:
685 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
686 case TLS_MODEL_INITIAL_EXEC:
687 return unspec == UNSPEC_TPREL && size == 64;
688 case TLS_MODEL_LOCAL_EXEC:
689 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
690 default:
691 gcc_unreachable ();
695 /* Used by aligned_memory_operand and unaligned_memory_operand to
696 resolve what reload is going to do with OP if it's a register. */
699 resolve_reload_operand (rtx op)
701 if (reload_in_progress)
703 rtx tmp = op;
704 if (GET_CODE (tmp) == SUBREG)
705 tmp = SUBREG_REG (tmp);
706 if (REG_P (tmp)
707 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
709 op = reg_equiv_memory_loc (REGNO (tmp));
710 if (op == 0)
711 return 0;
714 return op;
 717 /* The scalar modes supported differ from the default check-what-c-supports
718 version in that sometimes TFmode is available even when long double
719 indicates only DFmode. */
721 static bool
722 alpha_scalar_mode_supported_p (machine_mode mode)
724 switch (mode)
726 case QImode:
727 case HImode:
728 case SImode:
729 case DImode:
730 case TImode: /* via optabs.c */
731 return true;
733 case SFmode:
734 case DFmode:
735 return true;
737 case TFmode:
738 return TARGET_HAS_XFLOATING_LIBS;
740 default:
741 return false;
745 /* Alpha implements a couple of integer vector mode operations when
746 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
747 which allows the vectorizer to operate on e.g. move instructions,
748 or when expand_vector_operations can do something useful. */
750 static bool
751 alpha_vector_mode_supported_p (machine_mode mode)
753 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
756 /* Return 1 if this function can directly return via $26. */
759 direct_return (void)
761 return (TARGET_ABI_OSF
762 && reload_completed
763 && alpha_sa_size () == 0
764 && get_frame_size () == 0
765 && crtl->outgoing_args_size == 0
766 && crtl->args.pretend_args_size == 0);
769 /* Return the TLS model to use for SYMBOL. */
771 static enum tls_model
772 tls_symbolic_operand_type (rtx symbol)
774 enum tls_model model;
776 if (GET_CODE (symbol) != SYMBOL_REF)
777 return TLS_MODEL_NONE;
778 model = SYMBOL_REF_TLS_MODEL (symbol);
780 /* Local-exec with a 64-bit size is the same code as initial-exec. */
781 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
782 model = TLS_MODEL_INITIAL_EXEC;
784 return model;
787 /* Return true if the function DECL will share the same GP as any
788 function in the current unit of translation. */
790 static bool
791 decl_has_samegp (const_tree decl)
793 /* Functions that are not local can be overridden, and thus may
794 not share the same gp. */
795 if (!(*targetm.binds_local_p) (decl))
796 return false;
798 /* If -msmall-data is in effect, assume that there is only one GP
799 for the module, and so any local symbol has this property. We
800 need explicit relocations to be able to enforce this for symbols
801 not defined in this unit of translation, however. */
802 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
803 return true;
805 /* Functions that are not external are defined in this UoT. */
806 /* ??? Irritatingly, static functions not yet emitted are still
807 marked "external". Apply this to non-static functions only. */
808 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
811 /* Return true if EXP should be placed in the small data section. */
813 static bool
814 alpha_in_small_data_p (const_tree exp)
816 /* We want to merge strings, so we never consider them small data. */
817 if (TREE_CODE (exp) == STRING_CST)
818 return false;
820 /* Functions are never in the small data area. Duh. */
821 if (TREE_CODE (exp) == FUNCTION_DECL)
822 return false;
824 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
826 const char *section = DECL_SECTION_NAME (exp);
827 if (strcmp (section, ".sdata") == 0
828 || strcmp (section, ".sbss") == 0)
829 return true;
831 else
833 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
835 /* If this is an incomplete type with size 0, then we can't put it
836 in sdata because it might be too big when completed. */
837 if (size > 0 && size <= g_switch_value)
838 return true;
841 return false;
844 #if TARGET_ABI_OPEN_VMS
845 static bool
846 vms_valid_pointer_mode (machine_mode mode)
848 return (mode == SImode || mode == DImode);
851 static bool
852 alpha_linkage_symbol_p (const char *symname)
854 int symlen = strlen (symname);
856 if (symlen > 4)
857 return strcmp (&symname [symlen - 4], "..lk") == 0;
859 return false;
862 #define LINKAGE_SYMBOL_REF_P(X) \
863 ((GET_CODE (X) == SYMBOL_REF \
864 && alpha_linkage_symbol_p (XSTR (X, 0))) \
865 || (GET_CODE (X) == CONST \
866 && GET_CODE (XEXP (X, 0)) == PLUS \
867 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
868 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
869 #endif
871 /* legitimate_address_p recognizes an RTL expression that is a valid
872 memory address for an instruction. The MODE argument is the
873 machine mode for the MEM expression that wants to use this address.
875 For Alpha, we have either a constant address or the sum of a
876 register and a constant address, or just a register. For DImode,
 877    any of those forms can be surrounded with an AND that clears the
878 low-order three bits; this is an "unaligned" access. */
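/* Illustrative examples (not from the original source): a bare register
   ($1), a register plus a 16-bit displacement (64($1)), and a bare 16-bit
   constant address are all accepted below; for DImode, so is
   (and (plus $1 6) -8), the form ldq_u uses to reach the aligned quadword
   containing an unaligned datum.  */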
880 static bool
881 alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
883 /* If this is an ldq_u type address, discard the outer AND. */
884 if (mode == DImode
885 && GET_CODE (x) == AND
886 && CONST_INT_P (XEXP (x, 1))
887 && INTVAL (XEXP (x, 1)) == -8)
888 x = XEXP (x, 0);
890 /* Discard non-paradoxical subregs. */
891 if (GET_CODE (x) == SUBREG
892 && (GET_MODE_SIZE (GET_MODE (x))
893 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
894 x = SUBREG_REG (x);
896 /* Unadorned general registers are valid. */
897 if (REG_P (x)
898 && (strict
899 ? STRICT_REG_OK_FOR_BASE_P (x)
900 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
901 return true;
903 /* Constant addresses (i.e. +/- 32k) are valid. */
904 if (CONSTANT_ADDRESS_P (x))
905 return true;
907 #if TARGET_ABI_OPEN_VMS
908 if (LINKAGE_SYMBOL_REF_P (x))
909 return true;
910 #endif
912 /* Register plus a small constant offset is valid. */
913 if (GET_CODE (x) == PLUS)
915 rtx ofs = XEXP (x, 1);
916 x = XEXP (x, 0);
918 /* Discard non-paradoxical subregs. */
919 if (GET_CODE (x) == SUBREG
920 && (GET_MODE_SIZE (GET_MODE (x))
921 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
922 x = SUBREG_REG (x);
924 if (REG_P (x))
926 if (! strict
927 && NONSTRICT_REG_OK_FP_BASE_P (x)
928 && CONST_INT_P (ofs))
929 return true;
930 if ((strict
931 ? STRICT_REG_OK_FOR_BASE_P (x)
932 : NONSTRICT_REG_OK_FOR_BASE_P (x))
933 && CONSTANT_ADDRESS_P (ofs))
934 return true;
938 /* If we're managing explicit relocations, LO_SUM is valid, as are small
939 data symbols. Avoid explicit relocations of modes larger than word
940 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
941 else if (TARGET_EXPLICIT_RELOCS
942 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
944 if (small_symbolic_operand (x, Pmode))
945 return true;
947 if (GET_CODE (x) == LO_SUM)
949 rtx ofs = XEXP (x, 1);
950 x = XEXP (x, 0);
952 /* Discard non-paradoxical subregs. */
953 if (GET_CODE (x) == SUBREG
954 && (GET_MODE_SIZE (GET_MODE (x))
955 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
956 x = SUBREG_REG (x);
958 /* Must have a valid base register. */
959 if (! (REG_P (x)
960 && (strict
961 ? STRICT_REG_OK_FOR_BASE_P (x)
962 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
963 return false;
965 /* The symbol must be local. */
966 if (local_symbolic_operand (ofs, Pmode)
967 || dtp32_symbolic_operand (ofs, Pmode)
968 || tp32_symbolic_operand (ofs, Pmode))
969 return true;
973 return false;
976 /* Build the SYMBOL_REF for __tls_get_addr. */
978 static GTY(()) rtx tls_get_addr_libfunc;
980 static rtx
981 get_tls_get_addr (void)
983 if (!tls_get_addr_libfunc)
984 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
985 return tls_get_addr_libfunc;
988 /* Try machine-dependent ways of modifying an illegitimate address
989 to be legitimate. If we find one, return the new, valid address. */
991 static rtx
992 alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
994 HOST_WIDE_INT addend;
996 /* If the address is (plus reg const_int) and the CONST_INT is not a
997 valid offset, compute the high part of the constant and add it to
998 the register. Then our address is (plus temp low-part-const). */
999 if (GET_CODE (x) == PLUS
1000 && REG_P (XEXP (x, 0))
1001 && CONST_INT_P (XEXP (x, 1))
1002 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1004 addend = INTVAL (XEXP (x, 1));
1005 x = XEXP (x, 0);
1006 goto split_addend;
1009 /* If the address is (const (plus FOO const_int)), find the low-order
1010 part of the CONST_INT. Then load FOO plus any high-order part of the
1011 CONST_INT into a register. Our address is (plus reg low-part-const).
1012 This is done to reduce the number of GOT entries. */
1013 if (can_create_pseudo_p ()
1014 && GET_CODE (x) == CONST
1015 && GET_CODE (XEXP (x, 0)) == PLUS
1016 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
1018 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1019 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1020 goto split_addend;
1023 /* If we have a (plus reg const), emit the load as in (2), then add
1024 the two registers, and finally generate (plus reg low-part-const) as
1025 our address. */
1026 if (can_create_pseudo_p ()
1027 && GET_CODE (x) == PLUS
1028 && REG_P (XEXP (x, 0))
1029 && GET_CODE (XEXP (x, 1)) == CONST
1030 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1031 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
1033 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1034 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1035 XEXP (XEXP (XEXP (x, 1), 0), 0),
1036 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1037 goto split_addend;
1040 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
1041 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
1042 around +/- 32k offset. */
1043 if (TARGET_EXPLICIT_RELOCS
1044 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1045 && symbolic_operand (x, Pmode))
1047 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1049 switch (tls_symbolic_operand_type (x))
1051 case TLS_MODEL_NONE:
1052 break;
1054 case TLS_MODEL_GLOBAL_DYNAMIC:
1055 start_sequence ();
1057 r0 = gen_rtx_REG (Pmode, 0);
1058 r16 = gen_rtx_REG (Pmode, 16);
1059 tga = get_tls_get_addr ();
1060 dest = gen_reg_rtx (Pmode);
1061 seq = GEN_INT (alpha_next_sequence_number++);
1063 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1064 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1065 insn = emit_call_insn (insn);
1066 RTL_CONST_CALL_P (insn) = 1;
1067 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1069 insn = get_insns ();
1070 end_sequence ();
1072 emit_libcall_block (insn, dest, r0, x);
1073 return dest;
1075 case TLS_MODEL_LOCAL_DYNAMIC:
1076 start_sequence ();
1078 r0 = gen_rtx_REG (Pmode, 0);
1079 r16 = gen_rtx_REG (Pmode, 16);
1080 tga = get_tls_get_addr ();
1081 scratch = gen_reg_rtx (Pmode);
1082 seq = GEN_INT (alpha_next_sequence_number++);
1084 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1085 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1086 insn = emit_call_insn (insn);
1087 RTL_CONST_CALL_P (insn) = 1;
1088 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1090 insn = get_insns ();
1091 end_sequence ();
1093 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1094 UNSPEC_TLSLDM_CALL);
1095 emit_libcall_block (insn, scratch, r0, eqv);
1097 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1098 eqv = gen_rtx_CONST (Pmode, eqv);
1100 if (alpha_tls_size == 64)
1102 dest = gen_reg_rtx (Pmode);
1103 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1104 emit_insn (gen_adddi3 (dest, dest, scratch));
1105 return dest;
1107 if (alpha_tls_size == 32)
1109 insn = gen_rtx_HIGH (Pmode, eqv);
1110 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1111 scratch = gen_reg_rtx (Pmode);
1112 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1114 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1116 case TLS_MODEL_INITIAL_EXEC:
1117 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1118 eqv = gen_rtx_CONST (Pmode, eqv);
1119 tp = gen_reg_rtx (Pmode);
1120 scratch = gen_reg_rtx (Pmode);
1121 dest = gen_reg_rtx (Pmode);
1123 emit_insn (gen_get_thread_pointerdi (tp));
1124 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1125 emit_insn (gen_adddi3 (dest, tp, scratch));
1126 return dest;
1128 case TLS_MODEL_LOCAL_EXEC:
1129 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1130 eqv = gen_rtx_CONST (Pmode, eqv);
1131 tp = gen_reg_rtx (Pmode);
1133 emit_insn (gen_get_thread_pointerdi (tp));
1134 if (alpha_tls_size == 32)
1136 insn = gen_rtx_HIGH (Pmode, eqv);
1137 insn = gen_rtx_PLUS (Pmode, tp, insn);
1138 tp = gen_reg_rtx (Pmode);
1139 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1141 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1143 default:
1144 gcc_unreachable ();
1147 if (local_symbolic_operand (x, Pmode))
1149 if (small_symbolic_operand (x, Pmode))
1150 return x;
1151 else
1153 if (can_create_pseudo_p ())
1154 scratch = gen_reg_rtx (Pmode);
1155 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1156 gen_rtx_HIGH (Pmode, x)));
1157 return gen_rtx_LO_SUM (Pmode, scratch, x);
1162 return NULL;
1164 split_addend:
1166 HOST_WIDE_INT low, high;
1168 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1169 addend -= low;
1170 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1171 addend -= high;
1173 if (addend)
1174 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1175 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1176 1, OPTAB_LIB_WIDEN);
1177 if (high)
1178 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1179 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1180 1, OPTAB_LIB_WIDEN);
1182 return plus_constant (Pmode, x, low);
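/* Worked example (illustrative, not from the original source): for
   addend == 0x30007fff the code above computes low == 0x7fff and
   high == 0x30000000, leaving no residue, so the address becomes
   (reg + 0x30000000) + 0x7fff -- typically one ldah plus a 16-bit
   displacement folded into the memory reference.  */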
1187 /* Try machine-dependent ways of modifying an illegitimate address
1188 to be legitimate. Return X or the new, valid address. */
1190 static rtx
1191 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1192 machine_mode mode)
1194 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1195 return new_x ? new_x : x;
1198 /* Return true if ADDR has an effect that depends on the machine mode it
1199 is used for. On the Alpha this is true only for the unaligned modes.
1200 We can simplify the test since we know that the address must be valid. */
1202 static bool
1203 alpha_mode_dependent_address_p (const_rtx addr,
1204 addr_space_t as ATTRIBUTE_UNUSED)
1206 return GET_CODE (addr) == AND;
1209 /* Primarily this is required for TLS symbols, but given that our move
1210 patterns *ought* to be able to handle any symbol at any time, we
1211 should never be spilling symbolic operands to the constant pool, ever. */
1213 static bool
1214 alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1216 enum rtx_code code = GET_CODE (x);
1217 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1220 /* We do not allow indirect calls to be optimized into sibling calls, nor
1221 can we allow a call to a function with a different GP to be optimized
1222 into a sibcall. */
1224 static bool
1225 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1227 /* Can't do indirect tail calls, since we don't know if the target
1228 uses the same GP. */
1229 if (!decl)
1230 return false;
1232 /* Otherwise, we can make a tail call if the target function shares
1233 the same GP. */
1234 return decl_has_samegp (decl);
1237 bool
1238 some_small_symbolic_operand_int (rtx x)
1240 subrtx_var_iterator::array_type array;
1241 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
1243 rtx x = *iter;
1244 /* Don't re-split. */
1245 if (GET_CODE (x) == LO_SUM)
1246 iter.skip_subrtxes ();
1247 else if (small_symbolic_operand (x, Pmode))
1248 return true;
1250 return false;
1254 split_small_symbolic_operand (rtx x)
1256 x = copy_insn (x);
1257 subrtx_ptr_iterator::array_type array;
1258 FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
1260 rtx *ptr = *iter;
1261 rtx x = *ptr;
1262 /* Don't re-split. */
1263 if (GET_CODE (x) == LO_SUM)
1264 iter.skip_subrtxes ();
1265 else if (small_symbolic_operand (x, Pmode))
1267 *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1268 iter.skip_subrtxes ();
1271 return x;
1274 /* Indicate that INSN cannot be duplicated. This is true for any insn
1275 that we've marked with gpdisp relocs, since those have to stay in
1276 1-1 correspondence with one another.
1278 Technically we could copy them if we could set up a mapping from one
1279 sequence number to another, across the set of insns to be duplicated.
1280 This seems overly complicated and error-prone since interblock motion
1281 from sched-ebb could move one of the pair of insns to a different block.
1283 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1284 then they'll be in a different block from their ldgp. Which could lead
1285 the bb reorder code to think that it would be ok to copy just the block
1286 containing the call and branch to the block containing the ldgp. */
1288 static bool
1289 alpha_cannot_copy_insn_p (rtx_insn *insn)
1291 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1292 return false;
1293 if (recog_memoized (insn) >= 0)
1294 return get_attr_cannot_copy (insn);
1295 else
1296 return false;
1300 /* Try a machine-dependent way of reloading an illegitimate address
1301 operand. If we find one, push the reload and return the new rtx. */
1304 alpha_legitimize_reload_address (rtx x,
1305 machine_mode mode ATTRIBUTE_UNUSED,
1306 int opnum, int type,
1307 int ind_levels ATTRIBUTE_UNUSED)
1309 /* We must recognize output that we have already generated ourselves. */
1310 if (GET_CODE (x) == PLUS
1311 && GET_CODE (XEXP (x, 0)) == PLUS
1312 && REG_P (XEXP (XEXP (x, 0), 0))
1313 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1314 && CONST_INT_P (XEXP (x, 1)))
1316 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1317 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1318 opnum, (enum reload_type) type);
1319 return x;
1322 /* We wish to handle large displacements off a base register by
1323 splitting the addend across an ldah and the mem insn. This
1324 cuts number of extra insns needed from 3 to 1. */
1325 if (GET_CODE (x) == PLUS
1326 && REG_P (XEXP (x, 0))
1327 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1328 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1329 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1331 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1332 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1333 HOST_WIDE_INT high
1334 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1336 /* Check for 32-bit overflow. */
1337 if (high + low != val)
1338 return NULL_RTX;
1340 /* Reload the high part into a base reg; leave the low part
1341 in the mem directly. */
1342 x = gen_rtx_PLUS (GET_MODE (x),
1343 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1344 GEN_INT (high)),
1345 GEN_INT (low));
1347 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1348 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1349 opnum, (enum reload_type) type);
1350 return x;
1353 return NULL_RTX;
1356 /* Compute a (partial) cost for rtx X. Return true if the complete
1357 cost has been computed, and false if subexpressions should be
1358 scanned. In either case, *TOTAL contains the cost result. */
1360 static bool
1361 alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
1362 bool speed)
1364 machine_mode mode = GET_MODE (x);
1365 bool float_mode_p = FLOAT_MODE_P (mode);
1366 const struct alpha_rtx_cost_data *cost_data;
1368 if (!speed)
1369 cost_data = &alpha_rtx_cost_size;
1370 else
1371 cost_data = &alpha_rtx_cost_data[alpha_tune];
1373 switch (code)
1375 case CONST_INT:
1376 /* If this is an 8-bit constant, return zero since it can be used
1377 nearly anywhere with no cost. If it is a valid operand for an
1378 ADD or AND, likewise return 0 if we know it will be used in that
1379 context. Otherwise, return 2 since it might be used there later.
1380 All other constants take at least two insns. */
1381 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1383 *total = 0;
1384 return true;
1386 /* FALLTHRU */
1388 case CONST_DOUBLE:
1389 if (x == CONST0_RTX (mode))
1390 *total = 0;
1391 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1392 || (outer_code == AND && and_operand (x, VOIDmode)))
1393 *total = 0;
1394 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1395 *total = 2;
1396 else
1397 *total = COSTS_N_INSNS (2);
1398 return true;
1400 case CONST:
1401 case SYMBOL_REF:
1402 case LABEL_REF:
1403 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1404 *total = COSTS_N_INSNS (outer_code != MEM);
1405 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1406 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1407 else if (tls_symbolic_operand_type (x))
1408 /* Estimate of cost for call_pal rduniq. */
1409 /* ??? How many insns do we emit here? More than one... */
1410 *total = COSTS_N_INSNS (15);
1411 else
1412 /* Otherwise we do a load from the GOT. */
1413 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1414 return true;
1416 case HIGH:
1417 /* This is effectively an add_operand. */
1418 *total = 2;
1419 return true;
1421 case PLUS:
1422 case MINUS:
1423 if (float_mode_p)
1424 *total = cost_data->fp_add;
1425 else if (GET_CODE (XEXP (x, 0)) == MULT
1426 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1428 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1429 (enum rtx_code) outer_code, opno, speed)
1430 + rtx_cost (XEXP (x, 1),
1431 (enum rtx_code) outer_code, opno, speed)
1432 + COSTS_N_INSNS (1));
1433 return true;
1435 return false;
1437 case MULT:
1438 if (float_mode_p)
1439 *total = cost_data->fp_mult;
1440 else if (mode == DImode)
1441 *total = cost_data->int_mult_di;
1442 else
1443 *total = cost_data->int_mult_si;
1444 return false;
1446 case ASHIFT:
1447 if (CONST_INT_P (XEXP (x, 1))
1448 && INTVAL (XEXP (x, 1)) <= 3)
1450 *total = COSTS_N_INSNS (1);
1451 return false;
1453 /* FALLTHRU */
1455 case ASHIFTRT:
1456 case LSHIFTRT:
1457 *total = cost_data->int_shift;
1458 return false;
1460 case IF_THEN_ELSE:
1461 if (float_mode_p)
1462 *total = cost_data->fp_add;
1463 else
1464 *total = cost_data->int_cmov;
1465 return false;
1467 case DIV:
1468 case UDIV:
1469 case MOD:
1470 case UMOD:
1471 if (!float_mode_p)
1472 *total = cost_data->int_div;
1473 else if (mode == SFmode)
1474 *total = cost_data->fp_div_sf;
1475 else
1476 *total = cost_data->fp_div_df;
1477 return false;
1479 case MEM:
1480 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1481 return true;
1483 case NEG:
1484 if (! float_mode_p)
1486 *total = COSTS_N_INSNS (1);
1487 return false;
1489 /* FALLTHRU */
1491 case ABS:
1492 if (! float_mode_p)
1494 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1495 return false;
1497 /* FALLTHRU */
1499 case FLOAT:
1500 case UNSIGNED_FLOAT:
1501 case FIX:
1502 case UNSIGNED_FIX:
1503 case FLOAT_TRUNCATE:
1504 *total = cost_data->fp_add;
1505 return false;
1507 case FLOAT_EXTEND:
1508 if (MEM_P (XEXP (x, 0)))
1509 *total = 0;
1510 else
1511 *total = cost_data->fp_add;
1512 return false;
1514 default:
1515 return false;
1519 /* REF is an alignable memory location. Place an aligned SImode
1520 reference into *PALIGNED_MEM and the number of bits to shift into
1521    *PBITNUM.  */
1524 void
1525 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1527 rtx base;
1528 HOST_WIDE_INT disp, offset;
1530 gcc_assert (MEM_P (ref));
1532 if (reload_in_progress
1533 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1535 base = find_replacement (&XEXP (ref, 0));
1536 gcc_assert (memory_address_p (GET_MODE (ref), base));
1538 else
1539 base = XEXP (ref, 0);
1541 if (GET_CODE (base) == PLUS)
1542 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1543 else
1544 disp = 0;
1546 /* Find the byte offset within an aligned word. If the memory itself is
1547 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1548 will have examined the base register and determined it is aligned, and
1549 thus displacements from it are naturally alignable. */
1550 if (MEM_ALIGN (ref) >= 32)
1551 offset = 0;
1552 else
1553 offset = disp & 3;
1555   /* The location should not cross an aligned word boundary.  */
1556 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1557 <= GET_MODE_SIZE (SImode));
1559 /* Access the entire aligned word. */
1560 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1562 /* Convert the byte offset within the word to a bit offset. */
1563 offset *= BITS_PER_UNIT;
1564 *pbitnum = GEN_INT (offset);
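/* Worked example (illustrative, not from the original source): for a
   HImode reference at 6($1) with no alignment info, disp == 6 gives
   offset == 2, *PALIGNED_MEM becomes the SImode word at 4($1), and
   *PBITNUM becomes 16 -- the datum sits 16 bits into that aligned word.  */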
1567 /* Similar, but just get the address.  Handle the two reload cases.  */
1571 get_unaligned_address (rtx ref)
1573 rtx base;
1574 HOST_WIDE_INT offset = 0;
1576 gcc_assert (MEM_P (ref));
1578 if (reload_in_progress
1579 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1581 base = find_replacement (&XEXP (ref, 0));
1583 gcc_assert (memory_address_p (GET_MODE (ref), base));
1585 else
1586 base = XEXP (ref, 0);
1588 if (GET_CODE (base) == PLUS)
1589 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1591 return plus_constant (Pmode, base, offset);
1594 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1595 X is always returned in a register. */
1598 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1600 if (GET_CODE (addr) == PLUS)
1602 ofs += INTVAL (XEXP (addr, 1));
1603 addr = XEXP (addr, 0);
1606 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1607 NULL_RTX, 1, OPTAB_LIB_WIDEN);
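/* Illustrative example (not from the original source): for ADDR == $1 + 12
   and OFS == 3, the displacement is folded so OFS becomes 15 and ADDR
   becomes $1, and the function returns a register holding $1 + 7, whose
   low three bits equal those of the original byte address $1 + 15.  */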
1610 /* On the Alpha, all (non-symbolic) constants except zero go into
1611 a floating-point register via memory. Note that we cannot
1612 return anything that is not a subset of RCLASS, and that some
1613 symbolic constants cannot be dropped to memory. */
1615 enum reg_class
1616 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1618 /* Zero is present in any register class. */
1619 if (x == CONST0_RTX (GET_MODE (x)))
1620 return rclass;
1622 /* These sorts of constants we can easily drop to memory. */
1623 if (CONST_INT_P (x)
1624 || GET_CODE (x) == CONST_DOUBLE
1625 || GET_CODE (x) == CONST_VECTOR)
1627 if (rclass == FLOAT_REGS)
1628 return NO_REGS;
1629 if (rclass == ALL_REGS)
1630 return GENERAL_REGS;
1631 return rclass;
1634 /* All other kinds of constants should not (and in the case of HIGH
1635 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1636 secondary reload. */
1637 if (CONSTANT_P (x))
1638 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1640 return rclass;
1643 /* Inform reload about cases where moving X with a mode MODE to a register in
1644 RCLASS requires an extra scratch or immediate register. Return the class
1645 needed for the immediate register. */
1647 static reg_class_t
1648 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1649 machine_mode mode, secondary_reload_info *sri)
1651 enum reg_class rclass = (enum reg_class) rclass_i;
1653 /* Loading and storing HImode or QImode values to and from memory
1654 usually requires a scratch register. */
1655 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1657 if (any_memory_operand (x, mode))
1659 if (in_p)
1661 if (!aligned_memory_operand (x, mode))
1662 sri->icode = direct_optab_handler (reload_in_optab, mode);
1664 else
1665 sri->icode = direct_optab_handler (reload_out_optab, mode);
1666 return NO_REGS;
1670 /* We also cannot do integral arithmetic into FP regs, as might result
1671 from register elimination into a DImode fp register. */
1672 if (rclass == FLOAT_REGS)
1674 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1675 return GENERAL_REGS;
1676 if (in_p && INTEGRAL_MODE_P (mode)
1677 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1678 return GENERAL_REGS;
1681 return NO_REGS;
1684 /* Given SEQ, which is an INSN list, look for any MEMs in either
1685 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1686 volatile flags from REF into each of the MEMs found. If REF is not
1687 a MEM, don't do anything. */
1689 void
1690 alpha_set_memflags (rtx seq, rtx ref)
1692 rtx_insn *insn;
1694 if (!MEM_P (ref))
1695 return;
1697 /* This is only called from alpha.md, after having had something
1698 generated from one of the insn patterns. So if everything is
1699 zero, the pattern is already up-to-date. */
1700 if (!MEM_VOLATILE_P (ref)
1701 && !MEM_NOTRAP_P (ref)
1702 && !MEM_READONLY_P (ref))
1703 return;
1705 subrtx_var_iterator::array_type array;
1706 for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
1707 if (INSN_P (insn))
1708 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1710 rtx x = *iter;
1711 if (MEM_P (x))
1713 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
1714 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
1715 MEM_READONLY_P (x) = MEM_READONLY_P (ref);
1716 /* Sadly, we cannot use alias sets because the extra
1717 aliasing produced by the AND interferes. Given that
1718 two-byte quantities are the only thing we would be
1719 able to differentiate anyway, there does not seem to
1720 be any point in convoluting the early out of the
1721 alias check. */
1722 iter.skip_subrtxes ();
1725 else
1726 gcc_unreachable ();
1729 static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
1730 int, bool);
1732 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1733 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1734 and return pc_rtx if successful. */
1736 static rtx
1737 alpha_emit_set_const_1 (rtx target, machine_mode mode,
1738 HOST_WIDE_INT c, int n, bool no_output)
1740 HOST_WIDE_INT new_const;
1741 int i, bits;
1742 /* Use a pseudo if highly optimizing and still generating RTL. */
1743 rtx subtarget
1744 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1745 rtx temp, insn;
1747 /* If this is a sign-extended 32-bit constant, we can do this in at most
1748 three insns, so do it if we have enough insns left. We always have
1749 a sign-extended 32-bit constant when compiling on a narrow machine. */
1751 if (HOST_BITS_PER_WIDE_INT != 64
1752 || c >> 31 == -1 || c >> 31 == 0)
1754 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1755 HOST_WIDE_INT tmp1 = c - low;
1756 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1757 HOST_WIDE_INT extra = 0;
1759 /* If HIGH will be interpreted as negative but the constant is
1760 positive, we must adjust it to do two ldha insns. */
1762 if ((high & 0x8000) != 0 && c >= 0)
1764 extra = 0x4000;
1765 tmp1 -= 0x40000000;
1766 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
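/* Worked example (illustrative, not from the original source): for
   c == 0x7fff8000, low == -0x8000 and the initial high == -0x8000 would
   be sign-extended, so the adjustment above sets extra = 0x4000 and
   recomputes high = 0x4000; the constant is then built as
   (0x4000 << 16) + (0x4000 << 16) + (-0x8000), i.e. two ldah insns and
   one lda.  */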
1769 if (c == low || (low == 0 && extra == 0))
1771 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1772 but that meant that we can't handle INT_MIN on 32-bit machines
1773 (like NT/Alpha), because we recurse indefinitely through
1774 emit_move_insn to gen_movdi. So instead, since we know exactly
1775 what we want, create it explicitly. */
1777 if (no_output)
1778 return pc_rtx;
1779 if (target == NULL)
1780 target = gen_reg_rtx (mode);
1781 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1782 return target;
1784 else if (n >= 2 + (extra != 0))
1786 if (no_output)
1787 return pc_rtx;
1788 if (!can_create_pseudo_p ())
1790 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1791 temp = target;
1793 else
1794 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1795 subtarget, mode);
1797 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1798 This means that if we go through expand_binop, we'll try to
1799 generate extensions, etc, which will require new pseudos, which
1800 will fail during some split phases. The SImode add patterns
1801 still exist, but are not named. So build the insns by hand. */
1803 if (extra != 0)
1805 if (! subtarget)
1806 subtarget = gen_reg_rtx (mode);
1807 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1808 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1809 emit_insn (insn);
1810 temp = subtarget;
1813 if (target == NULL)
1814 target = gen_reg_rtx (mode);
1815 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1816 insn = gen_rtx_SET (VOIDmode, target, insn);
1817 emit_insn (insn);
1818 return target;
1822 /* If we couldn't do it that way, try some other methods. But if we have
1823 no instructions left, don't bother. Likewise, if this is SImode and
1824 we can't make pseudos, we can't do anything since the expand_binop
1825 and expand_unop calls will widen and try to make pseudos. */
1827 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1828 return 0;
1830 /* Next, see if we can load a related constant and then shift and possibly
1831 negate it to get the constant we want. Try this once each increasing
1832 numbers of insns. */
1834 for (i = 1; i < n; i++)
1836 /* First, see if minus some low bits, we've an easy load of
1837 high bits. */
1839 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1840 if (new_const != 0)
1842 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1843 if (temp)
1845 if (no_output)
1846 return temp;
1847 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1848 target, 0, OPTAB_WIDEN);
1852 /* Next try complementing. */
1853 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1854 if (temp)
1856 if (no_output)
1857 return temp;
1858 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1861 /* Next try to form a constant and do a left shift. We can do this
1862 if some low-order bits are zero; the exact_log2 call below tells
1863 us that information. The bits we are shifting out could be any
1864 value, but here we'll just try the 0- and sign-extended forms of
1865 the constant. To try to increase the chance of having the same
1866 constant in more than one insn, start at the highest number of
1867 bits to shift, but try all possibilities in case a ZAPNOT will
1868 be useful. */
1870 bits = exact_log2 (c & -c);
1871 if (bits > 0)
1872 for (; bits > 0; bits--)
1874 new_const = c >> bits;
1875 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1876 if (!temp && c < 0)
1878 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1879 temp = alpha_emit_set_const (subtarget, mode, new_const,
1880 i, no_output);
1882 if (temp)
1884 if (no_output)
1885 return temp;
1886 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1887 target, 0, OPTAB_WIDEN);
1891 /* Now try high-order zero bits. Here we try the shifted-in bits as
1892 all zero and all ones. Be careful to avoid shifting outside the
1893 mode and to avoid shifting outside the host wide int size. */
1894 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1895 confuse the recursive call and set all of the high 32 bits. */
1897 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1898 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1899 if (bits > 0)
1900 for (; bits > 0; bits--)
1902 new_const = c << bits;
1903 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1904 if (!temp)
1906 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1907 temp = alpha_emit_set_const (subtarget, mode, new_const,
1908 i, no_output);
1910 if (temp)
1912 if (no_output)
1913 return temp;
1914 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1915 target, 1, OPTAB_WIDEN);
1919 /* Now try high-order 1 bits. We get that with a sign-extension.
1920 But one bit isn't enough here. Be careful to avoid shifting outside
1921 the mode and to avoid shifting outside the host wide int size. */
1923 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1924 - floor_log2 (~ c) - 2);
1925 if (bits > 0)
1926 for (; bits > 0; bits--)
1928 new_const = c << bits;
1929 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1930 if (!temp)
1932 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1933 temp = alpha_emit_set_const (subtarget, mode, new_const,
1934 i, no_output);
1936 if (temp)
1938 if (no_output)
1939 return temp;
1940 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1941 target, 0, OPTAB_WIDEN);
1946 #if HOST_BITS_PER_WIDE_INT == 64
1947 /* Finally, see if can load a value into the target that is the same as the
1948 constant except that all bytes that are 0 are changed to be 0xff. If we
1949 can, then we can do a ZAPNOT to obtain the desired constant. */
1951 new_const = c;
1952 for (i = 0; i < 64; i += 8)
1953 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1954 new_const |= (HOST_WIDE_INT) 0xff << i;
1956 /* We are only called for SImode and DImode. If this is SImode, ensure that
1957 we are sign extended to a full word. */
1959 if (mode == SImode)
1960 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1962 if (new_const != c)
1964 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1965 if (temp)
1967 if (no_output)
1968 return temp;
1969 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1970 target, 0, OPTAB_WIDEN);
1973 #endif
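/* Illustrative note (not from the original source): for a value such as
   c == 0x00ffffff00000000, forcing the zero bytes to 0xff yields
   new_const == -1, which loads in one insn; ANDing it with
   c | ~new_const (== c, itself a zap mask) is a single ZAPNOT keeping
   bytes 4-6, recovering c.  */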
1975 return 0;
1978 /* Try to output insns to set TARGET equal to the constant C if it can be
1979 done in less than N insns. Do all computations in MODE. Returns the place
1980 where the output has been placed if it can be done and the insns have been
1981 emitted. If it would take more than N insns, zero is returned and no
1982    insns are emitted.  */
1984 static rtx
1985 alpha_emit_set_const (rtx target, machine_mode mode,
1986 HOST_WIDE_INT c, int n, bool no_output)
1988 machine_mode orig_mode = mode;
1989 rtx orig_target = target;
1990 rtx result = 0;
1991 int i;
1993 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1994 can't load this constant in one insn, do this in DImode.
1995 if (!can_create_pseudo_p () && mode == SImode
1996 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1998 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1999 if (result)
2000 return result;
2002 target = no_output ? NULL : gen_lowpart (DImode, target);
2003 mode = DImode;
2005 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
2007 target = no_output ? NULL : gen_lowpart (DImode, target);
2008 mode = DImode;
2011 /* Try 1 insn, then 2, then up to N. */
2012 for (i = 1; i <= n; i++)
2014 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
2015 if (result)
2017 rtx_insn *insn;
2018 rtx set;
2020 if (no_output)
2021 return result;
2023 insn = get_last_insn ();
2024 set = single_set (insn);
2025 if (! CONSTANT_P (SET_SRC (set)))
2026 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2027 break;
2031 /* Allow for the case where we changed the mode of TARGET. */
2032 if (result)
2034 if (result == target)
2035 result = orig_target;
2036 else if (mode != orig_mode)
2037 result = gen_lowpart (orig_mode, result);
2040 return result;
2043 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2044 fall back to a straightforward decomposition. We do this to avoid
2045 exponential run times encountered when looking for longer sequences
2046 with alpha_emit_set_const. */
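/* As a worked example (for illustration, on a 64-bit host):
   c1 = 0x123456789abcdef0 decomposes into

	d1 = -0x2110		(sign-extended low 16 bits)
	d2 = -0x65430000	(sign-extended adjusted low 32 bits)
	d3 =  0x5679
	d4 =  0x12340000

   and the emitted sequence builds d4 + d3 with ldah/lda, shifts the
   result left by 32, then adds d2 and d1 with another ldah/lda pair.  */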
2048 static rtx
2049 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2051 HOST_WIDE_INT d1, d2, d3, d4;
2053 /* Decompose the entire word */
2054 #if HOST_BITS_PER_WIDE_INT >= 64
2055 gcc_assert (c2 == -(c1 < 0));
2056 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2057 c1 -= d1;
2058 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2059 c1 = (c1 - d2) >> 32;
2060 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2061 c1 -= d3;
2062 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2063 gcc_assert (c1 == d4);
2064 #else
2065 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2066 c1 -= d1;
2067 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2068 gcc_assert (c1 == d2);
2069 c2 += (d2 < 0);
2070 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2071 c2 -= d3;
2072 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2073 gcc_assert (c2 == d4);
2074 #endif
2076 /* Construct the high word */
2077 if (d4)
2079 emit_move_insn (target, GEN_INT (d4));
2080 if (d3)
2081 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2083 else
2084 emit_move_insn (target, GEN_INT (d3));
2086 /* Shift it into place */
2087 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2089 /* Add in the low bits. */
2090 if (d2)
2091 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2092 if (d1)
2093 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2095 return target;
2098 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2099 the low 64 bits. */
2101 static void
2102 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2104 HOST_WIDE_INT i0, i1;
2106 if (GET_CODE (x) == CONST_VECTOR)
2107 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2110 if (CONST_INT_P (x))
2112 i0 = INTVAL (x);
2113 i1 = -(i0 < 0);
2115 else if (HOST_BITS_PER_WIDE_INT >= 64)
2117 i0 = CONST_DOUBLE_LOW (x);
2118 i1 = -(i0 < 0);
2120 else
2122 i0 = CONST_DOUBLE_LOW (x);
2123 i1 = CONST_DOUBLE_HIGH (x);
2126 *p0 = i0;
2127 *p1 = i1;
2130 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
2131 we are willing to load the value into a register via a move pattern.
2132 Normally this is all symbolic constants, integral constants that
2133 take three or fewer instructions, and floating-point zero. */
2135 bool
2136 alpha_legitimate_constant_p (machine_mode mode, rtx x)
2138 HOST_WIDE_INT i0, i1;
2140 switch (GET_CODE (x))
2142 case LABEL_REF:
2143 case HIGH:
2144 return true;
2146 case CONST:
2147 if (GET_CODE (XEXP (x, 0)) == PLUS
2148 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2149 x = XEXP (XEXP (x, 0), 0);
2150 else
2151 return true;
2153 if (GET_CODE (x) != SYMBOL_REF)
2154 return true;
2156 /* FALLTHRU */
2158 case SYMBOL_REF:
2159 /* TLS symbols are never valid. */
2160 return SYMBOL_REF_TLS_MODEL (x) == 0;
2162 case CONST_DOUBLE:
2163 if (x == CONST0_RTX (mode))
2164 return true;
2165 if (FLOAT_MODE_P (mode))
2166 return false;
2167 goto do_integer;
2169 case CONST_VECTOR:
2170 if (x == CONST0_RTX (mode))
2171 return true;
2172 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2173 return false;
2174 if (GET_MODE_SIZE (mode) != 8)
2175 return false;
2176 goto do_integer;
2178 case CONST_INT:
2179 do_integer:
2180 if (TARGET_BUILD_CONSTANTS)
2181 return true;
2182 alpha_extract_integer (x, &i0, &i1);
2183 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2184 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2185 return false;
2187 default:
2188 return false;
2192 /* Operand 1 is known to be a constant, and should require more than one
2193 instruction to load. Emit that multi-part load. */
2195 bool
2196 alpha_split_const_mov (machine_mode mode, rtx *operands)
2198 HOST_WIDE_INT i0, i1;
2199 rtx temp = NULL_RTX;
2201 alpha_extract_integer (operands[1], &i0, &i1);
2203 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2204 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2206 if (!temp && TARGET_BUILD_CONSTANTS)
2207 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2209 if (temp)
2211 if (!rtx_equal_p (operands[0], temp))
2212 emit_move_insn (operands[0], temp);
2213 return true;
2216 return false;
2219 /* Expand a move instruction; return true if all work is done.
2220 We don't handle non-bwx subword loads here. */
2222 bool
2223 alpha_expand_mov (machine_mode mode, rtx *operands)
2225 rtx tmp;
2227 /* If the output is not a register, the input must be. */
2228 if (MEM_P (operands[0])
2229 && ! reg_or_0_operand (operands[1], mode))
2230 operands[1] = force_reg (mode, operands[1]);
2232 /* Allow legitimize_address to perform some simplifications. */
2233 if (mode == Pmode && symbolic_operand (operands[1], mode))
2235 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2236 if (tmp)
2238 if (tmp == operands[0])
2239 return true;
2240 operands[1] = tmp;
2241 return false;
2245 /* Early out for non-constants and valid constants. */
2246 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2247 return false;
2249 /* Split large integers. */
2250 if (CONST_INT_P (operands[1])
2251 || GET_CODE (operands[1]) == CONST_DOUBLE
2252 || GET_CODE (operands[1]) == CONST_VECTOR)
2254 if (alpha_split_const_mov (mode, operands))
2255 return true;
2258 /* Otherwise we've nothing left but to drop the thing to memory. */
2259 tmp = force_const_mem (mode, operands[1]);
2261 if (tmp == NULL_RTX)
2262 return false;
2264 if (reload_in_progress)
2266 emit_move_insn (operands[0], XEXP (tmp, 0));
2267 operands[1] = replace_equiv_address (tmp, operands[0]);
2269 else
2270 operands[1] = validize_mem (tmp);
2271 return false;
2274 /* Expand a non-bwx QImode or HImode move instruction;
2275 return true if all work is done. */
2277 bool
2278 alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
2280 rtx seq;
2282 /* If the output is not a register, the input must be. */
2283 if (MEM_P (operands[0]))
2284 operands[1] = force_reg (mode, operands[1]);
2286 /* Handle four memory cases, unaligned and aligned for either the input
2287 or the output. The only case where we can be called during reload is
2288 for aligned loads; all other cases require temporaries. */
2290 if (any_memory_operand (operands[1], mode))
2292 if (aligned_memory_operand (operands[1], mode))
2294 if (reload_in_progress)
2296 if (mode == QImode)
2297 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2298 else
2299 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2300 emit_insn (seq);
2302 else
2304 rtx aligned_mem, bitnum;
2305 rtx scratch = gen_reg_rtx (SImode);
2306 rtx subtarget;
2307 bool copyout;
2309 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2311 subtarget = operands[0];
2312 if (REG_P (subtarget))
2313 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2314 else
2315 subtarget = gen_reg_rtx (DImode), copyout = true;
2317 if (mode == QImode)
2318 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2319 bitnum, scratch);
2320 else
2321 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2322 bitnum, scratch);
2323 emit_insn (seq);
2325 if (copyout)
2326 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2329 else
2331 /* Don't pass these as parameters since that makes the generated
2332 code depend on parameter evaluation order which will cause
2333 bootstrap failures. */
2335 rtx temp1, temp2, subtarget, ua;
2336 bool copyout;
2338 temp1 = gen_reg_rtx (DImode);
2339 temp2 = gen_reg_rtx (DImode);
2341 subtarget = operands[0];
2342 if (REG_P (subtarget))
2343 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2344 else
2345 subtarget = gen_reg_rtx (DImode), copyout = true;
2347 ua = get_unaligned_address (operands[1]);
2348 if (mode == QImode)
2349 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2350 else
2351 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2353 alpha_set_memflags (seq, operands[1]);
2354 emit_insn (seq);
2356 if (copyout)
2357 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2359 return true;
2362 if (any_memory_operand (operands[0], mode))
2364 if (aligned_memory_operand (operands[0], mode))
2366 rtx aligned_mem, bitnum;
2367 rtx temp1 = gen_reg_rtx (SImode);
2368 rtx temp2 = gen_reg_rtx (SImode);
2370 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2372 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2373 temp1, temp2));
2375 else
2377 rtx temp1 = gen_reg_rtx (DImode);
2378 rtx temp2 = gen_reg_rtx (DImode);
2379 rtx temp3 = gen_reg_rtx (DImode);
2380 rtx ua = get_unaligned_address (operands[0]);
2382 if (mode == QImode)
2383 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2384 else
2385 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2387 alpha_set_memflags (seq, operands[0]);
2388 emit_insn (seq);
2390 return true;
2393 return false;
2396 /* Implement the movmisalign patterns. One of the operands is a memory
2397 that is not naturally aligned. Emit instructions to load it. */
2399 void
2400 alpha_expand_movmisalign (machine_mode mode, rtx *operands)
2402 /* Honor misaligned loads, for those we promised to do so. */
2403 if (MEM_P (operands[1]))
2405 rtx tmp;
2407 if (register_operand (operands[0], mode))
2408 tmp = operands[0];
2409 else
2410 tmp = gen_reg_rtx (mode);
2412 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2413 if (tmp != operands[0])
2414 emit_move_insn (operands[0], tmp);
2416 else if (MEM_P (operands[0]))
2418 if (!reg_or_0_operand (operands[1], mode))
2419 operands[1] = force_reg (mode, operands[1]);
2420 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2422 else
2423 gcc_unreachable ();
2426 /* Generate an unsigned DImode to FP conversion. This is the same code
2427 optabs would emit if we didn't have TFmode patterns.
2429 For SFmode, this is the only construction I've found that can pass
2430 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2431 intermediates will work, because you'll get intermediate rounding
2432 that ruins the end result. Some of this could be fixed by turning
2433 on round-to-positive-infinity, but that requires diddling the fpsr,
2434 which kills performance. I tried turning this around and converting
2435 to a negative number, so that I could turn on /m, but either I did
2436 it wrong, or there's something else, because I wound up with the exact
2437 same single-bit error. There is a branch-less form of this same code:
2439 srl $16,1,$1
2440 and $16,1,$2
2441 cmplt $16,0,$3
2442 or $1,$2,$2
2443 cmovge $16,$16,$2
2444 itoft $3,$f10
2445 itoft $2,$f11
2446 cvtqs $f11,$f11
2447 adds $f11,$f11,$f0
2448 fcmoveq $f10,$f11,$f0
2450 I'm not using it because it's the same number of instructions as
2451 this branch-full form, and it has more serialized long latency
2452 instructions on the critical path.
2454 For DFmode, we can avoid rounding errors by breaking up the word
2455 into two pieces, converting them separately, and adding them back:
2457 LC0: .long 0,0x5f800000
2459 itoft $16,$f11
2460 lda $2,LC0
2461 cmplt $16,0,$1
2462 cpyse $f11,$f31,$f10
2463 cpyse $f31,$f11,$f11
2464 s4addq $1,$2,$1
2465 lds $f12,0($1)
2466 cvtqt $f10,$f10
2467 cvtqt $f11,$f11
2468 addt $f12,$f10,$f0
2469 addt $f0,$f11,$f0
2471 This doesn't seem to be a clear-cut win over the optabs form.
2472 It probably all depends on the distribution of numbers being
2473 converted -- in the optabs form, all but high-bit-set has a
2474 much lower minimum execution time. */
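/* In C terms, the branch-full form emitted below is roughly (a sketch
   only, not part of the generated code):

	if ((HOST_WIDE_INT) x >= 0)
	  result = (fp_type) x;
	else
	  {
	    result = (fp_type) ((x >> 1) | (x & 1));	/* logical shift */
	    result = result + result;
	  }

   where fp_type stands for the SFmode or DFmode result type.  */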
2476 void
2477 alpha_emit_floatuns (rtx operands[2])
2479 rtx neglab, donelab, i0, i1, f0, in, out;
2480 machine_mode mode;
2482 out = operands[0];
2483 in = force_reg (DImode, operands[1]);
2484 mode = GET_MODE (out);
2485 neglab = gen_label_rtx ();
2486 donelab = gen_label_rtx ();
2487 i0 = gen_reg_rtx (DImode);
2488 i1 = gen_reg_rtx (DImode);
2489 f0 = gen_reg_rtx (mode);
2491 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2493 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2494 emit_jump_insn (gen_jump (donelab));
2495 emit_barrier ();
2497 emit_label (neglab);
2499 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2500 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2501 emit_insn (gen_iordi3 (i0, i0, i1));
2502 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2503 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2505 emit_label (donelab);
2508 /* Generate the comparison for a conditional branch. */
2510 void
2511 alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
2513 enum rtx_code cmp_code, branch_code;
2514 machine_mode branch_mode = VOIDmode;
2515 enum rtx_code code = GET_CODE (operands[0]);
2516 rtx op0 = operands[1], op1 = operands[2];
2517 rtx tem;
2519 if (cmp_mode == TFmode)
2521 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2522 op1 = const0_rtx;
2523 cmp_mode = DImode;
2526 /* The general case: fold the comparison code to the types of compares
2527 that we have, choosing the branch as necessary. */
2528 switch (code)
2530 case EQ: case LE: case LT: case LEU: case LTU:
2531 case UNORDERED:
2532 /* We have these compares. */
2533 cmp_code = code, branch_code = NE;
2534 break;
2536 case NE:
2537 case ORDERED:
2538 /* These must be reversed. */
2539 cmp_code = reverse_condition (code), branch_code = EQ;
2540 break;
2542 case GE: case GT: case GEU: case GTU:
2543 /* For FP, we swap them, for INT, we reverse them. */
2544 if (cmp_mode == DFmode)
2546 cmp_code = swap_condition (code);
2547 branch_code = NE;
2548 std::swap (op0, op1);
2550 else
2552 cmp_code = reverse_condition (code);
2553 branch_code = EQ;
2555 break;
2557 default:
2558 gcc_unreachable ();
2561 if (cmp_mode == DFmode)
2563 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2565 /* When we are not as concerned about non-finite values, and we
2566 are comparing against zero, we can branch directly. */
2567 if (op1 == CONST0_RTX (DFmode))
2568 cmp_code = UNKNOWN, branch_code = code;
2569 else if (op0 == CONST0_RTX (DFmode))
2571 /* Undo the swap we probably did just above. */
2572 std::swap (op0, op1);
2573 branch_code = swap_condition (cmp_code);
2574 cmp_code = UNKNOWN;
2577 else
2579 /* ??? We mark the branch mode to be CCmode to prevent the
2580 compare and branch from being combined, since the compare
2581 insn follows IEEE rules that the branch does not. */
2582 branch_mode = CCmode;
2585 else
2587 /* The following optimizations are only for signed compares. */
2588 if (code != LEU && code != LTU && code != GEU && code != GTU)
2590 /* Whee. Compare and branch against 0 directly. */
2591 if (op1 == const0_rtx)
2592 cmp_code = UNKNOWN, branch_code = code;
2594 /* If the constant doesn't fit into an immediate, but can
2595 be generated by lda/ldah, we adjust the argument and
2596 compare against zero, so we can use beq/bne directly. */
2597 /* ??? Don't do this when comparing against symbols, otherwise
2598 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2599 be declared false out of hand (at least for non-weak). */
2600 else if (CONST_INT_P (op1)
2601 && (code == EQ || code == NE)
2602 && !(symbolic_operand (op0, VOIDmode)
2603 || (REG_P (op0) && REG_POINTER (op0))))
2605 rtx n_op1 = GEN_INT (-INTVAL (op1));
2607 if (! satisfies_constraint_I (op1)
2608 && (satisfies_constraint_K (n_op1)
2609 || satisfies_constraint_L (n_op1)))
2610 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2614 if (!reg_or_0_operand (op0, DImode))
2615 op0 = force_reg (DImode, op0);
2616 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2617 op1 = force_reg (DImode, op1);
2620 /* Emit an initial compare instruction, if necessary. */
2621 tem = op0;
2622 if (cmp_code != UNKNOWN)
2624 tem = gen_reg_rtx (cmp_mode);
2625 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2628 /* Emit the branch instruction. */
2629 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2630 gen_rtx_IF_THEN_ELSE (VOIDmode,
2631 gen_rtx_fmt_ee (branch_code,
2632 branch_mode, tem,
2633 CONST0_RTX (cmp_mode)),
2634 gen_rtx_LABEL_REF (VOIDmode,
2635 operands[3]),
2636 pc_rtx));
2637 emit_jump_insn (tem);
2640 /* Certain simplifications can be done to make invalid setcc operations
2641 valid. Return true on success, or false if we can't work. */
2643 bool
2644 alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
2646 enum rtx_code cmp_code;
2647 enum rtx_code code = GET_CODE (operands[1]);
2648 rtx op0 = operands[2], op1 = operands[3];
2649 rtx tmp;
2651 if (cmp_mode == TFmode)
2653 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2654 op1 = const0_rtx;
2655 cmp_mode = DImode;
2658 if (cmp_mode == DFmode && !TARGET_FIX)
2659 return 0;
2661 /* The general case: fold the comparison code to the types of compares
2662 that we have, choosing the branch as necessary. */
2664 cmp_code = UNKNOWN;
2665 switch (code)
2667 case EQ: case LE: case LT: case LEU: case LTU:
2668 case UNORDERED:
2669 /* We have these compares. */
2670 if (cmp_mode == DFmode)
2671 cmp_code = code, code = NE;
2672 break;
2674 case NE:
2675 if (cmp_mode == DImode && op1 == const0_rtx)
2676 break;
2677 /* FALLTHRU */
2679 case ORDERED:
2680 cmp_code = reverse_condition (code);
2681 code = EQ;
2682 break;
2684 case GE: case GT: case GEU: case GTU:
2685 /* These normally need swapping, but for integer zero we have
2686 special patterns that recognize swapped operands. */
2687 if (cmp_mode == DImode && op1 == const0_rtx)
2688 break;
2689 code = swap_condition (code);
2690 if (cmp_mode == DFmode)
2691 cmp_code = code, code = NE;
2692 std::swap (op0, op1);
2693 break;
2695 default:
2696 gcc_unreachable ();
2699 if (cmp_mode == DImode)
2701 if (!register_operand (op0, DImode))
2702 op0 = force_reg (DImode, op0);
2703 if (!reg_or_8bit_operand (op1, DImode))
2704 op1 = force_reg (DImode, op1);
2707 /* Emit an initial compare instruction, if necessary. */
2708 if (cmp_code != UNKNOWN)
2710 tmp = gen_reg_rtx (cmp_mode);
2711 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2712 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2714 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2715 op1 = const0_rtx;
2718 /* Emit the setcc instruction. */
2719 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2720 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2721 return true;
2725 /* Rewrite a comparison against zero CMP of the form
2726 (CODE (cc0) (const_int 0)) so it can be written validly in
2727 a conditional move (if_then_else CMP ...).
2728 If both of the operands that set cc0 are nonzero we must emit
2729 an insn to perform the compare (it can't be done within
2730 the conditional move). */
2733 alpha_emit_conditional_move (rtx cmp, machine_mode mode)
2735 enum rtx_code code = GET_CODE (cmp);
2736 enum rtx_code cmov_code = NE;
2737 rtx op0 = XEXP (cmp, 0);
2738 rtx op1 = XEXP (cmp, 1);
2739 machine_mode cmp_mode
2740 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2741 machine_mode cmov_mode = VOIDmode;
2742 int local_fast_math = flag_unsafe_math_optimizations;
2743 rtx tem;
2745 if (cmp_mode == TFmode)
2747 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2748 op1 = const0_rtx;
2749 cmp_mode = DImode;
2752 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2754 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2756 enum rtx_code cmp_code;
2758 if (! TARGET_FIX)
2759 return 0;
2761 /* If we have fp<->int register move instructions, do a cmov by
2762 performing the comparison in fp registers, and move the
2763 zero/nonzero value to integer registers, where we can then
2764 use a normal cmov, or vice-versa. */
2766 switch (code)
2768 case EQ: case LE: case LT: case LEU: case LTU:
2769 case UNORDERED:
2770 /* We have these compares. */
2771 cmp_code = code, code = NE;
2772 break;
2774 case NE:
2775 case ORDERED:
2776 /* These must be reversed. */
2777 cmp_code = reverse_condition (code), code = EQ;
2778 break;
2780 case GE: case GT: case GEU: case GTU:
2781 /* These normally need swapping, but for integer zero we have
2782 special patterns that recognize swapped operands. */
2783 if (cmp_mode == DImode && op1 == const0_rtx)
2784 cmp_code = code, code = NE;
2785 else
2787 cmp_code = swap_condition (code);
2788 code = NE;
2789 std::swap (op0, op1);
2791 break;
2793 default:
2794 gcc_unreachable ();
2797 if (cmp_mode == DImode)
2799 if (!reg_or_0_operand (op0, DImode))
2800 op0 = force_reg (DImode, op0);
2801 if (!reg_or_8bit_operand (op1, DImode))
2802 op1 = force_reg (DImode, op1);
2805 tem = gen_reg_rtx (cmp_mode);
2806 emit_insn (gen_rtx_SET (VOIDmode, tem,
2807 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2808 op0, op1)));
2810 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2811 op0 = gen_lowpart (cmp_mode, tem);
2812 op1 = CONST0_RTX (cmp_mode);
2813 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2814 local_fast_math = 1;
2817 if (cmp_mode == DImode)
2819 if (!reg_or_0_operand (op0, DImode))
2820 op0 = force_reg (DImode, op0);
2821 if (!reg_or_8bit_operand (op1, DImode))
2822 op1 = force_reg (DImode, op1);
2825 /* We may be able to use a conditional move directly.
2826 This avoids emitting spurious compares. */
2827 if (signed_comparison_operator (cmp, VOIDmode)
2828 && (cmp_mode == DImode || local_fast_math)
2829 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2830 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2832 /* We can't put the comparison inside the conditional move;
2833 emit a compare instruction and put that inside the
2834 conditional move. Make sure we emit only comparisons we have;
2835 swap or reverse as necessary. */
2837 if (!can_create_pseudo_p ())
2838 return NULL_RTX;
2840 switch (code)
2842 case EQ: case LE: case LT: case LEU: case LTU:
2843 case UNORDERED:
2844 /* We have these compares: */
2845 break;
2847 case NE:
2848 case ORDERED:
2849 /* These must be reversed. */
2850 code = reverse_condition (code);
2851 cmov_code = EQ;
2852 break;
2854 case GE: case GT: case GEU: case GTU:
2855 /* These normally need swapping, but for integer zero we have
2856 special patterns that recognize swapped operands. */
2857 if (cmp_mode == DImode && op1 == const0_rtx)
2858 break;
2859 code = swap_condition (code);
2860 std::swap (op0, op1);
2861 break;
2863 default:
2864 gcc_unreachable ();
2867 if (cmp_mode == DImode)
2869 if (!reg_or_0_operand (op0, DImode))
2870 op0 = force_reg (DImode, op0);
2871 if (!reg_or_8bit_operand (op1, DImode))
2872 op1 = force_reg (DImode, op1);
2875 /* ??? We mark the branch mode to be CCmode to prevent the compare
2876 and cmov from being combined, since the compare insn follows IEEE
2877 rules that the cmov does not. */
2878 if (cmp_mode == DFmode && !local_fast_math)
2879 cmov_mode = CCmode;
2881 tem = gen_reg_rtx (cmp_mode);
2882 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2883 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2886 /* Simplify a conditional move of two constants into a setcc with
2887 arithmetic. This is done with a splitter since combine would
2888 just undo the work if done during code generation. It also catches
2889 cases we wouldn't have before cse. */
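/* For example (illustration only): a 0/8 constant pair can be produced
   as a setcc followed by a shift left by 3, a 0/-1 pair as a setcc
   followed by a negation, and a 4/5 pair as a setcc followed by an add
   of the constant 4.  */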
2892 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2893 rtx t_rtx, rtx f_rtx)
2895 HOST_WIDE_INT t, f, diff;
2896 machine_mode mode;
2897 rtx target, subtarget, tmp;
2899 mode = GET_MODE (dest);
2900 t = INTVAL (t_rtx);
2901 f = INTVAL (f_rtx);
2902 diff = t - f;
2904 if (((code == NE || code == EQ) && diff < 0)
2905 || (code == GE || code == GT))
2907 code = reverse_condition (code);
2908 diff = t, t = f, f = diff;
2909 diff = t - f;
2912 subtarget = target = dest;
2913 if (mode != DImode)
2915 target = gen_lowpart (DImode, dest);
2916 if (can_create_pseudo_p ())
2917 subtarget = gen_reg_rtx (DImode);
2918 else
2919 subtarget = target;
2921 /* Below, we must be careful to use copy_rtx on target and subtarget
2922 in intermediate insns, as they may be a subreg rtx, which may not
2923 be shared. */
2925 if (f == 0 && exact_log2 (diff) > 0
2926 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2927 viable over a longer latency cmove. On EV5, the E0 slot is a
2928 scarce resource, and on EV4 shift has the same latency as a cmove. */
2929 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2931 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2932 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2934 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2935 GEN_INT (exact_log2 (t)));
2936 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2938 else if (f == 0 && t == -1)
2940 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2941 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2943 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2945 else if (diff == 1 || diff == 4 || diff == 8)
2947 rtx add_op;
2949 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2950 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2952 if (diff == 1)
2953 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2954 else
2956 add_op = GEN_INT (f);
2957 if (sext_add_operand (add_op, mode))
2959 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2960 GEN_INT (diff));
2961 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2962 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2964 else
2965 return 0;
2968 else
2969 return 0;
2971 return 1;
2974 /* Look up the function X_floating library function name for the
2975 given operation. */
2977 struct GTY(()) xfloating_op
2979 const enum rtx_code code;
2980 const char *const GTY((skip)) osf_func;
2981 const char *const GTY((skip)) vms_func;
2982 rtx libcall;
2985 static GTY(()) struct xfloating_op xfloating_ops[] =
2987 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2988 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2989 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2990 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2991 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2992 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2993 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2994 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2995 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2996 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2997 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2998 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2999 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
3000 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
3001 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
3004 static GTY(()) struct xfloating_op vax_cvt_ops[] =
3006 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
3007 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
3010 static rtx
3011 alpha_lookup_xfloating_lib_func (enum rtx_code code)
3013 struct xfloating_op *ops = xfloating_ops;
3014 long n = ARRAY_SIZE (xfloating_ops);
3015 long i;
3017 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
3019 /* How irritating. Nothing to key off for the main table. */
3020 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3022 ops = vax_cvt_ops;
3023 n = ARRAY_SIZE (vax_cvt_ops);
3026 for (i = 0; i < n; ++i, ++ops)
3027 if (ops->code == code)
3029 rtx func = ops->libcall;
3030 if (!func)
3032 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3033 ? ops->vms_func : ops->osf_func);
3034 ops->libcall = func;
3036 return func;
3039 gcc_unreachable ();
3042 /* Most X_floating operations take the rounding mode as an argument.
3043 Compute that here. */
3045 static int
3046 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3047 enum alpha_fp_rounding_mode round)
3049 int mode;
3051 switch (round)
3053 case ALPHA_FPRM_NORM:
3054 mode = 2;
3055 break;
3056 case ALPHA_FPRM_MINF:
3057 mode = 1;
3058 break;
3059 case ALPHA_FPRM_CHOP:
3060 mode = 0;
3061 break;
3062 case ALPHA_FPRM_DYN:
3063 mode = 4;
3064 break;
3065 default:
3066 gcc_unreachable ();
3068 /* XXX For reference, round to +inf is mode = 3. */
3071 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3072 mode |= 0x10000;
3074 return mode;
3077 /* Emit an X_floating library function call.
3079 Note that these functions do not follow normal calling conventions:
3080 TFmode arguments are passed in two integer registers (as opposed to
3081 indirect); TFmode return values appear in R16+R17.
3083 FUNC is the function to call.
3084 TARGET is where the output belongs.
3085 OPERANDS are the inputs.
3086 NOPERANDS is the count of inputs.
3087 EQUIV is the expression equivalent for the function.
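/* As an illustration (not taken from the source): for a TFmode add, the
   two TFmode operands are passed in $16:$17 and $18:$19, the rounding
   mode literal in $20, and the TFmode result comes back in $16:$17.  */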
3090 static void
3091 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3092 int noperands, rtx equiv)
3094 rtx usage = NULL_RTX, tmp, reg;
3095 int regno = 16, i;
3097 start_sequence ();
3099 for (i = 0; i < noperands; ++i)
3101 switch (GET_MODE (operands[i]))
3103 case TFmode:
3104 reg = gen_rtx_REG (TFmode, regno);
3105 regno += 2;
3106 break;
3108 case DFmode:
3109 reg = gen_rtx_REG (DFmode, regno + 32);
3110 regno += 1;
3111 break;
3113 case VOIDmode:
3114 gcc_assert (CONST_INT_P (operands[i]));
3115 /* FALLTHRU */
3116 case DImode:
3117 reg = gen_rtx_REG (DImode, regno);
3118 regno += 1;
3119 break;
3121 default:
3122 gcc_unreachable ();
3125 emit_move_insn (reg, operands[i]);
3126 use_reg (&usage, reg);
3129 switch (GET_MODE (target))
3131 case TFmode:
3132 reg = gen_rtx_REG (TFmode, 16);
3133 break;
3134 case DFmode:
3135 reg = gen_rtx_REG (DFmode, 32);
3136 break;
3137 case DImode:
3138 reg = gen_rtx_REG (DImode, 0);
3139 break;
3140 default:
3141 gcc_unreachable ();
3144 tmp = gen_rtx_MEM (QImode, func);
3145 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3146 const0_rtx, const0_rtx));
3147 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3148 RTL_CONST_CALL_P (tmp) = 1;
3150 tmp = get_insns ();
3151 end_sequence ();
3153 emit_libcall_block (tmp, target, reg, equiv);
3156 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3158 void
3159 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3161 rtx func;
3162 int mode;
3163 rtx out_operands[3];
3165 func = alpha_lookup_xfloating_lib_func (code);
3166 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3168 out_operands[0] = operands[1];
3169 out_operands[1] = operands[2];
3170 out_operands[2] = GEN_INT (mode);
3171 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3172 gen_rtx_fmt_ee (code, TFmode, operands[1],
3173 operands[2]));
3176 /* Emit an X_floating library function call for a comparison. */
3178 static rtx
3179 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3181 enum rtx_code cmp_code, res_code;
3182 rtx func, out, operands[2], note;
3184 /* X_floating library comparison functions return
3185 -1 unordered
3186 0 false
3187 1 true
3188 Convert the compare against the raw return value. */
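  /* For instance, UNORDERED is implemented by calling the equality
     routine and testing its raw result with LT 0 (i.e. for -1), while
     ORDERED tests the same result with GE 0.  */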
3190 cmp_code = *pcode;
3191 switch (cmp_code)
3193 case UNORDERED:
3194 cmp_code = EQ;
3195 res_code = LT;
3196 break;
3197 case ORDERED:
3198 cmp_code = EQ;
3199 res_code = GE;
3200 break;
3201 case NE:
3202 res_code = NE;
3203 break;
3204 case EQ:
3205 case LT:
3206 case GT:
3207 case LE:
3208 case GE:
3209 res_code = GT;
3210 break;
3211 default:
3212 gcc_unreachable ();
3214 *pcode = res_code;
3216 func = alpha_lookup_xfloating_lib_func (cmp_code);
3218 operands[0] = op0;
3219 operands[1] = op1;
3220 out = gen_reg_rtx (DImode);
3222 /* What's actually returned is -1,0,1, not a proper boolean value. */
3223 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3224 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
3225 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3227 return out;
3230 /* Emit an X_floating library function call for a conversion. */
3232 void
3233 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3235 int noperands = 1, mode;
3236 rtx out_operands[2];
3237 rtx func;
3238 enum rtx_code code = orig_code;
3240 if (code == UNSIGNED_FIX)
3241 code = FIX;
3243 func = alpha_lookup_xfloating_lib_func (code);
3245 out_operands[0] = operands[1];
3247 switch (code)
3249 case FIX:
3250 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3251 out_operands[1] = GEN_INT (mode);
3252 noperands = 2;
3253 break;
3254 case FLOAT_TRUNCATE:
3255 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3256 out_operands[1] = GEN_INT (mode);
3257 noperands = 2;
3258 break;
3259 default:
3260 break;
3263 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3264 gen_rtx_fmt_e (orig_code,
3265 GET_MODE (operands[0]),
3266 operands[1]));
3269 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3270 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3271 guarantee that the sequence
3272 set (OP[0] OP[2])
3273 set (OP[1] OP[3])
3274 is valid. Naturally, output operand ordering is little-endian.
3275 This is used by *movtf_internal and *movti_internal. */
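/* For example (illustration only): a TImode store of the register pair
   $4:$5 to 16($30) splits into a DImode store of $4 at 16($30) and a
   DImode store of $5 at 24($30).  */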
3277 void
3278 alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
3279 bool fixup_overlap)
3281 switch (GET_CODE (operands[1]))
3283 case REG:
3284 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3285 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3286 break;
3288 case MEM:
3289 operands[3] = adjust_address (operands[1], DImode, 8);
3290 operands[2] = adjust_address (operands[1], DImode, 0);
3291 break;
3293 case CONST_INT:
3294 case CONST_DOUBLE:
3295 gcc_assert (operands[1] == CONST0_RTX (mode));
3296 operands[2] = operands[3] = const0_rtx;
3297 break;
3299 default:
3300 gcc_unreachable ();
3303 switch (GET_CODE (operands[0]))
3305 case REG:
3306 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3307 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3308 break;
3310 case MEM:
3311 operands[1] = adjust_address (operands[0], DImode, 8);
3312 operands[0] = adjust_address (operands[0], DImode, 0);
3313 break;
3315 default:
3316 gcc_unreachable ();
3319 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3321 std::swap (operands[0], operands[1]);
3322 std::swap (operands[2], operands[3]);
3326 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3327 op2 is a register containing the sign bit, operation is the
3328 logical operation to be performed. */
3330 void
3331 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3333 rtx high_bit = operands[2];
3334 rtx scratch;
3335 int move;
3337 alpha_split_tmode_pair (operands, TFmode, false);
3339 /* Detect three flavors of operand overlap. */
3340 move = 1;
3341 if (rtx_equal_p (operands[0], operands[2]))
3342 move = 0;
3343 else if (rtx_equal_p (operands[1], operands[2]))
3345 if (rtx_equal_p (operands[0], high_bit))
3346 move = 2;
3347 else
3348 move = -1;
3351 if (move < 0)
3352 emit_move_insn (operands[0], operands[2]);
3354 /* ??? If the destination overlaps both source tf and high_bit, then
3355 assume source tf is dead in its entirety and use the other half
3356 for a scratch register. Otherwise "scratch" is just the proper
3357 destination register. */
3358 scratch = operands[move < 2 ? 1 : 3];
3360 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3362 if (move > 0)
3364 emit_move_insn (operands[0], operands[2]);
3365 if (move > 1)
3366 emit_move_insn (operands[1], scratch);
3370 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3371 unaligned data:
3373 unsigned: signed:
3374 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3375 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3376 lda r3,X(r11) lda r3,X+2(r11)
3377 extwl r1,r3,r1 extql r1,r3,r1
3378 extwh r2,r3,r2 extqh r2,r3,r2
3379 or r1,r2,r1 or r1,r2,r1
3380 sra r1,48,r1
3382 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3383 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3384 lda r3,X(r11) lda r3,X(r11)
3385 extll r1,r3,r1 extll r1,r3,r1
3386 extlh r2,r3,r2 extlh r2,r3,r2
3387 or r1,r2,r1 addl r1,r2,r1
3389 quad: ldq_u r1,X(r11)
3390 ldq_u r2,X+7(r11)
3391 lda r3,X(r11)
3392 extql r1,r3,r1
3393 extqh r2,r3,r2
3394 or r1,r2,r1
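   Roughly: ext?l shifts the first quadword right by the byte offset held
   in the low three bits of r3, ext?h shifts the second quadword left by
   the complementary amount, and the final OR (or ADDL, for the signed
   longword case) merges the two fragments.  */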
3397 void
3398 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3399 HOST_WIDE_INT ofs, int sign)
3401 rtx meml, memh, addr, extl, exth, tmp, mema;
3402 machine_mode mode;
3404 if (TARGET_BWX && size == 2)
3406 meml = adjust_address (mem, QImode, ofs);
3407 memh = adjust_address (mem, QImode, ofs+1);
3408 extl = gen_reg_rtx (DImode);
3409 exth = gen_reg_rtx (DImode);
3410 emit_insn (gen_zero_extendqidi2 (extl, meml));
3411 emit_insn (gen_zero_extendqidi2 (exth, memh));
3412 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3413 NULL, 1, OPTAB_LIB_WIDEN);
3414 addr = expand_simple_binop (DImode, IOR, extl, exth,
3415 NULL, 1, OPTAB_LIB_WIDEN);
3417 if (sign && GET_MODE (tgt) != HImode)
3419 addr = gen_lowpart (HImode, addr);
3420 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3422 else
3424 if (GET_MODE (tgt) != DImode)
3425 addr = gen_lowpart (GET_MODE (tgt), addr);
3426 emit_move_insn (tgt, addr);
3428 return;
3431 meml = gen_reg_rtx (DImode);
3432 memh = gen_reg_rtx (DImode);
3433 addr = gen_reg_rtx (DImode);
3434 extl = gen_reg_rtx (DImode);
3435 exth = gen_reg_rtx (DImode);
3437 mema = XEXP (mem, 0);
3438 if (GET_CODE (mema) == LO_SUM)
3439 mema = force_reg (Pmode, mema);
3441 /* AND addresses cannot be in any alias set, since they may implicitly
3442 alias surrounding code. Ideally we'd have some alias set that
3443 covered all types except those with alignment 8 or higher. */
3445 tmp = change_address (mem, DImode,
3446 gen_rtx_AND (DImode,
3447 plus_constant (DImode, mema, ofs),
3448 GEN_INT (-8)));
3449 set_mem_alias_set (tmp, 0);
3450 emit_move_insn (meml, tmp);
3452 tmp = change_address (mem, DImode,
3453 gen_rtx_AND (DImode,
3454 plus_constant (DImode, mema,
3455 ofs + size - 1),
3456 GEN_INT (-8)));
3457 set_mem_alias_set (tmp, 0);
3458 emit_move_insn (memh, tmp);
3460 if (sign && size == 2)
3462 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3464 emit_insn (gen_extql (extl, meml, addr));
3465 emit_insn (gen_extqh (exth, memh, addr));
3467 /* We must use tgt here for the target. Alpha-vms port fails if we use
3468 addr for the target, because addr is marked as a pointer and combine
3469 knows that pointers are always sign-extended 32-bit values. */
3470 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3471 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3472 addr, 1, OPTAB_WIDEN);
3474 else
3476 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3477 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3478 switch ((int) size)
3480 case 2:
3481 emit_insn (gen_extwh (exth, memh, addr));
3482 mode = HImode;
3483 break;
3484 case 4:
3485 emit_insn (gen_extlh (exth, memh, addr));
3486 mode = SImode;
3487 break;
3488 case 8:
3489 emit_insn (gen_extqh (exth, memh, addr));
3490 mode = DImode;
3491 break;
3492 default:
3493 gcc_unreachable ();
3496 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3497 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3498 sign, OPTAB_WIDEN);
3501 if (addr != tgt)
3502 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3505 /* Similarly, use ins and msk instructions to perform unaligned stores. */
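/* For the quadword case with a nonzero source this expands to roughly
   the Handbook-style sequence below (register numbers illustrative):

	ldq_u	r2,X+7(r11)	# high destination quadword
	ldq_u	r1,X(r11)	# low destination quadword
	lda	r3,X(r11)	# address, for its low three bits
	insqh	r16,r3,r5	# source bytes destined for the high half
	insql	r16,r3,r4	# source bytes destined for the low half
	mskqh	r2,r3,r2	# clear the bytes about to be replaced
	mskql	r1,r3,r1
	or	r2,r5,r2
	or	r1,r4,r1
	stq_u	r2,X+7(r11)	# store high before low
	stq_u	r1,X(r11)  */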
3507 void
3508 alpha_expand_unaligned_store (rtx dst, rtx src,
3509 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3511 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3513 if (TARGET_BWX && size == 2)
3515 if (src != const0_rtx)
3517 dstl = gen_lowpart (QImode, src);
3518 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3519 NULL, 1, OPTAB_LIB_WIDEN);
3520 dsth = gen_lowpart (QImode, dsth);
3522 else
3523 dstl = dsth = const0_rtx;
3525 meml = adjust_address (dst, QImode, ofs);
3526 memh = adjust_address (dst, QImode, ofs+1);
3528 emit_move_insn (meml, dstl);
3529 emit_move_insn (memh, dsth);
3530 return;
3533 dstl = gen_reg_rtx (DImode);
3534 dsth = gen_reg_rtx (DImode);
3535 insl = gen_reg_rtx (DImode);
3536 insh = gen_reg_rtx (DImode);
3538 dsta = XEXP (dst, 0);
3539 if (GET_CODE (dsta) == LO_SUM)
3540 dsta = force_reg (Pmode, dsta);
3542 /* AND addresses cannot be in any alias set, since they may implicitly
3543 alias surrounding code. Ideally we'd have some alias set that
3544 covered all types except those with alignment 8 or higher. */
3546 meml = change_address (dst, DImode,
3547 gen_rtx_AND (DImode,
3548 plus_constant (DImode, dsta, ofs),
3549 GEN_INT (-8)));
3550 set_mem_alias_set (meml, 0);
3552 memh = change_address (dst, DImode,
3553 gen_rtx_AND (DImode,
3554 plus_constant (DImode, dsta,
3555 ofs + size - 1),
3556 GEN_INT (-8)));
3557 set_mem_alias_set (memh, 0);
3559 emit_move_insn (dsth, memh);
3560 emit_move_insn (dstl, meml);
3562 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3564 if (src != CONST0_RTX (GET_MODE (src)))
3566 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3567 GEN_INT (size*8), addr));
3569 switch ((int) size)
3571 case 2:
3572 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3573 break;
3574 case 4:
3575 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3576 break;
3577 case 8:
3578 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3579 break;
3580 default:
3581 gcc_unreachable ();
3585 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3587 switch ((int) size)
3589 case 2:
3590 emit_insn (gen_mskwl (dstl, dstl, addr));
3591 break;
3592 case 4:
3593 emit_insn (gen_mskll (dstl, dstl, addr));
3594 break;
3595 case 8:
3596 emit_insn (gen_mskql (dstl, dstl, addr));
3597 break;
3598 default:
3599 gcc_unreachable ();
3602 if (src != CONST0_RTX (GET_MODE (src)))
3604 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3605 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3608 /* Must store high before low for degenerate case of aligned. */
3609 emit_move_insn (memh, dsth);
3610 emit_move_insn (meml, dstl);
3613 /* The block move code tries to maximize speed by separating loads and
3614 stores at the expense of register pressure: we load all of the data
3615 before we store it back out. There are two secondary effects worth
3616 mentioning, that this speeds copying to/from aligned and unaligned
3617 buffers, and that it makes the code significantly easier to write. */
3619 #define MAX_MOVE_WORDS 8
3621 /* Load an integral number of consecutive unaligned quadwords. */
3623 static void
3624 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3625 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3627 rtx const im8 = GEN_INT (-8);
3628 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3629 rtx sreg, areg, tmp, smema;
3630 HOST_WIDE_INT i;
3632 smema = XEXP (smem, 0);
3633 if (GET_CODE (smema) == LO_SUM)
3634 smema = force_reg (Pmode, smema);
3636 /* Generate all the tmp registers we need. */
3637 for (i = 0; i < words; ++i)
3639 data_regs[i] = out_regs[i];
3640 ext_tmps[i] = gen_reg_rtx (DImode);
3642 data_regs[words] = gen_reg_rtx (DImode);
3644 if (ofs != 0)
3645 smem = adjust_address (smem, GET_MODE (smem), ofs);
3647 /* Load up all of the source data. */
3648 for (i = 0; i < words; ++i)
3650 tmp = change_address (smem, DImode,
3651 gen_rtx_AND (DImode,
3652 plus_constant (DImode, smema, 8*i),
3653 im8));
3654 set_mem_alias_set (tmp, 0);
3655 emit_move_insn (data_regs[i], tmp);
3658 tmp = change_address (smem, DImode,
3659 gen_rtx_AND (DImode,
3660 plus_constant (DImode, smema,
3661 8*words - 1),
3662 im8));
3663 set_mem_alias_set (tmp, 0);
3664 emit_move_insn (data_regs[words], tmp);
3666 /* Extract the half-word fragments. Unfortunately DEC decided to make
3667 extxh with offset zero a noop instead of zeroing the register, so
3668 we must take care of that edge condition ourselves with cmov. */
3670 sreg = copy_addr_to_reg (smema);
3671 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3672 1, OPTAB_WIDEN);
3673 for (i = 0; i < words; ++i)
3675 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3676 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3677 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3678 gen_rtx_IF_THEN_ELSE (DImode,
3679 gen_rtx_EQ (DImode, areg,
3680 const0_rtx),
3681 const0_rtx, ext_tmps[i])));
3684 /* Merge the half-words into whole words. */
3685 for (i = 0; i < words; ++i)
3687 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3688 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3692 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3693 may be NULL to store zeros. */
3695 static void
3696 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3697 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3699 rtx const im8 = GEN_INT (-8);
3700 rtx ins_tmps[MAX_MOVE_WORDS];
3701 rtx st_tmp_1, st_tmp_2, dreg;
3702 rtx st_addr_1, st_addr_2, dmema;
3703 HOST_WIDE_INT i;
3705 dmema = XEXP (dmem, 0);
3706 if (GET_CODE (dmema) == LO_SUM)
3707 dmema = force_reg (Pmode, dmema);
3709 /* Generate all the tmp registers we need. */
3710 if (data_regs != NULL)
3711 for (i = 0; i < words; ++i)
3712 ins_tmps[i] = gen_reg_rtx(DImode);
3713 st_tmp_1 = gen_reg_rtx(DImode);
3714 st_tmp_2 = gen_reg_rtx(DImode);
3716 if (ofs != 0)
3717 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3719 st_addr_2 = change_address (dmem, DImode,
3720 gen_rtx_AND (DImode,
3721 plus_constant (DImode, dmema,
3722 words*8 - 1),
3723 im8));
3724 set_mem_alias_set (st_addr_2, 0);
3726 st_addr_1 = change_address (dmem, DImode,
3727 gen_rtx_AND (DImode, dmema, im8));
3728 set_mem_alias_set (st_addr_1, 0);
3730 /* Load up the destination end bits. */
3731 emit_move_insn (st_tmp_2, st_addr_2);
3732 emit_move_insn (st_tmp_1, st_addr_1);
3734 /* Shift the input data into place. */
3735 dreg = copy_addr_to_reg (dmema);
3736 if (data_regs != NULL)
3738 for (i = words-1; i >= 0; --i)
3740 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3741 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3743 for (i = words-1; i > 0; --i)
3745 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3746 ins_tmps[i-1], ins_tmps[i-1], 1,
3747 OPTAB_WIDEN);
3751 /* Split and merge the ends with the destination data. */
3752 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3753 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3755 if (data_regs != NULL)
3757 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3758 st_tmp_2, 1, OPTAB_WIDEN);
3759 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3760 st_tmp_1, 1, OPTAB_WIDEN);
3763 /* Store it all. */
3764 emit_move_insn (st_addr_2, st_tmp_2);
3765 for (i = words-1; i > 0; --i)
3767 rtx tmp = change_address (dmem, DImode,
3768 gen_rtx_AND (DImode,
3769 plus_constant (DImode,
3770 dmema, i*8),
3771 im8));
3772 set_mem_alias_set (tmp, 0);
3773 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3775 emit_move_insn (st_addr_1, st_tmp_1);
3779 /* Expand string/block move operations.
3781 operands[0] is the pointer to the destination.
3782 operands[1] is the pointer to the source.
3783 operands[2] is the number of bytes to move.
3784 operands[3] is the alignment. */
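/* For example (illustration only): a 13-byte copy with a 32-bit aligned
   source and destination becomes three SImode register moves followed by
   a single QImode move for the trailing byte.  */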
3787 alpha_expand_block_move (rtx operands[])
3789 rtx bytes_rtx = operands[2];
3790 rtx align_rtx = operands[3];
3791 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3792 HOST_WIDE_INT bytes = orig_bytes;
3793 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3794 HOST_WIDE_INT dst_align = src_align;
3795 rtx orig_src = operands[1];
3796 rtx orig_dst = operands[0];
3797 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3798 rtx tmp;
3799 unsigned int i, words, ofs, nregs = 0;
3801 if (orig_bytes <= 0)
3802 return 1;
3803 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3804 return 0;
3806 /* Look for additional alignment information from recorded register info. */
3808 tmp = XEXP (orig_src, 0);
3809 if (REG_P (tmp))
3810 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3811 else if (GET_CODE (tmp) == PLUS
3812 && REG_P (XEXP (tmp, 0))
3813 && CONST_INT_P (XEXP (tmp, 1)))
3815 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3816 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3818 if (a > src_align)
3820 if (a >= 64 && c % 8 == 0)
3821 src_align = 64;
3822 else if (a >= 32 && c % 4 == 0)
3823 src_align = 32;
3824 else if (a >= 16 && c % 2 == 0)
3825 src_align = 16;
3829 tmp = XEXP (orig_dst, 0);
3830 if (REG_P (tmp))
3831 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3832 else if (GET_CODE (tmp) == PLUS
3833 && REG_P (XEXP (tmp, 0))
3834 && CONST_INT_P (XEXP (tmp, 1)))
3836 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3837 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3839 if (a > dst_align)
3841 if (a >= 64 && c % 8 == 0)
3842 dst_align = 64;
3843 else if (a >= 32 && c % 4 == 0)
3844 dst_align = 32;
3845 else if (a >= 16 && c % 2 == 0)
3846 dst_align = 16;
3850 ofs = 0;
3851 if (src_align >= 64 && bytes >= 8)
3853 words = bytes / 8;
3855 for (i = 0; i < words; ++i)
3856 data_regs[nregs + i] = gen_reg_rtx (DImode);
3858 for (i = 0; i < words; ++i)
3859 emit_move_insn (data_regs[nregs + i],
3860 adjust_address (orig_src, DImode, ofs + i * 8));
3862 nregs += words;
3863 bytes -= words * 8;
3864 ofs += words * 8;
3867 if (src_align >= 32 && bytes >= 4)
3869 words = bytes / 4;
3871 for (i = 0; i < words; ++i)
3872 data_regs[nregs + i] = gen_reg_rtx (SImode);
3874 for (i = 0; i < words; ++i)
3875 emit_move_insn (data_regs[nregs + i],
3876 adjust_address (orig_src, SImode, ofs + i * 4));
3878 nregs += words;
3879 bytes -= words * 4;
3880 ofs += words * 4;
3883 if (bytes >= 8)
3885 words = bytes / 8;
3887 for (i = 0; i < words+1; ++i)
3888 data_regs[nregs + i] = gen_reg_rtx (DImode);
3890 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3891 words, ofs);
3893 nregs += words;
3894 bytes -= words * 8;
3895 ofs += words * 8;
3898 if (! TARGET_BWX && bytes >= 4)
3900 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3901 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3902 bytes -= 4;
3903 ofs += 4;
3906 if (bytes >= 2)
3908 if (src_align >= 16)
3910 do {
3911 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3912 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3913 bytes -= 2;
3914 ofs += 2;
3915 } while (bytes >= 2);
3917 else if (! TARGET_BWX)
3919 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3920 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3921 bytes -= 2;
3922 ofs += 2;
3926 while (bytes > 0)
3928 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3929 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3930 bytes -= 1;
3931 ofs += 1;
3934 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3936 /* Now save it back out again. */
3938 i = 0, ofs = 0;
3940 /* Write out the data in whatever chunks reading the source allowed. */
3941 if (dst_align >= 64)
3943 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3945 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3946 data_regs[i]);
3947 ofs += 8;
3948 i++;
3952 if (dst_align >= 32)
3954 /* If the source has remaining DImode regs, write them out in
3955 two pieces. */
3956 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3958 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3959 NULL_RTX, 1, OPTAB_WIDEN);
3961 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3962 gen_lowpart (SImode, data_regs[i]));
3963 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3964 gen_lowpart (SImode, tmp));
3965 ofs += 8;
3966 i++;
3969 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3971 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3972 data_regs[i]);
3973 ofs += 4;
3974 i++;
3978 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3980 /* Write out a remaining block of words using unaligned methods. */
3982 for (words = 1; i + words < nregs; words++)
3983 if (GET_MODE (data_regs[i + words]) != DImode)
3984 break;
3986 if (words == 1)
3987 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3988 else
3989 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3990 words, ofs);
3992 i += words;
3993 ofs += words * 8;
3996 /* Due to the above, this won't be aligned. */
3997 /* ??? If we have more than one of these, consider constructing full
3998 words in registers and using alpha_expand_unaligned_store_words. */
3999 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4001 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4002 ofs += 4;
4003 i++;
4006 if (dst_align >= 16)
4007 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4009 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4010 i++;
4011 ofs += 2;
4013 else
4014 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4016 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4017 i++;
4018 ofs += 2;
4021 /* The remainder must be byte copies. */
4022 while (i < nregs)
4024 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4025 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4026 i++;
4027 ofs += 1;
4030 return 1;
4034 alpha_expand_block_clear (rtx operands[])
4036 rtx bytes_rtx = operands[1];
4037 rtx align_rtx = operands[3];
4038 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4039 HOST_WIDE_INT bytes = orig_bytes;
4040 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4041 HOST_WIDE_INT alignofs = 0;
4042 rtx orig_dst = operands[0];
4043 rtx tmp;
4044 int i, words, ofs = 0;
4046 if (orig_bytes <= 0)
4047 return 1;
4048 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4049 return 0;
4051 /* Look for stricter alignment. */
4052 tmp = XEXP (orig_dst, 0);
4053 if (REG_P (tmp))
4054 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4055 else if (GET_CODE (tmp) == PLUS
4056 && REG_P (XEXP (tmp, 0))
4057 && CONST_INT_P (XEXP (tmp, 1)))
4059 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4060 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4062 if (a > align)
4064 if (a >= 64)
4065 align = a, alignofs = 8 - c % 8;
4066 else if (a >= 32)
4067 align = a, alignofs = 4 - c % 4;
4068 else if (a >= 16)
4069 align = a, alignofs = 2 - c % 2;
4073 /* Handle an unaligned prefix first. */
4075 if (alignofs > 0)
4077 #if HOST_BITS_PER_WIDE_INT >= 64
4078 /* Given that alignofs is bounded by align, the only time BWX could
4079 generate three stores is for a 7 byte fill. Prefer two individual
4080 stores over a load/mask/store sequence. */
4081 if ((!TARGET_BWX || alignofs == 7)
4082 && align >= 32
4083 && !(alignofs == 4 && bytes >= 4))
4085 machine_mode mode = (align >= 64 ? DImode : SImode);
4086 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4087 rtx mem, tmp;
4088 HOST_WIDE_INT mask;
4090 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4091 set_mem_alias_set (mem, 0);
4093 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4094 if (bytes < alignofs)
4096 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4097 ofs += bytes;
4098 bytes = 0;
4100 else
4102 bytes -= alignofs;
4103 ofs += alignofs;
4105 alignofs = 0;
4107 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4108 NULL_RTX, 1, OPTAB_WIDEN);
4110 emit_move_insn (mem, tmp);
4112 #endif
4114 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4116 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4117 bytes -= 1;
4118 ofs += 1;
4119 alignofs -= 1;
4121 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4123 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4124 bytes -= 2;
4125 ofs += 2;
4126 alignofs -= 2;
4128 if (alignofs == 4 && bytes >= 4)
4130 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4131 bytes -= 4;
4132 ofs += 4;
4133 alignofs = 0;
4136 /* If we've not used the extra lead alignment information by now,
4137 we won't be able to. Downgrade align to match what's left over. */
4138 if (alignofs > 0)
4140 alignofs = alignofs & -alignofs;
4141 align = MIN (align, alignofs * BITS_PER_UNIT);
4145 /* Handle a block of contiguous long-words. */
4147 if (align >= 64 && bytes >= 8)
4149 words = bytes / 8;
4151 for (i = 0; i < words; ++i)
4152 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4153 const0_rtx);
4155 bytes -= words * 8;
4156 ofs += words * 8;
4159 /* If the block is large and appropriately aligned, emit a single
4160 store followed by a sequence of stq_u insns. */
4162 if (align >= 32 && bytes > 16)
4164 rtx orig_dsta;
4166 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4167 bytes -= 4;
4168 ofs += 4;
4170 orig_dsta = XEXP (orig_dst, 0);
4171 if (GET_CODE (orig_dsta) == LO_SUM)
4172 orig_dsta = force_reg (Pmode, orig_dsta);
4174 words = bytes / 8;
4175 for (i = 0; i < words; ++i)
4177 rtx mem
4178 = change_address (orig_dst, DImode,
4179 gen_rtx_AND (DImode,
4180 plus_constant (DImode, orig_dsta,
4181 ofs + i*8),
4182 GEN_INT (-8)));
4183 set_mem_alias_set (mem, 0);
4184 emit_move_insn (mem, const0_rtx);
4187 /* Depending on the alignment, the first stq_u may have overlapped
4188 with the initial stl, which means that the last stq_u didn't
4189 write as much as it would appear. Leave those questionable bytes
4190 unaccounted for. */
4191 bytes -= words * 8 - 4;
4192 ofs += words * 8 - 4;
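/* Editor's annotation (not part of the upstream source): concretely, when
   the stl's address is 0 mod 8, the first stq_u rounds the next address
   down by 4 and rewrites the word the stl just cleared, so the WORDS stq_u
   insns only extend the cleared region by words*8 - 4 bytes.  When the
   stl's address is 4 mod 8 there is no overlap and 4 extra trailing bytes
   get cleared; those are the "questionable" bytes left unaccounted for and
   simply cleared again by the tail code below.  */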
4195 /* Handle a smaller block of aligned words. */
4197 if ((align >= 64 && bytes == 4)
4198 || (align == 32 && bytes >= 4))
4200 words = bytes / 4;
4202 for (i = 0; i < words; ++i)
4203 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4204 const0_rtx);
4206 bytes -= words * 4;
4207 ofs += words * 4;
4210 /* An unaligned block uses stq_u stores for as many as possible. */
4212 if (bytes >= 8)
4214 words = bytes / 8;
4216 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4218 bytes -= words * 8;
4219 ofs += words * 8;
4222 /* Next clean up any trailing pieces. */
4224 #if HOST_BITS_PER_WIDE_INT >= 64
4225 /* Count the number of bits in BYTES for which aligned stores could
4226 be emitted. */
4227 words = 0;
4228 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4229 if (bytes & i)
4230 words += 1;
4232 /* If we have appropriate alignment (and it wouldn't take too many
4233 instructions otherwise), mask out the bytes we need. */
4234 if (TARGET_BWX ? words > 2 : bytes > 0)
4236 if (align >= 64)
4238 rtx mem, tmp;
4239 HOST_WIDE_INT mask;
4241 mem = adjust_address (orig_dst, DImode, ofs);
4242 set_mem_alias_set (mem, 0);
4244 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4246 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4247 NULL_RTX, 1, OPTAB_WIDEN);
4249 emit_move_insn (mem, tmp);
4250 return 1;
4252 else if (align >= 32 && bytes < 4)
4254 rtx mem, tmp;
4255 HOST_WIDE_INT mask;
4257 mem = adjust_address (orig_dst, SImode, ofs);
4258 set_mem_alias_set (mem, 0);
4260 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4262 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4263 NULL_RTX, 1, OPTAB_WIDEN);
4265 emit_move_insn (mem, tmp);
4266 return 1;
4269 #endif
4271 if (!TARGET_BWX && bytes >= 4)
4273 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4274 bytes -= 4;
4275 ofs += 4;
4278 if (bytes >= 2)
4280 if (align >= 16)
4282 do {
4283 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4284 const0_rtx);
4285 bytes -= 2;
4286 ofs += 2;
4287 } while (bytes >= 2);
4289 else if (! TARGET_BWX)
4291 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4292 bytes -= 2;
4293 ofs += 2;
4297 while (bytes > 0)
4299 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4300 bytes -= 1;
4301 ofs += 1;
4304 return 1;
4307 /* Returns a mask so that zap(x, value) == x & mask. */
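/* Editor's annotation (not part of the upstream source): the ZAP insn zeroes
   every byte of its first operand whose corresponding bit in the low 8 bits
   of VALUE is set, so the constant built below has 0xff in exactly those
   byte lanes whose mask bit is clear.  A worked example:
   alpha_expand_zap_mask (0x0f) yields the DImode constant
   0xffffffff00000000, since bytes 0-3 are zapped and bytes 4-7 survive.  */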
4310 alpha_expand_zap_mask (HOST_WIDE_INT value)
4312 rtx result;
4313 int i;
4315 if (HOST_BITS_PER_WIDE_INT >= 64)
4317 HOST_WIDE_INT mask = 0;
4319 for (i = 7; i >= 0; --i)
4321 mask <<= 8;
4322 if (!((value >> i) & 1))
4323 mask |= 0xff;
4326 result = gen_int_mode (mask, DImode);
4328 else
4330 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4332 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4334 for (i = 7; i >= 4; --i)
4336 mask_hi <<= 8;
4337 if (!((value >> i) & 1))
4338 mask_hi |= 0xff;
4341 for (i = 3; i >= 0; --i)
4343 mask_lo <<= 8;
4344 if (!((value >> i) & 1))
4345 mask_lo |= 0xff;
4348 result = immed_double_const (mask_lo, mask_hi, DImode);
4351 return result;
4354 void
4355 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4356 machine_mode mode,
4357 rtx op0, rtx op1, rtx op2)
4359 op0 = gen_lowpart (mode, op0);
4361 if (op1 == const0_rtx)
4362 op1 = CONST0_RTX (mode);
4363 else
4364 op1 = gen_lowpart (mode, op1);
4366 if (op2 == const0_rtx)
4367 op2 = CONST0_RTX (mode);
4368 else
4369 op2 = gen_lowpart (mode, op2);
4371 emit_insn ((*gen) (op0, op1, op2));
4374 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4375 COND is true. Mark the jump as unlikely to be taken. */
4377 static void
4378 emit_unlikely_jump (rtx cond, rtx label)
4380 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
4381 rtx x;
4383 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4384 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4385 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
4388 /* A subroutine of the atomic operation splitters. Emit a load-locked
4389 instruction in MODE. */
4391 static void
4392 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
4394 rtx (*fn) (rtx, rtx) = NULL;
4395 if (mode == SImode)
4396 fn = gen_load_locked_si;
4397 else if (mode == DImode)
4398 fn = gen_load_locked_di;
4399 emit_insn (fn (reg, mem));
4402 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4403 instruction in MODE. */
4405 static void
4406 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
4408 rtx (*fn) (rtx, rtx, rtx) = NULL;
4409 if (mode == SImode)
4410 fn = gen_store_conditional_si;
4411 else if (mode == DImode)
4412 fn = gen_store_conditional_di;
4413 emit_insn (fn (res, mem, val));
4416 /* Subroutines of the atomic operation splitters. Emit barriers
4417 as needed for the memory MODEL. */
4419 static void
4420 alpha_pre_atomic_barrier (enum memmodel model)
4422 if (need_atomic_barrier_p (model, true))
4423 emit_insn (gen_memory_barrier ());
4426 static void
4427 alpha_post_atomic_barrier (enum memmodel model)
4429 if (need_atomic_barrier_p (model, false))
4430 emit_insn (gen_memory_barrier ());
4433 /* A subroutine of the atomic operation splitters. Emit an insxl
4434 instruction in MODE. */
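/* Editor's annotation (not upstream): the insbl/inswl/insll/insql family
   shifts OP1 into the byte lane selected by the low three bits of the
   address operand OP2, yielding a full quadword that the 1- and 2-byte
   atomic expanders below can OR into a quadword previously punched out
   with mskxl.  */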
4436 static rtx
4437 emit_insxl (machine_mode mode, rtx op1, rtx op2)
4439 rtx ret = gen_reg_rtx (DImode);
4440 rtx (*fn) (rtx, rtx, rtx);
4442 switch (mode)
4444 case QImode:
4445 fn = gen_insbl;
4446 break;
4447 case HImode:
4448 fn = gen_inswl;
4449 break;
4450 case SImode:
4451 fn = gen_insll;
4452 break;
4453 case DImode:
4454 fn = gen_insql;
4455 break;
4456 default:
4457 gcc_unreachable ();
4460 op1 = force_reg (mode, op1);
4461 emit_insn (fn (ret, op1, op2));
4463 return ret;
4466 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4467 to perform. MEM is the memory on which to operate. VAL is the second
4468 operand of the binary operator. BEFORE and AFTER are optional locations to
4469 return the value of MEM either before or after the operation. SCRATCH is
4470 a scratch register. */
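/* Editor's sketch (annotation, not upstream documentation): for CODE == PLUS
   on an SImode MEM, the splitter below emits a retry loop roughly of the form

	1:	ldl_l	scratch,0(mem)
		addl	scratch,val,scratch
		stl_c	scratch,0(mem)
		beq	scratch,1b

   bracketed by memory barriers as required by MODEL, with BEFORE and AFTER
   capturing the old and new values when requested.  */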
4472 void
4473 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4474 rtx after, rtx scratch, enum memmodel model)
4476 machine_mode mode = GET_MODE (mem);
4477 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4479 alpha_pre_atomic_barrier (model);
4481 label = gen_label_rtx ();
4482 emit_label (label);
4483 label = gen_rtx_LABEL_REF (DImode, label);
4485 if (before == NULL)
4486 before = scratch;
4487 emit_load_locked (mode, before, mem);
4489 if (code == NOT)
4491 x = gen_rtx_AND (mode, before, val);
4492 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4494 x = gen_rtx_NOT (mode, val);
4496 else
4497 x = gen_rtx_fmt_ee (code, mode, before, val);
4498 if (after)
4499 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4500 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4502 emit_store_conditional (mode, cond, mem, scratch);
4504 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4505 emit_unlikely_jump (x, label);
4507 alpha_post_atomic_barrier (model);
4510 /* Expand a compare and swap operation. */
4512 void
4513 alpha_split_compare_and_swap (rtx operands[])
4515 rtx cond, retval, mem, oldval, newval;
4516 bool is_weak;
4517 enum memmodel mod_s, mod_f;
4518 machine_mode mode;
4519 rtx label1, label2, x;
4521 cond = operands[0];
4522 retval = operands[1];
4523 mem = operands[2];
4524 oldval = operands[3];
4525 newval = operands[4];
4526 is_weak = (operands[5] != const0_rtx);
4527 mod_s = (enum memmodel) INTVAL (operands[6]);
4528 mod_f = (enum memmodel) INTVAL (operands[7]);
4529 mode = GET_MODE (mem);
4531 alpha_pre_atomic_barrier (mod_s);
4533 label1 = NULL_RTX;
4534 if (!is_weak)
4536 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4537 emit_label (XEXP (label1, 0));
4539 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4541 emit_load_locked (mode, retval, mem);
4543 x = gen_lowpart (DImode, retval);
4544 if (oldval == const0_rtx)
4546 emit_move_insn (cond, const0_rtx);
4547 x = gen_rtx_NE (DImode, x, const0_rtx);
4549 else
4551 x = gen_rtx_EQ (DImode, x, oldval);
4552 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4553 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4555 emit_unlikely_jump (x, label2);
4557 emit_move_insn (cond, newval);
4558 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4560 if (!is_weak)
4562 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4563 emit_unlikely_jump (x, label1);
4566 if (mod_f != MEMMODEL_RELAXED)
4567 emit_label (XEXP (label2, 0));
4569 alpha_post_atomic_barrier (mod_s);
4571 if (mod_f == MEMMODEL_RELAXED)
4572 emit_label (XEXP (label2, 0));
4575 void
4576 alpha_expand_compare_and_swap_12 (rtx operands[])
4578 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4579 machine_mode mode;
4580 rtx addr, align, wdst;
4581 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4583 cond = operands[0];
4584 dst = operands[1];
4585 mem = operands[2];
4586 oldval = operands[3];
4587 newval = operands[4];
4588 is_weak = operands[5];
4589 mod_s = operands[6];
4590 mod_f = operands[7];
4591 mode = GET_MODE (mem);
4593 /* We forced the address into a register via mem_noofs_operand. */
4594 addr = XEXP (mem, 0);
4595 gcc_assert (register_operand (addr, DImode));
4597 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4598 NULL_RTX, 1, OPTAB_DIRECT);
4600 oldval = convert_modes (DImode, mode, oldval, 1);
4602 if (newval != const0_rtx)
4603 newval = emit_insxl (mode, newval, addr);
4605 wdst = gen_reg_rtx (DImode);
4606 if (mode == QImode)
4607 gen = gen_atomic_compare_and_swapqi_1;
4608 else
4609 gen = gen_atomic_compare_and_swaphi_1;
4610 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4611 is_weak, mod_s, mod_f));
4613 emit_move_insn (dst, gen_lowpart (mode, wdst));
4616 void
4617 alpha_split_compare_and_swap_12 (rtx operands[])
4619 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4620 machine_mode mode;
4621 bool is_weak;
4622 enum memmodel mod_s, mod_f;
4623 rtx label1, label2, mem, addr, width, mask, x;
4625 cond = operands[0];
4626 dest = operands[1];
4627 orig_mem = operands[2];
4628 oldval = operands[3];
4629 newval = operands[4];
4630 align = operands[5];
4631 is_weak = (operands[6] != const0_rtx);
4632 mod_s = (enum memmodel) INTVAL (operands[7]);
4633 mod_f = (enum memmodel) INTVAL (operands[8]);
4634 scratch = operands[9];
4635 mode = GET_MODE (orig_mem);
4636 addr = XEXP (orig_mem, 0);
4638 mem = gen_rtx_MEM (DImode, align);
4639 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4640 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4641 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4643 alpha_pre_atomic_barrier (mod_s);
4645 label1 = NULL_RTX;
4646 if (!is_weak)
4648 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4649 emit_label (XEXP (label1, 0));
4651 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4653 emit_load_locked (DImode, scratch, mem);
4655 width = GEN_INT (GET_MODE_BITSIZE (mode));
4656 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4657 emit_insn (gen_extxl (dest, scratch, width, addr));
4659 if (oldval == const0_rtx)
4661 emit_move_insn (cond, const0_rtx);
4662 x = gen_rtx_NE (DImode, dest, const0_rtx);
4664 else
4666 x = gen_rtx_EQ (DImode, dest, oldval);
4667 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4668 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4670 emit_unlikely_jump (x, label2);
4672 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4674 if (newval != const0_rtx)
4675 emit_insn (gen_iordi3 (cond, cond, newval));
4677 emit_store_conditional (DImode, cond, mem, cond);
4679 if (!is_weak)
4681 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4682 emit_unlikely_jump (x, label1);
4685 if (mod_f != MEMMODEL_RELAXED)
4686 emit_label (XEXP (label2, 0));
4688 alpha_post_atomic_barrier (mod_s);
4690 if (mod_f == MEMMODEL_RELAXED)
4691 emit_label (XEXP (label2, 0));
4694 /* Expand an atomic exchange operation. */
4696 void
4697 alpha_split_atomic_exchange (rtx operands[])
4699 rtx retval, mem, val, scratch;
4700 enum memmodel model;
4701 machine_mode mode;
4702 rtx label, x, cond;
4704 retval = operands[0];
4705 mem = operands[1];
4706 val = operands[2];
4707 model = (enum memmodel) INTVAL (operands[3]);
4708 scratch = operands[4];
4709 mode = GET_MODE (mem);
4710 cond = gen_lowpart (DImode, scratch);
4712 alpha_pre_atomic_barrier (model);
4714 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4715 emit_label (XEXP (label, 0));
4717 emit_load_locked (mode, retval, mem);
4718 emit_move_insn (scratch, val);
4719 emit_store_conditional (mode, cond, mem, scratch);
4721 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4722 emit_unlikely_jump (x, label);
4724 alpha_post_atomic_barrier (model);
4727 void
4728 alpha_expand_atomic_exchange_12 (rtx operands[])
4730 rtx dst, mem, val, model;
4731 machine_mode mode;
4732 rtx addr, align, wdst;
4733 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4735 dst = operands[0];
4736 mem = operands[1];
4737 val = operands[2];
4738 model = operands[3];
4739 mode = GET_MODE (mem);
4741 /* We forced the address into a register via mem_noofs_operand. */
4742 addr = XEXP (mem, 0);
4743 gcc_assert (register_operand (addr, DImode));
4745 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4746 NULL_RTX, 1, OPTAB_DIRECT);
4748 /* Insert val into the correct byte location within the word. */
4749 if (val != const0_rtx)
4750 val = emit_insxl (mode, val, addr);
4752 wdst = gen_reg_rtx (DImode);
4753 if (mode == QImode)
4754 gen = gen_atomic_exchangeqi_1;
4755 else
4756 gen = gen_atomic_exchangehi_1;
4757 emit_insn (gen (wdst, mem, val, align, model));
4759 emit_move_insn (dst, gen_lowpart (mode, wdst));
4762 void
4763 alpha_split_atomic_exchange_12 (rtx operands[])
4765 rtx dest, orig_mem, addr, val, align, scratch;
4766 rtx label, mem, width, mask, x;
4767 machine_mode mode;
4768 enum memmodel model;
4770 dest = operands[0];
4771 orig_mem = operands[1];
4772 val = operands[2];
4773 align = operands[3];
4774 model = (enum memmodel) INTVAL (operands[4]);
4775 scratch = operands[5];
4776 mode = GET_MODE (orig_mem);
4777 addr = XEXP (orig_mem, 0);
4779 mem = gen_rtx_MEM (DImode, align);
4780 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4781 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4782 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4784 alpha_pre_atomic_barrier (model);
4786 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4787 emit_label (XEXP (label, 0));
4789 emit_load_locked (DImode, scratch, mem);
4791 width = GEN_INT (GET_MODE_BITSIZE (mode));
4792 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4793 emit_insn (gen_extxl (dest, scratch, width, addr));
4794 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4795 if (val != const0_rtx)
4796 emit_insn (gen_iordi3 (scratch, scratch, val));
4798 emit_store_conditional (DImode, scratch, mem, scratch);
4800 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4801 emit_unlikely_jump (x, label);
4803 alpha_post_atomic_barrier (model);
4806 /* Adjust the cost of a scheduling dependency. Return the new cost of
4807 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4809 static int
4810 alpha_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4812 enum attr_type dep_insn_type;
4814 /* If the dependence is an anti-dependence, there is no cost. For an
4815 output dependence, there is sometimes a cost, but it doesn't seem
4816 worth handling those few cases. */
4817 if (REG_NOTE_KIND (link) != 0)
4818 return cost;
4820 /* If we can't recognize the insns, we can't really do anything. */
4821 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4822 return cost;
4824 dep_insn_type = get_attr_type (dep_insn);
4826 /* Bring in the user-defined memory latency. */
4827 if (dep_insn_type == TYPE_ILD
4828 || dep_insn_type == TYPE_FLD
4829 || dep_insn_type == TYPE_LDSYM)
4830 cost += alpha_memory_latency-1;
4832 /* Everything else handled in DFA bypasses now. */
4834 return cost;
4837 /* The number of instructions that can be issued per cycle. */
4839 static int
4840 alpha_issue_rate (void)
4842 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4845 /* How many alternative schedules to try. This should be as wide as the
4846 scheduling freedom in the DFA, but no wider. Making this value too
4847 large results in extra work for the scheduler.
4849 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4850 alternative schedules. For EV5, we can choose between E0/E1 and
4851 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4853 static int
4854 alpha_multipass_dfa_lookahead (void)
4856 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4859 /* Machine-specific function data. */
4861 struct GTY(()) alpha_links;
4863 struct string_traits : default_hashmap_traits
4865 static bool equal_keys (const char *const &a, const char *const &b)
4867 return strcmp (a, b) == 0;
4871 struct GTY(()) machine_function
4873 /* For flag_reorder_blocks_and_partition. */
4874 rtx gp_save_rtx;
4876 /* For VMS condition handlers. */
4877 bool uses_condition_handler;
4879 /* Linkage entries. */
4880 hash_map<const char *, alpha_links *, string_traits> *links;
4883 /* How to allocate a 'struct machine_function'. */
4885 static struct machine_function *
4886 alpha_init_machine_status (void)
4888 return ggc_cleared_alloc<machine_function> ();
4891 /* Support for frame based VMS condition handlers. */
4893 /* A VMS condition handler may be established for a function with a call to
4894 __builtin_establish_vms_condition_handler, and cancelled with a call to
4895 __builtin_revert_vms_condition_handler.
4897 The VMS Condition Handling Facility knows about the existence of a handler
4898 from the procedure descriptor .handler field. As the VMS native compilers do,
4899 we store the user-specified handler's address at a fixed location in the
4900 stack frame and point the procedure descriptor at a common wrapper which
4901 fetches the real handler's address and issues an indirect call.
4903 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4905 We force the procedure kind to PT_STACK, and the fixed frame location is
4906 fp+8, just before the register save area. We use the handler_data field in
4907 the procedure descriptor to state the fp offset at which the installed
4908 handler address can be found. */
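/* Editor's annotation (not upstream; the handler name below is purely
   illustrative and the exact builtin prototypes are an assumption):

	void *prev = __builtin_establish_vms_condition_handler (my_handler);
	...
	__builtin_revert_vms_condition_handler ();

   The first call stores my_handler's address in the fp+8 slot described
   above (returning the previously installed handler); the second installs
   a null handler in the same slot.  */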
4910 #define VMS_COND_HANDLER_FP_OFFSET 8
4912 /* Expand code to store the currently installed user VMS condition handler
4913 into TARGET and install HANDLER as the new condition handler. */
4915 void
4916 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4918 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4919 VMS_COND_HANDLER_FP_OFFSET);
4921 rtx handler_slot
4922 = gen_rtx_MEM (DImode, handler_slot_address);
4924 emit_move_insn (target, handler_slot);
4925 emit_move_insn (handler_slot, handler);
4927 /* Notify the start/prologue/epilogue emitters that the condition handler
4928 slot is needed. In addition to reserving the slot space, this will force
4929 the procedure kind to PT_STACK so as to ensure that the hard_frame_pointer_rtx
4930 use above is correct. */
4931 cfun->machine->uses_condition_handler = true;
4934 /* Expand code to store the current VMS condition handler into TARGET and
4935 nullify it. */
4937 void
4938 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4940 /* We implement this by establishing a null condition handler, with the tiny
4941 side effect of setting uses_condition_handler. This is a little bit
4942 pessimistic if no actual builtin_establish call is ever issued, which is
4943 not a real problem and is expected never to happen anyway. */
4945 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4948 /* Functions to save and restore alpha_return_addr_rtx. */
4950 /* Start the ball rolling with RETURN_ADDR_RTX. */
4953 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4955 if (count != 0)
4956 return const0_rtx;
4958 return get_hard_reg_initial_val (Pmode, REG_RA);
4961 /* Return or create a memory slot containing the gp value for the current
4962 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4965 alpha_gp_save_rtx (void)
4967 rtx_insn *seq;
4968 rtx m = cfun->machine->gp_save_rtx;
4970 if (m == NULL)
4972 start_sequence ();
4974 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4975 m = validize_mem (m);
4976 emit_move_insn (m, pic_offset_table_rtx);
4978 seq = get_insns ();
4979 end_sequence ();
4981 /* We used to simply emit the sequence after entry_of_function.
4982 However this breaks the CFG if the first instruction in the
4983 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4984 label. Emit the sequence properly on the edge. We are only
4985 invoked from dw2_build_landing_pads and finish_eh_generation
4986 will call commit_edge_insertions thanks to a kludge. */
4987 insert_insn_on_edge (seq,
4988 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4990 cfun->machine->gp_save_rtx = m;
4993 return m;
4996 static void
4997 alpha_instantiate_decls (void)
4999 if (cfun->machine->gp_save_rtx != NULL_RTX)
5000 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
5003 static int
5004 alpha_ra_ever_killed (void)
5006 rtx_insn *top;
5008 if (!has_hard_reg_initial_val (Pmode, REG_RA))
5009 return (int)df_regs_ever_live_p (REG_RA);
5011 push_topmost_sequence ();
5012 top = get_insns ();
5013 pop_topmost_sequence ();
5015 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
5019 /* Return the trap mode suffix applicable to the current
5020 instruction, or NULL. */
5022 static const char *
5023 get_trap_mode_suffix (void)
5025 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
5027 switch (s)
5029 case TRAP_SUFFIX_NONE:
5030 return NULL;
5032 case TRAP_SUFFIX_SU:
5033 if (alpha_fptm >= ALPHA_FPTM_SU)
5034 return "su";
5035 return NULL;
5037 case TRAP_SUFFIX_SUI:
5038 if (alpha_fptm >= ALPHA_FPTM_SUI)
5039 return "sui";
5040 return NULL;
5042 case TRAP_SUFFIX_V_SV:
5043 switch (alpha_fptm)
5045 case ALPHA_FPTM_N:
5046 return NULL;
5047 case ALPHA_FPTM_U:
5048 return "v";
5049 case ALPHA_FPTM_SU:
5050 case ALPHA_FPTM_SUI:
5051 return "sv";
5052 default:
5053 gcc_unreachable ();
5056 case TRAP_SUFFIX_V_SV_SVI:
5057 switch (alpha_fptm)
5059 case ALPHA_FPTM_N:
5060 return NULL;
5061 case ALPHA_FPTM_U:
5062 return "v";
5063 case ALPHA_FPTM_SU:
5064 return "sv";
5065 case ALPHA_FPTM_SUI:
5066 return "svi";
5067 default:
5068 gcc_unreachable ();
5070 break;
5072 case TRAP_SUFFIX_U_SU_SUI:
5073 switch (alpha_fptm)
5075 case ALPHA_FPTM_N:
5076 return NULL;
5077 case ALPHA_FPTM_U:
5078 return "u";
5079 case ALPHA_FPTM_SU:
5080 return "su";
5081 case ALPHA_FPTM_SUI:
5082 return "sui";
5083 default:
5084 gcc_unreachable ();
5086 break;
5088 default:
5089 gcc_unreachable ();
5091 gcc_unreachable ();
5094 /* Return the rounding mode suffix applicable to the current
5095 instruction, or NULL. */
5097 static const char *
5098 get_round_mode_suffix (void)
5100 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5102 switch (s)
5104 case ROUND_SUFFIX_NONE:
5105 return NULL;
5106 case ROUND_SUFFIX_NORMAL:
5107 switch (alpha_fprm)
5109 case ALPHA_FPRM_NORM:
5110 return NULL;
5111 case ALPHA_FPRM_MINF:
5112 return "m";
5113 case ALPHA_FPRM_CHOP:
5114 return "c";
5115 case ALPHA_FPRM_DYN:
5116 return "d";
5117 default:
5118 gcc_unreachable ();
5120 break;
5122 case ROUND_SUFFIX_C:
5123 return "c";
5125 default:
5126 gcc_unreachable ();
5128 gcc_unreachable ();
5131 /* Print an operand. Recognize special options, documented below. */
5133 void
5134 print_operand (FILE *file, rtx x, int code)
5136 int i;
5138 switch (code)
5140 case '~':
5141 /* Print the assembler name of the current function. */
5142 assemble_name (file, alpha_fnname);
5143 break;
5145 case '&':
5146 if (const char *name = get_some_local_dynamic_name ())
5147 assemble_name (file, name);
5148 else
5149 output_operand_lossage ("'%%&' used without any "
5150 "local dynamic TLS references");
5151 break;
5153 case '/':
5155 const char *trap = get_trap_mode_suffix ();
5156 const char *round = get_round_mode_suffix ();
5158 if (trap || round)
5159 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5160 break;
5163 case ',':
5164 /* Generates single precision instruction suffix. */
5165 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5166 break;
5168 case '-':
5169 /* Generates double precision instruction suffix. */
5170 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5171 break;
5173 case '#':
5174 if (alpha_this_literal_sequence_number == 0)
5175 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5176 fprintf (file, "%d", alpha_this_literal_sequence_number);
5177 break;
5179 case '*':
5180 if (alpha_this_gpdisp_sequence_number == 0)
5181 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5182 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5183 break;
5185 case 'H':
5186 if (GET_CODE (x) == HIGH)
5187 output_addr_const (file, XEXP (x, 0));
5188 else
5189 output_operand_lossage ("invalid %%H value");
5190 break;
5192 case 'J':
5194 const char *lituse;
5196 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5198 x = XVECEXP (x, 0, 0);
5199 lituse = "lituse_tlsgd";
5201 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5203 x = XVECEXP (x, 0, 0);
5204 lituse = "lituse_tlsldm";
5206 else if (CONST_INT_P (x))
5207 lituse = "lituse_jsr";
5208 else
5210 output_operand_lossage ("invalid %%J value");
5211 break;
5214 if (x != const0_rtx)
5215 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5217 break;
5219 case 'j':
5221 const char *lituse;
5223 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5224 lituse = "lituse_jsrdirect";
5225 #else
5226 lituse = "lituse_jsr";
5227 #endif
5229 gcc_assert (INTVAL (x) != 0);
5230 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5232 break;
5233 case 'r':
5234 /* If this operand is the constant zero, write it as "$31". */
5235 if (REG_P (x))
5236 fprintf (file, "%s", reg_names[REGNO (x)]);
5237 else if (x == CONST0_RTX (GET_MODE (x)))
5238 fprintf (file, "$31");
5239 else
5240 output_operand_lossage ("invalid %%r value");
5241 break;
5243 case 'R':
5244 /* Similar, but for floating-point. */
5245 if (REG_P (x))
5246 fprintf (file, "%s", reg_names[REGNO (x)]);
5247 else if (x == CONST0_RTX (GET_MODE (x)))
5248 fprintf (file, "$f31");
5249 else
5250 output_operand_lossage ("invalid %%R value");
5251 break;
5253 case 'N':
5254 /* Write the 1's complement of a constant. */
5255 if (!CONST_INT_P (x))
5256 output_operand_lossage ("invalid %%N value");
5258 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5259 break;
5261 case 'P':
5262 /* Write 1 << C, for a constant C. */
5263 if (!CONST_INT_P (x))
5264 output_operand_lossage ("invalid %%P value");
5266 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5267 break;
5269 case 'h':
5270 /* Write the high-order 16 bits of a constant, sign-extended. */
5271 if (!CONST_INT_P (x))
5272 output_operand_lossage ("invalid %%h value");
5274 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5275 break;
5277 case 'L':
5278 /* Write the low-order 16 bits of a constant, sign-extended. */
5279 if (!CONST_INT_P (x))
5280 output_operand_lossage ("invalid %%L value");
5282 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5283 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5284 break;
5286 case 'm':
5287 /* Write mask for ZAP insn. */
5288 if (GET_CODE (x) == CONST_DOUBLE)
5290 HOST_WIDE_INT mask = 0;
5291 HOST_WIDE_INT value;
5293 value = CONST_DOUBLE_LOW (x);
5294 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5295 i++, value >>= 8)
5296 if (value & 0xff)
5297 mask |= (1 << i);
5299 value = CONST_DOUBLE_HIGH (x);
5300 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5301 i++, value >>= 8)
5302 if (value & 0xff)
5303 mask |= (1 << (i + sizeof (int)));
5305 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5308 else if (CONST_INT_P (x))
5310 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5312 for (i = 0; i < 8; i++, value >>= 8)
5313 if (value & 0xff)
5314 mask |= (1 << i);
5316 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5318 else
5319 output_operand_lossage ("invalid %%m value");
5320 break;
5322 case 'M':
5323 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5324 if (!CONST_INT_P (x)
5325 || (INTVAL (x) != 8 && INTVAL (x) != 16
5326 && INTVAL (x) != 32 && INTVAL (x) != 64))
5327 output_operand_lossage ("invalid %%M value");
5329 fprintf (file, "%s",
5330 (INTVAL (x) == 8 ? "b"
5331 : INTVAL (x) == 16 ? "w"
5332 : INTVAL (x) == 32 ? "l"
5333 : "q"));
5334 break;
5336 case 'U':
5337 /* Similar, except do it from the mask. */
5338 if (CONST_INT_P (x))
5340 HOST_WIDE_INT value = INTVAL (x);
5342 if (value == 0xff)
5344 fputc ('b', file);
5345 break;
5347 if (value == 0xffff)
5349 fputc ('w', file);
5350 break;
5352 if (value == 0xffffffff)
5354 fputc ('l', file);
5355 break;
5357 if (value == -1)
5359 fputc ('q', file);
5360 break;
5363 else if (HOST_BITS_PER_WIDE_INT == 32
5364 && GET_CODE (x) == CONST_DOUBLE
5365 && CONST_DOUBLE_LOW (x) == 0xffffffff
5366 && CONST_DOUBLE_HIGH (x) == 0)
5368 fputc ('l', file);
5369 break;
5371 output_operand_lossage ("invalid %%U value");
5372 break;
5374 case 's':
5375 /* Write the constant value divided by 8. */
5376 if (!CONST_INT_P (x)
5377 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5378 || (INTVAL (x) & 7) != 0)
5379 output_operand_lossage ("invalid %%s value");
5381 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5382 break;
5384 case 'S':
5385 /* Same, except compute (64 - c) / 8 */
5387 if (!CONST_INT_P (x)
5388 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5389 || (INTVAL (x) & 7) != 0)
5390 output_operand_lossage ("invalid %%S value");
5392 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5393 break;
5395 case 'C': case 'D': case 'c': case 'd':
5396 /* Write out comparison name. */
5398 enum rtx_code c = GET_CODE (x);
5400 if (!COMPARISON_P (x))
5401 output_operand_lossage ("invalid %%C value");
5403 else if (code == 'D')
5404 c = reverse_condition (c);
5405 else if (code == 'c')
5406 c = swap_condition (c);
5407 else if (code == 'd')
5408 c = swap_condition (reverse_condition (c));
5410 if (c == LEU)
5411 fprintf (file, "ule");
5412 else if (c == LTU)
5413 fprintf (file, "ult");
5414 else if (c == UNORDERED)
5415 fprintf (file, "un");
5416 else
5417 fprintf (file, "%s", GET_RTX_NAME (c));
5419 break;
5421 case 'E':
5422 /* Write the divide or modulus operator. */
5423 switch (GET_CODE (x))
5425 case DIV:
5426 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5427 break;
5428 case UDIV:
5429 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5430 break;
5431 case MOD:
5432 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5433 break;
5434 case UMOD:
5435 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5436 break;
5437 default:
5438 output_operand_lossage ("invalid %%E value");
5439 break;
5441 break;
5443 case 'A':
5444 /* Write "_u" for unaligned access. */
5445 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5446 fprintf (file, "_u");
5447 break;
5449 case 0:
5450 if (REG_P (x))
5451 fprintf (file, "%s", reg_names[REGNO (x)]);
5452 else if (MEM_P (x))
5453 output_address (XEXP (x, 0));
5454 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5456 switch (XINT (XEXP (x, 0), 1))
5458 case UNSPEC_DTPREL:
5459 case UNSPEC_TPREL:
5460 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5461 break;
5462 default:
5463 output_operand_lossage ("unknown relocation unspec");
5464 break;
5467 else
5468 output_addr_const (file, x);
5469 break;
5471 default:
5472 output_operand_lossage ("invalid %%xn code");
5476 void
5477 print_operand_address (FILE *file, rtx addr)
5479 int basereg = 31;
5480 HOST_WIDE_INT offset = 0;
5482 if (GET_CODE (addr) == AND)
5483 addr = XEXP (addr, 0);
5485 if (GET_CODE (addr) == PLUS
5486 && CONST_INT_P (XEXP (addr, 1)))
5488 offset = INTVAL (XEXP (addr, 1));
5489 addr = XEXP (addr, 0);
5492 if (GET_CODE (addr) == LO_SUM)
5494 const char *reloc16, *reloclo;
5495 rtx op1 = XEXP (addr, 1);
5497 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5499 op1 = XEXP (op1, 0);
5500 switch (XINT (op1, 1))
5502 case UNSPEC_DTPREL:
5503 reloc16 = NULL;
5504 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5505 break;
5506 case UNSPEC_TPREL:
5507 reloc16 = NULL;
5508 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5509 break;
5510 default:
5511 output_operand_lossage ("unknown relocation unspec");
5512 return;
5515 output_addr_const (file, XVECEXP (op1, 0, 0));
5517 else
5519 reloc16 = "gprel";
5520 reloclo = "gprellow";
5521 output_addr_const (file, op1);
5524 if (offset)
5525 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5527 addr = XEXP (addr, 0);
5528 switch (GET_CODE (addr))
5530 case REG:
5531 basereg = REGNO (addr);
5532 break;
5534 case SUBREG:
5535 basereg = subreg_regno (addr);
5536 break;
5538 default:
5539 gcc_unreachable ();
5542 fprintf (file, "($%d)\t\t!%s", basereg,
5543 (basereg == 29 ? reloc16 : reloclo));
5544 return;
5547 switch (GET_CODE (addr))
5549 case REG:
5550 basereg = REGNO (addr);
5551 break;
5553 case SUBREG:
5554 basereg = subreg_regno (addr);
5555 break;
5557 case CONST_INT:
5558 offset = INTVAL (addr);
5559 break;
5561 case SYMBOL_REF:
5562 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5563 fprintf (file, "%s", XSTR (addr, 0));
5564 return;
5566 case CONST:
5567 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5568 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5569 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5570 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5571 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5572 INTVAL (XEXP (XEXP (addr, 0), 1)));
5573 return;
5575 default:
5576 output_operand_lossage ("invalid operand address");
5577 return;
5580 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5583 /* Emit RTL insns to initialize the variable parts of a trampoline at
5584 M_TRAMP. FNDECL is the target function's decl. CHAIN_VALUE is an rtx
5585 for the static chain value for the function. */
5587 static void
5588 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5590 rtx fnaddr, mem, word1, word2;
5592 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5594 #ifdef POINTERS_EXTEND_UNSIGNED
5595 fnaddr = convert_memory_address (Pmode, fnaddr);
5596 chain_value = convert_memory_address (Pmode, chain_value);
5597 #endif
5599 if (TARGET_ABI_OPEN_VMS)
5601 const char *fnname;
5602 char *trname;
5604 /* Construct the name of the trampoline entry point. */
5605 fnname = XSTR (fnaddr, 0);
5606 trname = (char *) alloca (strlen (fnname) + 5);
5607 strcpy (trname, fnname);
5608 strcat (trname, "..tr");
5609 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5610 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5612 /* Trampoline (or "bounded") procedure descriptor is constructed from
5613 the function's procedure descriptor with certain fields zeroed in accordance with
5614 the VMS calling standard. This is stored in the first quadword. */
5615 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5616 word1 = expand_and (DImode, word1,
5617 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5618 NULL);
5620 else
5622 /* These 4 instructions are:
5623 ldq $1,24($27)
5624 ldq $27,16($27)
5625 jmp $31,($27),0
5626 nop
5627 We don't bother setting the HINT field of the jump; the nop
5628 is merely there for padding. */
5629 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5630 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5633 /* Store the first two words, as computed above. */
5634 mem = adjust_address (m_tramp, DImode, 0);
5635 emit_move_insn (mem, word1);
5636 mem = adjust_address (m_tramp, DImode, 8);
5637 emit_move_insn (mem, word2);
5639 /* Store function address and static chain value. */
5640 mem = adjust_address (m_tramp, Pmode, 16);
5641 emit_move_insn (mem, fnaddr);
5642 mem = adjust_address (m_tramp, Pmode, 24);
5643 emit_move_insn (mem, chain_value);
5645 if (TARGET_ABI_OSF)
5647 emit_insn (gen_imb ());
5648 #ifdef HAVE_ENABLE_EXECUTE_STACK
5649 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5650 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5651 #endif
5655 /* Determine where to put an argument to a function.
5656 Value is zero to push the argument on the stack,
5657 or a hard register in which to store the argument.
5659 MODE is the argument's machine mode.
5660 TYPE is the data type of the argument (as a tree).
5661 This is null for libcalls where that information may
5662 not be available.
5663 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5664 the preceding args and about the function being called.
5665 NAMED is nonzero if this argument is a named parameter
5666 (otherwise it is an extra parameter matching an ellipsis).
5668 On Alpha the first 6 words of args are normally in registers
5669 and the rest are pushed. */
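/* Editor's annotation (not upstream): with the base register numbers chosen
   below, the first six integer or aggregate arguments land in $16..$21
   (basereg 16) and the first six floating-point arguments in $f16..$f21
   (basereg 32 + 16), matching the OSF/1 calling convention.  */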
5671 static rtx
5672 alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
5673 const_tree type, bool named ATTRIBUTE_UNUSED)
5675 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5676 int basereg;
5677 int num_args;
5679 /* Don't get confused and pass small structures in FP registers. */
5680 if (type && AGGREGATE_TYPE_P (type))
5681 basereg = 16;
5682 else
5684 #ifdef ENABLE_CHECKING
5685 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5686 values here. */
5687 gcc_assert (!COMPLEX_MODE_P (mode));
5688 #endif
5690 /* Set up defaults for FP operands passed in FP registers, and
5691 integral operands passed in integer registers. */
5692 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5693 basereg = 32 + 16;
5694 else
5695 basereg = 16;
5698 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5699 the two platforms, so we can't avoid conditional compilation. */
5700 #if TARGET_ABI_OPEN_VMS
5702 if (mode == VOIDmode)
5703 return alpha_arg_info_reg_val (*cum);
5705 num_args = cum->num_args;
5706 if (num_args >= 6
5707 || targetm.calls.must_pass_in_stack (mode, type))
5708 return NULL_RTX;
5710 #elif TARGET_ABI_OSF
5712 if (*cum >= 6)
5713 return NULL_RTX;
5714 num_args = *cum;
5716 /* VOID is passed as a special flag for "last argument". */
5717 if (type == void_type_node)
5718 basereg = 16;
5719 else if (targetm.calls.must_pass_in_stack (mode, type))
5720 return NULL_RTX;
5722 #else
5723 #error Unhandled ABI
5724 #endif
5726 return gen_rtx_REG (mode, num_args + basereg);
5729 /* Update the data in CUM to advance over an argument
5730 of mode MODE and data type TYPE.
5731 (TYPE is null for libcalls where that information may not be available.) */
5733 static void
5734 alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
5735 const_tree type, bool named ATTRIBUTE_UNUSED)
5737 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5738 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5739 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5741 #if TARGET_ABI_OSF
5742 *cum += increment;
5743 #else
5744 if (!onstack && cum->num_args < 6)
5745 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5746 cum->num_args += increment;
5747 #endif
5750 static int
5751 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5752 machine_mode mode ATTRIBUTE_UNUSED,
5753 tree type ATTRIBUTE_UNUSED,
5754 bool named ATTRIBUTE_UNUSED)
5756 int words = 0;
5757 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5759 #if TARGET_ABI_OPEN_VMS
5760 if (cum->num_args < 6
5761 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5762 words = 6 - cum->num_args;
5763 #elif TARGET_ABI_OSF
5764 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5765 words = 6 - *cum;
5766 #else
5767 #error Unhandled ABI
5768 #endif
5770 return words * UNITS_PER_WORD;
5774 /* Return true if TYPE must be returned in memory, instead of in registers. */
5776 static bool
5777 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5779 machine_mode mode = VOIDmode;
5780 int size;
5782 if (type)
5784 mode = TYPE_MODE (type);
5786 /* All aggregates are returned in memory, except on OpenVMS where
5787 records that fit 64 bits should be returned by immediate value
5788 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5789 if (TARGET_ABI_OPEN_VMS
5790 && TREE_CODE (type) != ARRAY_TYPE
5791 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5792 return false;
5794 if (AGGREGATE_TYPE_P (type))
5795 return true;
5798 size = GET_MODE_SIZE (mode);
5799 switch (GET_MODE_CLASS (mode))
5801 case MODE_VECTOR_FLOAT:
5802 /* Pass all float vectors in memory, like an aggregate. */
5803 return true;
5805 case MODE_COMPLEX_FLOAT:
5806 /* We judge complex floats on the size of their element,
5807 not the size of the whole type. */
5808 size = GET_MODE_UNIT_SIZE (mode);
5809 break;
5811 case MODE_INT:
5812 case MODE_FLOAT:
5813 case MODE_COMPLEX_INT:
5814 case MODE_VECTOR_INT:
5815 break;
5817 default:
5818 /* ??? We get called on all sorts of random stuff from
5819 aggregate_value_p. We must return something, but it's not
5820 clear what's safe to return. Pretend it's a struct I
5821 guess. */
5822 return true;
5825 /* Otherwise types must fit in one register. */
5826 return size > UNITS_PER_WORD;
5829 /* Return true if TYPE should be passed by invisible reference. */
5831 static bool
5832 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5833 machine_mode mode,
5834 const_tree type ATTRIBUTE_UNUSED,
5835 bool named ATTRIBUTE_UNUSED)
5837 return mode == TFmode || mode == TCmode;
5840 /* Define how to find the value returned by a function. VALTYPE is the
5841 data type of the value (as a tree). If the precise function being
5842 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5843 MODE is set instead of VALTYPE for libcalls.
5845 On Alpha the value is found in $0 for integer functions and
5846 $f0 for floating-point functions. */
5849 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5850 machine_mode mode)
5852 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5853 enum mode_class mclass;
5855 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5857 if (valtype)
5858 mode = TYPE_MODE (valtype);
5860 mclass = GET_MODE_CLASS (mode);
5861 switch (mclass)
5863 case MODE_INT:
5864 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5865 where we have them returning both SImode and DImode. */
5866 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5867 PROMOTE_MODE (mode, dummy, valtype);
5868 /* FALLTHRU */
5870 case MODE_COMPLEX_INT:
5871 case MODE_VECTOR_INT:
5872 regnum = 0;
5873 break;
5875 case MODE_FLOAT:
5876 regnum = 32;
5877 break;
5879 case MODE_COMPLEX_FLOAT:
5881 machine_mode cmode = GET_MODE_INNER (mode);
5883 return gen_rtx_PARALLEL
5884 (VOIDmode,
5885 gen_rtvec (2,
5886 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5887 const0_rtx),
5888 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5889 GEN_INT (GET_MODE_SIZE (cmode)))));
5892 case MODE_RANDOM:
5893 /* We should only reach here for BLKmode on VMS. */
5894 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5895 regnum = 0;
5896 break;
5898 default:
5899 gcc_unreachable ();
5902 return gen_rtx_REG (mode, regnum);
5905 /* TCmode complex values are passed by invisible reference. We
5906 should not split these values. */
5908 static bool
5909 alpha_split_complex_arg (const_tree type)
5911 return TYPE_MODE (type) != TCmode;
5914 static tree
5915 alpha_build_builtin_va_list (void)
5917 tree base, ofs, space, record, type_decl;
5919 if (TARGET_ABI_OPEN_VMS)
5920 return ptr_type_node;
5922 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5923 type_decl = build_decl (BUILTINS_LOCATION,
5924 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5925 TYPE_STUB_DECL (record) = type_decl;
5926 TYPE_NAME (record) = type_decl;
5928 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5930 /* Dummy field to prevent alignment warnings. */
5931 space = build_decl (BUILTINS_LOCATION,
5932 FIELD_DECL, NULL_TREE, integer_type_node);
5933 DECL_FIELD_CONTEXT (space) = record;
5934 DECL_ARTIFICIAL (space) = 1;
5935 DECL_IGNORED_P (space) = 1;
5937 ofs = build_decl (BUILTINS_LOCATION,
5938 FIELD_DECL, get_identifier ("__offset"),
5939 integer_type_node);
5940 DECL_FIELD_CONTEXT (ofs) = record;
5941 DECL_CHAIN (ofs) = space;
5942 /* ??? This is a hack, __offset is marked volatile to prevent
5943 DCE that confuses stdarg optimization and results in
5944 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5945 TREE_THIS_VOLATILE (ofs) = 1;
5947 base = build_decl (BUILTINS_LOCATION,
5948 FIELD_DECL, get_identifier ("__base"),
5949 ptr_type_node);
5950 DECL_FIELD_CONTEXT (base) = record;
5951 DECL_CHAIN (base) = ofs;
5953 TYPE_FIELDS (record) = base;
5954 layout_type (record);
5956 va_list_gpr_counter_field = ofs;
5957 return record;
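/* Editor's annotation (not upstream): at the C level the record built above
   behaves roughly like

	struct __va_list_tag {
	  char *__base;		(base address for argument accesses)
	  int __offset;		(byte offset of the next argument)
	  int __dummy;		(the unnamed padding field created above)
	};

   with the offset adjusted downward by 48 for FP arguments, as done in
   alpha_gimplify_va_arg_1 below.  */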
5960 #if TARGET_ABI_OSF
5961 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5962 and constant additions. */
5964 static gimple
5965 va_list_skip_additions (tree lhs)
5967 gimple stmt;
5969 for (;;)
5971 enum tree_code code;
5973 stmt = SSA_NAME_DEF_STMT (lhs);
5975 if (gimple_code (stmt) == GIMPLE_PHI)
5976 return stmt;
5978 if (!is_gimple_assign (stmt)
5979 || gimple_assign_lhs (stmt) != lhs)
5980 return NULL;
5982 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5983 return stmt;
5984 code = gimple_assign_rhs_code (stmt);
5985 if (!CONVERT_EXPR_CODE_P (code)
5986 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5987 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5988 || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
5989 return stmt;
5991 lhs = gimple_assign_rhs1 (stmt);
5995 /* Check if LHS = RHS statement is
5996 LHS = *(ap.__base + ap.__offset + cst)
5998 LHS = *(ap.__base
5999 + ((ap.__offset + cst <= 47)
6000 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
6001 If the former, indicate that GPR registers are needed,
6002 if the latter, indicate that FPR registers are needed.
6004 Also look for LHS = (*ptr).field, where ptr is one of the forms
6005 listed above.
6007 On alpha, cfun->va_list_gpr_size is used as size of the needed
6008 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
6009 registers are needed and bit 1 set if FPR registers are needed.
6010 Return true if va_list references should not be scanned for the
6011 current statement. */
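/* Editor's annotation (not upstream): the first pattern above is what
   va_arg (ap, int) lowers to, while the second form (with the conditional
   subtraction of 48) is what va_arg (ap, double) lowers to, since
   TARGET_SETUP_INCOMING_VARARGS saves the FP argument registers 48 bytes
   below the integer ones.  */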
6013 static bool
6014 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
6016 tree base, offset, rhs;
6017 int offset_arg = 1;
6018 gimple base_stmt;
6020 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
6021 != GIMPLE_SINGLE_RHS)
6022 return false;
6024 rhs = gimple_assign_rhs1 (stmt);
6025 while (handled_component_p (rhs))
6026 rhs = TREE_OPERAND (rhs, 0);
6027 if (TREE_CODE (rhs) != MEM_REF
6028 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6029 return false;
6031 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6032 if (stmt == NULL
6033 || !is_gimple_assign (stmt)
6034 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6035 return false;
6037 base = gimple_assign_rhs1 (stmt);
6038 if (TREE_CODE (base) == SSA_NAME)
6040 base_stmt = va_list_skip_additions (base);
6041 if (base_stmt
6042 && is_gimple_assign (base_stmt)
6043 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6044 base = gimple_assign_rhs1 (base_stmt);
6047 if (TREE_CODE (base) != COMPONENT_REF
6048 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6050 base = gimple_assign_rhs2 (stmt);
6051 if (TREE_CODE (base) == SSA_NAME)
6053 base_stmt = va_list_skip_additions (base);
6054 if (base_stmt
6055 && is_gimple_assign (base_stmt)
6056 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6057 base = gimple_assign_rhs1 (base_stmt);
6060 if (TREE_CODE (base) != COMPONENT_REF
6061 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6062 return false;
6064 offset_arg = 0;
6067 base = get_base_address (base);
6068 if (TREE_CODE (base) != VAR_DECL
6069 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
6070 return false;
6072 offset = gimple_op (stmt, 1 + offset_arg);
6073 if (TREE_CODE (offset) == SSA_NAME)
6075 gimple offset_stmt = va_list_skip_additions (offset);
6077 if (offset_stmt
6078 && gimple_code (offset_stmt) == GIMPLE_PHI)
6080 HOST_WIDE_INT sub;
6081 gimple arg1_stmt, arg2_stmt;
6082 tree arg1, arg2;
6083 enum tree_code code1, code2;
6085 if (gimple_phi_num_args (offset_stmt) != 2)
6086 goto escapes;
6088 arg1_stmt
6089 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6090 arg2_stmt
6091 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6092 if (arg1_stmt == NULL
6093 || !is_gimple_assign (arg1_stmt)
6094 || arg2_stmt == NULL
6095 || !is_gimple_assign (arg2_stmt))
6096 goto escapes;
6098 code1 = gimple_assign_rhs_code (arg1_stmt);
6099 code2 = gimple_assign_rhs_code (arg2_stmt);
6100 if (code1 == COMPONENT_REF
6101 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6102 /* Do nothing. */;
6103 else if (code2 == COMPONENT_REF
6104 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6106 gimple tem = arg1_stmt;
6107 code2 = code1;
6108 arg1_stmt = arg2_stmt;
6109 arg2_stmt = tem;
6111 else
6112 goto escapes;
6114 if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
6115 goto escapes;
6117 sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
6118 if (code2 == MINUS_EXPR)
6119 sub = -sub;
6120 if (sub < -48 || sub > -32)
6121 goto escapes;
6123 arg1 = gimple_assign_rhs1 (arg1_stmt);
6124 arg2 = gimple_assign_rhs1 (arg2_stmt);
6125 if (TREE_CODE (arg2) == SSA_NAME)
6127 arg2_stmt = va_list_skip_additions (arg2);
6128 if (arg2_stmt == NULL
6129 || !is_gimple_assign (arg2_stmt)
6130 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6131 goto escapes;
6132 arg2 = gimple_assign_rhs1 (arg2_stmt);
6134 if (arg1 != arg2)
6135 goto escapes;
6137 if (TREE_CODE (arg1) != COMPONENT_REF
6138 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6139 || get_base_address (arg1) != base)
6140 goto escapes;
6142 /* Need floating point regs. */
6143 cfun->va_list_fpr_size |= 2;
6144 return false;
6146 if (offset_stmt
6147 && is_gimple_assign (offset_stmt)
6148 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6149 offset = gimple_assign_rhs1 (offset_stmt);
6151 if (TREE_CODE (offset) != COMPONENT_REF
6152 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6153 || get_base_address (offset) != base)
6154 goto escapes;
6155 else
6156 /* Need general regs. */
6157 cfun->va_list_fpr_size |= 1;
6158 return false;
6160 escapes:
6161 si->va_list_escapes = true;
6162 return false;
6164 #endif
6166 /* Perform any actions needed for a function that is receiving a
6167 variable number of arguments. */
6169 static void
6170 alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
6171 tree type, int *pretend_size, int no_rtl)
6173 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6175 /* Skip the current argument. */
6176 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6177 true);
6179 #if TARGET_ABI_OPEN_VMS
6180 /* For VMS, we allocate space for all 6 arg registers plus a count.
6182 However, if NO registers need to be saved, don't allocate any space.
6183 This is not only because we won't need the space, but because AP
6184 includes the current_pretend_args_size and we don't want to mess up
6185 any ap-relative addresses already made. */
6186 if (cum.num_args < 6)
6188 if (!no_rtl)
6190 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6191 emit_insn (gen_arg_home ());
6193 *pretend_size = 7 * UNITS_PER_WORD;
6195 #else
6196 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6197 only push those that are remaining. However, if NO registers need to
6198 be saved, don't allocate any space. This is not only because we won't
6199 need the space, but because AP includes the current_pretend_args_size
6200 and we don't want to mess up any ap-relative addresses already made.
6202 If we are not to use the floating-point registers, save the integer
6203 registers where we would put the floating-point registers. This is
6204 not the most efficient way to implement varargs with just one register
6205 class, but it isn't worth doing anything more efficient in this rare
6206 case. */
6207 if (cum >= 6)
6208 return;
6210 if (!no_rtl)
6212 int count;
6213 alias_set_type set = get_varargs_alias_set ();
6214 rtx tmp;
6216 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6217 if (count > 6 - cum)
6218 count = 6 - cum;
6220 /* Detect whether integer registers or floating-point registers
6221 are needed by the detected va_arg statements. See above for
6222 how these values are computed. Note that the "escape" value
6223 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6224 these bits set. */
6225 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6227 if (cfun->va_list_fpr_size & 1)
6229 tmp = gen_rtx_MEM (BLKmode,
6230 plus_constant (Pmode, virtual_incoming_args_rtx,
6231 (cum + 6) * UNITS_PER_WORD));
6232 MEM_NOTRAP_P (tmp) = 1;
6233 set_mem_alias_set (tmp, set);
6234 move_block_from_reg (16 + cum, tmp, count);
6237 if (cfun->va_list_fpr_size & 2)
6239 tmp = gen_rtx_MEM (BLKmode,
6240 plus_constant (Pmode, virtual_incoming_args_rtx,
6241 cum * UNITS_PER_WORD));
6242 MEM_NOTRAP_P (tmp) = 1;
6243 set_mem_alias_set (tmp, set);
6244 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6247 *pretend_size = 12 * UNITS_PER_WORD;
6248 #endif
6251 static void
6252 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6254 HOST_WIDE_INT offset;
6255 tree t, offset_field, base_field;
6257 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6258 return;
6260 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6261 up by 48, storing fp arg registers in the first 48 bytes, and the
6262 integer arg registers in the next 48 bytes. This is only done,
6263 however, if any integer registers need to be stored.
6265 If no integer registers need be stored, then we must subtract 48
6266 in order to account for the integer arg registers which are counted
6267 in argsize above, but which are not actually stored on the stack.
6268 Must further be careful here about structures straddling the last
6269 integer argument register; that futzes with pretend_args_size,
6270 which changes the meaning of AP. */
6272 if (NUM_ARGS < 6)
6273 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6274 else
6275 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6277 if (TARGET_ABI_OPEN_VMS)
6279 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6280 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6281 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6282 TREE_SIDE_EFFECTS (t) = 1;
6283 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6285 else
6287 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6288 offset_field = DECL_CHAIN (base_field);
6290 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6291 valist, base_field, NULL_TREE);
6292 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6293 valist, offset_field, NULL_TREE);
6295 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6296 t = fold_build_pointer_plus_hwi (t, offset);
6297 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6298 TREE_SIDE_EFFECTS (t) = 1;
6299 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6301 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6302 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6303 TREE_SIDE_EFFECTS (t) = 1;
6304 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
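/* Editorial sketch (not part of GCC): the two va_start offset cases chosen
   above for the OSF/1 ABI, written as a standalone helper.  The name and the
   hard-coded 8-byte word size are assumptions for illustration only.  */

static long
example_osf_va_start_offset (int num_args, long pretend_args_size)
{
  /* Fewer than 6 named args: registers were spilled above the base, so the
     base must skip the six integer-register slots (6 * 8 = 48 bytes).  */
  if (num_args < 6)
    return 6 * 8;

  /* Otherwise nothing was spilled; back out the 48 bytes that were counted
     in argsize but never stored, adjusted for any pretend args.  */
  return -6 * 8 + pretend_args_size;
}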
6308 static tree
6309 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6310 gimple_seq *pre_p)
6312 tree type_size, ptr_type, addend, t, addr;
6313 gimple_seq internal_post;
6315 /* If the type could not be passed in registers, skip the block
6316 reserved for the registers. */
6317 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6319 t = build_int_cst (TREE_TYPE (offset), 6*8);
6320 gimplify_assign (offset,
6321 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6322 pre_p);
6325 addend = offset;
6326 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6328 if (TREE_CODE (type) == COMPLEX_TYPE)
6330 tree real_part, imag_part, real_temp;
6332 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6333 offset, pre_p);
6335 /* Copy the value into a new temporary, lest the formal temporary
6336 be reused out from under us. */
6337 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6339 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6340 offset, pre_p);
6342 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6344 else if (TREE_CODE (type) == REAL_TYPE)
6346 tree fpaddend, cond, fourtyeight;
6348 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6349 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6350 addend, fourtyeight);
6351 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6352 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6353 fpaddend, addend);
6356 /* Build the final address and force that value into a temporary. */
6357 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6358 internal_post = NULL;
6359 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6360 gimple_seq_add_seq (pre_p, internal_post);
6362 /* Update the offset field. */
6363 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6364 if (type_size == NULL || TREE_OVERFLOW (type_size))
6365 t = size_zero_node;
6366 else
6368 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6369 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6370 t = size_binop (MULT_EXPR, t, size_int (8));
6372 t = fold_convert (TREE_TYPE (offset), t);
6373 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6374 pre_p);
6376 return build_va_arg_indirect_ref (addr);
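/* Editorial sketch (not part of GCC): the slot-size rounding applied when
   the va_list offset is advanced above -- every argument consumes a multiple
   of 8 bytes.  Hypothetical helper name.  */

static unsigned long
example_va_arg_slot_size (unsigned long type_size)
{
  /* e.g. sizes 1..8 -> 8, 9..16 -> 16, and so on.  */
  return ((type_size + 7) / 8) * 8;
}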
6379 static tree
6380 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6381 gimple_seq *post_p)
6383 tree offset_field, base_field, offset, base, t, r;
6384 bool indirect;
6386 if (TARGET_ABI_OPEN_VMS)
6387 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6389 base_field = TYPE_FIELDS (va_list_type_node);
6390 offset_field = DECL_CHAIN (base_field);
6391 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6392 valist, base_field, NULL_TREE);
6393 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6394 valist, offset_field, NULL_TREE);
6396 /* Pull the fields of the structure out into temporaries. Since we never
6397 modify the base field, we can use a formal temporary. Sign-extend the
6398 offset field so that it's the proper width for pointer arithmetic. */
6399 base = get_formal_tmp_var (base_field, pre_p);
6401 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6402 offset = get_initialized_tmp_var (t, pre_p, NULL);
6404 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6405 if (indirect)
6406 type = build_pointer_type_for_mode (type, ptr_mode, true);
6408 /* Find the value. Note that this will be a stable indirection, or
6409 a composite of stable indirections in the case of complex. */
6410 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6412 /* Stuff the offset temporary back into its field. */
6413 gimplify_assign (unshare_expr (offset_field),
6414 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6416 if (indirect)
6417 r = build_va_arg_indirect_ref (r);
6419 return r;
6422 /* Builtins. */
6424 enum alpha_builtin
6426 ALPHA_BUILTIN_CMPBGE,
6427 ALPHA_BUILTIN_EXTBL,
6428 ALPHA_BUILTIN_EXTWL,
6429 ALPHA_BUILTIN_EXTLL,
6430 ALPHA_BUILTIN_EXTQL,
6431 ALPHA_BUILTIN_EXTWH,
6432 ALPHA_BUILTIN_EXTLH,
6433 ALPHA_BUILTIN_EXTQH,
6434 ALPHA_BUILTIN_INSBL,
6435 ALPHA_BUILTIN_INSWL,
6436 ALPHA_BUILTIN_INSLL,
6437 ALPHA_BUILTIN_INSQL,
6438 ALPHA_BUILTIN_INSWH,
6439 ALPHA_BUILTIN_INSLH,
6440 ALPHA_BUILTIN_INSQH,
6441 ALPHA_BUILTIN_MSKBL,
6442 ALPHA_BUILTIN_MSKWL,
6443 ALPHA_BUILTIN_MSKLL,
6444 ALPHA_BUILTIN_MSKQL,
6445 ALPHA_BUILTIN_MSKWH,
6446 ALPHA_BUILTIN_MSKLH,
6447 ALPHA_BUILTIN_MSKQH,
6448 ALPHA_BUILTIN_UMULH,
6449 ALPHA_BUILTIN_ZAP,
6450 ALPHA_BUILTIN_ZAPNOT,
6451 ALPHA_BUILTIN_AMASK,
6452 ALPHA_BUILTIN_IMPLVER,
6453 ALPHA_BUILTIN_RPCC,
6454 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6455 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6457 /* TARGET_MAX */
6458 ALPHA_BUILTIN_MINUB8,
6459 ALPHA_BUILTIN_MINSB8,
6460 ALPHA_BUILTIN_MINUW4,
6461 ALPHA_BUILTIN_MINSW4,
6462 ALPHA_BUILTIN_MAXUB8,
6463 ALPHA_BUILTIN_MAXSB8,
6464 ALPHA_BUILTIN_MAXUW4,
6465 ALPHA_BUILTIN_MAXSW4,
6466 ALPHA_BUILTIN_PERR,
6467 ALPHA_BUILTIN_PKLB,
6468 ALPHA_BUILTIN_PKWB,
6469 ALPHA_BUILTIN_UNPKBL,
6470 ALPHA_BUILTIN_UNPKBW,
6472 /* TARGET_CIX */
6473 ALPHA_BUILTIN_CTTZ,
6474 ALPHA_BUILTIN_CTLZ,
6475 ALPHA_BUILTIN_CTPOP,
6477 ALPHA_BUILTIN_max
6480 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6481 CODE_FOR_builtin_cmpbge,
6482 CODE_FOR_extbl,
6483 CODE_FOR_extwl,
6484 CODE_FOR_extll,
6485 CODE_FOR_extql,
6486 CODE_FOR_extwh,
6487 CODE_FOR_extlh,
6488 CODE_FOR_extqh,
6489 CODE_FOR_builtin_insbl,
6490 CODE_FOR_builtin_inswl,
6491 CODE_FOR_builtin_insll,
6492 CODE_FOR_insql,
6493 CODE_FOR_inswh,
6494 CODE_FOR_inslh,
6495 CODE_FOR_insqh,
6496 CODE_FOR_mskbl,
6497 CODE_FOR_mskwl,
6498 CODE_FOR_mskll,
6499 CODE_FOR_mskql,
6500 CODE_FOR_mskwh,
6501 CODE_FOR_msklh,
6502 CODE_FOR_mskqh,
6503 CODE_FOR_umuldi3_highpart,
6504 CODE_FOR_builtin_zap,
6505 CODE_FOR_builtin_zapnot,
6506 CODE_FOR_builtin_amask,
6507 CODE_FOR_builtin_implver,
6508 CODE_FOR_builtin_rpcc,
6509 CODE_FOR_builtin_establish_vms_condition_handler,
6510 CODE_FOR_builtin_revert_vms_condition_handler,
6512 /* TARGET_MAX */
6513 CODE_FOR_builtin_minub8,
6514 CODE_FOR_builtin_minsb8,
6515 CODE_FOR_builtin_minuw4,
6516 CODE_FOR_builtin_minsw4,
6517 CODE_FOR_builtin_maxub8,
6518 CODE_FOR_builtin_maxsb8,
6519 CODE_FOR_builtin_maxuw4,
6520 CODE_FOR_builtin_maxsw4,
6521 CODE_FOR_builtin_perr,
6522 CODE_FOR_builtin_pklb,
6523 CODE_FOR_builtin_pkwb,
6524 CODE_FOR_builtin_unpkbl,
6525 CODE_FOR_builtin_unpkbw,
6527 /* TARGET_CIX */
6528 CODE_FOR_ctzdi2,
6529 CODE_FOR_clzdi2,
6530 CODE_FOR_popcountdi2
6533 struct alpha_builtin_def
6535 const char *name;
6536 enum alpha_builtin code;
6537 unsigned int target_mask;
6538 bool is_const;
6541 static struct alpha_builtin_def const zero_arg_builtins[] = {
6542 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6543 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6546 static struct alpha_builtin_def const one_arg_builtins[] = {
6547 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6548 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6549 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6550 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6551 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6552 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6553 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6554 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6557 static struct alpha_builtin_def const two_arg_builtins[] = {
6558 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6559 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6560 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6561 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6562 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6563 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6564 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6565 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6566 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6567 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6568 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6569 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6570 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6571 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6572 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6573 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6574 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6575 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6576 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6577 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6578 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6579 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6580 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6581 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6582 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6583 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6584 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6585 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6586 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6587 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6588 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6589 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6590 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6591 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6594 static GTY(()) tree alpha_dimode_u;
6595 static GTY(()) tree alpha_v8qi_u;
6596 static GTY(()) tree alpha_v8qi_s;
6597 static GTY(()) tree alpha_v4hi_u;
6598 static GTY(()) tree alpha_v4hi_s;
6600 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6602 /* Return the alpha builtin for CODE. */
6604 static tree
6605 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6607 if (code >= ALPHA_BUILTIN_max)
6608 return error_mark_node;
6609 return alpha_builtins[code];
6612 /* Helper function of alpha_init_builtins. Add the built-in specified
6613 by NAME, FTYPE, CODE, and ECF. */
6615 static void
6616 alpha_builtin_function (const char *name, tree ftype,
6617 enum alpha_builtin code, unsigned ecf)
6619 tree decl = add_builtin_function (name, ftype, (int) code,
6620 BUILT_IN_MD, NULL, NULL_TREE);
6622 if (ecf & ECF_CONST)
6623 TREE_READONLY (decl) = 1;
6624 if (ecf & ECF_NOTHROW)
6625 TREE_NOTHROW (decl) = 1;
6627 alpha_builtins [(int) code] = decl;
6630 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6631 functions pointed to by P, with function type FTYPE. */
6633 static void
6634 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6635 tree ftype)
6637 size_t i;
6639 for (i = 0; i < count; ++i, ++p)
6640 if ((target_flags & p->target_mask) == p->target_mask)
6641 alpha_builtin_function (p->name, ftype, p->code,
6642 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6645 static void
6646 alpha_init_builtins (void)
6648 tree ftype;
6650 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6651 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6652 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6653 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6654 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6656 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6657 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6659 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6660 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6662 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6663 alpha_dimode_u, NULL_TREE);
6664 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6666 if (TARGET_ABI_OPEN_VMS)
6668 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6669 NULL_TREE);
6670 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6671 ftype,
6672 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6673 0);
6675 ftype = build_function_type_list (ptr_type_node, void_type_node,
6676 NULL_TREE);
6677 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6678 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6680 vms_patch_builtins ();
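/* Editorial sketch (not part of GCC): what two of the builtins registered
   above look like from user code compiled for Alpha.  Fenced off with #if 0
   because the builtins exist only when compiling *for* Alpha, not in the
   host build of this file.  */
#if 0
unsigned long
example_builtin_usage (unsigned long x, unsigned long y)
{
  /* One result bit per byte: bit i is set when byte i of X >= byte i of Y. */
  unsigned long ge_bytes = __builtin_alpha_cmpbge (x, y);
  /* ZAP clears the bytes of X selected by the low 8 bits of the mask,
     here bytes 0..3.  */
  unsigned long zapped = __builtin_alpha_zap (x, 0x0f);
  return ge_bytes ^ zapped;
}
#endif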
6684 /* Expand an expression EXP that calls a built-in function,
6685 with result going to TARGET if that's convenient
6686 (and in mode MODE if that's convenient).
6687 SUBTARGET may be used as the target for computing one of EXP's operands.
6688 IGNORE is nonzero if the value is to be ignored. */
6690 static rtx
6691 alpha_expand_builtin (tree exp, rtx target,
6692 rtx subtarget ATTRIBUTE_UNUSED,
6693 machine_mode mode ATTRIBUTE_UNUSED,
6694 int ignore ATTRIBUTE_UNUSED)
6696 #define MAX_ARGS 2
6698 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6699 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6700 tree arg;
6701 call_expr_arg_iterator iter;
6702 enum insn_code icode;
6703 rtx op[MAX_ARGS], pat;
6704 int arity;
6705 bool nonvoid;
6707 if (fcode >= ALPHA_BUILTIN_max)
6708 internal_error ("bad builtin fcode");
6709 icode = code_for_builtin[fcode];
6710 if (icode == 0)
6711 internal_error ("bad builtin fcode");
6713 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6715 arity = 0;
6716 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6718 const struct insn_operand_data *insn_op;
6720 if (arg == error_mark_node)
6721 return NULL_RTX;
6722 if (arity > MAX_ARGS)
6723 return NULL_RTX;
6725 insn_op = &insn_data[icode].operand[arity + nonvoid];
6727 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6729 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6730 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6731 arity++;
6734 if (nonvoid)
6736 machine_mode tmode = insn_data[icode].operand[0].mode;
6737 if (!target
6738 || GET_MODE (target) != tmode
6739 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6740 target = gen_reg_rtx (tmode);
6743 switch (arity)
6745 case 0:
6746 pat = GEN_FCN (icode) (target);
6747 break;
6748 case 1:
6749 if (nonvoid)
6750 pat = GEN_FCN (icode) (target, op[0]);
6751 else
6752 pat = GEN_FCN (icode) (op[0]);
6753 break;
6754 case 2:
6755 pat = GEN_FCN (icode) (target, op[0], op[1]);
6756 break;
6757 default:
6758 gcc_unreachable ();
6760 if (!pat)
6761 return NULL_RTX;
6762 emit_insn (pat);
6764 if (nonvoid)
6765 return target;
6766 else
6767 return const0_rtx;
6771 /* Several bits below assume HWI >= 64 bits. This should be enforced
6772 by config.gcc. */
6773 #if HOST_BITS_PER_WIDE_INT < 64
6774 # error "HOST_WIDE_INT too small"
6775 #endif
6777 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6778 with an 8-bit output vector. OPINT contains the integer operands; bit N
6779 of OP_CONST is set if OPINT[N] is valid. */
6781 static tree
6782 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6784 if (op_const == 3)
6786 int i, val;
6787 for (i = 0, val = 0; i < 8; ++i)
6789 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6790 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6791 if (c0 >= c1)
6792 val |= 1 << i;
6794 return build_int_cst (alpha_dimode_u, val);
6796 else if (op_const == 2 && opint[1] == 0)
6797 return build_int_cst (alpha_dimode_u, 0xff);
6798 return NULL;
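/* Editorial sketch (not part of GCC): the CMPBGE semantics that the folder
   above reproduces at compile time, written as plain host C.  Hypothetical
   name, unused in the real build.  */

static unsigned long long
example_cmpbge (unsigned long long a, unsigned long long b)
{
  unsigned long long result = 0;
  int i;

  for (i = 0; i < 8; ++i)
    {
      unsigned int byte_a = (a >> (i * 8)) & 0xff;
      unsigned int byte_b = (b >> (i * 8)) & 0xff;
      if (byte_a >= byte_b)
	result |= 1ULL << i;
    }

  /* Note example_cmpbge (x, 0) == 0xff for any x, which is the special
     case the folder handles when only the second operand is constant.  */
  return result;
}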
6801 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6802 specialized form of an AND operation. Other byte manipulation instructions
6803 are defined in terms of this instruction, so this is also used as a
6804 subroutine for other builtins.
6806 OP contains the tree operands; OPINT contains the extracted integer values.
6807 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6808 OPINT may be considered. */
6810 static tree
6811 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6812 long op_const)
6814 if (op_const & 2)
6816 unsigned HOST_WIDE_INT mask = 0;
6817 int i;
6819 for (i = 0; i < 8; ++i)
6820 if ((opint[1] >> i) & 1)
6821 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6823 if (op_const & 1)
6824 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6826 if (op)
6827 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6828 build_int_cst (alpha_dimode_u, mask));
6830 else if ((op_const & 1) && opint[0] == 0)
6831 return build_int_cst (alpha_dimode_u, 0);
6832 return NULL;
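/* Editorial sketch (not part of GCC): expansion of an 8-bit ZAPNOT byte
   selector into the 64-bit AND mask used by the folder above.  Hypothetical
   name, host C only.  */

static unsigned long long
example_zapnot_mask (unsigned int byte_select)
{
  unsigned long long mask = 0;
  int i;

  for (i = 0; i < 8; ++i)
    if ((byte_select >> i) & 1)
      mask |= 0xffULL << (i * 8);

  return mask;   /* e.g. 0x0f -> 0x00000000ffffffff.  */
}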
6835 /* Fold the builtins for the EXT family of instructions. */
6837 static tree
6838 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6839 long op_const, unsigned HOST_WIDE_INT bytemask,
6840 bool is_high)
6842 long zap_const = 2;
6843 tree *zap_op = NULL;
6845 if (op_const & 2)
6847 unsigned HOST_WIDE_INT loc;
6849 loc = opint[1] & 7;
6850 loc *= BITS_PER_UNIT;
6852 if (loc != 0)
6854 if (op_const & 1)
6856 unsigned HOST_WIDE_INT temp = opint[0];
6857 if (is_high)
6858 temp <<= loc;
6859 else
6860 temp >>= loc;
6861 opint[0] = temp;
6862 zap_const = 3;
6865 else
6866 zap_op = op;
6869 opint[1] = bytemask;
6870 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
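/* Editorial sketch (not part of GCC): one representative member of the EXT
   family folded above.  EXTWL shifts the value right by the byte offset and
   keeps the low two bytes; the is_high variants shift left instead.
   Hypothetical name, host C only.  */

static unsigned long long
example_extwl (unsigned long long value, unsigned int addr)
{
  unsigned int shift = (addr & 7) * 8;   /* bit position of the byte offset */
  return (value >> shift) & 0xffff;      /* bytemask 0x03: low two bytes */
}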
6873 /* Fold the builtins for the INS family of instructions. */
6875 static tree
6876 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6877 long op_const, unsigned HOST_WIDE_INT bytemask,
6878 bool is_high)
6880 if ((op_const & 1) && opint[0] == 0)
6881 return build_int_cst (alpha_dimode_u, 0);
6883 if (op_const & 2)
6885 unsigned HOST_WIDE_INT temp, loc, byteloc;
6886 tree *zap_op = NULL;
6888 loc = opint[1] & 7;
6889 bytemask <<= loc;
6891 temp = opint[0];
6892 if (is_high)
6894 byteloc = (64 - (loc * 8)) & 0x3f;
6895 if (byteloc == 0)
6896 zap_op = op;
6897 else
6898 temp >>= byteloc;
6899 bytemask >>= 8;
6901 else
6903 byteloc = loc * 8;
6904 if (byteloc == 0)
6905 zap_op = op;
6906 else
6907 temp <<= byteloc;
6910 opint[0] = temp;
6911 opint[1] = bytemask;
6912 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6915 return NULL;
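/* Editorial sketch (not part of GCC): one representative member of the INS
   family folded above.  INSWL positions the low word of VALUE at byte
   ADDR & 7; bytes shifted past bit 63 are dropped (they belong to INSWH).
   Hypothetical name, host C only.  */

static unsigned long long
example_inswl (unsigned long long value, unsigned int addr)
{
  unsigned int shift = (addr & 7) * 8;
  return (value & 0xffff) << shift;
}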
6918 static tree
6919 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6920 long op_const, unsigned HOST_WIDE_INT bytemask,
6921 bool is_high)
6923 if (op_const & 2)
6925 unsigned HOST_WIDE_INT loc;
6927 loc = opint[1] & 7;
6928 bytemask <<= loc;
6930 if (is_high)
6931 bytemask >>= 8;
6933 opint[1] = bytemask ^ 0xff;
6936 return alpha_fold_builtin_zapnot (op, opint, op_const);
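/* Editorial sketch (not part of GCC): one representative member of the MSK
   family folded above.  MSKWL clears the two bytes of VALUE starting at byte
   ADDR & 7 (only byte 7 when the field runs off the end).  Hypothetical
   name, host C only.  */

static unsigned long long
example_mskwl (unsigned long long value, unsigned int addr)
{
  unsigned long long byte_field = 0xffffULL << ((addr & 7) * 8);
  return value & ~byte_field;
}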
6939 static tree
6940 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6942 tree op0 = fold_convert (vtype, op[0]);
6943 tree op1 = fold_convert (vtype, op[1]);
6944 tree val = fold_build2 (code, vtype, op0, op1);
6945 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
6948 static tree
6949 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6951 unsigned HOST_WIDE_INT temp = 0;
6952 int i;
6954 if (op_const != 3)
6955 return NULL;
6957 for (i = 0; i < 8; ++i)
6959 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6960 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6961 if (a >= b)
6962 temp += a - b;
6963 else
6964 temp += b - a;
6967 return build_int_cst (alpha_dimode_u, temp);
6970 static tree
6971 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6973 unsigned HOST_WIDE_INT temp;
6975 if (op_const == 0)
6976 return NULL;
6978 temp = opint[0] & 0xff;
6979 temp |= (opint[0] >> 24) & 0xff00;
6981 return build_int_cst (alpha_dimode_u, temp);
6984 static tree
6985 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6987 unsigned HOST_WIDE_INT temp;
6989 if (op_const == 0)
6990 return NULL;
6992 temp = opint[0] & 0xff;
6993 temp |= (opint[0] >> 8) & 0xff00;
6994 temp |= (opint[0] >> 16) & 0xff0000;
6995 temp |= (opint[0] >> 24) & 0xff000000;
6997 return build_int_cst (alpha_dimode_u, temp);
7000 static tree
7001 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7003 unsigned HOST_WIDE_INT temp;
7005 if (op_const == 0)
7006 return NULL;
7008 temp = opint[0] & 0xff;
7009 temp |= (opint[0] & 0xff00) << 24;
7011 return build_int_cst (alpha_dimode_u, temp);
7014 static tree
7015 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7017 unsigned HOST_WIDE_INT temp;
7019 if (op_const == 0)
7020 return NULL;
7022 temp = opint[0] & 0xff;
7023 temp |= (opint[0] & 0x0000ff00) << 8;
7024 temp |= (opint[0] & 0x00ff0000) << 16;
7025 temp |= (opint[0] & 0xff000000) << 24;
7027 return build_int_cst (alpha_dimode_u, temp);
7030 static tree
7031 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7033 unsigned HOST_WIDE_INT temp;
7035 if (op_const == 0)
7036 return NULL;
7038 if (opint[0] == 0)
7039 temp = 64;
7040 else
7041 temp = exact_log2 (opint[0] & -opint[0]);
7043 return build_int_cst (alpha_dimode_u, temp);
7046 static tree
7047 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7049 unsigned HOST_WIDE_INT temp;
7051 if (op_const == 0)
7052 return NULL;
7054 if (opint[0] == 0)
7055 temp = 64;
7056 else
7057 temp = 64 - floor_log2 (opint[0]) - 1;
7059 return build_int_cst (alpha_dimode_u, temp);
7062 static tree
7063 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7065 unsigned HOST_WIDE_INT temp, op;
7067 if (op_const == 0)
7068 return NULL;
7070 op = opint[0];
7071 temp = 0;
7072 while (op)
7073 temp++, op &= op - 1;
7075 return build_int_cst (alpha_dimode_u, temp);
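/* Editorial note (not part of GCC): the CTPOP loop above relies on the
   identity that x & (x - 1) is x with its lowest set bit cleared, so it
   iterates exactly once per set bit.  E.g. 0b101100 -> 0b101000 ->
   0b100000 -> 0, giving a population count of 3.  */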
7078 /* Fold one of our builtin functions. */
7080 static tree
7081 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7082 bool ignore ATTRIBUTE_UNUSED)
7084 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7085 long op_const = 0;
7086 int i;
7088 if (n_args > MAX_ARGS)
7089 return NULL;
7091 for (i = 0; i < n_args; i++)
7093 tree arg = op[i];
7094 if (arg == error_mark_node)
7095 return NULL;
7097 opint[i] = 0;
7098 if (TREE_CODE (arg) == INTEGER_CST)
7100 op_const |= 1L << i;
7101 opint[i] = int_cst_value (arg);
7105 switch (DECL_FUNCTION_CODE (fndecl))
7107 case ALPHA_BUILTIN_CMPBGE:
7108 return alpha_fold_builtin_cmpbge (opint, op_const);
7110 case ALPHA_BUILTIN_EXTBL:
7111 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7112 case ALPHA_BUILTIN_EXTWL:
7113 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7114 case ALPHA_BUILTIN_EXTLL:
7115 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7116 case ALPHA_BUILTIN_EXTQL:
7117 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7118 case ALPHA_BUILTIN_EXTWH:
7119 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7120 case ALPHA_BUILTIN_EXTLH:
7121 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7122 case ALPHA_BUILTIN_EXTQH:
7123 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7125 case ALPHA_BUILTIN_INSBL:
7126 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7127 case ALPHA_BUILTIN_INSWL:
7128 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7129 case ALPHA_BUILTIN_INSLL:
7130 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7131 case ALPHA_BUILTIN_INSQL:
7132 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7133 case ALPHA_BUILTIN_INSWH:
7134 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7135 case ALPHA_BUILTIN_INSLH:
7136 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7137 case ALPHA_BUILTIN_INSQH:
7138 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7140 case ALPHA_BUILTIN_MSKBL:
7141 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7142 case ALPHA_BUILTIN_MSKWL:
7143 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7144 case ALPHA_BUILTIN_MSKLL:
7145 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7146 case ALPHA_BUILTIN_MSKQL:
7147 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7148 case ALPHA_BUILTIN_MSKWH:
7149 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7150 case ALPHA_BUILTIN_MSKLH:
7151 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7152 case ALPHA_BUILTIN_MSKQH:
7153 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7155 case ALPHA_BUILTIN_ZAP:
7156 opint[1] ^= 0xff;
7157 /* FALLTHRU */
7158 case ALPHA_BUILTIN_ZAPNOT:
7159 return alpha_fold_builtin_zapnot (op, opint, op_const);
7161 case ALPHA_BUILTIN_MINUB8:
7162 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7163 case ALPHA_BUILTIN_MINSB8:
7164 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7165 case ALPHA_BUILTIN_MINUW4:
7166 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7167 case ALPHA_BUILTIN_MINSW4:
7168 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7169 case ALPHA_BUILTIN_MAXUB8:
7170 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7171 case ALPHA_BUILTIN_MAXSB8:
7172 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7173 case ALPHA_BUILTIN_MAXUW4:
7174 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7175 case ALPHA_BUILTIN_MAXSW4:
7176 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7178 case ALPHA_BUILTIN_PERR:
7179 return alpha_fold_builtin_perr (opint, op_const);
7180 case ALPHA_BUILTIN_PKLB:
7181 return alpha_fold_builtin_pklb (opint, op_const);
7182 case ALPHA_BUILTIN_PKWB:
7183 return alpha_fold_builtin_pkwb (opint, op_const);
7184 case ALPHA_BUILTIN_UNPKBL:
7185 return alpha_fold_builtin_unpkbl (opint, op_const);
7186 case ALPHA_BUILTIN_UNPKBW:
7187 return alpha_fold_builtin_unpkbw (opint, op_const);
7189 case ALPHA_BUILTIN_CTTZ:
7190 return alpha_fold_builtin_cttz (opint, op_const);
7191 case ALPHA_BUILTIN_CTLZ:
7192 return alpha_fold_builtin_ctlz (opint, op_const);
7193 case ALPHA_BUILTIN_CTPOP:
7194 return alpha_fold_builtin_ctpop (opint, op_const);
7196 case ALPHA_BUILTIN_AMASK:
7197 case ALPHA_BUILTIN_IMPLVER:
7198 case ALPHA_BUILTIN_RPCC:
7199 /* None of these are foldable at compile-time. */
7200 default:
7201 return NULL;
7205 bool
7206 alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7208 bool changed = false;
7209 gimple stmt = gsi_stmt (*gsi);
7210 tree call = gimple_call_fn (stmt);
7211 gimple new_stmt = NULL;
7213 if (call)
7215 tree fndecl = gimple_call_fndecl (stmt);
7217 if (fndecl)
7219 tree arg0, arg1;
7221 switch (DECL_FUNCTION_CODE (fndecl))
7223 case ALPHA_BUILTIN_UMULH:
7224 arg0 = gimple_call_arg (stmt, 0);
7225 arg1 = gimple_call_arg (stmt, 1);
7227 new_stmt
7228 = gimple_build_assign_with_ops (MULT_HIGHPART_EXPR,
7229 gimple_call_lhs (stmt),
7230 arg0,
7231 arg1);
7232 break;
7233 default:
7234 break;
7239 if (new_stmt)
7241 gsi_replace (gsi, new_stmt, true);
7242 changed = true;
7245 return changed;
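/* Editorial sketch (not part of GCC): what the UMULH folding above stands
   for -- the high 64 bits of an unsigned 64 x 64 -> 128 bit multiply.  The
   __int128 type is used purely for illustration and assumes a 64-bit host
   compiler that provides it.  */

static unsigned long long
example_umulh (unsigned long long a, unsigned long long b)
{
  return (unsigned long long) (((unsigned __int128) a * b) >> 64);
}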
7248 /* This page contains routines that are used to determine what the function
7249 prologue and epilogue code will do and write them out. */
7251 /* Compute the size of the save area in the stack. */
7253 /* These variables are used for communication between the following functions.
7254 They indicate various things about the current function being compiled
7255 that are used to tell what kind of prologue, epilogue and procedure
7256 descriptor to generate. */
7258 /* Nonzero if we need a stack procedure. */
7259 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7260 static enum alpha_procedure_types alpha_procedure_type;
7262 /* Register number (either FP or SP) that is used to unwind the frame. */
7263 static int vms_unwind_regno;
7265 /* Register number used to save FP. We need not have one for RA since
7266 we don't modify it for register procedures. This is only defined
7267 for register frame procedures. */
7268 static int vms_save_fp_regno;
7270 /* Register number used to reference objects off our PV. */
7271 static int vms_base_regno;
7273 /* Compute register masks for saved registers. */
7275 static void
7276 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7278 unsigned long imask = 0;
7279 unsigned long fmask = 0;
7280 unsigned int i;
7282 /* When outputting a thunk, we don't have valid register life info,
7283 but assemble_start_function wants to output .frame and .mask
7284 directives. */
7285 if (cfun->is_thunk)
7287 *imaskP = 0;
7288 *fmaskP = 0;
7289 return;
7292 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7293 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7295 /* One for every register we have to save. */
7296 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7297 if (! fixed_regs[i] && ! call_used_regs[i]
7298 && df_regs_ever_live_p (i) && i != REG_RA)
7300 if (i < 32)
7301 imask |= (1UL << i);
7302 else
7303 fmask |= (1UL << (i - 32));
7306 /* We need to restore these for the handler. */
7307 if (crtl->calls_eh_return)
7309 for (i = 0; ; ++i)
7311 unsigned regno = EH_RETURN_DATA_REGNO (i);
7312 if (regno == INVALID_REGNUM)
7313 break;
7314 imask |= 1UL << regno;
7318 /* If any register spilled, then spill the return address also. */
7319 /* ??? This is required by the Digital stack unwind specification
7320 and isn't needed if we're doing Dwarf2 unwinding. */
7321 if (imask || fmask || alpha_ra_ever_killed ())
7322 imask |= (1UL << REG_RA);
7324 *imaskP = imask;
7325 *fmaskP = fmask;
7328 int
7329 alpha_sa_size (void)
7331 unsigned long mask[2];
7332 int sa_size = 0;
7333 int i, j;
7335 alpha_sa_mask (&mask[0], &mask[1]);
7337 for (j = 0; j < 2; ++j)
7338 for (i = 0; i < 32; ++i)
7339 if ((mask[j] >> i) & 1)
7340 sa_size++;
7342 if (TARGET_ABI_OPEN_VMS)
7344 /* Start with a stack procedure if we make any calls (REG_RA used), or
7345 need a frame pointer, with a register procedure if we otherwise need
7346 at least a slot, and with a null procedure in other cases. */
7347 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7348 alpha_procedure_type = PT_STACK;
7349 else if (get_frame_size() != 0)
7350 alpha_procedure_type = PT_REGISTER;
7351 else
7352 alpha_procedure_type = PT_NULL;
7354 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7355 made the final decision on stack procedure vs register procedure. */
7356 if (alpha_procedure_type == PT_STACK)
7357 sa_size -= 2;
7359 /* Decide whether to refer to objects off our PV via FP or PV.
7360 If we need FP for something else or if we receive a nonlocal
7361 goto (which expects PV to contain the value), we must use PV.
7362 Otherwise, start by assuming we can use FP. */
7364 vms_base_regno
7365 = (frame_pointer_needed
7366 || cfun->has_nonlocal_label
7367 || alpha_procedure_type == PT_STACK
7368 || crtl->outgoing_args_size)
7369 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7371 /* If we want to copy PV into FP, we need to find some register
7372 in which to save FP. */
7374 vms_save_fp_regno = -1;
7375 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7376 for (i = 0; i < 32; i++)
7377 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7378 vms_save_fp_regno = i;
7380 /* A VMS condition handler requires a stack procedure in our
7381 implementation (this is not required by the calling standard). */
7382 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7383 || cfun->machine->uses_condition_handler)
7384 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7385 else if (alpha_procedure_type == PT_NULL)
7386 vms_base_regno = REG_PV;
7388 /* Stack unwinding should be done via FP unless we use it for PV. */
7389 vms_unwind_regno = (vms_base_regno == REG_PV
7390 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7392 /* If this is a stack procedure, allow space for saving FP, RA and
7393 a condition handler slot if needed. */
7394 if (alpha_procedure_type == PT_STACK)
7395 sa_size += 2 + cfun->machine->uses_condition_handler;
7397 else
7399 /* Our size must be even (multiple of 16 bytes). */
7400 if (sa_size & 1)
7401 sa_size++;
7404 return sa_size * 8;
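/* Editorial sketch (not part of GCC): the initial VMS procedure-type choice
   made above, reduced to its inputs.  Hypothetical helper; the real code may
   later promote PT_REGISTER to PT_STACK when no save register is available
   or a condition handler is used.  */

static enum alpha_procedure_types
example_vms_procedure_type (int ra_saved, int needs_frame_pointer,
			    long frame_size)
{
  if (ra_saved || needs_frame_pointer)
    return PT_STACK;      /* makes calls or needs a frame pointer */
  if (frame_size != 0)
    return PT_REGISTER;   /* needs at least a local slot */
  return PT_NULL;
}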
7407 /* Define the offset between two registers, one to be eliminated,
7408 and the other its replacement, at the start of a routine. */
7410 HOST_WIDE_INT
7411 alpha_initial_elimination_offset (unsigned int from,
7412 unsigned int to ATTRIBUTE_UNUSED)
7414 HOST_WIDE_INT ret;
7416 ret = alpha_sa_size ();
7417 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7419 switch (from)
7421 case FRAME_POINTER_REGNUM:
7422 break;
7424 case ARG_POINTER_REGNUM:
7425 ret += (ALPHA_ROUND (get_frame_size ()
7426 + crtl->args.pretend_args_size)
7427 - crtl->args.pretend_args_size);
7428 break;
7430 default:
7431 gcc_unreachable ();
7434 return ret;
7437 #if TARGET_ABI_OPEN_VMS
7439 /* Worker function for TARGET_CAN_ELIMINATE. */
7441 static bool
7442 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7444 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7445 alpha_sa_size ();
7447 switch (alpha_procedure_type)
7449 case PT_NULL:
7450 /* NULL procedures have no frame of their own and we only
7451 know how to resolve from the current stack pointer. */
7452 return to == STACK_POINTER_REGNUM;
7454 case PT_REGISTER:
7455 case PT_STACK:
7456 /* We always eliminate except to the stack pointer if there is no
7457 usable frame pointer at hand. */
7458 return (to != STACK_POINTER_REGNUM
7459 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7462 gcc_unreachable ();
7465 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7466 designates the same location as FROM. */
7468 HOST_WIDE_INT
7469 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7471 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7472 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7473 on the proper computations and will need the register save area size
7474 in most cases. */
7476 HOST_WIDE_INT sa_size = alpha_sa_size ();
7478 /* PT_NULL procedures have no frame of their own and we only allow
7479 elimination to the stack pointer. This is the argument pointer and we
7480 resolve the soft frame pointer to that as well. */
7482 if (alpha_procedure_type == PT_NULL)
7483 return 0;
7485 /* For a PT_STACK procedure the frame layout looks as follows
7487 -----> decreasing addresses
7489 < size rounded up to 16 | likewise >
7490 --------------#------------------------------+++--------------+++-------#
7491 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7492 --------------#---------------------------------------------------------#
7493 ^ ^ ^ ^
7494 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7497 PT_REGISTER procedures are similar in that they may have a frame of their
7498 own. They have no regs-sa/pv/outgoing-args area.
7500 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7501 to STACK_PTR if need be. */
7504 HOST_WIDE_INT offset;
7505 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7507 switch (from)
7509 case FRAME_POINTER_REGNUM:
7510 offset = ALPHA_ROUND (sa_size + pv_save_size);
7511 break;
7512 case ARG_POINTER_REGNUM:
7513 offset = (ALPHA_ROUND (sa_size + pv_save_size
7514 + get_frame_size ()
7515 + crtl->args.pretend_args_size)
7516 - crtl->args.pretend_args_size);
7517 break;
7518 default:
7519 gcc_unreachable ();
7522 if (to == STACK_POINTER_REGNUM)
7523 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7525 return offset;
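/* Editorial worked example (not part of GCC), using assumed inputs for the
   PT_STACK offsets computed above: sa_size = 16, PV save = 8, naked frame =
   40, pretend args = 0, outgoing args = 32, ALPHA_ROUND rounding to 16:

     FRAME_PTR -> HARD_FRAME_PTR : ALPHA_ROUND (16 + 8)          = 32
     ARG_PTR   -> HARD_FRAME_PTR : ALPHA_ROUND (16 + 8 + 40 + 0) = 64
     either    -> STACK_PTR      : add ALPHA_ROUND (32)          = +32.  */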
7529 #define COMMON_OBJECT "common_object"
7531 static tree
7532 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7533 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7534 bool *no_add_attrs ATTRIBUTE_UNUSED)
7536 tree decl = *node;
7537 gcc_assert (DECL_P (decl));
7539 DECL_COMMON (decl) = 1;
7540 return NULL_TREE;
7543 static const struct attribute_spec vms_attribute_table[] =
7545 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7546 affects_type_identity } */
7547 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7548 { NULL, 0, 0, false, false, false, NULL, false }
7551 void
7552 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7553 unsigned HOST_WIDE_INT size,
7554 unsigned int align)
7556 tree attr = DECL_ATTRIBUTES (decl);
7557 fprintf (file, "%s", COMMON_ASM_OP);
7558 assemble_name (file, name);
7559 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7560 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7561 fprintf (file, ",%u", align / BITS_PER_UNIT);
7562 if (attr)
7564 attr = lookup_attribute (COMMON_OBJECT, attr);
7565 if (attr)
7566 fprintf (file, ",%s",
7567 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7569 fputc ('\n', file);
7572 #undef COMMON_OBJECT
7574 #endif
7576 bool
7577 alpha_find_lo_sum_using_gp (rtx insn)
7579 subrtx_iterator::array_type array;
7580 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7582 const_rtx x = *iter;
7583 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7584 return true;
7586 return false;
7589 static int
7590 alpha_does_function_need_gp (void)
7592 rtx_insn *insn;
7594 /* The GP being variable is an OSF abi thing. */
7595 if (! TARGET_ABI_OSF)
7596 return 0;
7598 /* We need the gp to load the address of __mcount. */
7599 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7600 return 1;
7602 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7603 if (cfun->is_thunk)
7604 return 1;
7606 /* The nonlocal receiver pattern assumes that the gp is valid for
7607 the nested function. Reasonable because it's almost always set
7608 correctly already. For the cases where that's wrong, make sure
7609 the nested function loads its gp on entry. */
7610 if (crtl->has_nonlocal_goto)
7611 return 1;
7613 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7614 Even if we are a static function, we still need to do this in case
7615 our address is taken and passed to something like qsort. */
7617 push_topmost_sequence ();
7618 insn = get_insns ();
7619 pop_topmost_sequence ();
7621 for (; insn; insn = NEXT_INSN (insn))
7622 if (NONDEBUG_INSN_P (insn)
7623 && GET_CODE (PATTERN (insn)) != USE
7624 && GET_CODE (PATTERN (insn)) != CLOBBER
7625 && get_attr_usegp (insn))
7626 return 1;
7628 return 0;
7632 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7633 sequences. */
7635 static rtx_insn *
7636 set_frame_related_p (void)
7638 rtx_insn *seq = get_insns ();
7639 rtx_insn *insn;
7641 end_sequence ();
7643 if (!seq)
7644 return NULL;
7646 if (INSN_P (seq))
7648 insn = seq;
7649 while (insn != NULL_RTX)
7651 RTX_FRAME_RELATED_P (insn) = 1;
7652 insn = NEXT_INSN (insn);
7654 seq = emit_insn (seq);
7656 else
7658 seq = emit_insn (seq);
7659 RTX_FRAME_RELATED_P (seq) = 1;
7661 return seq;
7664 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7666 /* Generates a store with the proper unwind info attached. VALUE is
7667 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7668 contains SP+FRAME_BIAS, and that is the unwind info that should be
7669 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7670 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7672 static void
7673 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7674 HOST_WIDE_INT base_ofs, rtx frame_reg)
7676 rtx addr, mem;
7677 rtx_insn *insn;
7679 addr = plus_constant (Pmode, base_reg, base_ofs);
7680 mem = gen_frame_mem (DImode, addr);
7682 insn = emit_move_insn (mem, value);
7683 RTX_FRAME_RELATED_P (insn) = 1;
7685 if (frame_bias || value != frame_reg)
7687 if (frame_bias)
7689 addr = plus_constant (Pmode, stack_pointer_rtx,
7690 frame_bias + base_ofs);
7691 mem = gen_rtx_MEM (DImode, addr);
7694 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7695 gen_rtx_SET (VOIDmode, mem, frame_reg));
7699 static void
7700 emit_frame_store (unsigned int regno, rtx base_reg,
7701 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7703 rtx reg = gen_rtx_REG (DImode, regno);
7704 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7707 /* Compute the frame size. SIZE is the size of the "naked" frame
7708 and SA_SIZE is the size of the register save area. */
7710 static HOST_WIDE_INT
7711 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7713 if (TARGET_ABI_OPEN_VMS)
7714 return ALPHA_ROUND (sa_size
7715 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7716 + size
7717 + crtl->args.pretend_args_size);
7718 else
7719 return ALPHA_ROUND (crtl->outgoing_args_size)
7720 + sa_size
7721 + ALPHA_ROUND (size
7722 + crtl->args.pretend_args_size);
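/* Editorial worked example (not part of GCC) of the OSF branch above, with
   assumed inputs: outgoing args = 24, sa_size = 48, naked frame = 20,
   pretend args = 0, ALPHA_ROUND rounding to 16 bytes:

     ALPHA_ROUND (24) + 48 + ALPHA_ROUND (20 + 0) = 32 + 48 + 32 = 112.  */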
7725 /* Write function prologue. */
7727 /* On VMS we have two kinds of functions:
7729 - stack frame (PROC_STACK)
7730 these are 'normal' functions with local variables which call
7731 other functions
7732 - register frame (PROC_REGISTER)
7733 keeps all data in registers, needs no stack
7735 We must pass this to the assembler so it can generate the
7736 proper pdsc (procedure descriptor).
7737 This is done with the '.pdesc' command.
7739 On non-VMS targets we don't really differentiate between the two, as we
7740 can simply allocate stack without saving registers. */
7742 void
7743 alpha_expand_prologue (void)
7745 /* Registers to save. */
7746 unsigned long imask = 0;
7747 unsigned long fmask = 0;
7748 /* Stack space needed for pushing registers clobbered by us. */
7749 HOST_WIDE_INT sa_size, sa_bias;
7750 /* Complete stack size needed. */
7751 HOST_WIDE_INT frame_size;
7752 /* Probed stack size; it additionally includes the size of
7753 the "reserve region" if any. */
7754 HOST_WIDE_INT probed_size;
7755 /* Offset from base reg to register save area. */
7756 HOST_WIDE_INT reg_offset;
7757 rtx sa_reg;
7758 int i;
7760 sa_size = alpha_sa_size ();
7761 frame_size = compute_frame_size (get_frame_size (), sa_size);
7763 if (flag_stack_usage_info)
7764 current_function_static_stack_size = frame_size;
7766 if (TARGET_ABI_OPEN_VMS)
7767 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7768 else
7769 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7771 alpha_sa_mask (&imask, &fmask);
7773 /* Emit an insn to reload GP, if needed. */
7774 if (TARGET_ABI_OSF)
7776 alpha_function_needs_gp = alpha_does_function_need_gp ();
7777 if (alpha_function_needs_gp)
7778 emit_insn (gen_prologue_ldgp ());
7781 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7782 the call to mcount ourselves, rather than having the linker do it
7783 magically in response to -pg. Since _mcount has special linkage,
7784 don't represent the call as a call. */
7785 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7786 emit_insn (gen_prologue_mcount ());
7788 /* Adjust the stack by the frame size. If the frame size is > 4096
7789 bytes, we need to be sure we probe somewhere in the first and last
7790 4096 bytes (we can probably get away without the latter test) and
7791 every 8192 bytes in between. If the frame size is > 32768, we
7792 do this in a loop. Otherwise, we generate the explicit probe
7793 instructions.
7795 Note that we are only allowed to adjust sp once in the prologue. */
7797 probed_size = frame_size;
7798 if (flag_stack_check)
7799 probed_size += STACK_CHECK_PROTECT;
7801 if (probed_size <= 32768)
7803 if (probed_size > 4096)
7805 int probed;
7807 for (probed = 4096; probed < probed_size; probed += 8192)
7808 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7810 /* We only have to do this probe if we aren't saving registers or
7811 if we are probing beyond the frame because of -fstack-check. */
7812 if ((sa_size == 0 && probed_size > probed - 4096)
7813 || flag_stack_check)
7814 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7817 if (frame_size != 0)
7818 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7819 GEN_INT (-frame_size))));
7821 else
7823 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7824 number of 8192 byte blocks to probe. We then probe each block
7825 in the loop and then set SP to the proper location. If the
7826 amount remaining is > 4096, we have to do one more probe if we
7827 are not saving any registers or if we are probing beyond the
7828 frame because of -fstack-check. */
7830 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7831 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7832 rtx ptr = gen_rtx_REG (DImode, 22);
7833 rtx count = gen_rtx_REG (DImode, 23);
7834 rtx seq;
7836 emit_move_insn (count, GEN_INT (blocks));
7837 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7839 /* Because of the difficulty in emitting a new basic block this
7840 late in the compilation, generate the loop as a single insn. */
7841 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7843 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7845 rtx last = gen_rtx_MEM (DImode,
7846 plus_constant (Pmode, ptr, -leftover));
7847 MEM_VOLATILE_P (last) = 1;
7848 emit_move_insn (last, const0_rtx);
7851 if (flag_stack_check)
7853 /* If -fstack-check is specified we have to load the entire
7854 constant into a register and subtract from the sp in one go,
7855 because the probed stack size is not equal to the frame size. */
7856 HOST_WIDE_INT lo, hi;
7857 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7858 hi = frame_size - lo;
7860 emit_move_insn (ptr, GEN_INT (hi));
7861 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7862 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7863 ptr));
7865 else
7867 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7868 GEN_INT (-leftover)));
7871 /* This alternative is special, because the DWARF code cannot
7872 possibly intuit through the loop above. So we invent this
7873 note for it to look at instead. */
7874 RTX_FRAME_RELATED_P (seq) = 1;
7875 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7876 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7877 plus_constant (Pmode, stack_pointer_rtx,
7878 -frame_size)));
7881 /* Cope with very large offsets to the register save area. */
7882 sa_bias = 0;
7883 sa_reg = stack_pointer_rtx;
7884 if (reg_offset + sa_size > 0x8000)
7886 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7887 rtx sa_bias_rtx;
7889 if (low + sa_size <= 0x8000)
7890 sa_bias = reg_offset - low, reg_offset = low;
7891 else
7892 sa_bias = reg_offset, reg_offset = 0;
7894 sa_reg = gen_rtx_REG (DImode, 24);
7895 sa_bias_rtx = GEN_INT (sa_bias);
7897 if (add_operand (sa_bias_rtx, DImode))
7898 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7899 else
7901 emit_move_insn (sa_reg, sa_bias_rtx);
7902 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7906 /* Save regs in stack order. Beginning with VMS PV. */
7907 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7908 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7910 /* Save register RA next. */
7911 if (imask & (1UL << REG_RA))
7913 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7914 imask &= ~(1UL << REG_RA);
7915 reg_offset += 8;
7918 /* Now save any other registers required to be saved. */
7919 for (i = 0; i < 31; i++)
7920 if (imask & (1UL << i))
7922 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7923 reg_offset += 8;
7926 for (i = 0; i < 31; i++)
7927 if (fmask & (1UL << i))
7929 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7930 reg_offset += 8;
7933 if (TARGET_ABI_OPEN_VMS)
7935 /* Register frame procedures save the fp. */
7936 if (alpha_procedure_type == PT_REGISTER)
7938 rtx_insn *insn =
7939 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7940 hard_frame_pointer_rtx);
7941 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7942 RTX_FRAME_RELATED_P (insn) = 1;
7945 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7946 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7947 gen_rtx_REG (DImode, REG_PV)));
7949 if (alpha_procedure_type != PT_NULL
7950 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7951 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7953 /* If we have to allocate space for outgoing args, do it now. */
7954 if (crtl->outgoing_args_size != 0)
7956 rtx_insn *seq
7957 = emit_move_insn (stack_pointer_rtx,
7958 plus_constant
7959 (Pmode, hard_frame_pointer_rtx,
7960 - (ALPHA_ROUND
7961 (crtl->outgoing_args_size))));
7963 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7964 if ! frame_pointer_needed. Setting the bit will change the CFA
7965 computation rule to use sp again, which would be wrong if we had
7966 frame_pointer_needed, as this means sp might move unpredictably
7967 later on.
7969 Also, note that
7970 frame_pointer_needed
7971 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7973 crtl->outgoing_args_size != 0
7974 => alpha_procedure_type != PT_NULL,
7976 so when we are not setting the bit here, we are guaranteed to
7977 have emitted an FRP frame pointer update just before. */
7978 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7981 else
7983 /* If we need a frame pointer, set it from the stack pointer. */
7984 if (frame_pointer_needed)
7986 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7987 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7988 else
7989 /* This must always be the last instruction in the
7990 prologue, thus we emit a special move + clobber. */
7991 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7992 stack_pointer_rtx, sa_reg)));
7996 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7997 the prologue, for exception handling reasons, we cannot do this for
7998 any insn that might fault. We could prevent this for mems with a
7999 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8000 have to prevent all such scheduling with a blockage.
8002 Linux, on the other hand, never bothered to implement OSF/1's
8003 exception handling, and so doesn't care about such things. Anyone
8004 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8006 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8007 emit_insn (gen_blockage ());
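/* Editorial sketch (not part of GCC): the probe schedule described before
   the stack adjustment above, for the small (<= 32768 byte) case.  The
   helper name and callback are hypothetical; offsets are the negative SP
   displacements that get probed.  */

static void
example_probe_offsets (long probed_size, void (*probe) (long))
{
  long probed;

  if (probed_size > 4096)
    for (probed = 4096; probed < probed_size; probed += 8192)
      probe (-probed);   /* e.g. probed_size 20000 -> probes at 4096, 12288 */

  /* The real code may add one final probe at -probed_size when no registers
     are being saved or when -fstack-check is in effect.  */
}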
8010 /* Count the number of .file directives, so that .loc is up to date. */
8011 int num_source_filenames = 0;
8013 /* Output the textual info surrounding the prologue. */
8015 void
8016 alpha_start_function (FILE *file, const char *fnname,
8017 tree decl ATTRIBUTE_UNUSED)
8019 unsigned long imask = 0;
8020 unsigned long fmask = 0;
8021 /* Stack space needed for pushing registers clobbered by us. */
8022 HOST_WIDE_INT sa_size;
8023 /* Complete stack size needed. */
8024 unsigned HOST_WIDE_INT frame_size;
8025 /* The maximum debuggable frame size. */
8026 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
8027 /* Offset from base reg to register save area. */
8028 HOST_WIDE_INT reg_offset;
8029 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8030 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8031 int i;
8033 #if TARGET_ABI_OPEN_VMS
8034 vms_start_function (fnname);
8035 #endif
8037 alpha_fnname = fnname;
8038 sa_size = alpha_sa_size ();
8039 frame_size = compute_frame_size (get_frame_size (), sa_size);
8041 if (TARGET_ABI_OPEN_VMS)
8042 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8043 else
8044 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8046 alpha_sa_mask (&imask, &fmask);
8048 /* Issue function start and label. */
8049 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
8051 fputs ("\t.ent ", file);
8052 assemble_name (file, fnname);
8053 putc ('\n', file);
8055 /* If the function needs GP, we'll write the "..ng" label there.
8056 Otherwise, do it here. */
8057 if (TARGET_ABI_OSF
8058 && ! alpha_function_needs_gp
8059 && ! cfun->is_thunk)
8061 putc ('$', file);
8062 assemble_name (file, fnname);
8063 fputs ("..ng:\n", file);
8066 /* Nested functions on VMS that are potentially called via trampoline
8067 get a special transfer entry point that loads the called functions
8068 procedure descriptor and static chain. */
8069 if (TARGET_ABI_OPEN_VMS
8070 && !TREE_PUBLIC (decl)
8071 && DECL_CONTEXT (decl)
8072 && !TYPE_P (DECL_CONTEXT (decl))
8073 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
8075 strcpy (tramp_label, fnname);
8076 strcat (tramp_label, "..tr");
8077 ASM_OUTPUT_LABEL (file, tramp_label);
8078 fprintf (file, "\tldq $1,24($27)\n");
8079 fprintf (file, "\tldq $27,16($27)\n");
8082 strcpy (entry_label, fnname);
8083 if (TARGET_ABI_OPEN_VMS)
8084 strcat (entry_label, "..en");
8086 ASM_OUTPUT_LABEL (file, entry_label);
8087 inside_function = TRUE;
8089 if (TARGET_ABI_OPEN_VMS)
8090 fprintf (file, "\t.base $%d\n", vms_base_regno);
8092 if (TARGET_ABI_OSF
8093 && TARGET_IEEE_CONFORMANT
8094 && !flag_inhibit_size_directive)
8096 /* Set flags in procedure descriptor to request IEEE-conformant
8097 math-library routines. The value we set it to is PDSC_EXC_IEEE
8098 (/usr/include/pdsc.h). */
8099 fputs ("\t.eflag 48\n", file);
8102 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8103 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8104 alpha_arg_offset = -frame_size + 48;
8106 /* Describe our frame. If the frame size is too large for a signed 32-bit integer,
8107 print it as zero to avoid an assembler error. We won't be
8108 properly describing such a frame, but that's the best we can do. */
8109 if (TARGET_ABI_OPEN_VMS)
8110 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8111 HOST_WIDE_INT_PRINT_DEC "\n",
8112 vms_unwind_regno,
8113 frame_size >= (1UL << 31) ? 0 : frame_size,
8114 reg_offset);
8115 else if (!flag_inhibit_size_directive)
8116 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8117 (frame_pointer_needed
8118 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8119 frame_size >= max_frame_size ? 0 : frame_size,
8120 crtl->args.pretend_args_size);
8122 /* Describe which registers were spilled. */
8123 if (TARGET_ABI_OPEN_VMS)
8125 if (imask)
8126 /* ??? Does VMS care if mask contains ra? The old code didn't
8127 set it, so I don't here. */
8128 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8129 if (fmask)
8130 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8131 if (alpha_procedure_type == PT_REGISTER)
8132 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8134 else if (!flag_inhibit_size_directive)
8136 if (imask)
8138 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8139 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8141 for (i = 0; i < 32; ++i)
8142 if (imask & (1UL << i))
8143 reg_offset += 8;
8146 if (fmask)
8147 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8148 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8151 #if TARGET_ABI_OPEN_VMS
8152 /* If a user condition handler has been installed at some point, emit
8153 the procedure descriptor bits to point the Condition Handling Facility
8154 at the indirection wrapper, and state the fp offset at which the user
8155 handler may be found. */
8156 if (cfun->machine->uses_condition_handler)
8158 fprintf (file, "\t.handler __gcc_shell_handler\n");
8159 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8162 #ifdef TARGET_VMS_CRASH_DEBUG
8163 /* Support of minimal traceback info. */
8164 switch_to_section (readonly_data_section);
8165 fprintf (file, "\t.align 3\n");
8166 assemble_name (file, fnname); fputs ("..na:\n", file);
8167 fputs ("\t.ascii \"", file);
8168 assemble_name (file, fnname);
8169 fputs ("\\0\"\n", file);
8170 switch_to_section (text_section);
8171 #endif
8172 #endif /* TARGET_ABI_OPEN_VMS */
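/* Standalone illustrative sketch, not part of GCC: the rough shape of the
   OSF unwind directives emitted by the code above for a small frame.  The
   numbers and register choices are made up; the second .mask operand is
   reg_offset - frame_size, as computed above.  */

#include <stdio.h>

int
main (void)
{
  long frame_size = 96;                            /* hypothetical frame */
  long reg_offset = 16;                            /* hypothetical save-area offset */
  unsigned long imask = (1UL << 26) | (1UL << 9);  /* $26 (ra) and $9 saved */

  /* .frame <stack or frame pointer>,<frame size>,$26,<pretend args size> */
  printf ("\t.frame $%d,%ld,$26,%d\n", 30, frame_size, 0);
  /* .mask <mask of saved integer regs>,<reg_offset - frame_size> */
  printf ("\t.mask 0x%lx,%ld\n", imask, reg_offset - frame_size);
  return 0;
}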
8175 /* Emit the .prologue note at the scheduled end of the prologue. */
8177 static void
8178 alpha_output_function_end_prologue (FILE *file)
8180 if (TARGET_ABI_OPEN_VMS)
8181 fputs ("\t.prologue\n", file);
8182 else if (!flag_inhibit_size_directive)
8183 fprintf (file, "\t.prologue %d\n",
8184 alpha_function_needs_gp || cfun->is_thunk);
8187 /* Write function epilogue. */
8189 void
8190 alpha_expand_epilogue (void)
8192 /* Registers to save. */
8193 unsigned long imask = 0;
8194 unsigned long fmask = 0;
8195 /* Stack space needed for pushing registers clobbered by us. */
8196 HOST_WIDE_INT sa_size;
8197 /* Complete stack size needed. */
8198 HOST_WIDE_INT frame_size;
8199 /* Offset from base reg to register save area. */
8200 HOST_WIDE_INT reg_offset;
8201 int fp_is_frame_pointer, fp_offset;
8202 rtx sa_reg, sa_reg_exp = NULL;
8203 rtx sp_adj1, sp_adj2, mem, reg, insn;
8204 rtx eh_ofs;
8205 rtx cfa_restores = NULL_RTX;
8206 int i;
8208 sa_size = alpha_sa_size ();
8209 frame_size = compute_frame_size (get_frame_size (), sa_size);
8211 if (TARGET_ABI_OPEN_VMS)
8213 if (alpha_procedure_type == PT_STACK)
8214 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8215 else
8216 reg_offset = 0;
8218 else
8219 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8221 alpha_sa_mask (&imask, &fmask);
8223 fp_is_frame_pointer
8224 = (TARGET_ABI_OPEN_VMS
8225 ? alpha_procedure_type == PT_STACK
8226 : frame_pointer_needed);
8227 fp_offset = 0;
8228 sa_reg = stack_pointer_rtx;
8230 if (crtl->calls_eh_return)
8231 eh_ofs = EH_RETURN_STACKADJ_RTX;
8232 else
8233 eh_ofs = NULL_RTX;
8235 if (sa_size)
8237 /* If we have a frame pointer, restore SP from it. */
8238 if (TARGET_ABI_OPEN_VMS
8239 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8240 : frame_pointer_needed)
8241 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8243 /* Cope with very large offsets to the register save area. */
8244 if (reg_offset + sa_size > 0x8000)
8246 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8247 HOST_WIDE_INT bias;
8249 if (low + sa_size <= 0x8000)
8250 bias = reg_offset - low, reg_offset = low;
8251 else
8252 bias = reg_offset, reg_offset = 0;
8254 sa_reg = gen_rtx_REG (DImode, 22);
8255 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8257 emit_move_insn (sa_reg, sa_reg_exp);
8260 /* Restore registers in order, excepting a true frame pointer. */
8262 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8263 reg = gen_rtx_REG (DImode, REG_RA);
8264 emit_move_insn (reg, mem);
8265 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8267 reg_offset += 8;
8268 imask &= ~(1UL << REG_RA);
8270 for (i = 0; i < 31; ++i)
8271 if (imask & (1UL << i))
8273 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8274 fp_offset = reg_offset;
8275 else
8277 mem = gen_frame_mem (DImode,
8278 plus_constant (Pmode, sa_reg,
8279 reg_offset));
8280 reg = gen_rtx_REG (DImode, i);
8281 emit_move_insn (reg, mem);
8282 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8283 cfa_restores);
8285 reg_offset += 8;
8288 for (i = 0; i < 31; ++i)
8289 if (fmask & (1UL << i))
8291 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8292 reg_offset));
8293 reg = gen_rtx_REG (DFmode, i+32);
8294 emit_move_insn (reg, mem);
8295 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8296 reg_offset += 8;
8300 if (frame_size || eh_ofs)
8302 sp_adj1 = stack_pointer_rtx;
8304 if (eh_ofs)
8306 sp_adj1 = gen_rtx_REG (DImode, 23);
8307 emit_move_insn (sp_adj1,
8308 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8311 /* If the stack size is large, begin computation into a temporary
8312 register so as not to interfere with a potential fp restore,
8313 which must be consecutive with an SP restore. */
8314 if (frame_size < 32768 && !cfun->calls_alloca)
8315 sp_adj2 = GEN_INT (frame_size);
8316 else if (frame_size < 0x40007fffL)
8318 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8320 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8321 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8322 sp_adj1 = sa_reg;
8323 else
8325 sp_adj1 = gen_rtx_REG (DImode, 23);
8326 emit_move_insn (sp_adj1, sp_adj2);
8328 sp_adj2 = GEN_INT (low);
8330 else
8332 rtx tmp = gen_rtx_REG (DImode, 23);
8333 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8334 if (!sp_adj2)
8336 /* We can't drop new things to memory this late, afaik,
8337 so build it up by pieces. */
8338 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8339 -(frame_size < 0));
8340 gcc_assert (sp_adj2);
8344 /* From now on, things must be in order. So emit blockages. */
8346 /* Restore the frame pointer. */
8347 if (fp_is_frame_pointer)
8349 emit_insn (gen_blockage ());
8350 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8351 fp_offset));
8352 emit_move_insn (hard_frame_pointer_rtx, mem);
8353 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8354 hard_frame_pointer_rtx, cfa_restores);
8356 else if (TARGET_ABI_OPEN_VMS)
8358 emit_insn (gen_blockage ());
8359 emit_move_insn (hard_frame_pointer_rtx,
8360 gen_rtx_REG (DImode, vms_save_fp_regno));
8361 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8362 hard_frame_pointer_rtx, cfa_restores);
8365 /* Restore the stack pointer. */
8366 emit_insn (gen_blockage ());
8367 if (sp_adj2 == const0_rtx)
8368 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8369 else
8370 insn = emit_move_insn (stack_pointer_rtx,
8371 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8372 REG_NOTES (insn) = cfa_restores;
8373 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8374 RTX_FRAME_RELATED_P (insn) = 1;
8376 else
8378 gcc_assert (cfa_restores == NULL);
8380 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8382 emit_insn (gen_blockage ());
8383 insn = emit_move_insn (hard_frame_pointer_rtx,
8384 gen_rtx_REG (DImode, vms_save_fp_regno));
8385 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8386 RTX_FRAME_RELATED_P (insn) = 1;
8391 /* Output the rest of the textual info surrounding the epilogue. */
8393 void
8394 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8396 rtx_insn *insn;
8398 /* We output a nop after noreturn calls at the very end of the function to
8399 ensure that the return address always remains in the caller's code range,
8400 as not doing so might confuse unwinding engines. */
8401 insn = get_last_insn ();
8402 if (!INSN_P (insn))
8403 insn = prev_active_insn (insn);
8404 if (insn && CALL_P (insn))
8405 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8407 #if TARGET_ABI_OPEN_VMS
8408 /* Write the linkage entries. */
8409 alpha_write_linkage (file, fnname);
8410 #endif
8412 /* End the function. */
8413 if (TARGET_ABI_OPEN_VMS
8414 || !flag_inhibit_size_directive)
8416 fputs ("\t.end ", file);
8417 assemble_name (file, fnname);
8418 putc ('\n', file);
8420 inside_function = FALSE;
8423 #if TARGET_ABI_OSF
8424 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8426 In order to avoid the hordes of differences between generated code
8427 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8428 lots of code loading up large constants, generate rtl and emit it
8429 instead of going straight to text.
8431 Not sure why this idea hasn't been explored before... */
8433 static void
8434 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8435 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8436 tree function)
8438 HOST_WIDE_INT hi, lo;
8439 rtx this_rtx, funexp;
8440 rtx_insn *insn;
8442 /* We always require a valid GP. */
8443 emit_insn (gen_prologue_ldgp ());
8444 emit_note (NOTE_INSN_PROLOGUE_END);
8446 /* Find the "this" pointer. If the function returns a structure,
8447 the structure return pointer is in $16. */
8448 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8449 this_rtx = gen_rtx_REG (Pmode, 17);
8450 else
8451 this_rtx = gen_rtx_REG (Pmode, 16);
8453 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8454 entire constant for the add. */
8455 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8456 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8457 if (hi + lo == delta)
8459 if (hi)
8460 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8461 if (lo)
8462 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8464 else
8466 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8467 delta, -(delta < 0));
8468 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8471 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8472 if (vcall_offset)
8474 rtx tmp, tmp2;
8476 tmp = gen_rtx_REG (Pmode, 0);
8477 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8479 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8480 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8481 if (hi + lo == vcall_offset)
8483 if (hi)
8484 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8486 else
8488 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8489 vcall_offset, -(vcall_offset < 0));
8490 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8491 lo = 0;
8493 if (lo)
8494 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8495 else
8496 tmp2 = tmp;
8497 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8499 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8502 /* Generate a tail call to the target function. */
8503 if (! TREE_USED (function))
8505 assemble_external (function);
8506 TREE_USED (function) = 1;
8508 funexp = XEXP (DECL_RTL (function), 0);
8509 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8510 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8511 SIBLING_CALL_P (insn) = 1;
8513 /* Run just enough of rest_of_compilation to get the insns emitted.
8514 There's not really enough bulk here to make other passes such as
8515 instruction scheduling worth while. Note that use_thunk calls
8516 assemble_start_function and assemble_end_function. */
8517 insn = get_insns ();
8518 shorten_branches (insn);
8519 final_start_function (insn, file, 1);
8520 final (insn, file, 1);
8521 final_end_function ();
8523 #endif /* TARGET_ABI_OSF */
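/* Standalone illustrative sketch, not part of GCC: the 16-bit decomposition
   used above for DELTA and VCALL_OFFSET (and, in alpha_expand_epilogue, for
   large register-save-area offsets).  LO is the sign-extended low 16 bits,
   suitable for lda; HI is the remainder rounded to a multiple of 65536,
   suitable for ldah.  When HI + LO reproduces the value, two instructions
   suffice; otherwise the code falls back to alpha_emit_set_long_const.
   Assumes a 64-bit long long; the helper name is invented.  */

#include <assert.h>

static void
split_ldah_lda (long long val, long long *hi, long long *lo)
{
  *lo = ((val & 0xffff) ^ 0x8000) - 0x8000;     /* in [-32768, 32767] */
  *hi = (((val - *lo) & 0xffffffffLL) ^ 0x80000000LL) - 0x80000000LL;
}

int
main (void)
{
  long long hi, lo;

  split_ldah_lda (0x12345678LL, &hi, &lo);
  assert (lo == 0x5678 && hi == 0x12340000LL && hi + lo == 0x12345678LL);

  /* A "negative" low half borrows from the high part.  */
  split_ldah_lda (0x1234a678LL, &hi, &lo);
  assert (lo == 0xa678 - 0x10000 && hi == 0x12350000LL && hi + lo == 0x1234a678LL);

  /* A value outside the 32-bit range: HI + LO != VAL, so the code above
     takes the alpha_emit_set_long_const path instead.  */
  split_ldah_lda (0x123456789abLL, &hi, &lo);
  assert (hi + lo != 0x123456789abLL);
  return 0;
}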
8525 /* Debugging support. */
8527 #include "gstab.h"
8529 /* Name of the file containing the current function. */
8531 static const char *current_function_file = "";
8533 /* Offsets to alpha virtual arg/local debugging pointers. */
8535 long alpha_arg_offset;
8536 long alpha_auto_offset;
8538 /* Emit a new filename to a stream. */
8540 void
8541 alpha_output_filename (FILE *stream, const char *name)
8543 static int first_time = TRUE;
8545 if (first_time)
8547 first_time = FALSE;
8548 ++num_source_filenames;
8549 current_function_file = name;
8550 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8551 output_quoted_string (stream, name);
8552 fprintf (stream, "\n");
8555 else if (name != current_function_file
8556 && strcmp (name, current_function_file) != 0)
8558 ++num_source_filenames;
8559 current_function_file = name;
8560 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8562 output_quoted_string (stream, name);
8563 fprintf (stream, "\n");
8567 /* Structure to show the current status of registers and memory. */
8569 struct shadow_summary
8571 struct {
8572 unsigned int i : 31; /* Mask of int regs */
8573 unsigned int fp : 31; /* Mask of fp regs */
8574 unsigned int mem : 1; /* mem == imem | fpmem */
8575 } used, defd;
8578 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8579 to the summary structure. SET is nonzero if the insn is setting the
8580 object, otherwise zero. */
8582 static void
8583 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8585 const char *format_ptr;
8586 int i, j;
8588 if (x == 0)
8589 return;
8591 switch (GET_CODE (x))
8593 /* ??? Note that this case would be incorrect if the Alpha had a
8594 ZERO_EXTRACT in SET_DEST. */
8595 case SET:
8596 summarize_insn (SET_SRC (x), sum, 0);
8597 summarize_insn (SET_DEST (x), sum, 1);
8598 break;
8600 case CLOBBER:
8601 summarize_insn (XEXP (x, 0), sum, 1);
8602 break;
8604 case USE:
8605 summarize_insn (XEXP (x, 0), sum, 0);
8606 break;
8608 case ASM_OPERANDS:
8609 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8610 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8611 break;
8613 case PARALLEL:
8614 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8615 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8616 break;
8618 case SUBREG:
8619 summarize_insn (SUBREG_REG (x), sum, 0);
8620 break;
8622 case REG:
8624 int regno = REGNO (x);
8625 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8627 if (regno == 31 || regno == 63)
8628 break;
8630 if (set)
8632 if (regno < 32)
8633 sum->defd.i |= mask;
8634 else
8635 sum->defd.fp |= mask;
8637 else
8639 if (regno < 32)
8640 sum->used.i |= mask;
8641 else
8642 sum->used.fp |= mask;
8645 break;
8647 case MEM:
8648 if (set)
8649 sum->defd.mem = 1;
8650 else
8651 sum->used.mem = 1;
8653 /* Find the regs used in memory address computation: */
8654 summarize_insn (XEXP (x, 0), sum, 0);
8655 break;
8657 case CONST_INT: case CONST_DOUBLE:
8658 case SYMBOL_REF: case LABEL_REF: case CONST:
8659 case SCRATCH: case ASM_INPUT:
8660 break;
8662 /* Handle common unary and binary ops for efficiency. */
8663 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8664 case MOD: case UDIV: case UMOD: case AND: case IOR:
8665 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8666 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8667 case NE: case EQ: case GE: case GT: case LE:
8668 case LT: case GEU: case GTU: case LEU: case LTU:
8669 summarize_insn (XEXP (x, 0), sum, 0);
8670 summarize_insn (XEXP (x, 1), sum, 0);
8671 break;
8673 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8674 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8675 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8676 case SQRT: case FFS:
8677 summarize_insn (XEXP (x, 0), sum, 0);
8678 break;
8680 default:
8681 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8682 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8683 switch (format_ptr[i])
8685 case 'e':
8686 summarize_insn (XEXP (x, i), sum, 0);
8687 break;
8689 case 'E':
8690 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8691 summarize_insn (XVECEXP (x, i, j), sum, 0);
8692 break;
8694 case 'i':
8695 break;
8697 default:
8698 gcc_unreachable ();
8703 /* Ensure a sufficient number of `trapb' insns are in the code when
8704 the user requests code with a trap precision of functions or
8705 instructions.
8707 In naive mode, when the user requests a trap-precision of
8708 "instruction", a trapb is needed after every instruction that may
8709 generate a trap. This ensures that the code is resumption safe but
8710 it is also slow.
8712 When optimizations are turned on, we delay issuing a trapb as long
8713 as possible. In this context, a trap shadow is the sequence of
8714 instructions that starts with a (potentially) trap generating
8715 instruction and extends to the next trapb or call_pal instruction
8716 (but GCC never generates call_pal by itself). We can delay (and
8717 therefore sometimes omit) a trapb subject to the following
8718 conditions:
8720 (a) On entry to the trap shadow, if any Alpha register or memory
8721 location contains a value that is used as an operand value by some
8722 instruction in the trap shadow (live on entry), then no instruction
8723 in the trap shadow may modify the register or memory location.
8725 (b) Within the trap shadow, the computation of the base register
8726 for a memory load or store instruction may not involve using the
8727 result of an instruction that might generate an UNPREDICTABLE
8728 result.
8730 (c) Within the trap shadow, no register may be used more than once
8731 as a destination register. (This is to make life easier for the
8732 trap-handler.)
8734 (d) The trap shadow may not include any branch instructions. */
8736 static void
8737 alpha_handle_trap_shadows (void)
8739 struct shadow_summary shadow;
8740 int trap_pending, exception_nesting;
8741 rtx_insn *i, *n;
8743 trap_pending = 0;
8744 exception_nesting = 0;
8745 shadow.used.i = 0;
8746 shadow.used.fp = 0;
8747 shadow.used.mem = 0;
8748 shadow.defd = shadow.used;
8750 for (i = get_insns (); i ; i = NEXT_INSN (i))
8752 if (NOTE_P (i))
8754 switch (NOTE_KIND (i))
8756 case NOTE_INSN_EH_REGION_BEG:
8757 exception_nesting++;
8758 if (trap_pending)
8759 goto close_shadow;
8760 break;
8762 case NOTE_INSN_EH_REGION_END:
8763 exception_nesting--;
8764 if (trap_pending)
8765 goto close_shadow;
8766 break;
8768 case NOTE_INSN_EPILOGUE_BEG:
8769 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8770 goto close_shadow;
8771 break;
8774 else if (trap_pending)
8776 if (alpha_tp == ALPHA_TP_FUNC)
8778 if (JUMP_P (i)
8779 && GET_CODE (PATTERN (i)) == RETURN)
8780 goto close_shadow;
8782 else if (alpha_tp == ALPHA_TP_INSN)
8784 if (optimize > 0)
8786 struct shadow_summary sum;
8788 sum.used.i = 0;
8789 sum.used.fp = 0;
8790 sum.used.mem = 0;
8791 sum.defd = sum.used;
8793 switch (GET_CODE (i))
8795 case INSN:
8796 /* Annoyingly, get_attr_trap will die on these. */
8797 if (GET_CODE (PATTERN (i)) == USE
8798 || GET_CODE (PATTERN (i)) == CLOBBER)
8799 break;
8801 summarize_insn (PATTERN (i), &sum, 0);
8803 if ((sum.defd.i & shadow.defd.i)
8804 || (sum.defd.fp & shadow.defd.fp))
8806 /* (c) would be violated */
8807 goto close_shadow;
8810 /* Combine shadow with summary of current insn: */
8811 shadow.used.i |= sum.used.i;
8812 shadow.used.fp |= sum.used.fp;
8813 shadow.used.mem |= sum.used.mem;
8814 shadow.defd.i |= sum.defd.i;
8815 shadow.defd.fp |= sum.defd.fp;
8816 shadow.defd.mem |= sum.defd.mem;
8818 if ((sum.defd.i & shadow.used.i)
8819 || (sum.defd.fp & shadow.used.fp)
8820 || (sum.defd.mem & shadow.used.mem))
8822 /* (a) would be violated (also takes care of (b)) */
8823 gcc_assert (get_attr_trap (i) != TRAP_YES
8824 || (!(sum.defd.i & sum.used.i)
8825 && !(sum.defd.fp & sum.used.fp)));
8827 goto close_shadow;
8829 break;
8831 case BARRIER:
8832 /* __builtin_unreachable can expand to no code at all,
8833 leaving (barrier) RTXes in the instruction stream. */
8834 goto close_shadow_notrapb;
8836 case JUMP_INSN:
8837 case CALL_INSN:
8838 case CODE_LABEL:
8839 goto close_shadow;
8841 default:
8842 gcc_unreachable ();
8845 else
8847 close_shadow:
8848 n = emit_insn_before (gen_trapb (), i);
8849 PUT_MODE (n, TImode);
8850 PUT_MODE (i, TImode);
8851 close_shadow_notrapb:
8852 trap_pending = 0;
8853 shadow.used.i = 0;
8854 shadow.used.fp = 0;
8855 shadow.used.mem = 0;
8856 shadow.defd = shadow.used;
8861 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8862 && NONJUMP_INSN_P (i)
8863 && GET_CODE (PATTERN (i)) != USE
8864 && GET_CODE (PATTERN (i)) != CLOBBER
8865 && get_attr_trap (i) == TRAP_YES)
8867 if (optimize && !trap_pending)
8868 summarize_insn (PATTERN (i), &shadow, 0);
8869 trap_pending = 1;
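/* Standalone illustrative sketch, not part of GCC: conditions (a) and (c)
   stated before alpha_handle_trap_shadows reduce to bitmask intersections
   of a per-insn summary against the accumulated shadow, which is exactly
   what the loop above does with struct shadow_summary.  The struct and
   helper names here are invented for illustration.  */

struct reg_masks
{
  unsigned int i;       /* mask of integer regs */
  unsigned int fp;      /* mask of fp regs */
  unsigned int mem;     /* 1 if memory is touched */
};

/* (c): the new insn defines a register that some insn already inside the
   shadow has defined.  */
static int
violates_c (const struct reg_masks *shadow_defd,
            const struct reg_masks *insn_defd)
{
  return ((insn_defd->i & shadow_defd->i) != 0
          || (insn_defd->fp & shadow_defd->fp) != 0);
}

/* (a): the new insn would modify a register or memory location that some
   insn already inside the shadow uses as an operand.  */
static int
violates_a (const struct reg_masks *shadow_used,
            const struct reg_masks *insn_defd)
{
  return ((insn_defd->i & shadow_used->i) != 0
          || (insn_defd->fp & shadow_used->fp) != 0
          || (insn_defd->mem & shadow_used->mem) != 0);
}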
8874 /* Alpha can only issue instruction groups simultaneously if they are
8875 suitably aligned. This is very processor-specific. */
8876 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8877 that are marked "fake". These instructions do not exist on that target,
8878 but it is possible to see these insns with deranged combinations of
8879 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8880 choose a result at random. */
8882 enum alphaev4_pipe {
8883 EV4_STOP = 0,
8884 EV4_IB0 = 1,
8885 EV4_IB1 = 2,
8886 EV4_IBX = 4
8889 enum alphaev5_pipe {
8890 EV5_STOP = 0,
8891 EV5_NONE = 1,
8892 EV5_E01 = 2,
8893 EV5_E0 = 4,
8894 EV5_E1 = 8,
8895 EV5_FAM = 16,
8896 EV5_FA = 32,
8897 EV5_FM = 64
8900 static enum alphaev4_pipe
8901 alphaev4_insn_pipe (rtx_insn *insn)
8903 if (recog_memoized (insn) < 0)
8904 return EV4_STOP;
8905 if (get_attr_length (insn) != 4)
8906 return EV4_STOP;
8908 switch (get_attr_type (insn))
8910 case TYPE_ILD:
8911 case TYPE_LDSYM:
8912 case TYPE_FLD:
8913 case TYPE_LD_L:
8914 return EV4_IBX;
8916 case TYPE_IADD:
8917 case TYPE_ILOG:
8918 case TYPE_ICMOV:
8919 case TYPE_ICMP:
8920 case TYPE_FST:
8921 case TYPE_SHIFT:
8922 case TYPE_IMUL:
8923 case TYPE_FBR:
8924 case TYPE_MVI: /* fake */
8925 return EV4_IB0;
8927 case TYPE_IST:
8928 case TYPE_MISC:
8929 case TYPE_IBR:
8930 case TYPE_JSR:
8931 case TYPE_CALLPAL:
8932 case TYPE_FCPYS:
8933 case TYPE_FCMOV:
8934 case TYPE_FADD:
8935 case TYPE_FDIV:
8936 case TYPE_FMUL:
8937 case TYPE_ST_C:
8938 case TYPE_MB:
8939 case TYPE_FSQRT: /* fake */
8940 case TYPE_FTOI: /* fake */
8941 case TYPE_ITOF: /* fake */
8942 return EV4_IB1;
8944 default:
8945 gcc_unreachable ();
8949 static enum alphaev5_pipe
8950 alphaev5_insn_pipe (rtx_insn *insn)
8952 if (recog_memoized (insn) < 0)
8953 return EV5_STOP;
8954 if (get_attr_length (insn) != 4)
8955 return EV5_STOP;
8957 switch (get_attr_type (insn))
8959 case TYPE_ILD:
8960 case TYPE_FLD:
8961 case TYPE_LDSYM:
8962 case TYPE_IADD:
8963 case TYPE_ILOG:
8964 case TYPE_ICMOV:
8965 case TYPE_ICMP:
8966 return EV5_E01;
8968 case TYPE_IST:
8969 case TYPE_FST:
8970 case TYPE_SHIFT:
8971 case TYPE_IMUL:
8972 case TYPE_MISC:
8973 case TYPE_MVI:
8974 case TYPE_LD_L:
8975 case TYPE_ST_C:
8976 case TYPE_MB:
8977 case TYPE_FTOI: /* fake */
8978 case TYPE_ITOF: /* fake */
8979 return EV5_E0;
8981 case TYPE_IBR:
8982 case TYPE_JSR:
8983 case TYPE_CALLPAL:
8984 return EV5_E1;
8986 case TYPE_FCPYS:
8987 return EV5_FAM;
8989 case TYPE_FBR:
8990 case TYPE_FCMOV:
8991 case TYPE_FADD:
8992 case TYPE_FDIV:
8993 case TYPE_FSQRT: /* fake */
8994 return EV5_FA;
8996 case TYPE_FMUL:
8997 return EV5_FM;
8999 default:
9000 gcc_unreachable ();
9004 /* IN_USE is a mask of the slots currently filled within the insn group.
9005 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9006 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9008 LEN is, of course, the length of the group in bytes. */
9010 static rtx_insn *
9011 alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
9013 int len, in_use;
9015 len = in_use = 0;
9017 if (! INSN_P (insn)
9018 || GET_CODE (PATTERN (insn)) == CLOBBER
9019 || GET_CODE (PATTERN (insn)) == USE)
9020 goto next_and_done;
9022 while (1)
9024 enum alphaev4_pipe pipe;
9026 pipe = alphaev4_insn_pipe (insn);
9027 switch (pipe)
9029 case EV4_STOP:
9030 /* Force complex instructions to start new groups. */
9031 if (in_use)
9032 goto done;
9034 /* If this is a completely unrecognized insn, it's an asm.
9035 We don't know how long it is, so record length as -1 to
9036 signal a needed realignment. */
9037 if (recog_memoized (insn) < 0)
9038 len = -1;
9039 else
9040 len = get_attr_length (insn);
9041 goto next_and_done;
9043 case EV4_IBX:
9044 if (in_use & EV4_IB0)
9046 if (in_use & EV4_IB1)
9047 goto done;
9048 in_use |= EV4_IB1;
9050 else
9051 in_use |= EV4_IB0 | EV4_IBX;
9052 break;
9054 case EV4_IB0:
9055 if (in_use & EV4_IB0)
9057 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9058 goto done;
9059 in_use |= EV4_IB1;
9061 in_use |= EV4_IB0;
9062 break;
9064 case EV4_IB1:
9065 if (in_use & EV4_IB1)
9066 goto done;
9067 in_use |= EV4_IB1;
9068 break;
9070 default:
9071 gcc_unreachable ();
9073 len += 4;
9075 /* Haifa doesn't do well scheduling branches. */
9076 if (JUMP_P (insn))
9077 goto next_and_done;
9079 next:
9080 insn = next_nonnote_insn (insn);
9082 if (!insn || ! INSN_P (insn))
9083 goto done;
9085 /* Let Haifa tell us where it thinks insn group boundaries are. */
9086 if (GET_MODE (insn) == TImode)
9087 goto done;
9089 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9090 goto next;
9093 next_and_done:
9094 insn = next_nonnote_insn (insn);
9096 done:
9097 *plen = len;
9098 *pin_use = in_use;
9099 return insn;
9102 /* IN_USE is a mask of the slots currently filled within the insn group.
9103 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9104 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9106 LEN is, of course, the length of the group in bytes. */
9108 static rtx_insn *
9109 alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
9111 int len, in_use;
9113 len = in_use = 0;
9115 if (! INSN_P (insn)
9116 || GET_CODE (PATTERN (insn)) == CLOBBER
9117 || GET_CODE (PATTERN (insn)) == USE)
9118 goto next_and_done;
9120 while (1)
9122 enum alphaev5_pipe pipe;
9124 pipe = alphaev5_insn_pipe (insn);
9125 switch (pipe)
9127 case EV5_STOP:
9128 /* Force complex instructions to start new groups. */
9129 if (in_use)
9130 goto done;
9132 /* If this is a completely unrecognized insn, it's an asm.
9133 We don't know how long it is, so record length as -1 to
9134 signal a needed realignment. */
9135 if (recog_memoized (insn) < 0)
9136 len = -1;
9137 else
9138 len = get_attr_length (insn);
9139 goto next_and_done;
9141 /* ??? Most of the places below, we would like to assert never
9142 happen, as it would indicate an error either in Haifa, or
9143 in the scheduling description. Unfortunately, Haifa never
9144 schedules the last instruction of the BB, so we don't have
9145 an accurate TI bit to go off. */
9146 case EV5_E01:
9147 if (in_use & EV5_E0)
9149 if (in_use & EV5_E1)
9150 goto done;
9151 in_use |= EV5_E1;
9153 else
9154 in_use |= EV5_E0 | EV5_E01;
9155 break;
9157 case EV5_E0:
9158 if (in_use & EV5_E0)
9160 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9161 goto done;
9162 in_use |= EV5_E1;
9164 in_use |= EV5_E0;
9165 break;
9167 case EV5_E1:
9168 if (in_use & EV5_E1)
9169 goto done;
9170 in_use |= EV5_E1;
9171 break;
9173 case EV5_FAM:
9174 if (in_use & EV5_FA)
9176 if (in_use & EV5_FM)
9177 goto done;
9178 in_use |= EV5_FM;
9180 else
9181 in_use |= EV5_FA | EV5_FAM;
9182 break;
9184 case EV5_FA:
9185 if (in_use & EV5_FA)
9186 goto done;
9187 in_use |= EV5_FA;
9188 break;
9190 case EV5_FM:
9191 if (in_use & EV5_FM)
9192 goto done;
9193 in_use |= EV5_FM;
9194 break;
9196 case EV5_NONE:
9197 break;
9199 default:
9200 gcc_unreachable ();
9202 len += 4;
9204 /* Haifa doesn't do well scheduling branches. */
9205 /* ??? If this is predicted not-taken, slotting continues, except
9206 that no more IBR, FBR, or JSR insns may be slotted. */
9207 if (JUMP_P (insn))
9208 goto next_and_done;
9210 next:
9211 insn = next_nonnote_insn (insn);
9213 if (!insn || ! INSN_P (insn))
9214 goto done;
9216 /* Let Haifa tell us where it thinks insn group boundaries are. */
9217 if (GET_MODE (insn) == TImode)
9218 goto done;
9220 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9221 goto next;
9224 next_and_done:
9225 insn = next_nonnote_insn (insn);
9227 done:
9228 *plen = len;
9229 *pin_use = in_use;
9230 return insn;
9233 static rtx
9234 alphaev4_next_nop (int *pin_use)
9236 int in_use = *pin_use;
9237 rtx nop;
9239 if (!(in_use & EV4_IB0))
9241 in_use |= EV4_IB0;
9242 nop = gen_nop ();
9244 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9246 in_use |= EV4_IB1;
9247 nop = gen_nop ();
9249 else if (TARGET_FP && !(in_use & EV4_IB1))
9251 in_use |= EV4_IB1;
9252 nop = gen_fnop ();
9254 else
9255 nop = gen_unop ();
9257 *pin_use = in_use;
9258 return nop;
9261 static rtx
9262 alphaev5_next_nop (int *pin_use)
9264 int in_use = *pin_use;
9265 rtx nop;
9267 if (!(in_use & EV5_E1))
9269 in_use |= EV5_E1;
9270 nop = gen_nop ();
9272 else if (TARGET_FP && !(in_use & EV5_FA))
9274 in_use |= EV5_FA;
9275 nop = gen_fnop ();
9277 else if (TARGET_FP && !(in_use & EV5_FM))
9279 in_use |= EV5_FM;
9280 nop = gen_fnop ();
9282 else
9283 nop = gen_unop ();
9285 *pin_use = in_use;
9286 return nop;
9289 /* The instruction group alignment main loop. */
9291 static void
9292 alpha_align_insns_1 (unsigned int max_align,
9293 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
9294 rtx (*next_nop) (int *))
9296 /* ALIGN is the known alignment for the insn group. */
9297 unsigned int align;
9298 /* OFS is the offset of the current insn in the insn group. */
9299 int ofs;
9300 int prev_in_use, in_use, len, ldgp;
9301 rtx_insn *i, *next;
9303 /* Let shorten branches care for assigning alignments to code labels. */
9304 shorten_branches (get_insns ());
9306 if (align_functions < 4)
9307 align = 4;
9308 else if ((unsigned int) align_functions < max_align)
9309 align = align_functions;
9310 else
9311 align = max_align;
9313 ofs = prev_in_use = 0;
9314 i = get_insns ();
9315 if (NOTE_P (i))
9316 i = next_nonnote_insn (i);
9318 ldgp = alpha_function_needs_gp ? 8 : 0;
9320 while (i)
9322 next = (*next_group) (i, &in_use, &len);
9324 /* When we see a label, resync alignment etc. */
9325 if (LABEL_P (i))
9327 unsigned int new_align = 1 << label_to_alignment (i);
9329 if (new_align >= align)
9331 align = new_align < max_align ? new_align : max_align;
9332 ofs = 0;
9335 else if (ofs & (new_align-1))
9336 ofs = (ofs | (new_align-1)) + 1;
9337 gcc_assert (!len);
9340 /* Handle complex instructions special. */
9341 else if (in_use == 0)
9343 /* Asms will have length < 0. This is a signal that we have
9344 lost alignment knowledge. Assume, however, that the asm
9345 will not mis-align instructions. */
9346 if (len < 0)
9348 ofs = 0;
9349 align = 4;
9350 len = 0;
9354 /* If the known alignment is smaller than the recognized insn group,
9355 realign the output. */
9356 else if ((int) align < len)
9358 unsigned int new_log_align = len > 8 ? 4 : 3;
9359 rtx_insn *prev, *where;
9361 where = prev = prev_nonnote_insn (i);
9362 if (!where || !LABEL_P (where))
9363 where = i;
9365 /* Can't realign between a call and its gp reload. */
9366 if (! (TARGET_EXPLICIT_RELOCS
9367 && prev && CALL_P (prev)))
9369 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9370 align = 1 << new_log_align;
9371 ofs = 0;
9375 /* We may not insert padding inside the initial ldgp sequence. */
9376 else if (ldgp > 0)
9377 ldgp -= len;
9379 /* If the group won't fit in the same INT16 as the previous,
9380 we need to add padding to keep the group together. Rather
9381 than simply leaving the insn filling to the assembler, we
9382 can make use of the knowledge of what sorts of instructions
9383 were issued in the previous group to make sure that all of
9384 the added nops are really free. */
9385 else if (ofs + len > (int) align)
9387 int nop_count = (align - ofs) / 4;
9388 rtx_insn *where;
9390 /* Insert nops before labels, branches, and calls to truly merge
9391 the execution of the nops with the previous instruction group. */
9392 where = prev_nonnote_insn (i);
9393 if (where)
9395 if (LABEL_P (where))
9397 rtx_insn *where2 = prev_nonnote_insn (where);
9398 if (where2 && JUMP_P (where2))
9399 where = where2;
9401 else if (NONJUMP_INSN_P (where))
9402 where = i;
9404 else
9405 where = i;
9408 emit_insn_before ((*next_nop)(&prev_in_use), where);
9409 while (--nop_count);
9410 ofs = 0;
9413 ofs = (ofs + len) & (align - 1);
9414 prev_in_use = in_use;
9415 i = next;
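/* Standalone illustrative sketch, not part of GCC: the offset bookkeeping
   performed by the loop above.  ALIGN is a power of two (8 for EV4, 16 for
   EV5) and OFS is the byte offset within the current aligned window; when a
   group of LEN bytes would spill into the next window, (ALIGN - OFS) / 4
   nops are emitted so the whole group starts on the boundary.  The values
   below are made up.  */

#include <assert.h>

static int
nops_needed (int ofs, int len, int align)
{
  return ofs + len > align ? (align - ofs) / 4 : 0;
}

static int
next_ofs (int ofs, int len, int align)
{
  return (ofs + len) & (align - 1);     /* same update as the loop above */
}

int
main (void)
{
  /* EV5, 16-byte windows: an 8-byte group starting at offset 12 needs one
     nop of padding, after which it begins at offset 0 of the next window.  */
  assert (nops_needed (12, 8, 16) == 1);
  assert (next_ofs (0, 8, 16) == 8);

  /* A group that still fits needs no padding; the offset just advances
     and wraps at the window size.  */
  assert (nops_needed (8, 8, 16) == 0);
  assert (next_ofs (8, 8, 16) == 0);
  return 0;
}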
9419 static void
9420 alpha_align_insns (void)
9422 if (alpha_tune == PROCESSOR_EV4)
9423 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9424 else if (alpha_tune == PROCESSOR_EV5)
9425 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9426 else
9427 gcc_unreachable ();
9430 /* Insert an unop between sibcall or noreturn function call and GP load. */
9432 static void
9433 alpha_pad_function_end (void)
9435 rtx_insn *insn, *next;
9437 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9439 if (!CALL_P (insn)
9440 || !(SIBLING_CALL_P (insn)
9441 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9442 continue;
9444 /* Make sure we do not split a call and its corresponding
9445 CALL_ARG_LOCATION note. */
9446 next = NEXT_INSN (insn);
9447 if (next == NULL)
9448 continue;
9449 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9450 insn = next;
9452 next = next_active_insn (insn);
9453 if (next)
9455 rtx pat = PATTERN (next);
9457 if (GET_CODE (pat) == SET
9458 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9459 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9460 emit_insn_after (gen_unop (), insn);
9465 /* Machine dependent reorg pass. */
9467 static void
9468 alpha_reorg (void)
9470 /* Workaround for a linker error that triggers when an exception
9471 handler immediately follows a sibcall or a noreturn function.
9473 In the sibcall case:
9475 The instruction stream from an object file:
9477 1d8: 00 00 fb 6b jmp (t12)
9478 1dc: 00 00 ba 27 ldah gp,0(ra)
9479 1e0: 00 00 bd 23 lda gp,0(gp)
9480 1e4: 00 00 7d a7 ldq t12,0(gp)
9481 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9483 was converted in the final link pass to:
9485 12003aa88: 67 fa ff c3 br 120039428 <...>
9486 12003aa8c: 00 00 fe 2f unop
9487 12003aa90: 00 00 fe 2f unop
9488 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9489 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9491 And in the noreturn case:
9493 The instruction stream from an object file:
9495 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9496 58: 00 00 ba 27 ldah gp,0(ra)
9497 5c: 00 00 bd 23 lda gp,0(gp)
9498 60: 00 00 7d a7 ldq t12,0(gp)
9499 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9501 was converted in the final link pass to:
9503 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9504 fdb28: 00 00 fe 2f unop
9505 fdb2c: 00 00 fe 2f unop
9506 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9507 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9509 GP load instructions were wrongly cleared by the linker relaxation
9510 pass. This workaround prevents removal of GP loads by inserting
9511 an unop instruction between a sibcall or noreturn function call and
9512 the exception handler prologue. */
9514 if (current_function_has_exception_handlers ())
9515 alpha_pad_function_end ();
9518 static void
9519 alpha_file_start (void)
9521 default_file_start ();
9523 fputs ("\t.set noreorder\n", asm_out_file);
9524 fputs ("\t.set volatile\n", asm_out_file);
9525 if (TARGET_ABI_OSF)
9526 fputs ("\t.set noat\n", asm_out_file);
9527 if (TARGET_EXPLICIT_RELOCS)
9528 fputs ("\t.set nomacro\n", asm_out_file);
9529 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9531 const char *arch;
9533 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9534 arch = "ev6";
9535 else if (TARGET_MAX)
9536 arch = "pca56";
9537 else if (TARGET_BWX)
9538 arch = "ev56";
9539 else if (alpha_cpu == PROCESSOR_EV5)
9540 arch = "ev5";
9541 else
9542 arch = "ev4";
9544 fprintf (asm_out_file, "\t.arch %s\n", arch);
9548 /* Since we don't have a .dynbss section, we should not allow global
9549 relocations in the .rodata section. */
9551 static int
9552 alpha_elf_reloc_rw_mask (void)
9554 return flag_pic ? 3 : 2;
9557 /* Return a section for X. The only special thing we do here is to
9558 honor small data. */
9560 static section *
9561 alpha_elf_select_rtx_section (machine_mode mode, rtx x,
9562 unsigned HOST_WIDE_INT align)
9564 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9565 /* ??? Consider using mergeable sdata sections. */
9566 return sdata_section;
9567 else
9568 return default_elf_select_rtx_section (mode, x, align);
9571 static unsigned int
9572 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9574 unsigned int flags = 0;
9576 if (strcmp (name, ".sdata") == 0
9577 || strncmp (name, ".sdata.", 7) == 0
9578 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9579 || strcmp (name, ".sbss") == 0
9580 || strncmp (name, ".sbss.", 6) == 0
9581 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9582 flags = SECTION_SMALL;
9584 flags |= default_section_type_flags (decl, name, reloc);
9585 return flags;
9588 /* Structure to collect function names for final output in link section. */
9589 /* Note that items marked with GTY can't be ifdef'ed out. */
9591 enum reloc_kind
9593 KIND_LINKAGE,
9594 KIND_CODEADDR
9597 struct GTY(()) alpha_links
9599 rtx func;
9600 rtx linkage;
9601 enum reloc_kind rkind;
9604 #if TARGET_ABI_OPEN_VMS
9606 /* Return the VMS argument type corresponding to MODE. */
9608 enum avms_arg_type
9609 alpha_arg_type (machine_mode mode)
9611 switch (mode)
9613 case SFmode:
9614 return TARGET_FLOAT_VAX ? FF : FS;
9615 case DFmode:
9616 return TARGET_FLOAT_VAX ? FD : FT;
9617 default:
9618 return I64;
9622 /* Return an rtx for an integer representing the VMS Argument Information
9623 register value. */
9626 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9628 unsigned HOST_WIDE_INT regval = cum.num_args;
9629 int i;
9631 for (i = 0; i < 6; i++)
9632 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9634 return GEN_INT (regval);
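/* Standalone illustrative sketch, not part of GCC: the packing performed by
   the loop above.  The VMS Argument Information register holds the argument
   count in its low bits and a 3-bit type code for each of the first six
   arguments at bit 8 + 3*i.  The type codes used below are placeholders,
   not the real avms_arg_type values.  */

#include <assert.h>

static unsigned long long
pack_vms_arg_info (unsigned int num_args, const unsigned int types[6])
{
  unsigned long long regval = num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= (unsigned long long) (types[i] & 7) << (i * 3 + 8);
  return regval;
}

int
main (void)
{
  unsigned int types[6] = { 1, 2, 0, 0, 0, 0 };   /* two args, fake codes */
  unsigned long long ai = pack_vms_arg_info (2, types);

  assert ((ai & 0xff) == 2);            /* argument count */
  assert (((ai >> 8) & 7) == 1);        /* arg 0 type at bits 8..10 */
  assert (((ai >> 11) & 7) == 2);       /* arg 1 type at bits 11..13 */
  return 0;
}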
9638 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9639 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9640 this is the reference to the linkage pointer value, 0 if this is the
9641 reference to the function entry value. RFLAG is 1 if this a reduced
9642 reference (code address only), 0 if this is a full reference. */
9645 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9647 struct alpha_links *al = NULL;
9648 const char *name = XSTR (func, 0);
9650 if (cfun->machine->links)
9652 /* Is this name already defined? */
9653 alpha_links *slot = cfun->machine->links->get (name);
9654 if (slot)
9655 al = *slot;
9657 else
9658 cfun->machine->links
9659 = hash_map<const char *, alpha_links *, string_traits>::create_ggc (64);
9661 if (al == NULL)
9663 size_t buf_len;
9664 char *linksym;
9665 tree id;
9667 if (name[0] == '*')
9668 name++;
9670 /* Follow transparent alias, as this is used for CRTL translations. */
9671 id = maybe_get_identifier (name);
9672 if (id)
9674 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9675 id = TREE_CHAIN (id);
9676 name = IDENTIFIER_POINTER (id);
9679 buf_len = strlen (name) + 8 + 9;
9680 linksym = (char *) alloca (buf_len);
9681 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9683 al = ggc_alloc<alpha_links> ();
9684 al->func = func;
9685 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9687 cfun->machine->links->put (ggc_strdup (name), al);
9690 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9692 if (lflag)
9693 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9694 else
9695 return al->linkage;
9698 static int
9699 alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
9701 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9702 if (link->rkind == KIND_CODEADDR)
9704 /* External and used, request code address. */
9705 fprintf (stream, "\t.code_address ");
9707 else
9709 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9710 && SYMBOL_REF_LOCAL_P (link->func))
9712 /* Locally defined, build linkage pair. */
9713 fprintf (stream, "\t.quad %s..en\n", name);
9714 fprintf (stream, "\t.quad ");
9716 else
9718 /* External, request linkage pair. */
9719 fprintf (stream, "\t.linkage ");
9722 assemble_name (stream, name);
9723 fputs ("\n", stream);
9725 return 0;
9728 static void
9729 alpha_write_linkage (FILE *stream, const char *funname)
9731 fprintf (stream, "\t.link\n");
9732 fprintf (stream, "\t.align 3\n");
9733 in_section = NULL;
9735 #ifdef TARGET_VMS_CRASH_DEBUG
9736 fputs ("\t.name ", stream);
9737 assemble_name (stream, funname);
9738 fputs ("..na\n", stream);
9739 #endif
9741 ASM_OUTPUT_LABEL (stream, funname);
9742 fprintf (stream, "\t.pdesc ");
9743 assemble_name (stream, funname);
9744 fprintf (stream, "..en,%s\n",
9745 alpha_procedure_type == PT_STACK ? "stack"
9746 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9748 if (cfun->machine->links)
9750 hash_map<const char *, alpha_links *, string_traits>::iterator iter
9751 = cfun->machine->links->begin ();
9752 for (; iter != cfun->machine->links->end (); ++iter)
9753 alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
9757 /* Switch to an arbitrary section NAME with attributes as specified
9758 by FLAGS. ALIGN specifies any known alignment requirements for
9759 the section; 0 if the default should be used. */
9761 static void
9762 vms_asm_named_section (const char *name, unsigned int flags,
9763 tree decl ATTRIBUTE_UNUSED)
9765 fputc ('\n', asm_out_file);
9766 fprintf (asm_out_file, ".section\t%s", name);
9768 if (flags & SECTION_DEBUG)
9769 fprintf (asm_out_file, ",NOWRT");
9771 fputc ('\n', asm_out_file);
9774 /* Record an element in the table of global constructors. SYMBOL is
9775 a SYMBOL_REF of the function to be called; PRIORITY is a number
9776 between 0 and MAX_INIT_PRIORITY.
9778 Differs from default_ctors_section_asm_out_constructor in that the
9779 width of the .ctors entry is always 64 bits, rather than the 32 bits
9780 used by a normal pointer. */
9782 static void
9783 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9785 switch_to_section (ctors_section);
9786 assemble_align (BITS_PER_WORD);
9787 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9790 static void
9791 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9793 switch_to_section (dtors_section);
9794 assemble_align (BITS_PER_WORD);
9795 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9797 #else
9799 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9800 bool lflag ATTRIBUTE_UNUSED,
9801 bool rflag ATTRIBUTE_UNUSED)
9803 return NULL_RTX;
9806 #endif /* TARGET_ABI_OPEN_VMS */
9808 static void
9809 alpha_init_libfuncs (void)
9811 if (TARGET_ABI_OPEN_VMS)
9813 /* Use the VMS runtime library functions for division and
9814 remainder. */
9815 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9816 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9817 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9818 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9819 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9820 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9821 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9822 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9823 abort_libfunc = init_one_libfunc ("decc$abort");
9824 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9825 #ifdef MEM_LIBFUNCS_INIT
9826 MEM_LIBFUNCS_INIT;
9827 #endif
9831 /* On the Alpha, we use this to disable the floating-point registers
9832 when they don't exist. */
9834 static void
9835 alpha_conditional_register_usage (void)
9837 int i;
9838 if (! TARGET_FPREGS)
9839 for (i = 32; i < 63; i++)
9840 fixed_regs[i] = call_used_regs[i] = 1;
9843 /* Canonicalize a comparison from one we don't have to one we do have. */
9845 static void
9846 alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9847 bool op0_preserve_value)
9849 if (!op0_preserve_value
9850 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9851 && (REG_P (*op1) || *op1 == const0_rtx))
9853 rtx tem = *op0;
9854 *op0 = *op1;
9855 *op1 = tem;
9856 *code = (int)swap_condition ((enum rtx_code)*code);
9859 if ((*code == LT || *code == LTU)
9860 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9862 *code = *code == LT ? LE : LEU;
9863 *op1 = GEN_INT (255);
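/* Standalone illustrative sketch, not part of GCC: why the rewrites above
   are safe.  "x < 256" and "x <= 255" agree for both signed and unsigned
   operands (and 255, unlike 256, fits the 8-bit literal field of the Alpha
   compare instructions, which is the presumed motivation).  Swapping GE/GT
   into LE/LT is just an operand swap.  */

#include <assert.h>

int
main (void)
{
  long x;

  for (x = -3; x <= 300; x++)
    {
      assert ((x < 256) == (x <= 255));                     /* LT  -> LE  */
      assert (((unsigned long) x < 256)
              == ((unsigned long) x <= 255));               /* LTU -> LEU */
      assert ((x >= 7) == (7 <= x));                        /* GE swapped */
    }
  return 0;
}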
9867 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
9869 static void
9870 alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
9872 const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);
9874 tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
9875 tree new_fenv_var, reload_fenv, restore_fnenv;
9876 tree update_call, atomic_feraiseexcept, hold_fnclex;
9878 /* Assume OSF/1 compatible interfaces. */
9879 if (!TARGET_ABI_OSF)
9880 return;
9882 /* Generate the equivalent of :
9883 unsigned long fenv_var;
9884 fenv_var = __ieee_get_fp_control ();
9886 unsigned long masked_fenv;
9887 masked_fenv = fenv_var & mask;
9889 __ieee_set_fp_control (masked_fenv); */
9891 fenv_var = create_tmp_var (long_unsigned_type_node, NULL);
9892 get_fpscr
9893 = build_fn_decl ("__ieee_get_fp_control",
9894 build_function_type_list (long_unsigned_type_node, NULL));
9895 set_fpscr
9896 = build_fn_decl ("__ieee_set_fp_control",
9897 build_function_type_list (void_type_node, NULL));
9898 mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
9899 ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
9900 fenv_var, build_call_expr (get_fpscr, 0));
9901 masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
9902 hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
9903 *hold = build2 (COMPOUND_EXPR, void_type_node,
9904 build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
9905 hold_fnclex);
9907 /* Store the value of masked_fenv to clear the exceptions:
9908 __ieee_set_fp_control (masked_fenv); */
9910 *clear = build_call_expr (set_fpscr, 1, masked_fenv);
9912 /* Generate the equivalent of :
9913 unsigned long new_fenv_var;
9914 new_fenv_var = __ieee_get_fp_control ();
9916 __ieee_set_fp_control (fenv_var);
9918 __atomic_feraiseexcept (new_fenv_var); */
9920 new_fenv_var = create_tmp_var (long_unsigned_type_node, NULL);
9921 reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
9922 build_call_expr (get_fpscr, 0));
9923 restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
9924 atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
9925 update_call
9926 = build_call_expr (atomic_feraiseexcept, 1,
9927 fold_convert (integer_type_node, new_fenv_var));
9928 *update = build2 (COMPOUND_EXPR, void_type_node,
9929 build2 (COMPOUND_EXPR, void_type_node,
9930 reload_fenv, restore_fnenv), update_call);
9933 /* Initialize the GCC target structure. */
9934 #if TARGET_ABI_OPEN_VMS
9935 # undef TARGET_ATTRIBUTE_TABLE
9936 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9937 # undef TARGET_CAN_ELIMINATE
9938 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9939 #endif
9941 #undef TARGET_IN_SMALL_DATA_P
9942 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9944 #undef TARGET_ASM_ALIGNED_HI_OP
9945 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9946 #undef TARGET_ASM_ALIGNED_DI_OP
9947 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9949 /* Default unaligned ops are provided for ELF systems. To get unaligned
9950 data for non-ELF systems, we have to turn off auto alignment. */
9951 #if TARGET_ABI_OPEN_VMS
9952 #undef TARGET_ASM_UNALIGNED_HI_OP
9953 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9954 #undef TARGET_ASM_UNALIGNED_SI_OP
9955 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9956 #undef TARGET_ASM_UNALIGNED_DI_OP
9957 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9958 #endif
9960 #undef TARGET_ASM_RELOC_RW_MASK
9961 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9962 #undef TARGET_ASM_SELECT_RTX_SECTION
9963 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9964 #undef TARGET_SECTION_TYPE_FLAGS
9965 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9967 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9968 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9970 #undef TARGET_INIT_LIBFUNCS
9971 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9973 #undef TARGET_LEGITIMIZE_ADDRESS
9974 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9975 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
9976 #define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
9978 #undef TARGET_ASM_FILE_START
9979 #define TARGET_ASM_FILE_START alpha_file_start
9981 #undef TARGET_SCHED_ADJUST_COST
9982 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9983 #undef TARGET_SCHED_ISSUE_RATE
9984 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9985 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9986 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9987 alpha_multipass_dfa_lookahead
9989 #undef TARGET_HAVE_TLS
9990 #define TARGET_HAVE_TLS HAVE_AS_TLS
9992 #undef TARGET_BUILTIN_DECL
9993 #define TARGET_BUILTIN_DECL alpha_builtin_decl
9994 #undef TARGET_INIT_BUILTINS
9995 #define TARGET_INIT_BUILTINS alpha_init_builtins
9996 #undef TARGET_EXPAND_BUILTIN
9997 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
9998 #undef TARGET_FOLD_BUILTIN
9999 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10000 #undef TARGET_GIMPLE_FOLD_BUILTIN
10001 #define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin
10003 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10004 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10005 #undef TARGET_CANNOT_COPY_INSN_P
10006 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10007 #undef TARGET_LEGITIMATE_CONSTANT_P
10008 #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
10009 #undef TARGET_CANNOT_FORCE_CONST_MEM
10010 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10012 #if TARGET_ABI_OSF
10013 #undef TARGET_ASM_OUTPUT_MI_THUNK
10014 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10015 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10016 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10017 #undef TARGET_STDARG_OPTIMIZE_HOOK
10018 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10019 #endif
10021 /* Use 16-bit anchors. */
10022 #undef TARGET_MIN_ANCHOR_OFFSET
10023 #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
10024 #undef TARGET_MAX_ANCHOR_OFFSET
10025 #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
10026 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
10027 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
10029 #undef TARGET_RTX_COSTS
10030 #define TARGET_RTX_COSTS alpha_rtx_costs
10031 #undef TARGET_ADDRESS_COST
10032 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
10034 #undef TARGET_MACHINE_DEPENDENT_REORG
10035 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10037 #undef TARGET_PROMOTE_FUNCTION_MODE
10038 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
10039 #undef TARGET_PROMOTE_PROTOTYPES
10040 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10041 #undef TARGET_RETURN_IN_MEMORY
10042 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10043 #undef TARGET_PASS_BY_REFERENCE
10044 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10045 #undef TARGET_SETUP_INCOMING_VARARGS
10046 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10047 #undef TARGET_STRICT_ARGUMENT_NAMING
10048 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10049 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10050 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10051 #undef TARGET_SPLIT_COMPLEX_ARG
10052 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10053 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10054 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10055 #undef TARGET_ARG_PARTIAL_BYTES
10056 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10057 #undef TARGET_FUNCTION_ARG
10058 #define TARGET_FUNCTION_ARG alpha_function_arg
10059 #undef TARGET_FUNCTION_ARG_ADVANCE
10060 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
10061 #undef TARGET_TRAMPOLINE_INIT
10062 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
10064 #undef TARGET_INSTANTIATE_DECLS
10065 #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
10067 #undef TARGET_SECONDARY_RELOAD
10068 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10070 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10071 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10072 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10073 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10075 #undef TARGET_BUILD_BUILTIN_VA_LIST
10076 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10078 #undef TARGET_EXPAND_BUILTIN_VA_START
10079 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10081 /* The Alpha architecture does not require sequential consistency. See
10082 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10083 for an example of how it can be violated in practice. */
10084 #undef TARGET_RELAXED_ORDERING
10085 #define TARGET_RELAXED_ORDERING true
10087 #undef TARGET_OPTION_OVERRIDE
10088 #define TARGET_OPTION_OVERRIDE alpha_option_override
10090 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10091 #undef TARGET_MANGLE_TYPE
10092 #define TARGET_MANGLE_TYPE alpha_mangle_type
10093 #endif
10095 #undef TARGET_LEGITIMATE_ADDRESS_P
10096 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10098 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10099 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
10101 #undef TARGET_CANONICALIZE_COMPARISON
10102 #define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
10104 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
10105 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv
10107 struct gcc_target targetm = TARGET_INITIALIZER;
10110 #include "gt-alpha.h"