* config/alpha/alpha.c (alpha_in_small_data_p): Reject common symbols.
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "varasm.h"
41 #include "regs.h"
42 #include "hard-reg-set.h"
43 #include "insn-config.h"
44 #include "conditions.h"
45 #include "output.h"
46 #include "insn-attr.h"
47 #include "flags.h"
48 #include "recog.h"
49 #include "hashtab.h"
50 #include "function.h"
51 #include "statistics.h"
52 #include "real.h"
53 #include "fixed-value.h"
54 #include "expmed.h"
55 #include "dojump.h"
56 #include "explow.h"
57 #include "emit-rtl.h"
58 #include "stmt.h"
59 #include "expr.h"
60 #include "insn-codes.h"
61 #include "optabs.h"
62 #include "reload.h"
63 #include "obstack.h"
64 #include "except.h"
65 #include "diagnostic-core.h"
66 #include "ggc.h"
67 #include "tm_p.h"
68 #include "target.h"
69 #include "target-def.h"
70 #include "common/common-target.h"
71 #include "debug.h"
72 #include "langhooks.h"
73 #include "hash-map.h"
74 #include "hash-table.h"
75 #include "predict.h"
76 #include "dominance.h"
77 #include "cfg.h"
78 #include "cfgrtl.h"
79 #include "cfganal.h"
80 #include "lcm.h"
81 #include "cfgbuild.h"
82 #include "cfgcleanup.h"
83 #include "basic-block.h"
84 #include "tree-ssa-alias.h"
85 #include "internal-fn.h"
86 #include "gimple-fold.h"
87 #include "tree-eh.h"
88 #include "gimple-expr.h"
89 #include "is-a.h"
90 #include "gimple.h"
91 #include "tree-pass.h"
92 #include "context.h"
93 #include "pass_manager.h"
94 #include "gimple-iterator.h"
95 #include "gimplify.h"
96 #include "gimple-ssa.h"
97 #include "stringpool.h"
98 #include "tree-ssanames.h"
99 #include "tree-stdarg.h"
100 #include "tm-constrs.h"
101 #include "df.h"
102 #include "libfuncs.h"
103 #include "opts.h"
104 #include "params.h"
105 #include "builtins.h"
106 #include "rtl-iter.h"
108 /* Specify which cpu to schedule for. */
109 enum processor_type alpha_tune;
111 /* Which cpu we're generating code for. */
112 enum processor_type alpha_cpu;
114 static const char * const alpha_cpu_name[] =
116 "ev4", "ev5", "ev6"
119 /* Specify how accurate floating-point traps need to be. */
121 enum alpha_trap_precision alpha_tp;
123 /* Specify the floating-point rounding mode. */
125 enum alpha_fp_rounding_mode alpha_fprm;
127 /* Specify which things cause traps. */
129 enum alpha_fp_trap_mode alpha_fptm;
131 /* Nonzero if inside of a function, because the Alpha asm can't
132 handle .files inside of functions. */
134 static int inside_function = FALSE;
136 /* The number of cycles of latency we should assume on memory reads. */
138 int alpha_memory_latency = 3;
140 /* Whether the function needs the GP. */
142 static int alpha_function_needs_gp;
144 /* The assembler name of the current function. */
146 static const char *alpha_fnname;
148 /* The next explicit relocation sequence number. */
149 extern GTY(()) int alpha_next_sequence_number;
150 int alpha_next_sequence_number = 1;
152 /* The literal and gpdisp sequence numbers for this insn, as printed
153 by %# and %* respectively. */
154 extern GTY(()) int alpha_this_literal_sequence_number;
155 extern GTY(()) int alpha_this_gpdisp_sequence_number;
156 int alpha_this_literal_sequence_number;
157 int alpha_this_gpdisp_sequence_number;
159 /* Costs of various operations on the different architectures. */
161 struct alpha_rtx_cost_data
163 unsigned char fp_add;
164 unsigned char fp_mult;
165 unsigned char fp_div_sf;
166 unsigned char fp_div_df;
167 unsigned char int_mult_si;
168 unsigned char int_mult_di;
169 unsigned char int_shift;
170 unsigned char int_cmov;
171 unsigned short int_div;
174 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
176 { /* EV4 */
177 COSTS_N_INSNS (6), /* fp_add */
178 COSTS_N_INSNS (6), /* fp_mult */
179 COSTS_N_INSNS (34), /* fp_div_sf */
180 COSTS_N_INSNS (63), /* fp_div_df */
181 COSTS_N_INSNS (23), /* int_mult_si */
182 COSTS_N_INSNS (23), /* int_mult_di */
183 COSTS_N_INSNS (2), /* int_shift */
184 COSTS_N_INSNS (2), /* int_cmov */
185 COSTS_N_INSNS (97), /* int_div */
187 { /* EV5 */
188 COSTS_N_INSNS (4), /* fp_add */
189 COSTS_N_INSNS (4), /* fp_mult */
190 COSTS_N_INSNS (15), /* fp_div_sf */
191 COSTS_N_INSNS (22), /* fp_div_df */
192 COSTS_N_INSNS (8), /* int_mult_si */
193 COSTS_N_INSNS (12), /* int_mult_di */
194 COSTS_N_INSNS (1) + 1, /* int_shift */
195 COSTS_N_INSNS (1), /* int_cmov */
196 COSTS_N_INSNS (83), /* int_div */
198 { /* EV6 */
199 COSTS_N_INSNS (4), /* fp_add */
200 COSTS_N_INSNS (4), /* fp_mult */
201 COSTS_N_INSNS (12), /* fp_div_sf */
202 COSTS_N_INSNS (15), /* fp_div_df */
203 COSTS_N_INSNS (7), /* int_mult_si */
204 COSTS_N_INSNS (7), /* int_mult_di */
205 COSTS_N_INSNS (1), /* int_shift */
206 COSTS_N_INSNS (2), /* int_cmov */
207 COSTS_N_INSNS (86), /* int_div */
211 /* Similar but tuned for code size instead of execution latency. The
212 extra +N is fractional cost tuning based on latency. It's used to
213 encourage use of cheaper insns like shift, but only if there's just
214 one of them. */
216 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
218 COSTS_N_INSNS (1), /* fp_add */
219 COSTS_N_INSNS (1), /* fp_mult */
220 COSTS_N_INSNS (1), /* fp_div_sf */
221 COSTS_N_INSNS (1) + 1, /* fp_div_df */
222 COSTS_N_INSNS (1) + 1, /* int_mult_si */
223 COSTS_N_INSNS (1) + 2, /* int_mult_di */
224 COSTS_N_INSNS (1), /* int_shift */
225 COSTS_N_INSNS (1), /* int_cmov */
226 COSTS_N_INSNS (6), /* int_div */
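/* As a rough illustration of the fractional tuning above (assuming the
   usual COSTS_N_INSNS (N) == N * 4 definition): the size-mode int_mult_di
   entry is 4 + 2 = 6, while a single shift or add costs 4.  So synth_mult
   may replace a multiply with one cheaper insn (4 < 6), but not with a
   two-insn sequence (8 > 6) -- the "only if there's just one of them"
   behavior described in the comment above.  */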
229 /* Get the number of args of a function in one of two ways. */
230 #if TARGET_ABI_OPEN_VMS
231 #define NUM_ARGS crtl->args.info.num_args
232 #else
233 #define NUM_ARGS crtl->args.info
234 #endif
236 #define REG_PV 27
237 #define REG_RA 26
239 /* Declarations of static functions. */
240 static struct machine_function *alpha_init_machine_status (void);
241 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
242 static void alpha_handle_trap_shadows (void);
243 static void alpha_align_insns (void);
245 #if TARGET_ABI_OPEN_VMS
246 static void alpha_write_linkage (FILE *, const char *);
247 static bool vms_valid_pointer_mode (machine_mode);
248 #else
249 #define vms_patch_builtins() gcc_unreachable()
250 #endif
252 static unsigned int
253 rest_of_handle_trap_shadows (void)
255 alpha_handle_trap_shadows ();
256 return 0;
259 namespace {
261 const pass_data pass_data_handle_trap_shadows =
263 RTL_PASS,
264 "trap_shadows", /* name */
265 OPTGROUP_NONE, /* optinfo_flags */
266 TV_NONE, /* tv_id */
267 0, /* properties_required */
268 0, /* properties_provided */
269 0, /* properties_destroyed */
270 0, /* todo_flags_start */
271 TODO_df_finish, /* todo_flags_finish */
274 class pass_handle_trap_shadows : public rtl_opt_pass
276 public:
277 pass_handle_trap_shadows(gcc::context *ctxt)
278 : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
281 /* opt_pass methods: */
282 virtual bool gate (function *)
284 return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
287 virtual unsigned int execute (function *)
289 return rest_of_handle_trap_shadows ();
292 }; // class pass_handle_trap_shadows
294 } // anon namespace
296 rtl_opt_pass *
297 make_pass_handle_trap_shadows (gcc::context *ctxt)
299 return new pass_handle_trap_shadows (ctxt);
302 static unsigned int
303 rest_of_align_insns (void)
305 alpha_align_insns ();
306 return 0;
309 namespace {
311 const pass_data pass_data_align_insns =
313 RTL_PASS,
314 "align_insns", /* name */
315 OPTGROUP_NONE, /* optinfo_flags */
316 TV_NONE, /* tv_id */
317 0, /* properties_required */
318 0, /* properties_provided */
319 0, /* properties_destroyed */
320 0, /* todo_flags_start */
321 TODO_df_finish, /* todo_flags_finish */
324 class pass_align_insns : public rtl_opt_pass
326 public:
327 pass_align_insns(gcc::context *ctxt)
328 : rtl_opt_pass(pass_data_align_insns, ctxt)
331 /* opt_pass methods: */
332 virtual bool gate (function *)
334 /* Due to the number of extra trapb insns, don't bother fixing up
335 alignment when trap precision is instruction. Moreover, we can
336 only do our job when sched2 is run. */
337 return ((alpha_tune == PROCESSOR_EV4
338 || alpha_tune == PROCESSOR_EV5)
339 && optimize && !optimize_size
340 && alpha_tp != ALPHA_TP_INSN
341 && flag_schedule_insns_after_reload);
344 virtual unsigned int execute (function *)
346 return rest_of_align_insns ();
349 }; // class pass_align_insns
351 } // anon namespace
353 rtl_opt_pass *
354 make_pass_align_insns (gcc::context *ctxt)
356 return new pass_align_insns (ctxt);
359 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
360 /* Implement TARGET_MANGLE_TYPE. */
362 static const char *
363 alpha_mangle_type (const_tree type)
365 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
366 && TARGET_LONG_DOUBLE_128)
367 return "g";
369 /* For all other types, use normal C++ mangling. */
370 return NULL;
372 #endif
374 /* Parse target option strings. */
376 static void
377 alpha_option_override (void)
379 static const struct cpu_table {
380 const char *const name;
381 const enum processor_type processor;
382 const int flags;
383 const unsigned short line_size; /* in bytes */
384 const unsigned short l1_size; /* in kb. */
385 const unsigned short l2_size; /* in kb. */
386 } cpu_table[] = {
387 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
388 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
389 had 64k to 8M 8-byte direct Bcache. */
390 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
391 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
392 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
394 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
395 and 1M to 16M 64 byte L3 (not modeled).
396 PCA56 had a 16k 64-byte Icache; PCA57 had a 32k Icache.
397 PCA56 had an 8k 64-byte Dcache; PCA57 had a 16k Dcache. */
398 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
399 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
400 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
401 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
402 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
403 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
404 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
406 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
407 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
408 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
409 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
410 64, 64, 16*1024 },
411 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
412 64, 64, 16*1024 }
415 opt_pass *pass_handle_trap_shadows = make_pass_handle_trap_shadows (g);
416 struct register_pass_info handle_trap_shadows_info
417 = { pass_handle_trap_shadows, "eh_ranges",
418 1, PASS_POS_INSERT_AFTER
421 opt_pass *pass_align_insns = make_pass_align_insns (g);
422 struct register_pass_info align_insns_info
423 = { pass_align_insns, "shorten",
424 1, PASS_POS_INSERT_BEFORE
427 int const ct_size = ARRAY_SIZE (cpu_table);
428 int line_size = 0, l1_size = 0, l2_size = 0;
429 int i;
431 #ifdef SUBTARGET_OVERRIDE_OPTIONS
432 SUBTARGET_OVERRIDE_OPTIONS;
433 #endif
435 /* Default to full IEEE compliance mode for Go language. */
436 if (strcmp (lang_hooks.name, "GNU Go") == 0
437 && !(target_flags_explicit & MASK_IEEE))
438 target_flags |= MASK_IEEE;
440 alpha_fprm = ALPHA_FPRM_NORM;
441 alpha_tp = ALPHA_TP_PROG;
442 alpha_fptm = ALPHA_FPTM_N;
444 if (TARGET_IEEE)
446 alpha_tp = ALPHA_TP_INSN;
447 alpha_fptm = ALPHA_FPTM_SU;
449 if (TARGET_IEEE_WITH_INEXACT)
451 alpha_tp = ALPHA_TP_INSN;
452 alpha_fptm = ALPHA_FPTM_SUI;
455 if (alpha_tp_string)
457 if (! strcmp (alpha_tp_string, "p"))
458 alpha_tp = ALPHA_TP_PROG;
459 else if (! strcmp (alpha_tp_string, "f"))
460 alpha_tp = ALPHA_TP_FUNC;
461 else if (! strcmp (alpha_tp_string, "i"))
462 alpha_tp = ALPHA_TP_INSN;
463 else
464 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
467 if (alpha_fprm_string)
469 if (! strcmp (alpha_fprm_string, "n"))
470 alpha_fprm = ALPHA_FPRM_NORM;
471 else if (! strcmp (alpha_fprm_string, "m"))
472 alpha_fprm = ALPHA_FPRM_MINF;
473 else if (! strcmp (alpha_fprm_string, "c"))
474 alpha_fprm = ALPHA_FPRM_CHOP;
475 else if (! strcmp (alpha_fprm_string,"d"))
476 alpha_fprm = ALPHA_FPRM_DYN;
477 else
478 error ("bad value %qs for -mfp-rounding-mode switch",
479 alpha_fprm_string);
482 if (alpha_fptm_string)
484 if (strcmp (alpha_fptm_string, "n") == 0)
485 alpha_fptm = ALPHA_FPTM_N;
486 else if (strcmp (alpha_fptm_string, "u") == 0)
487 alpha_fptm = ALPHA_FPTM_U;
488 else if (strcmp (alpha_fptm_string, "su") == 0)
489 alpha_fptm = ALPHA_FPTM_SU;
490 else if (strcmp (alpha_fptm_string, "sui") == 0)
491 alpha_fptm = ALPHA_FPTM_SUI;
492 else
493 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
496 if (alpha_cpu_string)
498 for (i = 0; i < ct_size; i++)
499 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
501 alpha_tune = alpha_cpu = cpu_table[i].processor;
502 line_size = cpu_table[i].line_size;
503 l1_size = cpu_table[i].l1_size;
504 l2_size = cpu_table[i].l2_size;
505 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
506 target_flags |= cpu_table[i].flags;
507 break;
509 if (i == ct_size)
510 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
513 if (alpha_tune_string)
515 for (i = 0; i < ct_size; i++)
516 if (! strcmp (alpha_tune_string, cpu_table [i].name))
518 alpha_tune = cpu_table[i].processor;
519 line_size = cpu_table[i].line_size;
520 l1_size = cpu_table[i].l1_size;
521 l2_size = cpu_table[i].l2_size;
522 break;
524 if (i == ct_size)
525 error ("bad value %qs for -mtune switch", alpha_tune_string);
528 if (line_size)
529 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
530 global_options.x_param_values,
531 global_options_set.x_param_values);
532 if (l1_size)
533 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
534 global_options.x_param_values,
535 global_options_set.x_param_values);
536 if (l2_size)
537 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
538 global_options.x_param_values,
539 global_options_set.x_param_values);
541 /* Do some sanity checks on the above options. */
543 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
544 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
546 warning (0, "fp software completion requires -mtrap-precision=i");
547 alpha_tp = ALPHA_TP_INSN;
550 if (alpha_cpu == PROCESSOR_EV6)
552 /* Except for EV6 pass 1 (not released), we always have precise
553 arithmetic traps. Which means we can do software completion
554 without minding trap shadows. */
555 alpha_tp = ALPHA_TP_PROG;
558 if (TARGET_FLOAT_VAX)
560 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
562 warning (0, "rounding mode not supported for VAX floats");
563 alpha_fprm = ALPHA_FPRM_NORM;
565 if (alpha_fptm == ALPHA_FPTM_SUI)
567 warning (0, "trap mode not supported for VAX floats");
568 alpha_fptm = ALPHA_FPTM_SU;
570 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
571 warning (0, "128-bit long double not supported for VAX floats");
572 target_flags &= ~MASK_LONG_DOUBLE_128;
576 char *end;
577 int lat;
579 if (!alpha_mlat_string)
580 alpha_mlat_string = "L1";
582 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
583 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
585 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
586 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
587 && alpha_mlat_string[2] == '\0')
589 static int const cache_latency[][4] =
591 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
592 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
593 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
596 lat = alpha_mlat_string[1] - '0';
597 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
599 warning (0, "L%d cache latency unknown for %s",
600 lat, alpha_cpu_name[alpha_tune]);
601 lat = 3;
603 else
604 lat = cache_latency[alpha_tune][lat-1];
606 else if (! strcmp (alpha_mlat_string, "main"))
608 /* Most current memories have about 370ns latency. This is
609 a reasonable guess for a fast cpu. */
610 lat = 150;
612 else
614 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
615 lat = 3;
618 alpha_memory_latency = lat;
621 /* Default the definition of "small data" to 8 bytes. */
622 if (!global_options_set.x_g_switch_value)
623 g_switch_value = 8;
625 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
626 if (flag_pic == 1)
627 target_flags |= MASK_SMALL_DATA;
628 else if (flag_pic == 2)
629 target_flags &= ~MASK_SMALL_DATA;
631 /* Align labels and loops for optimal branching. */
632 /* ??? Kludge these by not doing anything if we don't optimize. */
633 if (optimize > 0)
635 if (align_loops <= 0)
636 align_loops = 16;
637 if (align_jumps <= 0)
638 align_jumps = 16;
640 if (align_functions <= 0)
641 align_functions = 16;
643 /* Register variables and functions with the garbage collector. */
645 /* Set up function hooks. */
646 init_machine_status = alpha_init_machine_status;
648 /* Tell the compiler when we're using VAX floating point. */
649 if (TARGET_FLOAT_VAX)
651 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
652 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
653 REAL_MODE_FORMAT (TFmode) = NULL;
656 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
657 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
658 target_flags |= MASK_LONG_DOUBLE_128;
659 #endif
661 /* This needs to be done at start up. It's convenient to do it here. */
662 register_pass (&handle_trap_shadows_info);
663 register_pass (&align_insns_info);
666 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
669 zap_mask (HOST_WIDE_INT value)
671 int i;
673 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
674 i++, value >>= 8)
675 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
676 return 0;
678 return 1;
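/* For instance, 0x00ff00ffff0000ff is a zap mask (every byte is 0x00 or
   0xff), so zap_mask returns 1 for it; 0x0000000000000180 is not, since
   its low byte is 0x80, and zap_mask returns 0.  */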
681 /* Return true if OP is valid for a particular TLS relocation.
682 We are already guaranteed that OP is a CONST. */
685 tls_symbolic_operand_1 (rtx op, int size, int unspec)
687 op = XEXP (op, 0);
689 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
690 return 0;
691 op = XVECEXP (op, 0, 0);
693 if (GET_CODE (op) != SYMBOL_REF)
694 return 0;
696 switch (SYMBOL_REF_TLS_MODEL (op))
698 case TLS_MODEL_LOCAL_DYNAMIC:
699 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
700 case TLS_MODEL_INITIAL_EXEC:
701 return unspec == UNSPEC_TPREL && size == 64;
702 case TLS_MODEL_LOCAL_EXEC:
703 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
704 default:
705 gcc_unreachable ();
709 /* Used by aligned_memory_operand and unaligned_memory_operand to
710 resolve what reload is going to do with OP if it's a register. */
713 resolve_reload_operand (rtx op)
715 if (reload_in_progress)
717 rtx tmp = op;
718 if (GET_CODE (tmp) == SUBREG)
719 tmp = SUBREG_REG (tmp);
720 if (REG_P (tmp)
721 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
723 op = reg_equiv_memory_loc (REGNO (tmp));
724 if (op == 0)
725 return 0;
728 return op;
731 /* The scalar modes supported differ from the default check-what-c-supports
732 version in that sometimes TFmode is available even when long double
733 indicates only DFmode. */
735 static bool
736 alpha_scalar_mode_supported_p (machine_mode mode)
738 switch (mode)
740 case QImode:
741 case HImode:
742 case SImode:
743 case DImode:
744 case TImode: /* via optabs.c */
745 return true;
747 case SFmode:
748 case DFmode:
749 return true;
751 case TFmode:
752 return TARGET_HAS_XFLOATING_LIBS;
754 default:
755 return false;
759 /* Alpha implements a couple of integer vector mode operations when
760 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
761 which allows the vectorizer to operate on e.g. move instructions,
762 or when expand_vector_operations can do something useful. */
764 static bool
765 alpha_vector_mode_supported_p (machine_mode mode)
767 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
770 /* Return 1 if this function can directly return via $26. */
773 direct_return (void)
775 return (TARGET_ABI_OSF
776 && reload_completed
777 && alpha_sa_size () == 0
778 && get_frame_size () == 0
779 && crtl->outgoing_args_size == 0
780 && crtl->args.pretend_args_size == 0);
783 /* Return the TLS model to use for SYMBOL. */
785 static enum tls_model
786 tls_symbolic_operand_type (rtx symbol)
788 enum tls_model model;
790 if (GET_CODE (symbol) != SYMBOL_REF)
791 return TLS_MODEL_NONE;
792 model = SYMBOL_REF_TLS_MODEL (symbol);
794 /* Local-exec with a 64-bit size is the same code as initial-exec. */
795 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
796 model = TLS_MODEL_INITIAL_EXEC;
798 return model;
801 /* Return true if the function DECL will share the same GP as any
802 function in the current unit of translation. */
804 static bool
805 decl_has_samegp (const_tree decl)
807 /* Functions that are not local can be overridden, and thus may
808 not share the same gp. */
809 if (!(*targetm.binds_local_p) (decl))
810 return false;
812 /* If -msmall-data is in effect, assume that there is only one GP
813 for the module, and so any local symbol has this property. We
814 need explicit relocations to be able to enforce this for symbols
815 not defined in this unit of translation, however. */
816 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
817 return true;
819 /* Functions that are not external are defined in this UoT. */
820 /* ??? Irritatingly, static functions not yet emitted are still
821 marked "external". Apply this to non-static functions only. */
822 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
825 /* Return true if EXP should be placed in the small data section. */
827 static bool
828 alpha_in_small_data_p (const_tree exp)
830 /* We want to merge strings, so we never consider them small data. */
831 if (TREE_CODE (exp) == STRING_CST)
832 return false;
834 /* Functions are never in the small data area. Duh. */
835 if (TREE_CODE (exp) == FUNCTION_DECL)
836 return false;
838 /* COMMON symbols are never small data. */
839 if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
840 return false;
842 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
844 const char *section = DECL_SECTION_NAME (exp);
845 if (strcmp (section, ".sdata") == 0
846 || strcmp (section, ".sbss") == 0)
847 return true;
849 else
851 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
853 /* If this is an incomplete type with size 0, then we can't put it
854 in sdata because it might be too big when completed. */
855 if (size > 0 && size <= g_switch_value)
856 return true;
859 return false;
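/* Illustrative note on the common-symbol check above: a tentative
   definition such as "int x;" (compiled without -fno-common) is emitted
   as a common symbol, and the linker merges same-named commons and picks
   their final size and placement.  Since we cannot be sure such an object
   ends up in .sbss within reach of a 16-bit gp-relative displacement, it
   must not be treated as small data.  */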
862 #if TARGET_ABI_OPEN_VMS
863 static bool
864 vms_valid_pointer_mode (machine_mode mode)
866 return (mode == SImode || mode == DImode);
869 static bool
870 alpha_linkage_symbol_p (const char *symname)
872 int symlen = strlen (symname);
874 if (symlen > 4)
875 return strcmp (&symname [symlen - 4], "..lk") == 0;
877 return false;
880 #define LINKAGE_SYMBOL_REF_P(X) \
881 ((GET_CODE (X) == SYMBOL_REF \
882 && alpha_linkage_symbol_p (XSTR (X, 0))) \
883 || (GET_CODE (X) == CONST \
884 && GET_CODE (XEXP (X, 0)) == PLUS \
885 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
886 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
887 #endif
889 /* legitimate_address_p recognizes an RTL expression that is a valid
890 memory address for an instruction. The MODE argument is the
891 machine mode for the MEM expression that wants to use this address.
893 For Alpha, we have either a constant address or the sum of a
894 register and a constant address, or just a register. For DImode,
895 any of those forms can be surrounded with an AND that clears the
896 low-order three bits; this is an "unaligned" access. */
898 static bool
899 alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
901 /* If this is an ldq_u type address, discard the outer AND. */
902 if (mode == DImode
903 && GET_CODE (x) == AND
904 && CONST_INT_P (XEXP (x, 1))
905 && INTVAL (XEXP (x, 1)) == -8)
906 x = XEXP (x, 0);
908 /* Discard non-paradoxical subregs. */
909 if (GET_CODE (x) == SUBREG
910 && (GET_MODE_SIZE (GET_MODE (x))
911 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
912 x = SUBREG_REG (x);
914 /* Unadorned general registers are valid. */
915 if (REG_P (x)
916 && (strict
917 ? STRICT_REG_OK_FOR_BASE_P (x)
918 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
919 return true;
921 /* Constant addresses (i.e. +/- 32k) are valid. */
922 if (CONSTANT_ADDRESS_P (x))
923 return true;
925 #if TARGET_ABI_OPEN_VMS
926 if (LINKAGE_SYMBOL_REF_P (x))
927 return true;
928 #endif
930 /* Register plus a small constant offset is valid. */
931 if (GET_CODE (x) == PLUS)
933 rtx ofs = XEXP (x, 1);
934 x = XEXP (x, 0);
936 /* Discard non-paradoxical subregs. */
937 if (GET_CODE (x) == SUBREG
938 && (GET_MODE_SIZE (GET_MODE (x))
939 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
940 x = SUBREG_REG (x);
942 if (REG_P (x))
944 if (! strict
945 && NONSTRICT_REG_OK_FP_BASE_P (x)
946 && CONST_INT_P (ofs))
947 return true;
948 if ((strict
949 ? STRICT_REG_OK_FOR_BASE_P (x)
950 : NONSTRICT_REG_OK_FOR_BASE_P (x))
951 && CONSTANT_ADDRESS_P (ofs))
952 return true;
956 /* If we're managing explicit relocations, LO_SUM is valid, as are small
957 data symbols. Avoid explicit relocations of modes larger than word
958 mode since e.g. $LC0+8($1) can fold around +/- 32k offset.  */
959 else if (TARGET_EXPLICIT_RELOCS
960 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
962 if (small_symbolic_operand (x, Pmode))
963 return true;
965 if (GET_CODE (x) == LO_SUM)
967 rtx ofs = XEXP (x, 1);
968 x = XEXP (x, 0);
970 /* Discard non-paradoxical subregs. */
971 if (GET_CODE (x) == SUBREG
972 && (GET_MODE_SIZE (GET_MODE (x))
973 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
974 x = SUBREG_REG (x);
976 /* Must have a valid base register. */
977 if (! (REG_P (x)
978 && (strict
979 ? STRICT_REG_OK_FOR_BASE_P (x)
980 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
981 return false;
983 /* The symbol must be local. */
984 if (local_symbolic_operand (ofs, Pmode)
985 || dtp32_symbolic_operand (ofs, Pmode)
986 || tp32_symbolic_operand (ofs, Pmode))
987 return true;
991 return false;
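/* A few concrete examples of what the function above accepts (sketch):
   a bare base register "($1)", a base plus 16-bit displacement "64($1)",
   a small data symbol under -msmall-data with explicit relocations, a
   LO_SUM of a base register and a local symbol, and for DImode the
   ldq_u form (and (plus $1 disp) -8).  Reg+reg indexed addresses are
   rejected, as Alpha has no indexed addressing mode.  */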
994 /* Build the SYMBOL_REF for __tls_get_addr. */
996 static GTY(()) rtx tls_get_addr_libfunc;
998 static rtx
999 get_tls_get_addr (void)
1001 if (!tls_get_addr_libfunc)
1002 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
1003 return tls_get_addr_libfunc;
1006 /* Try machine-dependent ways of modifying an illegitimate address
1007 to be legitimate. If we find one, return the new, valid address. */
1009 static rtx
1010 alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
1012 HOST_WIDE_INT addend;
1014 /* If the address is (plus reg const_int) and the CONST_INT is not a
1015 valid offset, compute the high part of the constant and add it to
1016 the register. Then our address is (plus temp low-part-const). */
1017 if (GET_CODE (x) == PLUS
1018 && REG_P (XEXP (x, 0))
1019 && CONST_INT_P (XEXP (x, 1))
1020 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1022 addend = INTVAL (XEXP (x, 1));
1023 x = XEXP (x, 0);
1024 goto split_addend;
1027 /* If the address is (const (plus FOO const_int)), find the low-order
1028 part of the CONST_INT. Then load FOO plus any high-order part of the
1029 CONST_INT into a register. Our address is (plus reg low-part-const).
1030 This is done to reduce the number of GOT entries. */
1031 if (can_create_pseudo_p ()
1032 && GET_CODE (x) == CONST
1033 && GET_CODE (XEXP (x, 0)) == PLUS
1034 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
1036 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1037 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1038 goto split_addend;
1041 /* If we have a (plus reg const), emit the load as in (2), then add
1042 the two registers, and finally generate (plus reg low-part-const) as
1043 our address. */
1044 if (can_create_pseudo_p ()
1045 && GET_CODE (x) == PLUS
1046 && REG_P (XEXP (x, 0))
1047 && GET_CODE (XEXP (x, 1)) == CONST
1048 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1049 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
1051 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1052 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1053 XEXP (XEXP (XEXP (x, 1), 0), 0),
1054 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1055 goto split_addend;
1058 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
1059 Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
1060 around +/- 32k offset. */
1061 if (TARGET_EXPLICIT_RELOCS
1062 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1063 && symbolic_operand (x, Pmode))
1065 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1067 switch (tls_symbolic_operand_type (x))
1069 case TLS_MODEL_NONE:
1070 break;
1072 case TLS_MODEL_GLOBAL_DYNAMIC:
1073 start_sequence ();
1075 r0 = gen_rtx_REG (Pmode, 0);
1076 r16 = gen_rtx_REG (Pmode, 16);
1077 tga = get_tls_get_addr ();
1078 dest = gen_reg_rtx (Pmode);
1079 seq = GEN_INT (alpha_next_sequence_number++);
1081 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1082 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1083 insn = emit_call_insn (insn);
1084 RTL_CONST_CALL_P (insn) = 1;
1085 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1087 insn = get_insns ();
1088 end_sequence ();
1090 emit_libcall_block (insn, dest, r0, x);
1091 return dest;
1093 case TLS_MODEL_LOCAL_DYNAMIC:
1094 start_sequence ();
1096 r0 = gen_rtx_REG (Pmode, 0);
1097 r16 = gen_rtx_REG (Pmode, 16);
1098 tga = get_tls_get_addr ();
1099 scratch = gen_reg_rtx (Pmode);
1100 seq = GEN_INT (alpha_next_sequence_number++);
1102 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1103 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1104 insn = emit_call_insn (insn);
1105 RTL_CONST_CALL_P (insn) = 1;
1106 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1108 insn = get_insns ();
1109 end_sequence ();
1111 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1112 UNSPEC_TLSLDM_CALL);
1113 emit_libcall_block (insn, scratch, r0, eqv);
1115 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1116 eqv = gen_rtx_CONST (Pmode, eqv);
1118 if (alpha_tls_size == 64)
1120 dest = gen_reg_rtx (Pmode);
1121 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1122 emit_insn (gen_adddi3 (dest, dest, scratch));
1123 return dest;
1125 if (alpha_tls_size == 32)
1127 insn = gen_rtx_HIGH (Pmode, eqv);
1128 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1129 scratch = gen_reg_rtx (Pmode);
1130 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1132 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1134 case TLS_MODEL_INITIAL_EXEC:
1135 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1136 eqv = gen_rtx_CONST (Pmode, eqv);
1137 tp = gen_reg_rtx (Pmode);
1138 scratch = gen_reg_rtx (Pmode);
1139 dest = gen_reg_rtx (Pmode);
1141 emit_insn (gen_get_thread_pointerdi (tp));
1142 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1143 emit_insn (gen_adddi3 (dest, tp, scratch));
1144 return dest;
1146 case TLS_MODEL_LOCAL_EXEC:
1147 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1148 eqv = gen_rtx_CONST (Pmode, eqv);
1149 tp = gen_reg_rtx (Pmode);
1151 emit_insn (gen_get_thread_pointerdi (tp));
1152 if (alpha_tls_size == 32)
1154 insn = gen_rtx_HIGH (Pmode, eqv);
1155 insn = gen_rtx_PLUS (Pmode, tp, insn);
1156 tp = gen_reg_rtx (Pmode);
1157 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1159 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1161 default:
1162 gcc_unreachable ();
1165 if (local_symbolic_operand (x, Pmode))
1167 if (small_symbolic_operand (x, Pmode))
1168 return x;
1169 else
1171 if (can_create_pseudo_p ())
1172 scratch = gen_reg_rtx (Pmode);
1173 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1174 gen_rtx_HIGH (Pmode, x)));
1175 return gen_rtx_LO_SUM (Pmode, scratch, x);
1180 return NULL;
1182 split_addend:
1184 HOST_WIDE_INT low, high;
1186 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1187 addend -= low;
1188 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1189 addend -= high;
1191 if (addend)
1192 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1193 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1194 1, OPTAB_LIB_WIDEN);
1195 if (high)
1196 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1197 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1198 1, OPTAB_LIB_WIDEN);
1200 return plus_constant (Pmode, x, low);
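/* Worked example of the split above: for addend 0x12345, low is 0x2345
   and high is 0x10000, so the address becomes (x + 0x10000) + 0x2345,
   with the 0x10000 added by a single ldah.  The xor/subtract trick sign
   extends the 16-bit chunk, e.g. addend 0x8000 splits into high 0x10000
   plus low -0x8000.  */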
1205 /* Try machine-dependent ways of modifying an illegitimate address
1206 to be legitimate. Return X or the new, valid address. */
1208 static rtx
1209 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1210 machine_mode mode)
1212 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1213 return new_x ? new_x : x;
1216 /* Return true if ADDR has an effect that depends on the machine mode it
1217 is used for. On the Alpha this is true only for the unaligned modes.
1218 We can simplify the test since we know that the address must be valid. */
1220 static bool
1221 alpha_mode_dependent_address_p (const_rtx addr,
1222 addr_space_t as ATTRIBUTE_UNUSED)
1224 return GET_CODE (addr) == AND;
1227 /* Primarily this is required for TLS symbols, but given that our move
1228 patterns *ought* to be able to handle any symbol at any time, we
1229 should never be spilling symbolic operands to the constant pool, ever. */
1231 static bool
1232 alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1234 enum rtx_code code = GET_CODE (x);
1235 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1238 /* We do not allow indirect calls to be optimized into sibling calls, nor
1239 can we allow a call to a function with a different GP to be optimized
1240 into a sibcall. */
1242 static bool
1243 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1245 /* Can't do indirect tail calls, since we don't know if the target
1246 uses the same GP. */
1247 if (!decl)
1248 return false;
1250 /* Otherwise, we can make a tail call if the target function shares
1251 the same GP. */
1252 return decl_has_samegp (decl);
1255 bool
1256 some_small_symbolic_operand_int (rtx x)
1258 subrtx_var_iterator::array_type array;
1259 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
1261 rtx x = *iter;
1262 /* Don't re-split. */
1263 if (GET_CODE (x) == LO_SUM)
1264 iter.skip_subrtxes ();
1265 else if (small_symbolic_operand (x, Pmode))
1266 return true;
1268 return false;
1272 split_small_symbolic_operand (rtx x)
1274 x = copy_insn (x);
1275 subrtx_ptr_iterator::array_type array;
1276 FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
1278 rtx *ptr = *iter;
1279 rtx x = *ptr;
1280 /* Don't re-split. */
1281 if (GET_CODE (x) == LO_SUM)
1282 iter.skip_subrtxes ();
1283 else if (small_symbolic_operand (x, Pmode))
1285 *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1286 iter.skip_subrtxes ();
1289 return x;
1292 /* Indicate that INSN cannot be duplicated. This is true for any insn
1293 that we've marked with gpdisp relocs, since those have to stay in
1294 1-1 correspondence with one another.
1296 Technically we could copy them if we could set up a mapping from one
1297 sequence number to another, across the set of insns to be duplicated.
1298 This seems overly complicated and error-prone since interblock motion
1299 from sched-ebb could move one of the pair of insns to a different block.
1301 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1302 then they'll be in a different block from their ldgp. Which could lead
1303 the bb reorder code to think that it would be ok to copy just the block
1304 containing the call and branch to the block containing the ldgp. */
1306 static bool
1307 alpha_cannot_copy_insn_p (rtx_insn *insn)
1309 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1310 return false;
1311 if (recog_memoized (insn) >= 0)
1312 return get_attr_cannot_copy (insn);
1313 else
1314 return false;
1318 /* Try a machine-dependent way of reloading an illegitimate address
1319 operand. If we find one, push the reload and return the new rtx. */
1322 alpha_legitimize_reload_address (rtx x,
1323 machine_mode mode ATTRIBUTE_UNUSED,
1324 int opnum, int type,
1325 int ind_levels ATTRIBUTE_UNUSED)
1327 /* We must recognize output that we have already generated ourselves. */
1328 if (GET_CODE (x) == PLUS
1329 && GET_CODE (XEXP (x, 0)) == PLUS
1330 && REG_P (XEXP (XEXP (x, 0), 0))
1331 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1332 && CONST_INT_P (XEXP (x, 1)))
1334 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1335 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1336 opnum, (enum reload_type) type);
1337 return x;
1340 /* We wish to handle large displacements off a base register by
1341 splitting the addend across an ldah and the mem insn. This
1342 cuts the number of extra insns needed from 3 to 1.  */
1343 if (GET_CODE (x) == PLUS
1344 && REG_P (XEXP (x, 0))
1345 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1346 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1347 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1349 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1350 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1351 HOST_WIDE_INT high
1352 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1354 /* Check for 32-bit overflow. */
1355 if (high + low != val)
1356 return NULL_RTX;
1358 /* Reload the high part into a base reg; leave the low part
1359 in the mem directly. */
1360 x = gen_rtx_PLUS (GET_MODE (x),
1361 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1362 GEN_INT (high)),
1363 GEN_INT (low));
1365 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1366 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1367 opnum, (enum reload_type) type);
1368 return x;
1371 return NULL_RTX;
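/* For example (sketch): reloading the address (plus $9 0x12345) splits
   it into (plus (plus $9 0x10000) 0x2345); the inner sum is reloaded
   into a base register with one ldah, and 0x2345 stays in the mem as a
   valid 16-bit displacement.  Loading the full constant and adding it
   to $9 would instead have taken ldah + lda + addq.  */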
1374 /* Compute a (partial) cost for rtx X. Return true if the complete
1375 cost has been computed, and false if subexpressions should be
1376 scanned. In either case, *TOTAL contains the cost result. */
1378 static bool
1379 alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
1380 bool speed)
1382 machine_mode mode = GET_MODE (x);
1383 bool float_mode_p = FLOAT_MODE_P (mode);
1384 const struct alpha_rtx_cost_data *cost_data;
1386 if (!speed)
1387 cost_data = &alpha_rtx_cost_size;
1388 else
1389 cost_data = &alpha_rtx_cost_data[alpha_tune];
1391 switch (code)
1393 case CONST_INT:
1394 /* If this is an 8-bit constant, return zero since it can be used
1395 nearly anywhere with no cost. If it is a valid operand for an
1396 ADD or AND, likewise return 0 if we know it will be used in that
1397 context. Otherwise, return 2 since it might be used there later.
1398 All other constants take at least two insns. */
1399 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1401 *total = 0;
1402 return true;
1404 /* FALLTHRU */
1406 case CONST_DOUBLE:
1407 if (x == CONST0_RTX (mode))
1408 *total = 0;
1409 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1410 || (outer_code == AND && and_operand (x, VOIDmode)))
1411 *total = 0;
1412 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1413 *total = 2;
1414 else
1415 *total = COSTS_N_INSNS (2);
1416 return true;
1418 case CONST:
1419 case SYMBOL_REF:
1420 case LABEL_REF:
1421 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1422 *total = COSTS_N_INSNS (outer_code != MEM);
1423 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1424 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1425 else if (tls_symbolic_operand_type (x))
1426 /* Estimate of cost for call_pal rduniq. */
1427 /* ??? How many insns do we emit here? More than one... */
1428 *total = COSTS_N_INSNS (15);
1429 else
1430 /* Otherwise we do a load from the GOT. */
1431 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1432 return true;
1434 case HIGH:
1435 /* This is effectively an add_operand. */
1436 *total = 2;
1437 return true;
1439 case PLUS:
1440 case MINUS:
1441 if (float_mode_p)
1442 *total = cost_data->fp_add;
1443 else if (GET_CODE (XEXP (x, 0)) == MULT
1444 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1446 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1447 (enum rtx_code) outer_code, opno, speed)
1448 + rtx_cost (XEXP (x, 1),
1449 (enum rtx_code) outer_code, opno, speed)
1450 + COSTS_N_INSNS (1));
1451 return true;
1453 return false;
1455 case MULT:
1456 if (float_mode_p)
1457 *total = cost_data->fp_mult;
1458 else if (mode == DImode)
1459 *total = cost_data->int_mult_di;
1460 else
1461 *total = cost_data->int_mult_si;
1462 return false;
1464 case ASHIFT:
1465 if (CONST_INT_P (XEXP (x, 1))
1466 && INTVAL (XEXP (x, 1)) <= 3)
1468 *total = COSTS_N_INSNS (1);
1469 return false;
1471 /* FALLTHRU */
1473 case ASHIFTRT:
1474 case LSHIFTRT:
1475 *total = cost_data->int_shift;
1476 return false;
1478 case IF_THEN_ELSE:
1479 if (float_mode_p)
1480 *total = cost_data->fp_add;
1481 else
1482 *total = cost_data->int_cmov;
1483 return false;
1485 case DIV:
1486 case UDIV:
1487 case MOD:
1488 case UMOD:
1489 if (!float_mode_p)
1490 *total = cost_data->int_div;
1491 else if (mode == SFmode)
1492 *total = cost_data->fp_div_sf;
1493 else
1494 *total = cost_data->fp_div_df;
1495 return false;
1497 case MEM:
1498 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1499 return true;
1501 case NEG:
1502 if (! float_mode_p)
1504 *total = COSTS_N_INSNS (1);
1505 return false;
1507 /* FALLTHRU */
1509 case ABS:
1510 if (! float_mode_p)
1512 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1513 return false;
1515 /* FALLTHRU */
1517 case FLOAT:
1518 case UNSIGNED_FLOAT:
1519 case FIX:
1520 case UNSIGNED_FIX:
1521 case FLOAT_TRUNCATE:
1522 *total = cost_data->fp_add;
1523 return false;
1525 case FLOAT_EXTEND:
1526 if (MEM_P (XEXP (x, 0)))
1527 *total = 0;
1528 else
1529 *total = cost_data->fp_add;
1530 return false;
1532 default:
1533 return false;
1537 /* REF is an alignable memory location. Place an aligned SImode
1538 reference into *PALIGNED_MEM and the number of bits to shift into
1539 *PBITNUM.  */
1542 void
1543 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1545 rtx base;
1546 HOST_WIDE_INT disp, offset;
1548 gcc_assert (MEM_P (ref));
1550 if (reload_in_progress
1551 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1553 base = find_replacement (&XEXP (ref, 0));
1554 gcc_assert (memory_address_p (GET_MODE (ref), base));
1556 else
1557 base = XEXP (ref, 0);
1559 if (GET_CODE (base) == PLUS)
1560 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1561 else
1562 disp = 0;
1564 /* Find the byte offset within an aligned word. If the memory itself is
1565 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1566 will have examined the base register and determined it is aligned, and
1567 thus displacements from it are naturally alignable. */
1568 if (MEM_ALIGN (ref) >= 32)
1569 offset = 0;
1570 else
1571 offset = disp & 3;
1573 /* The location should not cross aligned word boundary. */
1574 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1575 <= GET_MODE_SIZE (SImode));
1577 /* Access the entire aligned word. */
1578 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1580 /* Convert the byte offset within the word to a bit offset. */
1581 offset *= BITS_PER_UNIT;
1582 *pbitnum = GEN_INT (offset);
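/* Worked example: for a halfword reference at base+6 whose alignment is
   unknown, offset = 6 & 3 = 2, so *PALIGNED_MEM is the aligned SImode
   word at base+4 and *PBITNUM is 16 -- the halfword sits 16 bits up
   within that longword (Alpha is little-endian).  */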
1585 /* Similar, but just get the address.  Handle the two reload cases.  */
1589 get_unaligned_address (rtx ref)
1591 rtx base;
1592 HOST_WIDE_INT offset = 0;
1594 gcc_assert (MEM_P (ref));
1596 if (reload_in_progress
1597 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1599 base = find_replacement (&XEXP (ref, 0));
1601 gcc_assert (memory_address_p (GET_MODE (ref), base));
1603 else
1604 base = XEXP (ref, 0);
1606 if (GET_CODE (base) == PLUS)
1607 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1609 return plus_constant (Pmode, base, offset);
1612 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1613 X is always returned in a register. */
1616 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1618 if (GET_CODE (addr) == PLUS)
1620 ofs += INTVAL (XEXP (addr, 1));
1621 addr = XEXP (addr, 0);
1624 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1625 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1628 /* On the Alpha, all (non-symbolic) constants except zero go into
1629 a floating-point register via memory. Note that we cannot
1630 return anything that is not a subset of RCLASS, and that some
1631 symbolic constants cannot be dropped to memory. */
1633 enum reg_class
1634 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1636 /* Zero is present in any register class. */
1637 if (x == CONST0_RTX (GET_MODE (x)))
1638 return rclass;
1640 /* These sorts of constants we can easily drop to memory. */
1641 if (CONST_INT_P (x)
1642 || GET_CODE (x) == CONST_DOUBLE
1643 || GET_CODE (x) == CONST_VECTOR)
1645 if (rclass == FLOAT_REGS)
1646 return NO_REGS;
1647 if (rclass == ALL_REGS)
1648 return GENERAL_REGS;
1649 return rclass;
1652 /* All other kinds of constants should not (and in the case of HIGH
1653 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1654 secondary reload. */
1655 if (CONSTANT_P (x))
1656 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1658 return rclass;
1661 /* Inform reload about cases where moving X with a mode MODE to a register in
1662 RCLASS requires an extra scratch or immediate register. Return the class
1663 needed for the immediate register. */
1665 static reg_class_t
1666 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1667 machine_mode mode, secondary_reload_info *sri)
1669 enum reg_class rclass = (enum reg_class) rclass_i;
1671 /* Loading and storing HImode or QImode values to and from memory
1672 usually requires a scratch register. */
1673 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1675 if (any_memory_operand (x, mode))
1677 if (in_p)
1679 if (!aligned_memory_operand (x, mode))
1680 sri->icode = direct_optab_handler (reload_in_optab, mode);
1682 else
1683 sri->icode = direct_optab_handler (reload_out_optab, mode);
1684 return NO_REGS;
1688 /* We also cannot do integral arithmetic into FP regs, as might result
1689 from register elimination into a DImode fp register. */
1690 if (rclass == FLOAT_REGS)
1692 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1693 return GENERAL_REGS;
1694 if (in_p && INTEGRAL_MODE_P (mode)
1695 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1696 return GENERAL_REGS;
1699 return NO_REGS;
1702 /* Given SEQ, which is an INSN list, look for any MEMs in either
1703 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1704 volatile flags from REF into each of the MEMs found. If REF is not
1705 a MEM, don't do anything. */
1707 void
1708 alpha_set_memflags (rtx seq, rtx ref)
1710 rtx_insn *insn;
1712 if (!MEM_P (ref))
1713 return;
1715 /* This is only called from alpha.md, after having had something
1716 generated from one of the insn patterns. So if everything is
1717 zero, the pattern is already up-to-date. */
1718 if (!MEM_VOLATILE_P (ref)
1719 && !MEM_NOTRAP_P (ref)
1720 && !MEM_READONLY_P (ref))
1721 return;
1723 subrtx_var_iterator::array_type array;
1724 for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
1725 if (INSN_P (insn))
1726 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1728 rtx x = *iter;
1729 if (MEM_P (x))
1731 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
1732 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
1733 MEM_READONLY_P (x) = MEM_READONLY_P (ref);
1734 /* Sadly, we cannot use alias sets because the extra
1735 aliasing produced by the AND interferes. Given that
1736 two-byte quantities are the only thing we would be
1737 able to differentiate anyway, there does not seem to
1738 be any point in convoluting the early out of the
1739 alias check. */
1740 iter.skip_subrtxes ();
1743 else
1744 gcc_unreachable ();
1747 static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
1748 int, bool);
1750 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1751 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1752 and return pc_rtx if successful. */
1754 static rtx
1755 alpha_emit_set_const_1 (rtx target, machine_mode mode,
1756 HOST_WIDE_INT c, int n, bool no_output)
1758 HOST_WIDE_INT new_const;
1759 int i, bits;
1760 /* Use a pseudo if highly optimizing and still generating RTL. */
1761 rtx subtarget
1762 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1763 rtx temp, insn;
1765 /* If this is a sign-extended 32-bit constant, we can do this in at most
1766 three insns, so do it if we have enough insns left. We always have
1767 a sign-extended 32-bit constant when compiling on a narrow machine. */
1769 if (HOST_BITS_PER_WIDE_INT != 64
1770 || c >> 31 == -1 || c >> 31 == 0)
1772 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1773 HOST_WIDE_INT tmp1 = c - low;
1774 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1775 HOST_WIDE_INT extra = 0;
1777 /* If HIGH will be interpreted as negative but the constant is
1778 positive, we must adjust it to do two ldha insns. */
1780 if ((high & 0x8000) != 0 && c >= 0)
1782 extra = 0x4000;
1783 tmp1 -= 0x40000000;
1784 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1787 if (c == low || (low == 0 && extra == 0))
1789 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1790 but that meant that we can't handle INT_MIN on 32-bit machines
1791 (like NT/Alpha), because we recurse indefinitely through
1792 emit_move_insn to gen_movdi. So instead, since we know exactly
1793 what we want, create it explicitly. */
1795 if (no_output)
1796 return pc_rtx;
1797 if (target == NULL)
1798 target = gen_reg_rtx (mode);
1799 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1800 return target;
1802 else if (n >= 2 + (extra != 0))
1804 if (no_output)
1805 return pc_rtx;
1806 if (!can_create_pseudo_p ())
1808 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1809 temp = target;
1811 else
1812 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1813 subtarget, mode);
1815 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1816 This means that if we go through expand_binop, we'll try to
1817 generate extensions, etc, which will require new pseudos, which
1818 will fail during some split phases. The SImode add patterns
1819 still exist, but are not named. So build the insns by hand. */
1821 if (extra != 0)
1823 if (! subtarget)
1824 subtarget = gen_reg_rtx (mode);
1825 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1826 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1827 emit_insn (insn);
1828 temp = subtarget;
1831 if (target == NULL)
1832 target = gen_reg_rtx (mode);
1833 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1834 insn = gen_rtx_SET (VOIDmode, target, insn);
1835 emit_insn (insn);
1836 return target;
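/* Worked example of the "two ldah" adjustment above: for c = 0x7fff8000,
   low is -0x8000 and the naive high chunk would be negative, so extra
   becomes 0x4000 and high 0x4000; the value is then built as
   ldah 0x4000, ldah 0x4000, lda -0x8000, i.e.
   0x40000000 + 0x40000000 - 0x8000 = 0x7fff8000.  */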
1840 /* If we couldn't do it that way, try some other methods. But if we have
1841 no instructions left, don't bother. Likewise, if this is SImode and
1842 we can't make pseudos, we can't do anything since the expand_binop
1843 and expand_unop calls will widen and try to make pseudos. */
1845 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1846 return 0;
1848 /* Next, see if we can load a related constant and then shift and possibly
1849 negate it to get the constant we want.  Try this once with each
1850 increasing number of insns.  */
1852 for (i = 1; i < n; i++)
1854 /* First, see if, minus some low bits, we have an easy load of
1855 the high bits.  */
1857 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1858 if (new_const != 0)
1860 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1861 if (temp)
1863 if (no_output)
1864 return temp;
1865 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1866 target, 0, OPTAB_WIDEN);
1870 /* Next try complementing. */
1871 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1872 if (temp)
1874 if (no_output)
1875 return temp;
1876 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1879 /* Next try to form a constant and do a left shift. We can do this
1880 if some low-order bits are zero; the exact_log2 call below tells
1881 us that information. The bits we are shifting out could be any
1882 value, but here we'll just try the 0- and sign-extended forms of
1883 the constant. To try to increase the chance of having the same
1884 constant in more than one insn, start at the highest number of
1885 bits to shift, but try all possibilities in case a ZAPNOT will
1886 be useful. */
1888 bits = exact_log2 (c & -c);
1889 if (bits > 0)
1890 for (; bits > 0; bits--)
1892 new_const = c >> bits;
1893 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1894 if (!temp && c < 0)
1896 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1897 temp = alpha_emit_set_const (subtarget, mode, new_const,
1898 i, no_output);
1900 if (temp)
1902 if (no_output)
1903 return temp;
1904 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1905 target, 0, OPTAB_WIDEN);
1909 /* Now try high-order zero bits. Here we try the shifted-in bits as
1910 all zero and all ones. Be careful to avoid shifting outside the
1911 mode and to avoid shifting outside the host wide int size. */
1912 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1913 confuse the recursive call and set all of the high 32 bits. */
1915 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1916 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1917 if (bits > 0)
1918 for (; bits > 0; bits--)
1920 new_const = c << bits;
1921 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1922 if (!temp)
1924 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1925 temp = alpha_emit_set_const (subtarget, mode, new_const,
1926 i, no_output);
1928 if (temp)
1930 if (no_output)
1931 return temp;
1932 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1933 target, 1, OPTAB_WIDEN);
1937 /* Now try high-order 1 bits. We get that with a sign-extension.
1938 But one bit isn't enough here. Be careful to avoid shifting outside
1939 the mode and to avoid shifting outside the host wide int size. */
1941 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1942 - floor_log2 (~ c) - 2);
1943 if (bits > 0)
1944 for (; bits > 0; bits--)
1946 new_const = c << bits;
1947 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1948 if (!temp)
1950 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1951 temp = alpha_emit_set_const (subtarget, mode, new_const,
1952 i, no_output);
1954 if (temp)
1956 if (no_output)
1957 return temp;
1958 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1959 target, 0, OPTAB_WIDEN);
1964 #if HOST_BITS_PER_WIDE_INT == 64
1965 /* Finally, see if can load a value into the target that is the same as the
1966 constant except that all bytes that are 0 are changed to be 0xff. If we
1967 can, then we can do a ZAPNOT to obtain the desired constant. */
1969 new_const = c;
1970 for (i = 0; i < 64; i += 8)
1971 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1972 new_const |= (HOST_WIDE_INT) 0xff << i;
1974 /* We are only called for SImode and DImode. If this is SImode, ensure that
1975 we are sign extended to a full word. */
1977 if (mode == SImode)
1978 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1980 if (new_const != c)
1982 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1983 if (temp)
1985 if (no_output)
1986 return temp;
1987 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1988 target, 0, OPTAB_WIDEN);
1991 #endif
1993 return 0;
1996 /* Try to output insns to set TARGET equal to the constant C if it can be
1997 done in at most N insns. Do all computations in MODE. Returns the place
1998 where the output has been placed if it can be done and the insns have been
1999 emitted. If it would take more than N insns, zero is returned and no
2000 insns are emitted. */
2002 static rtx
2003 alpha_emit_set_const (rtx target, machine_mode mode,
2004 HOST_WIDE_INT c, int n, bool no_output)
2006 machine_mode orig_mode = mode;
2007 rtx orig_target = target;
2008 rtx result = 0;
2009 int i;
2011 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
2012 can't load this constant in one insn, do this in DImode. */
2013 if (!can_create_pseudo_p () && mode == SImode
2014 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
2016 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
2017 if (result)
2018 return result;
2020 target = no_output ? NULL : gen_lowpart (DImode, target);
2021 mode = DImode;
2023 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
2025 target = no_output ? NULL : gen_lowpart (DImode, target);
2026 mode = DImode;
2029 /* Try 1 insn, then 2, then up to N. */
2030 for (i = 1; i <= n; i++)
2032 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
2033 if (result)
2035 rtx_insn *insn;
2036 rtx set;
2038 if (no_output)
2039 return result;
2041 insn = get_last_insn ();
2042 set = single_set (insn);
2043 if (! CONSTANT_P (SET_SRC (set)))
2044 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2045 break;
2049 /* Allow for the case where we changed the mode of TARGET. */
2050 if (result)
2052 if (result == target)
2053 result = orig_target;
2054 else if (mode != orig_mode)
2055 result = gen_lowpart (orig_mode, result);
2058 return result;
2061 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2062 fall back to a straight forward decomposition. We do this to avoid
2063 exponential run times encountered when looking for longer sequences
2064 with alpha_emit_set_const. */
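/* Host-side sketch of the four-piece decomposition below, done in unsigned
   arithmetic so the wraparound is well defined: C is split into two
   sign-extended 16-bit pieces and two sign-extended 32-bit pieces, and
   ((d4 + d3) << 32) + d2 + d1 reassembles it, mirroring the construct-high,
   shift, add-low sequence emitted below.  Illustration only; sketch_* names
   are not part of GCC.  */
#if 0
#include <assert.h>

static void
sketch_long_const_split (unsigned long long c)
{
  unsigned long long x = c;
  unsigned long long d1, d2, d3, d4;

  d1 = ((x & 0xffff) ^ 0x8000) - 0x8000;             /* low 16-bit piece  */
  x -= d1;
  d2 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000; /* low 32-bit piece  */
  x = (x - d2) >> 32;
  d3 = ((x & 0xffff) ^ 0x8000) - 0x8000;             /* high 16-bit piece */
  x -= d3;
  d4 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000; /* high 32-bit piece */

  assert ((((d4 + d3) << 32) + d2 + d1) == c);
}
#endif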
2066 static rtx
2067 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2069 HOST_WIDE_INT d1, d2, d3, d4;
2071 /* Decompose the entire word */
2072 #if HOST_BITS_PER_WIDE_INT >= 64
2073 gcc_assert (c2 == -(c1 < 0));
2074 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2075 c1 -= d1;
2076 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2077 c1 = (c1 - d2) >> 32;
2078 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2079 c1 -= d3;
2080 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2081 gcc_assert (c1 == d4);
2082 #else
2083 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2084 c1 -= d1;
2085 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2086 gcc_assert (c1 == d2);
2087 c2 += (d2 < 0);
2088 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2089 c2 -= d3;
2090 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2091 gcc_assert (c2 == d4);
2092 #endif
2094 /* Construct the high word */
2095 if (d4)
2097 emit_move_insn (target, GEN_INT (d4));
2098 if (d3)
2099 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2101 else
2102 emit_move_insn (target, GEN_INT (d3));
2104 /* Shift it into place */
2105 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2107 /* Add in the low bits. */
2108 if (d2)
2109 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2110 if (d1)
2111 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2113 return target;
2116 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2117 the low 64 bits. */
2119 static void
2120 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2122 HOST_WIDE_INT i0, i1;
2124 if (GET_CODE (x) == CONST_VECTOR)
2125 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2128 if (CONST_INT_P (x))
2130 i0 = INTVAL (x);
2131 i1 = -(i0 < 0);
2133 else if (HOST_BITS_PER_WIDE_INT >= 64)
2135 i0 = CONST_DOUBLE_LOW (x);
2136 i1 = -(i0 < 0);
2138 else
2140 i0 = CONST_DOUBLE_LOW (x);
2141 i1 = CONST_DOUBLE_HIGH (x);
2144 *p0 = i0;
2145 *p1 = i1;
2148 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
2149 we are willing to load the value into a register via a move pattern.
2150 Normally this is all symbolic constants, integral constants that
2151 take three or fewer instructions, and floating-point zero. */
2153 bool
2154 alpha_legitimate_constant_p (machine_mode mode, rtx x)
2156 HOST_WIDE_INT i0, i1;
2158 switch (GET_CODE (x))
2160 case LABEL_REF:
2161 case HIGH:
2162 return true;
2164 case CONST:
2165 if (GET_CODE (XEXP (x, 0)) == PLUS
2166 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2167 x = XEXP (XEXP (x, 0), 0);
2168 else
2169 return true;
2171 if (GET_CODE (x) != SYMBOL_REF)
2172 return true;
2174 /* FALLTHRU */
2176 case SYMBOL_REF:
2177 /* TLS symbols are never valid. */
2178 return SYMBOL_REF_TLS_MODEL (x) == 0;
2180 case CONST_DOUBLE:
2181 if (x == CONST0_RTX (mode))
2182 return true;
2183 if (FLOAT_MODE_P (mode))
2184 return false;
2185 goto do_integer;
2187 case CONST_VECTOR:
2188 if (x == CONST0_RTX (mode))
2189 return true;
2190 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2191 return false;
2192 if (GET_MODE_SIZE (mode) != 8)
2193 return false;
2194 goto do_integer;
2196 case CONST_INT:
2197 do_integer:
2198 if (TARGET_BUILD_CONSTANTS)
2199 return true;
2200 alpha_extract_integer (x, &i0, &i1);
2201 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2202 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2203 return false;
2205 default:
2206 return false;
2210 /* Operand 1 is known to be a constant, and should require more than one
2211 instruction to load. Emit that multi-part load. */
2213 bool
2214 alpha_split_const_mov (machine_mode mode, rtx *operands)
2216 HOST_WIDE_INT i0, i1;
2217 rtx temp = NULL_RTX;
2219 alpha_extract_integer (operands[1], &i0, &i1);
2221 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2222 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2224 if (!temp && TARGET_BUILD_CONSTANTS)
2225 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2227 if (temp)
2229 if (!rtx_equal_p (operands[0], temp))
2230 emit_move_insn (operands[0], temp);
2231 return true;
2234 return false;
2237 /* Expand a move instruction; return true if all work is done.
2238 We don't handle non-bwx subword loads here. */
2240 bool
2241 alpha_expand_mov (machine_mode mode, rtx *operands)
2243 rtx tmp;
2245 /* If the output is not a register, the input must be. */
2246 if (MEM_P (operands[0])
2247 && ! reg_or_0_operand (operands[1], mode))
2248 operands[1] = force_reg (mode, operands[1]);
2250 /* Allow legitimize_address to perform some simplifications. */
2251 if (mode == Pmode && symbolic_operand (operands[1], mode))
2253 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2254 if (tmp)
2256 if (tmp == operands[0])
2257 return true;
2258 operands[1] = tmp;
2259 return false;
2263 /* Early out for non-constants and valid constants. */
2264 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2265 return false;
2267 /* Split large integers. */
2268 if (CONST_INT_P (operands[1])
2269 || GET_CODE (operands[1]) == CONST_DOUBLE
2270 || GET_CODE (operands[1]) == CONST_VECTOR)
2272 if (alpha_split_const_mov (mode, operands))
2273 return true;
2276 /* Otherwise we've nothing left but to drop the thing to memory. */
2277 tmp = force_const_mem (mode, operands[1]);
2279 if (tmp == NULL_RTX)
2280 return false;
2282 if (reload_in_progress)
2284 emit_move_insn (operands[0], XEXP (tmp, 0));
2285 operands[1] = replace_equiv_address (tmp, operands[0]);
2287 else
2288 operands[1] = validize_mem (tmp);
2289 return false;
2292 /* Expand a non-bwx QImode or HImode move instruction;
2293 return true if all work is done. */
2295 bool
2296 alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
2298 rtx seq;
2300 /* If the output is not a register, the input must be. */
2301 if (MEM_P (operands[0]))
2302 operands[1] = force_reg (mode, operands[1]);
2304 /* Handle four memory cases, unaligned and aligned for either the input
2305 or the output. The only case where we can be called during reload is
2306 for aligned loads; all other cases require temporaries. */
2308 if (any_memory_operand (operands[1], mode))
2310 if (aligned_memory_operand (operands[1], mode))
2312 if (reload_in_progress)
2314 if (mode == QImode)
2315 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2316 else
2317 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2318 emit_insn (seq);
2320 else
2322 rtx aligned_mem, bitnum;
2323 rtx scratch = gen_reg_rtx (SImode);
2324 rtx subtarget;
2325 bool copyout;
2327 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2329 subtarget = operands[0];
2330 if (REG_P (subtarget))
2331 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2332 else
2333 subtarget = gen_reg_rtx (DImode), copyout = true;
2335 if (mode == QImode)
2336 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2337 bitnum, scratch);
2338 else
2339 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2340 bitnum, scratch);
2341 emit_insn (seq);
2343 if (copyout)
2344 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2347 else
2349 /* Don't pass these as parameters since that makes the generated
2350 code depend on parameter evaluation order which will cause
2351 bootstrap failures. */
2353 rtx temp1, temp2, subtarget, ua;
2354 bool copyout;
2356 temp1 = gen_reg_rtx (DImode);
2357 temp2 = gen_reg_rtx (DImode);
2359 subtarget = operands[0];
2360 if (REG_P (subtarget))
2361 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2362 else
2363 subtarget = gen_reg_rtx (DImode), copyout = true;
2365 ua = get_unaligned_address (operands[1]);
2366 if (mode == QImode)
2367 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2368 else
2369 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2371 alpha_set_memflags (seq, operands[1]);
2372 emit_insn (seq);
2374 if (copyout)
2375 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2377 return true;
2380 if (any_memory_operand (operands[0], mode))
2382 if (aligned_memory_operand (operands[0], mode))
2384 rtx aligned_mem, bitnum;
2385 rtx temp1 = gen_reg_rtx (SImode);
2386 rtx temp2 = gen_reg_rtx (SImode);
2388 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2390 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2391 temp1, temp2));
2393 else
2395 rtx temp1 = gen_reg_rtx (DImode);
2396 rtx temp2 = gen_reg_rtx (DImode);
2397 rtx temp3 = gen_reg_rtx (DImode);
2398 rtx ua = get_unaligned_address (operands[0]);
2400 if (mode == QImode)
2401 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2402 else
2403 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2405 alpha_set_memflags (seq, operands[0]);
2406 emit_insn (seq);
2408 return true;
2411 return false;
2414 /* Implement the movmisalign patterns. One of the operands is a memory
2415 that is not naturally aligned. Emit instructions to load it. */
2417 void
2418 alpha_expand_movmisalign (machine_mode mode, rtx *operands)
2420 /* Honor misaligned loads, for those we promised to do so. */
2421 if (MEM_P (operands[1]))
2423 rtx tmp;
2425 if (register_operand (operands[0], mode))
2426 tmp = operands[0];
2427 else
2428 tmp = gen_reg_rtx (mode);
2430 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2431 if (tmp != operands[0])
2432 emit_move_insn (operands[0], tmp);
2434 else if (MEM_P (operands[0]))
2436 if (!reg_or_0_operand (operands[1], mode))
2437 operands[1] = force_reg (mode, operands[1]);
2438 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2440 else
2441 gcc_unreachable ();
2444 /* Generate an unsigned DImode to FP conversion. This is the same code
2445 optabs would emit if we didn't have TFmode patterns.
2447 For SFmode, this is the only construction I've found that can pass
2448 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2449 intermediates will work, because you'll get intermediate rounding
2450 that ruins the end result. Some of this could be fixed by turning
2451 on round-to-positive-infinity, but that requires diddling the fpsr,
2452 which kills performance. I tried turning this around and converting
2453 to a negative number, so that I could turn on /m, but either I did
2454 it wrong or there's something else, because I wound up with the exact
2455 same single-bit error. There is a branch-less form of this same code:
2457 srl $16,1,$1
2458 and $16,1,$2
2459 cmplt $16,0,$3
2460 or $1,$2,$2
2461 cmovge $16,$16,$2
2462 itoft $3,$f10
2463 itoft $2,$f11
2464 cvtqs $f11,$f11
2465 adds $f11,$f11,$f0
2466 fcmoveq $f10,$f11,$f0
2468 I'm not using it because it's the same number of instructions as
2469 this branch-full form, and it has more serialized long latency
2470 instructions on the critical path.
2472 For DFmode, we can avoid rounding errors by breaking up the word
2473 into two pieces, converting them separately, and adding them back:
2475 LC0: .long 0,0x5f800000
2477 itoft $16,$f11
2478 lda $2,LC0
2479 cmplt $16,0,$1
2480 cpyse $f11,$f31,$f10
2481 cpyse $f31,$f11,$f11
2482 s4addq $1,$2,$1
2483 lds $f12,0($1)
2484 cvtqt $f10,$f10
2485 cvtqt $f11,$f11
2486 addt $f12,$f10,$f0
2487 addt $f0,$f11,$f0
2489 This doesn't seem to be a clear-cut win over the optabs form.
2490 It probably all depends on the distribution of numbers being
2491 converted -- in the optabs form, all but high-bit-set has a
2492 much lower minimum execution time. */
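/* Host-side sketch of the branchy expansion below: nonnegative inputs
   convert directly, while inputs with the high bit set are halved with
   the low bit folded back in (round to odd), converted, then doubled.
   Illustration only; the expander emits RTL, not this C, and the sketch_*
   name is not part of GCC.  */
#if 0
#include <stdint.h>

static double
sketch_floatuns (uint64_t x)
{
  if ((int64_t) x >= 0)
    return (double) (int64_t) x;

  /* Keeping the discarded low bit ORed into the halved value makes the
     final doubling round the same way a direct conversion would.  */
  uint64_t half = (x >> 1) | (x & 1);
  double f = (double) (int64_t) half;
  return f + f;
}
#endif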
2494 void
2495 alpha_emit_floatuns (rtx operands[2])
2497 rtx neglab, donelab, i0, i1, f0, in, out;
2498 machine_mode mode;
2500 out = operands[0];
2501 in = force_reg (DImode, operands[1]);
2502 mode = GET_MODE (out);
2503 neglab = gen_label_rtx ();
2504 donelab = gen_label_rtx ();
2505 i0 = gen_reg_rtx (DImode);
2506 i1 = gen_reg_rtx (DImode);
2507 f0 = gen_reg_rtx (mode);
2509 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2511 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2512 emit_jump_insn (gen_jump (donelab));
2513 emit_barrier ();
2515 emit_label (neglab);
2517 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2518 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2519 emit_insn (gen_iordi3 (i0, i0, i1));
2520 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2521 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2523 emit_label (donelab);
2526 /* Generate the comparison for a conditional branch. */
2528 void
2529 alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
2531 enum rtx_code cmp_code, branch_code;
2532 machine_mode branch_mode = VOIDmode;
2533 enum rtx_code code = GET_CODE (operands[0]);
2534 rtx op0 = operands[1], op1 = operands[2];
2535 rtx tem;
2537 if (cmp_mode == TFmode)
2539 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2540 op1 = const0_rtx;
2541 cmp_mode = DImode;
2544 /* The general case: fold the comparison code to the types of compares
2545 that we have, choosing the branch as necessary. */
2546 switch (code)
2548 case EQ: case LE: case LT: case LEU: case LTU:
2549 case UNORDERED:
2550 /* We have these compares. */
2551 cmp_code = code, branch_code = NE;
2552 break;
2554 case NE:
2555 case ORDERED:
2556 /* These must be reversed. */
2557 cmp_code = reverse_condition (code), branch_code = EQ;
2558 break;
2560 case GE: case GT: case GEU: case GTU:
2561 /* For FP, we swap them, for INT, we reverse them. */
2562 if (cmp_mode == DFmode)
2564 cmp_code = swap_condition (code);
2565 branch_code = NE;
2566 std::swap (op0, op1);
2568 else
2570 cmp_code = reverse_condition (code);
2571 branch_code = EQ;
2573 break;
2575 default:
2576 gcc_unreachable ();
2579 if (cmp_mode == DFmode)
2581 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2583 /* When we are not as concerned about non-finite values, and we
2584 are comparing against zero, we can branch directly. */
2585 if (op1 == CONST0_RTX (DFmode))
2586 cmp_code = UNKNOWN, branch_code = code;
2587 else if (op0 == CONST0_RTX (DFmode))
2589 /* Undo the swap we probably did just above. */
2590 std::swap (op0, op1);
2591 branch_code = swap_condition (cmp_code);
2592 cmp_code = UNKNOWN;
2595 else
2597 /* ??? We mark the branch mode to be CCmode to prevent the
2598 compare and branch from being combined, since the compare
2599 insn follows IEEE rules that the branch does not. */
2600 branch_mode = CCmode;
2603 else
2605 /* The following optimizations are only for signed compares. */
2606 if (code != LEU && code != LTU && code != GEU && code != GTU)
2608 /* Whee. Compare and branch against 0 directly. */
2609 if (op1 == const0_rtx)
2610 cmp_code = UNKNOWN, branch_code = code;
2612 /* If the constant doesn't fit into an immediate, but can
2613 be generated by lda/ldah, we adjust the argument and
2614 compare against zero, so we can use beq/bne directly. */
2615 /* ??? Don't do this when comparing against symbols, otherwise
2616 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2617 be declared false out of hand (at least for non-weak). */
2618 else if (CONST_INT_P (op1)
2619 && (code == EQ || code == NE)
2620 && !(symbolic_operand (op0, VOIDmode)
2621 || (REG_P (op0) && REG_POINTER (op0))))
2623 rtx n_op1 = GEN_INT (-INTVAL (op1));
2625 if (! satisfies_constraint_I (op1)
2626 && (satisfies_constraint_K (n_op1)
2627 || satisfies_constraint_L (n_op1)))
2628 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2632 if (!reg_or_0_operand (op0, DImode))
2633 op0 = force_reg (DImode, op0);
2634 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2635 op1 = force_reg (DImode, op1);
2638 /* Emit an initial compare instruction, if necessary. */
2639 tem = op0;
2640 if (cmp_code != UNKNOWN)
2642 tem = gen_reg_rtx (cmp_mode);
2643 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2646 /* Emit the branch instruction. */
2647 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2648 gen_rtx_IF_THEN_ELSE (VOIDmode,
2649 gen_rtx_fmt_ee (branch_code,
2650 branch_mode, tem,
2651 CONST0_RTX (cmp_mode)),
2652 gen_rtx_LABEL_REF (VOIDmode,
2653 operands[3]),
2654 pc_rtx));
2655 emit_jump_insn (tem);
2658 /* Certain simplifications can be done to make invalid setcc operations
2659 valid. Return true if we emitted a valid setcc, false if we can't work. */
2661 bool
2662 alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
2664 enum rtx_code cmp_code;
2665 enum rtx_code code = GET_CODE (operands[1]);
2666 rtx op0 = operands[2], op1 = operands[3];
2667 rtx tmp;
2669 if (cmp_mode == TFmode)
2671 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2672 op1 = const0_rtx;
2673 cmp_mode = DImode;
2676 if (cmp_mode == DFmode && !TARGET_FIX)
2677 return 0;
2679 /* The general case: fold the comparison code to the types of compares
2680 that we have, choosing the branch as necessary. */
2682 cmp_code = UNKNOWN;
2683 switch (code)
2685 case EQ: case LE: case LT: case LEU: case LTU:
2686 case UNORDERED:
2687 /* We have these compares. */
2688 if (cmp_mode == DFmode)
2689 cmp_code = code, code = NE;
2690 break;
2692 case NE:
2693 if (cmp_mode == DImode && op1 == const0_rtx)
2694 break;
2695 /* FALLTHRU */
2697 case ORDERED:
2698 cmp_code = reverse_condition (code);
2699 code = EQ;
2700 break;
2702 case GE: case GT: case GEU: case GTU:
2703 /* These normally need swapping, but for integer zero we have
2704 special patterns that recognize swapped operands. */
2705 if (cmp_mode == DImode && op1 == const0_rtx)
2706 break;
2707 code = swap_condition (code);
2708 if (cmp_mode == DFmode)
2709 cmp_code = code, code = NE;
2710 std::swap (op0, op1);
2711 break;
2713 default:
2714 gcc_unreachable ();
2717 if (cmp_mode == DImode)
2719 if (!register_operand (op0, DImode))
2720 op0 = force_reg (DImode, op0);
2721 if (!reg_or_8bit_operand (op1, DImode))
2722 op1 = force_reg (DImode, op1);
2725 /* Emit an initial compare instruction, if necessary. */
2726 if (cmp_code != UNKNOWN)
2728 tmp = gen_reg_rtx (cmp_mode);
2729 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2730 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2732 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2733 op1 = const0_rtx;
2736 /* Emit the setcc instruction. */
2737 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2738 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2739 return true;
2743 /* Rewrite a comparison against zero CMP of the form
2744 (CODE (cc0) (const_int 0)) so it can be written validly in
2745 a conditional move (if_then_else CMP ...).
2746 If both of the operands that set cc0 are nonzero we must emit
2747 an insn to perform the compare (it can't be done within
2748 the conditional move). */
2750 rtx
2751 alpha_emit_conditional_move (rtx cmp, machine_mode mode)
2753 enum rtx_code code = GET_CODE (cmp);
2754 enum rtx_code cmov_code = NE;
2755 rtx op0 = XEXP (cmp, 0);
2756 rtx op1 = XEXP (cmp, 1);
2757 machine_mode cmp_mode
2758 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2759 machine_mode cmov_mode = VOIDmode;
2760 int local_fast_math = flag_unsafe_math_optimizations;
2761 rtx tem;
2763 if (cmp_mode == TFmode)
2765 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2766 op1 = const0_rtx;
2767 cmp_mode = DImode;
2770 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2772 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2774 enum rtx_code cmp_code;
2776 if (! TARGET_FIX)
2777 return 0;
2779 /* If we have fp<->int register move instructions, do a cmov by
2780 performing the comparison in fp registers, and move the
2781 zero/nonzero value to integer registers, where we can then
2782 use a normal cmov, or vice-versa. */
2784 switch (code)
2786 case EQ: case LE: case LT: case LEU: case LTU:
2787 case UNORDERED:
2788 /* We have these compares. */
2789 cmp_code = code, code = NE;
2790 break;
2792 case NE:
2793 case ORDERED:
2794 /* These must be reversed. */
2795 cmp_code = reverse_condition (code), code = EQ;
2796 break;
2798 case GE: case GT: case GEU: case GTU:
2799 /* These normally need swapping, but for integer zero we have
2800 special patterns that recognize swapped operands. */
2801 if (cmp_mode == DImode && op1 == const0_rtx)
2802 cmp_code = code, code = NE;
2803 else
2805 cmp_code = swap_condition (code);
2806 code = NE;
2807 std::swap (op0, op1);
2809 break;
2811 default:
2812 gcc_unreachable ();
2815 if (cmp_mode == DImode)
2817 if (!reg_or_0_operand (op0, DImode))
2818 op0 = force_reg (DImode, op0);
2819 if (!reg_or_8bit_operand (op1, DImode))
2820 op1 = force_reg (DImode, op1);
2823 tem = gen_reg_rtx (cmp_mode);
2824 emit_insn (gen_rtx_SET (VOIDmode, tem,
2825 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2826 op0, op1)));
2828 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2829 op0 = gen_lowpart (cmp_mode, tem);
2830 op1 = CONST0_RTX (cmp_mode);
2831 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2832 local_fast_math = 1;
2835 if (cmp_mode == DImode)
2837 if (!reg_or_0_operand (op0, DImode))
2838 op0 = force_reg (DImode, op0);
2839 if (!reg_or_8bit_operand (op1, DImode))
2840 op1 = force_reg (DImode, op1);
2843 /* We may be able to use a conditional move directly.
2844 This avoids emitting spurious compares. */
2845 if (signed_comparison_operator (cmp, VOIDmode)
2846 && (cmp_mode == DImode || local_fast_math)
2847 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2848 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2850 /* We can't put the comparison inside the conditional move;
2851 emit a compare instruction and put that inside the
2852 conditional move. Make sure we emit only comparisons we have;
2853 swap or reverse as necessary. */
2855 if (!can_create_pseudo_p ())
2856 return NULL_RTX;
2858 switch (code)
2860 case EQ: case LE: case LT: case LEU: case LTU:
2861 case UNORDERED:
2862 /* We have these compares: */
2863 break;
2865 case NE:
2866 case ORDERED:
2867 /* These must be reversed. */
2868 code = reverse_condition (code);
2869 cmov_code = EQ;
2870 break;
2872 case GE: case GT: case GEU: case GTU:
2873 /* These normally need swapping, but for integer zero we have
2874 special patterns that recognize swapped operands. */
2875 if (cmp_mode == DImode && op1 == const0_rtx)
2876 break;
2877 code = swap_condition (code);
2878 std::swap (op0, op1);
2879 break;
2881 default:
2882 gcc_unreachable ();
2885 if (cmp_mode == DImode)
2887 if (!reg_or_0_operand (op0, DImode))
2888 op0 = force_reg (DImode, op0);
2889 if (!reg_or_8bit_operand (op1, DImode))
2890 op1 = force_reg (DImode, op1);
2893 /* ??? We mark the branch mode to be CCmode to prevent the compare
2894 and cmov from being combined, since the compare insn follows IEEE
2895 rules that the cmov does not. */
2896 if (cmp_mode == DFmode && !local_fast_math)
2897 cmov_mode = CCmode;
2899 tem = gen_reg_rtx (cmp_mode);
2900 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2901 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2904 /* Simplify a conditional move of two constants into a setcc with
2905 arithmetic. This is done with a splitter since combine would
2906 just undo the work if done during code generation. It also catches
2907 cases we wouldn't have before cse. */
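/* Host-side sketch of the arithmetic forms chosen below: with the setcc
   result normalized to 0 or 1, (cond ? t : 0) for a power-of-two T is a
   shift, T == -1 is a negate, and a small difference becomes a scaled add.
   Illustration only; the splitter emits RTL, and the sketch_* name is not
   part of GCC.  */
#if 0
static long long
sketch_cmov_as_arith (int cond, long long t, long long f)
{
  long long setcc = cond ? 1 : 0;

  if (f == 0 && t > 0 && (t & (t - 1)) == 0)
    return setcc << __builtin_ctzll (t);  /* compare, then shift            */
  if (f == 0 && t == -1)
    return -setcc;                        /* compare, then negq             */
  return setcc * (t - f) + f;             /* compare, then (scaled) add     */
}
#endif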
2909 int
2910 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2911 rtx t_rtx, rtx f_rtx)
2913 HOST_WIDE_INT t, f, diff;
2914 machine_mode mode;
2915 rtx target, subtarget, tmp;
2917 mode = GET_MODE (dest);
2918 t = INTVAL (t_rtx);
2919 f = INTVAL (f_rtx);
2920 diff = t - f;
2922 if (((code == NE || code == EQ) && diff < 0)
2923 || (code == GE || code == GT))
2925 code = reverse_condition (code);
2926 diff = t, t = f, f = diff;
2927 diff = t - f;
2930 subtarget = target = dest;
2931 if (mode != DImode)
2933 target = gen_lowpart (DImode, dest);
2934 if (can_create_pseudo_p ())
2935 subtarget = gen_reg_rtx (DImode);
2936 else
2937 subtarget = target;
2939 /* Below, we must be careful to use copy_rtx on target and subtarget
2940 in intermediate insns, as they may be a subreg rtx, which may not
2941 be shared. */
2943 if (f == 0 && exact_log2 (diff) > 0
2944 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2945 viable over a longer latency cmove. On EV5, the E0 slot is a
2946 scarce resource, and on EV4 shift has the same latency as a cmove. */
2947 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2949 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2950 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2952 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2953 GEN_INT (exact_log2 (t)));
2954 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2956 else if (f == 0 && t == -1)
2958 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2959 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2961 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2963 else if (diff == 1 || diff == 4 || diff == 8)
2965 rtx add_op;
2967 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2968 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2970 if (diff == 1)
2971 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2972 else
2974 add_op = GEN_INT (f);
2975 if (sext_add_operand (add_op, mode))
2977 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2978 GEN_INT (diff));
2979 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2980 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2982 else
2983 return 0;
2986 else
2987 return 0;
2989 return 1;
2992 /* Look up the function X_floating library function name for the
2993 given operation. */
2995 struct GTY(()) xfloating_op
2997 const enum rtx_code code;
2998 const char *const GTY((skip)) osf_func;
2999 const char *const GTY((skip)) vms_func;
3000 rtx libcall;
3003 static GTY(()) struct xfloating_op xfloating_ops[] =
3005 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
3006 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
3007 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
3008 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
3009 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
3010 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
3011 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
3012 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
3013 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
3014 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
3015 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
3016 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
3017 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
3018 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
3019 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
3022 static GTY(()) struct xfloating_op vax_cvt_ops[] =
3024 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
3025 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
3028 static rtx
3029 alpha_lookup_xfloating_lib_func (enum rtx_code code)
3031 struct xfloating_op *ops = xfloating_ops;
3032 long n = ARRAY_SIZE (xfloating_ops);
3033 long i;
3035 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
3037 /* How irritating. Nothing to key off for the main table. */
3038 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3040 ops = vax_cvt_ops;
3041 n = ARRAY_SIZE (vax_cvt_ops);
3044 for (i = 0; i < n; ++i, ++ops)
3045 if (ops->code == code)
3047 rtx func = ops->libcall;
3048 if (!func)
3050 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3051 ? ops->vms_func : ops->osf_func);
3052 ops->libcall = func;
3054 return func;
3057 gcc_unreachable ();
3060 /* Most X_floating operations take the rounding mode as an argument.
3061 Compute that here. */
3063 static int
3064 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3065 enum alpha_fp_rounding_mode round)
3067 int mode;
3069 switch (round)
3071 case ALPHA_FPRM_NORM:
3072 mode = 2;
3073 break;
3074 case ALPHA_FPRM_MINF:
3075 mode = 1;
3076 break;
3077 case ALPHA_FPRM_CHOP:
3078 mode = 0;
3079 break;
3080 case ALPHA_FPRM_DYN:
3081 mode = 4;
3082 break;
3083 default:
3084 gcc_unreachable ();
3086 /* XXX For reference, round to +inf is mode = 3. */
3089 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3090 mode |= 0x10000;
3092 return mode;
3095 /* Emit an X_floating library function call.
3097 Note that these functions do not follow normal calling conventions:
3098 TFmode arguments are passed in two integer registers (as opposed to
3099 indirect); TFmode return values appear in R16+R17.
3101 FUNC is the function to call.
3102 TARGET is where the output belongs.
3103 OPERANDS are the inputs.
3104 NOPERANDS is the count of inputs.
3105 EQUIV is the expression equivalent for the function.
3108 static void
3109 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3110 int noperands, rtx equiv)
3112 rtx usage = NULL_RTX, tmp, reg;
3113 int regno = 16, i;
3115 start_sequence ();
3117 for (i = 0; i < noperands; ++i)
3119 switch (GET_MODE (operands[i]))
3121 case TFmode:
3122 reg = gen_rtx_REG (TFmode, regno);
3123 regno += 2;
3124 break;
3126 case DFmode:
3127 reg = gen_rtx_REG (DFmode, regno + 32);
3128 regno += 1;
3129 break;
3131 case VOIDmode:
3132 gcc_assert (CONST_INT_P (operands[i]));
3133 /* FALLTHRU */
3134 case DImode:
3135 reg = gen_rtx_REG (DImode, regno);
3136 regno += 1;
3137 break;
3139 default:
3140 gcc_unreachable ();
3143 emit_move_insn (reg, operands[i]);
3144 use_reg (&usage, reg);
3147 switch (GET_MODE (target))
3149 case TFmode:
3150 reg = gen_rtx_REG (TFmode, 16);
3151 break;
3152 case DFmode:
3153 reg = gen_rtx_REG (DFmode, 32);
3154 break;
3155 case DImode:
3156 reg = gen_rtx_REG (DImode, 0);
3157 break;
3158 default:
3159 gcc_unreachable ();
3162 tmp = gen_rtx_MEM (QImode, func);
3163 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3164 const0_rtx, const0_rtx));
3165 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3166 RTL_CONST_CALL_P (tmp) = 1;
3168 tmp = get_insns ();
3169 end_sequence ();
3171 emit_libcall_block (tmp, target, reg, equiv);
3174 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3176 void
3177 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3179 rtx func;
3180 int mode;
3181 rtx out_operands[3];
3183 func = alpha_lookup_xfloating_lib_func (code);
3184 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3186 out_operands[0] = operands[1];
3187 out_operands[1] = operands[2];
3188 out_operands[2] = GEN_INT (mode);
3189 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3190 gen_rtx_fmt_ee (code, TFmode, operands[1],
3191 operands[2]));
3194 /* Emit an X_floating library function call for a comparison. */
3196 static rtx
3197 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3199 enum rtx_code cmp_code, res_code;
3200 rtx func, out, operands[2], note;
3202 /* X_floating library comparison functions return
3203 -1 unordered
3204 0 false
3205 1 true
3206 Convert the compare against the raw return value. */
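/* Host-side sketch of how the raw -1/0/1 result is consumed once the codes
   have been remapped below: UNORDERED/ORDERED go through the EQ routine and
   test the sign of the result, NE accepts any nonzero value, and the other
   predicates require a strictly positive result.  Illustration only; the
   sketch_* names are not part of GCC.  */
#if 0
static int
sketch_xflt_unordered (int eq_lib_result)
{
  return eq_lib_result < 0;     /* the EQ call reported "unordered" */
}

static int
sketch_xflt_ordered (int eq_lib_result)
{
  return eq_lib_result >= 0;    /* the EQ call did not report "unordered" */
}

static int
sketch_xflt_less_than (int lt_lib_result)
{
  return lt_lib_result > 0;     /* strictly true, not unordered */
}
#endif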
3208 cmp_code = *pcode;
3209 switch (cmp_code)
3211 case UNORDERED:
3212 cmp_code = EQ;
3213 res_code = LT;
3214 break;
3215 case ORDERED:
3216 cmp_code = EQ;
3217 res_code = GE;
3218 break;
3219 case NE:
3220 res_code = NE;
3221 break;
3222 case EQ:
3223 case LT:
3224 case GT:
3225 case LE:
3226 case GE:
3227 res_code = GT;
3228 break;
3229 default:
3230 gcc_unreachable ();
3232 *pcode = res_code;
3234 func = alpha_lookup_xfloating_lib_func (cmp_code);
3236 operands[0] = op0;
3237 operands[1] = op1;
3238 out = gen_reg_rtx (DImode);
3240 /* What's actually returned is -1,0,1, not a proper boolean value. */
3241 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3242 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
3243 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3245 return out;
3248 /* Emit an X_floating library function call for a conversion. */
3250 void
3251 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3253 int noperands = 1, mode;
3254 rtx out_operands[2];
3255 rtx func;
3256 enum rtx_code code = orig_code;
3258 if (code == UNSIGNED_FIX)
3259 code = FIX;
3261 func = alpha_lookup_xfloating_lib_func (code);
3263 out_operands[0] = operands[1];
3265 switch (code)
3267 case FIX:
3268 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3269 out_operands[1] = GEN_INT (mode);
3270 noperands = 2;
3271 break;
3272 case FLOAT_TRUNCATE:
3273 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3274 out_operands[1] = GEN_INT (mode);
3275 noperands = 2;
3276 break;
3277 default:
3278 break;
3281 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3282 gen_rtx_fmt_e (orig_code,
3283 GET_MODE (operands[0]),
3284 operands[1]));
3287 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3288 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3289 guarantee that the sequence
3290 set (OP[0] OP[2])
3291 set (OP[1] OP[3])
3292 is valid. Naturally, output operand ordering is little-endian.
3293 This is used by *movtf_internal and *movti_internal. */
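/* Sketch of the FIXUP_OVERLAP swap below, with pointers standing in for
   registers: if the first destination half is the same location that still
   holds the second source half, the two half-moves are emitted in the other
   order so nothing is clobbered before it is read.  Illustration only; the
   sketch_* name is not part of GCC.  */
#if 0
static void
sketch_split_pair (long long *dst_lo, long long *dst_hi,
                   long long *src_lo, long long *src_hi)
{
  if (dst_lo == src_hi)
    {
      *dst_hi = *src_hi;   /* high half first: frees the overlapping slot */
      *dst_lo = *src_lo;
    }
  else
    {
      *dst_lo = *src_lo;
      *dst_hi = *src_hi;
    }
}
#endif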
3295 void
3296 alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
3297 bool fixup_overlap)
3299 switch (GET_CODE (operands[1]))
3301 case REG:
3302 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3303 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3304 break;
3306 case MEM:
3307 operands[3] = adjust_address (operands[1], DImode, 8);
3308 operands[2] = adjust_address (operands[1], DImode, 0);
3309 break;
3311 case CONST_INT:
3312 case CONST_DOUBLE:
3313 gcc_assert (operands[1] == CONST0_RTX (mode));
3314 operands[2] = operands[3] = const0_rtx;
3315 break;
3317 default:
3318 gcc_unreachable ();
3321 switch (GET_CODE (operands[0]))
3323 case REG:
3324 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3325 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3326 break;
3328 case MEM:
3329 operands[1] = adjust_address (operands[0], DImode, 8);
3330 operands[0] = adjust_address (operands[0], DImode, 0);
3331 break;
3333 default:
3334 gcc_unreachable ();
3337 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3339 std::swap (operands[0], operands[1]);
3340 std::swap (operands[2], operands[3]);
3344 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3345 op2 is a register containing the sign bit, operation is the
3346 logical operation to be performed. */
3348 void
3349 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3351 rtx high_bit = operands[2];
3352 rtx scratch;
3353 int move;
3355 alpha_split_tmode_pair (operands, TFmode, false);
3357 /* Detect three flavors of operand overlap. */
3358 move = 1;
3359 if (rtx_equal_p (operands[0], operands[2]))
3360 move = 0;
3361 else if (rtx_equal_p (operands[1], operands[2]))
3363 if (rtx_equal_p (operands[0], high_bit))
3364 move = 2;
3365 else
3366 move = -1;
3369 if (move < 0)
3370 emit_move_insn (operands[0], operands[2]);
3372 /* ??? If the destination overlaps both source tf and high_bit, then
3373 assume source tf is dead in its entirety and use the other half
3374 for a scratch register. Otherwise "scratch" is just the proper
3375 destination register. */
3376 scratch = operands[move < 2 ? 1 : 3];
3378 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3380 if (move > 0)
3382 emit_move_insn (operands[0], operands[2]);
3383 if (move > 1)
3384 emit_move_insn (operands[1], scratch);
3388 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3389 unaligned data:
3391 unsigned: signed:
3392 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3393 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3394 lda r3,X(r11) lda r3,X+2(r11)
3395 extwl r1,r3,r1 extql r1,r3,r1
3396 extwh r2,r3,r2 extqh r2,r3,r2
3397 or r1,r2,r1 or r1,r2,r1
3398 sra r1,48,r1
3400 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3401 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3402 lda r3,X(r11) lda r3,X(r11)
3403 extll r1,r3,r1 extll r1,r3,r1
3404 extlh r2,r3,r2 extlh r2,r3,r2
3405 or r1,r2,r1 addl r1,r2,r1
3407 quad: ldq_u r1,X(r11)
3408 ldq_u r2,X+7(r11)
3409 lda r3,X(r11)
3410 extql r1,r3,r1
3411 extqh r2,r3,r2
3412 or r1,r2,r1
3413 */
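/* Byte-level sketch of the quadword case of the ldq_u/extql/extqh/or
   pattern above, assuming a little-endian host: the two aligned quadwords
   covering the span are combined by shifting on the low address bits.
   Illustration only; the sketch_* name is not part of GCC.  */
#if 0
#include <stdint.h>
#include <string.h>

static uint64_t
sketch_unaligned_load_q (const unsigned char *p)
{
  uintptr_t a = (uintptr_t) p;
  uint64_t lo, hi;
  int sh = (int) (a & 7) * 8;

  memcpy (&lo, (const void *) (a & ~(uintptr_t) 7), 8);        /* ldq_u X   */
  memcpy (&hi, (const void *) ((a + 7) & ~(uintptr_t) 7), 8);  /* ldq_u X+7 */

  if (sh == 0)
    return lo;            /* aligned: the "high" quad contributes nothing */
  return (lo >> sh) | (hi << (64 - sh));   /* extql + extqh + or */
}
#endif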
3415 void
3416 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3417 HOST_WIDE_INT ofs, int sign)
3419 rtx meml, memh, addr, extl, exth, tmp, mema;
3420 machine_mode mode;
3422 if (TARGET_BWX && size == 2)
3424 meml = adjust_address (mem, QImode, ofs);
3425 memh = adjust_address (mem, QImode, ofs+1);
3426 extl = gen_reg_rtx (DImode);
3427 exth = gen_reg_rtx (DImode);
3428 emit_insn (gen_zero_extendqidi2 (extl, meml));
3429 emit_insn (gen_zero_extendqidi2 (exth, memh));
3430 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3431 NULL, 1, OPTAB_LIB_WIDEN);
3432 addr = expand_simple_binop (DImode, IOR, extl, exth,
3433 NULL, 1, OPTAB_LIB_WIDEN);
3435 if (sign && GET_MODE (tgt) != HImode)
3437 addr = gen_lowpart (HImode, addr);
3438 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3440 else
3442 if (GET_MODE (tgt) != DImode)
3443 addr = gen_lowpart (GET_MODE (tgt), addr);
3444 emit_move_insn (tgt, addr);
3446 return;
3449 meml = gen_reg_rtx (DImode);
3450 memh = gen_reg_rtx (DImode);
3451 addr = gen_reg_rtx (DImode);
3452 extl = gen_reg_rtx (DImode);
3453 exth = gen_reg_rtx (DImode);
3455 mema = XEXP (mem, 0);
3456 if (GET_CODE (mema) == LO_SUM)
3457 mema = force_reg (Pmode, mema);
3459 /* AND addresses cannot be in any alias set, since they may implicitly
3460 alias surrounding code. Ideally we'd have some alias set that
3461 covered all types except those with alignment 8 or higher. */
3463 tmp = change_address (mem, DImode,
3464 gen_rtx_AND (DImode,
3465 plus_constant (DImode, mema, ofs),
3466 GEN_INT (-8)));
3467 set_mem_alias_set (tmp, 0);
3468 emit_move_insn (meml, tmp);
3470 tmp = change_address (mem, DImode,
3471 gen_rtx_AND (DImode,
3472 plus_constant (DImode, mema,
3473 ofs + size - 1),
3474 GEN_INT (-8)));
3475 set_mem_alias_set (tmp, 0);
3476 emit_move_insn (memh, tmp);
3478 if (sign && size == 2)
3480 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3482 emit_insn (gen_extql (extl, meml, addr));
3483 emit_insn (gen_extqh (exth, memh, addr));
3485 /* We must use tgt here for the target. Alpha-vms port fails if we use
3486 addr for the target, because addr is marked as a pointer and combine
3487 knows that pointers are always sign-extended 32-bit values. */
3488 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3489 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3490 addr, 1, OPTAB_WIDEN);
3492 else
3494 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3495 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3496 switch ((int) size)
3498 case 2:
3499 emit_insn (gen_extwh (exth, memh, addr));
3500 mode = HImode;
3501 break;
3502 case 4:
3503 emit_insn (gen_extlh (exth, memh, addr));
3504 mode = SImode;
3505 break;
3506 case 8:
3507 emit_insn (gen_extqh (exth, memh, addr));
3508 mode = DImode;
3509 break;
3510 default:
3511 gcc_unreachable ();
3514 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3515 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3516 sign, OPTAB_WIDEN);
3519 if (addr != tgt)
3520 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3523 /* Similarly, use ins and msk instructions to perform unaligned stores. */
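/* Byte-level sketch of the quadword case of the ins/msk pattern used below,
   assuming a little-endian host: the two aligned quadwords covering the
   span are loaded, the bytes the store owns are masked out, the shifted
   source is ORed in, and the high quadword is written back before the low
   one.  Illustration only; the sketch_* name is not part of GCC.  */
#if 0
#include <stdint.h>
#include <string.h>

static void
sketch_unaligned_store_q (unsigned char *p, uint64_t val)
{
  uintptr_t a = (uintptr_t) p;
  void *loa = (void *) (a & ~(uintptr_t) 7);
  void *hia = (void *) ((a + 7) & ~(uintptr_t) 7);
  uint64_t lo, hi;
  int sh = (int) (a & 7) * 8;

  memcpy (&lo, loa, 8);
  memcpy (&hi, hia, 8);

  if (sh == 0)
    lo = val;                                         /* degenerate aligned case */
  else
    {
      lo = (lo & ~(~0ULL << sh)) | (val << sh);       /* mskql + insql */
      hi = (hi & (~0ULL << sh)) | (val >> (64 - sh)); /* mskqh + insqh */
    }

  memcpy (hia, &hi, 8);   /* high before low, as the code below notes, so  */
  memcpy (loa, &lo, 8);   /* the aligned case ends with the full new value */
}
#endif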
3525 void
3526 alpha_expand_unaligned_store (rtx dst, rtx src,
3527 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3529 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3531 if (TARGET_BWX && size == 2)
3533 if (src != const0_rtx)
3535 dstl = gen_lowpart (QImode, src);
3536 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3537 NULL, 1, OPTAB_LIB_WIDEN);
3538 dsth = gen_lowpart (QImode, dsth);
3540 else
3541 dstl = dsth = const0_rtx;
3543 meml = adjust_address (dst, QImode, ofs);
3544 memh = adjust_address (dst, QImode, ofs+1);
3546 emit_move_insn (meml, dstl);
3547 emit_move_insn (memh, dsth);
3548 return;
3551 dstl = gen_reg_rtx (DImode);
3552 dsth = gen_reg_rtx (DImode);
3553 insl = gen_reg_rtx (DImode);
3554 insh = gen_reg_rtx (DImode);
3556 dsta = XEXP (dst, 0);
3557 if (GET_CODE (dsta) == LO_SUM)
3558 dsta = force_reg (Pmode, dsta);
3560 /* AND addresses cannot be in any alias set, since they may implicitly
3561 alias surrounding code. Ideally we'd have some alias set that
3562 covered all types except those with alignment 8 or higher. */
3564 meml = change_address (dst, DImode,
3565 gen_rtx_AND (DImode,
3566 plus_constant (DImode, dsta, ofs),
3567 GEN_INT (-8)));
3568 set_mem_alias_set (meml, 0);
3570 memh = change_address (dst, DImode,
3571 gen_rtx_AND (DImode,
3572 plus_constant (DImode, dsta,
3573 ofs + size - 1),
3574 GEN_INT (-8)));
3575 set_mem_alias_set (memh, 0);
3577 emit_move_insn (dsth, memh);
3578 emit_move_insn (dstl, meml);
3580 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3582 if (src != CONST0_RTX (GET_MODE (src)))
3584 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3585 GEN_INT (size*8), addr));
3587 switch ((int) size)
3589 case 2:
3590 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3591 break;
3592 case 4:
3593 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3594 break;
3595 case 8:
3596 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3597 break;
3598 default:
3599 gcc_unreachable ();
3603 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3605 switch ((int) size)
3607 case 2:
3608 emit_insn (gen_mskwl (dstl, dstl, addr));
3609 break;
3610 case 4:
3611 emit_insn (gen_mskll (dstl, dstl, addr));
3612 break;
3613 case 8:
3614 emit_insn (gen_mskql (dstl, dstl, addr));
3615 break;
3616 default:
3617 gcc_unreachable ();
3620 if (src != CONST0_RTX (GET_MODE (src)))
3622 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3623 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3626 /* Must store high before low for degenerate case of aligned. */
3627 emit_move_insn (memh, dsth);
3628 emit_move_insn (meml, dstl);
3631 /* The block move code tries to maximize speed by separating loads and
3632 stores at the expense of register pressure: we load all of the data
3633 before we store it back out. There are two secondary effects worth
3634 mentioning, that this speeds copying to/from aligned and unaligned
3635 buffers, and that it makes the code significantly easier to write. */
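/* Sketch of the load-everything-then-store-everything strategy described
   above, with plain word copies standing in for the DImode/SImode/HImode/
   QImode chunks the expander actually picks.  Illustration only; the
   sketch_* name is not part of GCC.  */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
sketch_block_move (uint64_t *dst, const uint64_t *src, size_t words)
{
  uint64_t regs[8];     /* cf. MAX_MOVE_WORDS below */
  size_t i;

  if (words > 8)
    return;             /* larger blocks are not handled this way */

  for (i = 0; i < words; ++i)   /* all of the loads first ...  */
    regs[i] = src[i];
  for (i = 0; i < words; ++i)   /* ... then all of the stores  */
    dst[i] = regs[i];
}
#endif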
3637 #define MAX_MOVE_WORDS 8
3639 /* Load an integral number of consecutive unaligned quadwords. */
3641 static void
3642 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3643 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3645 rtx const im8 = GEN_INT (-8);
3646 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3647 rtx sreg, areg, tmp, smema;
3648 HOST_WIDE_INT i;
3650 smema = XEXP (smem, 0);
3651 if (GET_CODE (smema) == LO_SUM)
3652 smema = force_reg (Pmode, smema);
3654 /* Generate all the tmp registers we need. */
3655 for (i = 0; i < words; ++i)
3657 data_regs[i] = out_regs[i];
3658 ext_tmps[i] = gen_reg_rtx (DImode);
3660 data_regs[words] = gen_reg_rtx (DImode);
3662 if (ofs != 0)
3663 smem = adjust_address (smem, GET_MODE (smem), ofs);
3665 /* Load up all of the source data. */
3666 for (i = 0; i < words; ++i)
3668 tmp = change_address (smem, DImode,
3669 gen_rtx_AND (DImode,
3670 plus_constant (DImode, smema, 8*i),
3671 im8));
3672 set_mem_alias_set (tmp, 0);
3673 emit_move_insn (data_regs[i], tmp);
3676 tmp = change_address (smem, DImode,
3677 gen_rtx_AND (DImode,
3678 plus_constant (DImode, smema,
3679 8*words - 1),
3680 im8));
3681 set_mem_alias_set (tmp, 0);
3682 emit_move_insn (data_regs[words], tmp);
3684 /* Extract the half-word fragments. Unfortunately DEC decided to make
3685 extxh with offset zero a noop instead of zeroing the register, so
3686 we must take care of that edge condition ourselves with cmov. */
3688 sreg = copy_addr_to_reg (smema);
3689 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3690 1, OPTAB_WIDEN);
3691 for (i = 0; i < words; ++i)
3693 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3694 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3695 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3696 gen_rtx_IF_THEN_ELSE (DImode,
3697 gen_rtx_EQ (DImode, areg,
3698 const0_rtx),
3699 const0_rtx, ext_tmps[i])));
3702 /* Merge the half-words into whole words. */
3703 for (i = 0; i < words; ++i)
3705 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3706 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3710 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3711 may be NULL to store zeros. */
3713 static void
3714 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3715 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3717 rtx const im8 = GEN_INT (-8);
3718 rtx ins_tmps[MAX_MOVE_WORDS];
3719 rtx st_tmp_1, st_tmp_2, dreg;
3720 rtx st_addr_1, st_addr_2, dmema;
3721 HOST_WIDE_INT i;
3723 dmema = XEXP (dmem, 0);
3724 if (GET_CODE (dmema) == LO_SUM)
3725 dmema = force_reg (Pmode, dmema);
3727 /* Generate all the tmp registers we need. */
3728 if (data_regs != NULL)
3729 for (i = 0; i < words; ++i)
3730 ins_tmps[i] = gen_reg_rtx(DImode);
3731 st_tmp_1 = gen_reg_rtx(DImode);
3732 st_tmp_2 = gen_reg_rtx(DImode);
3734 if (ofs != 0)
3735 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3737 st_addr_2 = change_address (dmem, DImode,
3738 gen_rtx_AND (DImode,
3739 plus_constant (DImode, dmema,
3740 words*8 - 1),
3741 im8));
3742 set_mem_alias_set (st_addr_2, 0);
3744 st_addr_1 = change_address (dmem, DImode,
3745 gen_rtx_AND (DImode, dmema, im8));
3746 set_mem_alias_set (st_addr_1, 0);
3748 /* Load up the destination end bits. */
3749 emit_move_insn (st_tmp_2, st_addr_2);
3750 emit_move_insn (st_tmp_1, st_addr_1);
3752 /* Shift the input data into place. */
3753 dreg = copy_addr_to_reg (dmema);
3754 if (data_regs != NULL)
3756 for (i = words-1; i >= 0; --i)
3758 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3759 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3761 for (i = words-1; i > 0; --i)
3763 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3764 ins_tmps[i-1], ins_tmps[i-1], 1,
3765 OPTAB_WIDEN);
3769 /* Split and merge the ends with the destination data. */
3770 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3771 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3773 if (data_regs != NULL)
3775 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3776 st_tmp_2, 1, OPTAB_WIDEN);
3777 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3778 st_tmp_1, 1, OPTAB_WIDEN);
3781 /* Store it all. */
3782 emit_move_insn (st_addr_2, st_tmp_2);
3783 for (i = words-1; i > 0; --i)
3785 rtx tmp = change_address (dmem, DImode,
3786 gen_rtx_AND (DImode,
3787 plus_constant (DImode,
3788 dmema, i*8),
3789 im8));
3790 set_mem_alias_set (tmp, 0);
3791 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3793 emit_move_insn (st_addr_1, st_tmp_1);
3797 /* Expand string/block move operations.
3799 operands[0] is the pointer to the destination.
3800 operands[1] is the pointer to the source.
3801 operands[2] is the number of bytes to move.
3802 operands[3] is the alignment. */
3804 int
3805 alpha_expand_block_move (rtx operands[])
3807 rtx bytes_rtx = operands[2];
3808 rtx align_rtx = operands[3];
3809 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3810 HOST_WIDE_INT bytes = orig_bytes;
3811 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3812 HOST_WIDE_INT dst_align = src_align;
3813 rtx orig_src = operands[1];
3814 rtx orig_dst = operands[0];
3815 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3816 rtx tmp;
3817 unsigned int i, words, ofs, nregs = 0;
3819 if (orig_bytes <= 0)
3820 return 1;
3821 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3822 return 0;
3824 /* Look for additional alignment information from recorded register info. */
3826 tmp = XEXP (orig_src, 0);
3827 if (REG_P (tmp))
3828 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3829 else if (GET_CODE (tmp) == PLUS
3830 && REG_P (XEXP (tmp, 0))
3831 && CONST_INT_P (XEXP (tmp, 1)))
3833 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3834 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3836 if (a > src_align)
3838 if (a >= 64 && c % 8 == 0)
3839 src_align = 64;
3840 else if (a >= 32 && c % 4 == 0)
3841 src_align = 32;
3842 else if (a >= 16 && c % 2 == 0)
3843 src_align = 16;
3847 tmp = XEXP (orig_dst, 0);
3848 if (REG_P (tmp))
3849 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3850 else if (GET_CODE (tmp) == PLUS
3851 && REG_P (XEXP (tmp, 0))
3852 && CONST_INT_P (XEXP (tmp, 1)))
3854 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3855 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3857 if (a > dst_align)
3859 if (a >= 64 && c % 8 == 0)
3860 dst_align = 64;
3861 else if (a >= 32 && c % 4 == 0)
3862 dst_align = 32;
3863 else if (a >= 16 && c % 2 == 0)
3864 dst_align = 16;
3868 ofs = 0;
3869 if (src_align >= 64 && bytes >= 8)
3871 words = bytes / 8;
3873 for (i = 0; i < words; ++i)
3874 data_regs[nregs + i] = gen_reg_rtx (DImode);
3876 for (i = 0; i < words; ++i)
3877 emit_move_insn (data_regs[nregs + i],
3878 adjust_address (orig_src, DImode, ofs + i * 8));
3880 nregs += words;
3881 bytes -= words * 8;
3882 ofs += words * 8;
3885 if (src_align >= 32 && bytes >= 4)
3887 words = bytes / 4;
3889 for (i = 0; i < words; ++i)
3890 data_regs[nregs + i] = gen_reg_rtx (SImode);
3892 for (i = 0; i < words; ++i)
3893 emit_move_insn (data_regs[nregs + i],
3894 adjust_address (orig_src, SImode, ofs + i * 4));
3896 nregs += words;
3897 bytes -= words * 4;
3898 ofs += words * 4;
3901 if (bytes >= 8)
3903 words = bytes / 8;
3905 for (i = 0; i < words+1; ++i)
3906 data_regs[nregs + i] = gen_reg_rtx (DImode);
3908 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3909 words, ofs);
3911 nregs += words;
3912 bytes -= words * 8;
3913 ofs += words * 8;
3916 if (! TARGET_BWX && bytes >= 4)
3918 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3919 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3920 bytes -= 4;
3921 ofs += 4;
3924 if (bytes >= 2)
3926 if (src_align >= 16)
3928 do {
3929 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3930 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3931 bytes -= 2;
3932 ofs += 2;
3933 } while (bytes >= 2);
3935 else if (! TARGET_BWX)
3937 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3938 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3939 bytes -= 2;
3940 ofs += 2;
3944 while (bytes > 0)
3946 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3947 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3948 bytes -= 1;
3949 ofs += 1;
3952 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3954 /* Now save it back out again. */
3956 i = 0, ofs = 0;
3958 /* Write out the data in whatever chunks reading the source allowed. */
3959 if (dst_align >= 64)
3961 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3963 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3964 data_regs[i]);
3965 ofs += 8;
3966 i++;
3970 if (dst_align >= 32)
3972 /* If the source has remaining DImode regs, write them out in
3973 two pieces. */
3974 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3976 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3977 NULL_RTX, 1, OPTAB_WIDEN);
3979 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3980 gen_lowpart (SImode, data_regs[i]));
3981 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3982 gen_lowpart (SImode, tmp));
3983 ofs += 8;
3984 i++;
3987 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3989 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3990 data_regs[i]);
3991 ofs += 4;
3992 i++;
3996 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3998 /* Write out a remaining block of words using unaligned methods. */
4000 for (words = 1; i + words < nregs; words++)
4001 if (GET_MODE (data_regs[i + words]) != DImode)
4002 break;
4004 if (words == 1)
4005 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4006 else
4007 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4008 words, ofs);
4010 i += words;
4011 ofs += words * 8;
4014 /* Due to the above, this won't be aligned. */
4015 /* ??? If we have more than one of these, consider constructing full
4016 words in registers and using alpha_expand_unaligned_store_words. */
4017 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4019 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4020 ofs += 4;
4021 i++;
4024 if (dst_align >= 16)
4025 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4027 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4028 i++;
4029 ofs += 2;
4031 else
4032 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4034 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4035 i++;
4036 ofs += 2;
4039 /* The remainder must be byte copies. */
4040 while (i < nregs)
4042 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4043 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4044 i++;
4045 ofs += 1;
4048 return 1;
4051 int
4052 alpha_expand_block_clear (rtx operands[])
4054 rtx bytes_rtx = operands[1];
4055 rtx align_rtx = operands[3];
4056 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4057 HOST_WIDE_INT bytes = orig_bytes;
4058 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4059 HOST_WIDE_INT alignofs = 0;
4060 rtx orig_dst = operands[0];
4061 rtx tmp;
4062 int i, words, ofs = 0;
4064 if (orig_bytes <= 0)
4065 return 1;
4066 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4067 return 0;
4069 /* Look for stricter alignment. */
4070 tmp = XEXP (orig_dst, 0);
4071 if (REG_P (tmp))
4072 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4073 else if (GET_CODE (tmp) == PLUS
4074 && REG_P (XEXP (tmp, 0))
4075 && CONST_INT_P (XEXP (tmp, 1)))
4077 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4078 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4080 if (a > align)
4082 if (a >= 64)
4083 align = a, alignofs = 8 - c % 8;
4084 else if (a >= 32)
4085 align = a, alignofs = 4 - c % 4;
4086 else if (a >= 16)
4087 align = a, alignofs = 2 - c % 2;
4091 /* Handle an unaligned prefix first. */
4093 if (alignofs > 0)
4095 #if HOST_BITS_PER_WIDE_INT >= 64
4096 /* Given that alignofs is bounded by align, the only time BWX could
4097 generate three stores is for a 7 byte fill. Prefer two individual
4098 stores over a load/mask/store sequence. */
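/* Host-side sketch of the masking below for a single aligned word: the
   INV_ALIGNOFS bytes in front of the region, and for a short fill the bytes
   after it, are kept; the bytes in between are cleared by the AND.
   Illustration only, assuming a 64-bit word; the sketch_* name is not part
   of GCC.  */
#if 0
static unsigned long long
sketch_clear_prefix (unsigned long long word, int inv_alignofs,
                     long long bytes, long long alignofs)
{
  unsigned long long mask = ~(~0ULL << (inv_alignofs * 8));

  if (bytes < alignofs)
    mask |= ~0ULL << ((inv_alignofs + bytes) * 8);

  return word & mask;
}
#endif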
4099 if ((!TARGET_BWX || alignofs == 7)
4100 && align >= 32
4101 && !(alignofs == 4 && bytes >= 4))
4103 machine_mode mode = (align >= 64 ? DImode : SImode);
4104 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4105 rtx mem, tmp;
4106 HOST_WIDE_INT mask;
4108 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4109 set_mem_alias_set (mem, 0);
4111 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4112 if (bytes < alignofs)
4114 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4115 ofs += bytes;
4116 bytes = 0;
4118 else
4120 bytes -= alignofs;
4121 ofs += alignofs;
4123 alignofs = 0;
4125 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4126 NULL_RTX, 1, OPTAB_WIDEN);
4128 emit_move_insn (mem, tmp);
4130 #endif
4132 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4134 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4135 bytes -= 1;
4136 ofs += 1;
4137 alignofs -= 1;
4139 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4141 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4142 bytes -= 2;
4143 ofs += 2;
4144 alignofs -= 2;
4146 if (alignofs == 4 && bytes >= 4)
4148 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4149 bytes -= 4;
4150 ofs += 4;
4151 alignofs = 0;
4154 /* If we've not used the extra lead alignment information by now,
4155 we won't be able to. Downgrade align to match what's left over. */
4156 if (alignofs > 0)
4158 alignofs = alignofs & -alignofs;
4159 align = MIN (align, alignofs * BITS_PER_UNIT);
4163 /* Handle a block of contiguous long-words. */
4165 if (align >= 64 && bytes >= 8)
4167 words = bytes / 8;
4169 for (i = 0; i < words; ++i)
4170 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4171 const0_rtx);
4173 bytes -= words * 8;
4174 ofs += words * 8;
4177 /* If the block is large and appropriately aligned, emit a single
4178 store followed by a sequence of stq_u insns. */
4180 if (align >= 32 && bytes > 16)
4182 rtx orig_dsta;
4184 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4185 bytes -= 4;
4186 ofs += 4;
4188 orig_dsta = XEXP (orig_dst, 0);
4189 if (GET_CODE (orig_dsta) == LO_SUM)
4190 orig_dsta = force_reg (Pmode, orig_dsta);
4192 words = bytes / 8;
4193 for (i = 0; i < words; ++i)
4195 rtx mem
4196 = change_address (orig_dst, DImode,
4197 gen_rtx_AND (DImode,
4198 plus_constant (DImode, orig_dsta,
4199 ofs + i*8),
4200 GEN_INT (-8)));
4201 set_mem_alias_set (mem, 0);
4202 emit_move_insn (mem, const0_rtx);
4205 /* Depending on the alignment, the first stq_u may have overlapped
4206 with the initial stl, which means that the last stq_u didn't
4207 write as much as it would appear. Leave those questionable bytes
4208 unaccounted for. */
4209 bytes -= words * 8 - 4;
4210 ofs += words * 8 - 4;
4213 /* Handle a smaller block of aligned words. */
4215 if ((align >= 64 && bytes == 4)
4216 || (align == 32 && bytes >= 4))
4218 words = bytes / 4;
4220 for (i = 0; i < words; ++i)
4221 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4222 const0_rtx);
4224 bytes -= words * 4;
4225 ofs += words * 4;
4228 /* An unaligned block uses stq_u stores for as many as possible. */
4230 if (bytes >= 8)
4232 words = bytes / 8;
4234 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4236 bytes -= words * 8;
4237 ofs += words * 8;
4240 /* Next clean up any trailing pieces. */
4242 #if HOST_BITS_PER_WIDE_INT >= 64
4243 /* Count the number of bits in BYTES for which aligned stores could
4244 be emitted. */
4245 words = 0;
4246 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4247 if (bytes & i)
4248 words += 1;
4250 /* If we have appropriate alignment (and it wouldn't take too many
4251 instructions otherwise), mask out the bytes we need. */
4252 if (TARGET_BWX ? words > 2 : bytes > 0)
4254 if (align >= 64)
4256 rtx mem, tmp;
4257 HOST_WIDE_INT mask;
4259 mem = adjust_address (orig_dst, DImode, ofs);
4260 set_mem_alias_set (mem, 0);
4262 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4264 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4265 NULL_RTX, 1, OPTAB_WIDEN);
4267 emit_move_insn (mem, tmp);
4268 return 1;
4270 else if (align >= 32 && bytes < 4)
4272 rtx mem, tmp;
4273 HOST_WIDE_INT mask;
4275 mem = adjust_address (orig_dst, SImode, ofs);
4276 set_mem_alias_set (mem, 0);
4278 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4280 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4281 NULL_RTX, 1, OPTAB_WIDEN);
4283 emit_move_insn (mem, tmp);
4284 return 1;
4287 #endif
4289 if (!TARGET_BWX && bytes >= 4)
4291 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4292 bytes -= 4;
4293 ofs += 4;
4296 if (bytes >= 2)
4298 if (align >= 16)
4300 do {
4301 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4302 const0_rtx);
4303 bytes -= 2;
4304 ofs += 2;
4305 } while (bytes >= 2);
4307 else if (! TARGET_BWX)
4309 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4310 bytes -= 2;
4311 ofs += 2;
4315 while (bytes > 0)
4317 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4318 bytes -= 1;
4319 ofs += 1;
4322 return 1;
4325 /* Returns a mask so that zap(x, value) == x & mask. */
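/* For instance (following the loop below): VALUE == 0x0f marks the low
   four bytes for zapping, so the mask returned is 0xffffffff00000000;
   VALUE == 0 zaps nothing and yields an all-ones mask.  */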
4328 alpha_expand_zap_mask (HOST_WIDE_INT value)
4330 rtx result;
4331 int i;
4333 if (HOST_BITS_PER_WIDE_INT >= 64)
4335 HOST_WIDE_INT mask = 0;
4337 for (i = 7; i >= 0; --i)
4339 mask <<= 8;
4340 if (!((value >> i) & 1))
4341 mask |= 0xff;
4344 result = gen_int_mode (mask, DImode);
4346 else
4348 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4350 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4352 for (i = 7; i >= 4; --i)
4354 mask_hi <<= 8;
4355 if (!((value >> i) & 1))
4356 mask_hi |= 0xff;
4359 for (i = 3; i >= 0; --i)
4361 mask_lo <<= 8;
4362 if (!((value >> i) & 1))
4363 mask_lo |= 0xff;
4366 result = immed_double_const (mask_lo, mask_hi, DImode);
4369 return result;
4372 void
4373 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4374 machine_mode mode,
4375 rtx op0, rtx op1, rtx op2)
4377 op0 = gen_lowpart (mode, op0);
4379 if (op1 == const0_rtx)
4380 op1 = CONST0_RTX (mode);
4381 else
4382 op1 = gen_lowpart (mode, op1);
4384 if (op2 == const0_rtx)
4385 op2 = CONST0_RTX (mode);
4386 else
4387 op2 = gen_lowpart (mode, op2);
4389 emit_insn ((*gen) (op0, op1, op2));
4392 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4393 COND is true. Mark the jump as unlikely to be taken. */
4395 static void
4396 emit_unlikely_jump (rtx cond, rtx label)
4398 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
4399 rtx x;
4401 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4402 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4403 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
4406 /* A subroutine of the atomic operation splitters. Emit a load-locked
4407 instruction in MODE. */
4409 static void
4410 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
4412 rtx (*fn) (rtx, rtx) = NULL;
4413 if (mode == SImode)
4414 fn = gen_load_locked_si;
4415 else if (mode == DImode)
4416 fn = gen_load_locked_di;
4417 emit_insn (fn (reg, mem));
4420 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4421 instruction in MODE. */
4423 static void
4424 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
4426 rtx (*fn) (rtx, rtx, rtx) = NULL;
4427 if (mode == SImode)
4428 fn = gen_store_conditional_si;
4429 else if (mode == DImode)
4430 fn = gen_store_conditional_di;
4431 emit_insn (fn (res, mem, val));
4434 /* Subroutines of the atomic operation splitters. Emit barriers
4435 as needed for the memory MODEL. */
4437 static void
4438 alpha_pre_atomic_barrier (enum memmodel model)
4440 if (need_atomic_barrier_p (model, true))
4441 emit_insn (gen_memory_barrier ());
4444 static void
4445 alpha_post_atomic_barrier (enum memmodel model)
4447 if (need_atomic_barrier_p (model, false))
4448 emit_insn (gen_memory_barrier ());
4451 /* A subroutine of the atomic operation splitters. Emit an insxl
4452 instruction in MODE. */
4454 static rtx
4455 emit_insxl (machine_mode mode, rtx op1, rtx op2)
4457 rtx ret = gen_reg_rtx (DImode);
4458 rtx (*fn) (rtx, rtx, rtx);
4460 switch (mode)
4462 case QImode:
4463 fn = gen_insbl;
4464 break;
4465 case HImode:
4466 fn = gen_inswl;
4467 break;
4468 case SImode:
4469 fn = gen_insll;
4470 break;
4471 case DImode:
4472 fn = gen_insql;
4473 break;
4474 default:
4475 gcc_unreachable ();
4478 op1 = force_reg (mode, op1);
4479 emit_insn (fn (ret, op1, op2));
4481 return ret;
4484 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4485 to perform. MEM is the memory on which to operate. VAL is the second
4486 operand of the binary operator. BEFORE and AFTER are optional locations to
4487 return the value of MEM either before or after the operation. SCRATCH is
4488 a scratch register. */
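/* A rough sketch of what gets emitted for a DImode fetch-and-<CODE>
   (illustrative only, not a literal assembly listing):

	retry:	ldq_l	scratch,mem
		<code>	before,val -> scratch	; and-then-not when CODE is NOT
		stq_c	scratch,mem
		beq	scratch,retry		; branch marked unlikely  */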
4490 void
4491 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4492 rtx after, rtx scratch, enum memmodel model)
4494 machine_mode mode = GET_MODE (mem);
4495 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4497 alpha_pre_atomic_barrier (model);
4499 label = gen_label_rtx ();
4500 emit_label (label);
4501 label = gen_rtx_LABEL_REF (DImode, label);
4503 if (before == NULL)
4504 before = scratch;
4505 emit_load_locked (mode, before, mem);
4507 if (code == NOT)
4509 x = gen_rtx_AND (mode, before, val);
4510 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4512 x = gen_rtx_NOT (mode, val);
4514 else
4515 x = gen_rtx_fmt_ee (code, mode, before, val);
4516 if (after)
4517 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4518 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4520 emit_store_conditional (mode, cond, mem, scratch);
4522 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4523 emit_unlikely_jump (x, label);
4525 alpha_post_atomic_barrier (model);
4528 /* Expand a compare and swap operation. */
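/* Sketch of the strong-variant loop built below (the weak form simply
   omits the retry branch on a failed store-conditional):

	again:	ld*_l	retval,mem
		cmpeq	retval,oldval -> cond
		beq	cond,done		; unlikely
		mov	newval -> cond
		st*_c	cond,mem
		beq	cond,again		; unlikely
	done:						*/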
4530 void
4531 alpha_split_compare_and_swap (rtx operands[])
4533 rtx cond, retval, mem, oldval, newval;
4534 bool is_weak;
4535 enum memmodel mod_s, mod_f;
4536 machine_mode mode;
4537 rtx label1, label2, x;
4539 cond = operands[0];
4540 retval = operands[1];
4541 mem = operands[2];
4542 oldval = operands[3];
4543 newval = operands[4];
4544 is_weak = (operands[5] != const0_rtx);
4545 mod_s = (enum memmodel) INTVAL (operands[6]);
4546 mod_f = (enum memmodel) INTVAL (operands[7]);
4547 mode = GET_MODE (mem);
4549 alpha_pre_atomic_barrier (mod_s);
4551 label1 = NULL_RTX;
4552 if (!is_weak)
4554 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4555 emit_label (XEXP (label1, 0));
4557 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4559 emit_load_locked (mode, retval, mem);
4561 x = gen_lowpart (DImode, retval);
4562 if (oldval == const0_rtx)
4564 emit_move_insn (cond, const0_rtx);
4565 x = gen_rtx_NE (DImode, x, const0_rtx);
4567 else
4569 x = gen_rtx_EQ (DImode, x, oldval);
4570 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4571 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4573 emit_unlikely_jump (x, label2);
4575 emit_move_insn (cond, newval);
4576 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4578 if (!is_weak)
4580 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4581 emit_unlikely_jump (x, label1);
4584 if (mod_f != MEMMODEL_RELAXED)
4585 emit_label (XEXP (label2, 0));
4587 alpha_post_atomic_barrier (mod_s);
4589 if (mod_f == MEMMODEL_RELAXED)
4590 emit_label (XEXP (label2, 0));
4593 void
4594 alpha_expand_compare_and_swap_12 (rtx operands[])
4596 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4597 machine_mode mode;
4598 rtx addr, align, wdst;
4599 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4601 cond = operands[0];
4602 dst = operands[1];
4603 mem = operands[2];
4604 oldval = operands[3];
4605 newval = operands[4];
4606 is_weak = operands[5];
4607 mod_s = operands[6];
4608 mod_f = operands[7];
4609 mode = GET_MODE (mem);
4611 /* We forced the address into a register via mem_noofs_operand. */
4612 addr = XEXP (mem, 0);
4613 gcc_assert (register_operand (addr, DImode));
4615 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4616 NULL_RTX, 1, OPTAB_DIRECT);
4618 oldval = convert_modes (DImode, mode, oldval, 1);
4620 if (newval != const0_rtx)
4621 newval = emit_insxl (mode, newval, addr);
4623 wdst = gen_reg_rtx (DImode);
4624 if (mode == QImode)
4625 gen = gen_atomic_compare_and_swapqi_1;
4626 else
4627 gen = gen_atomic_compare_and_swaphi_1;
4628 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4629 is_weak, mod_s, mod_f));
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4634 void
4635 alpha_split_compare_and_swap_12 (rtx operands[])
4637 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4638 machine_mode mode;
4639 bool is_weak;
4640 enum memmodel mod_s, mod_f;
4641 rtx label1, label2, mem, addr, width, mask, x;
4643 cond = operands[0];
4644 dest = operands[1];
4645 orig_mem = operands[2];
4646 oldval = operands[3];
4647 newval = operands[4];
4648 align = operands[5];
4649 is_weak = (operands[6] != const0_rtx);
4650 mod_s = (enum memmodel) INTVAL (operands[7]);
4651 mod_f = (enum memmodel) INTVAL (operands[8]);
4652 scratch = operands[9];
4653 mode = GET_MODE (orig_mem);
4654 addr = XEXP (orig_mem, 0);
4656 mem = gen_rtx_MEM (DImode, align);
4657 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4658 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4659 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4661 alpha_pre_atomic_barrier (mod_s);
4663 label1 = NULL_RTX;
4664 if (!is_weak)
4666 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4667 emit_label (XEXP (label1, 0));
4669 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4671 emit_load_locked (DImode, scratch, mem);
4673 width = GEN_INT (GET_MODE_BITSIZE (mode));
4674 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4675 emit_insn (gen_extxl (dest, scratch, width, addr));
4677 if (oldval == const0_rtx)
4679 emit_move_insn (cond, const0_rtx);
4680 x = gen_rtx_NE (DImode, dest, const0_rtx);
4682 else
4684 x = gen_rtx_EQ (DImode, dest, oldval);
4685 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4686 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4688 emit_unlikely_jump (x, label2);
4690 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4692 if (newval != const0_rtx)
4693 emit_insn (gen_iordi3 (cond, cond, newval));
4695 emit_store_conditional (DImode, cond, mem, cond);
4697 if (!is_weak)
4699 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4700 emit_unlikely_jump (x, label1);
4703 if (mod_f != MEMMODEL_RELAXED)
4704 emit_label (XEXP (label2, 0));
4706 alpha_post_atomic_barrier (mod_s);
4708 if (mod_f == MEMMODEL_RELAXED)
4709 emit_label (XEXP (label2, 0));
4712 /* Expand an atomic exchange operation. */
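/* Sketch: ld*_l retval,mem; mov val,scratch; st*_c scratch,mem;
   beq scratch,retry (marked unlikely) -- the same shape as
   alpha_split_atomic_op above, without the arithmetic.  */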
4714 void
4715 alpha_split_atomic_exchange (rtx operands[])
4717 rtx retval, mem, val, scratch;
4718 enum memmodel model;
4719 machine_mode mode;
4720 rtx label, x, cond;
4722 retval = operands[0];
4723 mem = operands[1];
4724 val = operands[2];
4725 model = (enum memmodel) INTVAL (operands[3]);
4726 scratch = operands[4];
4727 mode = GET_MODE (mem);
4728 cond = gen_lowpart (DImode, scratch);
4730 alpha_pre_atomic_barrier (model);
4732 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4733 emit_label (XEXP (label, 0));
4735 emit_load_locked (mode, retval, mem);
4736 emit_move_insn (scratch, val);
4737 emit_store_conditional (mode, cond, mem, scratch);
4739 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4740 emit_unlikely_jump (x, label);
4742 alpha_post_atomic_barrier (model);
4745 void
4746 alpha_expand_atomic_exchange_12 (rtx operands[])
4748 rtx dst, mem, val, model;
4749 machine_mode mode;
4750 rtx addr, align, wdst;
4751 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4753 dst = operands[0];
4754 mem = operands[1];
4755 val = operands[2];
4756 model = operands[3];
4757 mode = GET_MODE (mem);
4759 /* We forced the address into a register via mem_noofs_operand. */
4760 addr = XEXP (mem, 0);
4761 gcc_assert (register_operand (addr, DImode));
4763 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4764 NULL_RTX, 1, OPTAB_DIRECT);
4766 /* Insert val into the correct byte location within the word. */
4767 if (val != const0_rtx)
4768 val = emit_insxl (mode, val, addr);
4770 wdst = gen_reg_rtx (DImode);
4771 if (mode == QImode)
4772 gen = gen_atomic_exchangeqi_1;
4773 else
4774 gen = gen_atomic_exchangehi_1;
4775 emit_insn (gen (wdst, mem, val, align, model));
4777 emit_move_insn (dst, gen_lowpart (mode, wdst));
4780 void
4781 alpha_split_atomic_exchange_12 (rtx operands[])
4783 rtx dest, orig_mem, addr, val, align, scratch;
4784 rtx label, mem, width, mask, x;
4785 machine_mode mode;
4786 enum memmodel model;
4788 dest = operands[0];
4789 orig_mem = operands[1];
4790 val = operands[2];
4791 align = operands[3];
4792 model = (enum memmodel) INTVAL (operands[4]);
4793 scratch = operands[5];
4794 mode = GET_MODE (orig_mem);
4795 addr = XEXP (orig_mem, 0);
4797 mem = gen_rtx_MEM (DImode, align);
4798 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4799 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4800 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4802 alpha_pre_atomic_barrier (model);
4804 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4805 emit_label (XEXP (label, 0));
4807 emit_load_locked (DImode, scratch, mem);
4809 width = GEN_INT (GET_MODE_BITSIZE (mode));
4810 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4811 emit_insn (gen_extxl (dest, scratch, width, addr));
4812 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4813 if (val != const0_rtx)
4814 emit_insn (gen_iordi3 (scratch, scratch, val));
4816 emit_store_conditional (DImode, scratch, mem, scratch);
4818 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4819 emit_unlikely_jump (x, label);
4821 alpha_post_atomic_barrier (model);
4824 /* Adjust the cost of a scheduling dependency. Return the new cost of
4825 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4827 static int
4828 alpha_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4830 enum attr_type dep_insn_type;
4832 /* If the dependence is an anti-dependence, there is no cost. For an
4833 output dependence, there is sometimes a cost, but it doesn't seem
4834 worth handling those few cases. */
4835 if (REG_NOTE_KIND (link) != 0)
4836 return cost;
4838 /* If we can't recognize the insns, we can't really do anything. */
4839 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4840 return cost;
4842 dep_insn_type = get_attr_type (dep_insn);
4844 /* Bring in the user-defined memory latency. */
4845 if (dep_insn_type == TYPE_ILD
4846 || dep_insn_type == TYPE_FLD
4847 || dep_insn_type == TYPE_LDSYM)
4848 cost += alpha_memory_latency - 1;
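/* For example, with -mmemory-latency=3 a consumer of an ild/fld result
   is charged two extra cycles here (an illustrative figure, not a
   documented requirement).  */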
4850 /* Everything else handled in DFA bypasses now. */
4852 return cost;
4855 /* The number of instructions that can be issued per cycle. */
4857 static int
4858 alpha_issue_rate (void)
4860 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4863 /* How many alternative schedules to try. This should be as wide as the
4864 scheduling freedom in the DFA, but no wider. Making this value too
4865 large results in extra work for the scheduler.
4867 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4868 alternative schedules. For EV5, we can choose between E0/E1 and
4869 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4871 static int
4872 alpha_multipass_dfa_lookahead (void)
4874 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4877 /* Machine-specific function data. */
4879 struct GTY(()) alpha_links;
4881 struct string_traits : default_hashmap_traits
4883 static bool equal_keys (const char *const &a, const char *const &b)
4885 return strcmp (a, b) == 0;
4889 struct GTY(()) machine_function
4891 /* For flag_reorder_blocks_and_partition. */
4892 rtx gp_save_rtx;
4894 /* For VMS condition handlers. */
4895 bool uses_condition_handler;
4897 /* Linkage entries. */
4898 hash_map<const char *, alpha_links *, string_traits> *links;
4901 /* How to allocate a 'struct machine_function'. */
4903 static struct machine_function *
4904 alpha_init_machine_status (void)
4906 return ggc_cleared_alloc<machine_function> ();
4909 /* Support for frame based VMS condition handlers. */
4911 /* A VMS condition handler may be established for a function with a call to
4912 __builtin_establish_vms_condition_handler, and cancelled with a call to
4913 __builtin_revert_vms_condition_handler.
4915 The VMS Condition Handling Facility knows about the existence of a handler
4916 from the procedure descriptor .handler field. Like the VMS native compilers,
4917 we store the user-specified handler's address at a fixed location in the
4918 stack frame and point the procedure descriptor at a common wrapper which
4919 fetches the real handler's address and issues an indirect call.
4921 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4923 We force the procedure kind to PT_STACK, and the fixed frame location is
4924 fp+8, just before the register save area. We use the handler_data field in
4925 the procedure descriptor to state the fp offset at which the installed
4926 handler address can be found. */
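/* Approximate user-level usage, based on the expanders below (exact
   prototypes are declared elsewhere):

	old = __builtin_establish_vms_condition_handler (my_handler);
	...
	__builtin_revert_vms_condition_handler ();

   where my_handler is a hypothetical user-supplied handler.  */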
4928 #define VMS_COND_HANDLER_FP_OFFSET 8
4930 /* Expand code to store the currently installed user VMS condition handler
4931 into TARGET and install HANDLER as the new condition handler. */
4933 void
4934 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4936 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4937 VMS_COND_HANDLER_FP_OFFSET);
4939 rtx handler_slot
4940 = gen_rtx_MEM (DImode, handler_slot_address);
4942 emit_move_insn (target, handler_slot);
4943 emit_move_insn (handler_slot, handler);
4945 /* Notify the start/prologue/epilogue emitters that the condition handler
4946 slot is needed. In addition to reserving the slot space, this will force
4947 the procedure kind to PT_STACK to ensure that the hard_frame_pointer_rtx
4948 use above is correct. */
4949 cfun->machine->uses_condition_handler = true;
4952 /* Expand code to store the current VMS condition handler into TARGET and
4953 nullify it. */
4955 void
4956 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4958 /* We implement this by establishing a null condition handler, with the tiny
4959 side effect of setting uses_condition_handler. This is slightly
4960 pessimistic if no builtin_establish call is ever issued, but that case
4961 is harmless and not expected to happen in practice. */
4963 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4966 /* Functions to save and restore alpha_return_addr_rtx. */
4968 /* Start the ball rolling with RETURN_ADDR_RTX. */
4971 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4973 if (count != 0)
4974 return const0_rtx;
4976 return get_hard_reg_initial_val (Pmode, REG_RA);
4979 /* Return or create a memory slot containing the gp value for the current
4980 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4983 alpha_gp_save_rtx (void)
4985 rtx_insn *seq;
4986 rtx m = cfun->machine->gp_save_rtx;
4988 if (m == NULL)
4990 start_sequence ();
4992 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4993 m = validize_mem (m);
4994 emit_move_insn (m, pic_offset_table_rtx);
4996 seq = get_insns ();
4997 end_sequence ();
4999 /* We used to simply emit the sequence after entry_of_function.
5000 However this breaks the CFG if the first instruction in the
5001 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
5002 label. Emit the sequence properly on the edge. We are only
5003 invoked from dw2_build_landing_pads and finish_eh_generation
5004 will call commit_edge_insertions thanks to a kludge. */
5005 insert_insn_on_edge (seq,
5006 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
5008 cfun->machine->gp_save_rtx = m;
5011 return m;
5014 static void
5015 alpha_instantiate_decls (void)
5017 if (cfun->machine->gp_save_rtx != NULL_RTX)
5018 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
5021 static int
5022 alpha_ra_ever_killed (void)
5024 rtx_insn *top;
5026 if (!has_hard_reg_initial_val (Pmode, REG_RA))
5027 return (int)df_regs_ever_live_p (REG_RA);
5029 push_topmost_sequence ();
5030 top = get_insns ();
5031 pop_topmost_sequence ();
5033 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
5037 /* Return the trap mode suffix applicable to the current
5038 instruction, or NULL. */
5040 static const char *
5041 get_trap_mode_suffix (void)
5043 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
5045 switch (s)
5047 case TRAP_SUFFIX_NONE:
5048 return NULL;
5050 case TRAP_SUFFIX_SU:
5051 if (alpha_fptm >= ALPHA_FPTM_SU)
5052 return "su";
5053 return NULL;
5055 case TRAP_SUFFIX_SUI:
5056 if (alpha_fptm >= ALPHA_FPTM_SUI)
5057 return "sui";
5058 return NULL;
5060 case TRAP_SUFFIX_V_SV:
5061 switch (alpha_fptm)
5063 case ALPHA_FPTM_N:
5064 return NULL;
5065 case ALPHA_FPTM_U:
5066 return "v";
5067 case ALPHA_FPTM_SU:
5068 case ALPHA_FPTM_SUI:
5069 return "sv";
5070 default:
5071 gcc_unreachable ();
5074 case TRAP_SUFFIX_V_SV_SVI:
5075 switch (alpha_fptm)
5077 case ALPHA_FPTM_N:
5078 return NULL;
5079 case ALPHA_FPTM_U:
5080 return "v";
5081 case ALPHA_FPTM_SU:
5082 return "sv";
5083 case ALPHA_FPTM_SUI:
5084 return "svi";
5085 default:
5086 gcc_unreachable ();
5088 break;
5090 case TRAP_SUFFIX_U_SU_SUI:
5091 switch (alpha_fptm)
5093 case ALPHA_FPTM_N:
5094 return NULL;
5095 case ALPHA_FPTM_U:
5096 return "u";
5097 case ALPHA_FPTM_SU:
5098 return "su";
5099 case ALPHA_FPTM_SUI:
5100 return "sui";
5101 default:
5102 gcc_unreachable ();
5104 break;
5106 default:
5107 gcc_unreachable ();
5109 gcc_unreachable ();
5112 /* Return the rounding mode suffix applicable to the current
5113 instruction, or NULL. */
5115 static const char *
5116 get_round_mode_suffix (void)
5118 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5120 switch (s)
5122 case ROUND_SUFFIX_NONE:
5123 return NULL;
5124 case ROUND_SUFFIX_NORMAL:
5125 switch (alpha_fprm)
5127 case ALPHA_FPRM_NORM:
5128 return NULL;
5129 case ALPHA_FPRM_MINF:
5130 return "m";
5131 case ALPHA_FPRM_CHOP:
5132 return "c";
5133 case ALPHA_FPRM_DYN:
5134 return "d";
5135 default:
5136 gcc_unreachable ();
5138 break;
5140 case ROUND_SUFFIX_C:
5141 return "c";
5143 default:
5144 gcc_unreachable ();
5146 gcc_unreachable ();
5149 /* Print an operand. Recognize special options, documented below. */
5151 void
5152 print_operand (FILE *file, rtx x, int code)
5154 int i;
5156 switch (code)
5158 case '~':
5159 /* Print the assembler name of the current function. */
5160 assemble_name (file, alpha_fnname);
5161 break;
5163 case '&':
5164 if (const char *name = get_some_local_dynamic_name ())
5165 assemble_name (file, name);
5166 else
5167 output_operand_lossage ("'%%&' used without any "
5168 "local dynamic TLS references");
5169 break;
5171 case '/':
5173 const char *trap = get_trap_mode_suffix ();
5174 const char *round = get_round_mode_suffix ();
5176 if (trap || round)
5177 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5178 break;
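/* For instance, an addt emitted under -mfp-trap-mode=su and
   -mfp-rounding-mode=d would be printed as "addt/sud" (trap suffix
   "su" followed by rounding suffix "d") -- one illustrative pairing.  */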
5181 case ',':
5182 /* Generates single precision instruction suffix. */
5183 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5184 break;
5186 case '-':
5187 /* Generates double precision instruction suffix. */
5188 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5189 break;
5191 case '#':
5192 if (alpha_this_literal_sequence_number == 0)
5193 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5194 fprintf (file, "%d", alpha_this_literal_sequence_number);
5195 break;
5197 case '*':
5198 if (alpha_this_gpdisp_sequence_number == 0)
5199 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5200 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5201 break;
5203 case 'H':
5204 if (GET_CODE (x) == HIGH)
5205 output_addr_const (file, XEXP (x, 0));
5206 else
5207 output_operand_lossage ("invalid %%H value");
5208 break;
5210 case 'J':
5212 const char *lituse;
5214 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5216 x = XVECEXP (x, 0, 0);
5217 lituse = "lituse_tlsgd";
5219 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5221 x = XVECEXP (x, 0, 0);
5222 lituse = "lituse_tlsldm";
5224 else if (CONST_INT_P (x))
5225 lituse = "lituse_jsr";
5226 else
5228 output_operand_lossage ("invalid %%J value");
5229 break;
5232 if (x != const0_rtx)
5233 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5235 break;
5237 case 'j':
5239 const char *lituse;
5241 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5242 lituse = "lituse_jsrdirect";
5243 #else
5244 lituse = "lituse_jsr";
5245 #endif
5247 gcc_assert (INTVAL (x) != 0);
5248 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5250 break;
5251 case 'r':
5252 /* If this operand is the constant zero, write it as "$31". */
5253 if (REG_P (x))
5254 fprintf (file, "%s", reg_names[REGNO (x)]);
5255 else if (x == CONST0_RTX (GET_MODE (x)))
5256 fprintf (file, "$31");
5257 else
5258 output_operand_lossage ("invalid %%r value");
5259 break;
5261 case 'R':
5262 /* Similar, but for floating-point. */
5263 if (REG_P (x))
5264 fprintf (file, "%s", reg_names[REGNO (x)]);
5265 else if (x == CONST0_RTX (GET_MODE (x)))
5266 fprintf (file, "$f31");
5267 else
5268 output_operand_lossage ("invalid %%R value");
5269 break;
5271 case 'N':
5272 /* Write the 1's complement of a constant. */
5273 if (!CONST_INT_P (x))
5274 output_operand_lossage ("invalid %%N value");
5276 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5277 break;
5279 case 'P':
5280 /* Write 1 << C, for a constant C. */
5281 if (!CONST_INT_P (x))
5282 output_operand_lossage ("invalid %%P value");
5284 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5285 break;
5287 case 'h':
5288 /* Write the high-order 16 bits of a constant, sign-extended. */
5289 if (!CONST_INT_P (x))
5290 output_operand_lossage ("invalid %%h value");
5292 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5293 break;
5295 case 'L':
5296 /* Write the low-order 16 bits of a constant, sign-extended. */
5297 if (!CONST_INT_P (x))
5298 output_operand_lossage ("invalid %%L value");
5300 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5301 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5302 break;
5304 case 'm':
5305 /* Write mask for ZAP insn. */
5306 if (GET_CODE (x) == CONST_DOUBLE)
5308 HOST_WIDE_INT mask = 0;
5309 HOST_WIDE_INT value;
5311 value = CONST_DOUBLE_LOW (x);
5312 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5313 i++, value >>= 8)
5314 if (value & 0xff)
5315 mask |= (1 << i);
5317 value = CONST_DOUBLE_HIGH (x);
5318 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5319 i++, value >>= 8)
5320 if (value & 0xff)
5321 mask |= (1 << (i + sizeof (int)));
5323 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5326 else if (CONST_INT_P (x))
5328 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5330 for (i = 0; i < 8; i++, value >>= 8)
5331 if (value & 0xff)
5332 mask |= (1 << i);
5334 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5336 else
5337 output_operand_lossage ("invalid %%m value");
5338 break;
5340 case 'M':
5341 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5342 if (!CONST_INT_P (x)
5343 || (INTVAL (x) != 8 && INTVAL (x) != 16
5344 && INTVAL (x) != 32 && INTVAL (x) != 64))
5345 output_operand_lossage ("invalid %%M value");
5347 fprintf (file, "%s",
5348 (INTVAL (x) == 8 ? "b"
5349 : INTVAL (x) == 16 ? "w"
5350 : INTVAL (x) == 32 ? "l"
5351 : "q"));
5352 break;
5354 case 'U':
5355 /* Similar, except do it from the mask. */
5356 if (CONST_INT_P (x))
5358 HOST_WIDE_INT value = INTVAL (x);
5360 if (value == 0xff)
5362 fputc ('b', file);
5363 break;
5365 if (value == 0xffff)
5367 fputc ('w', file);
5368 break;
5370 if (value == 0xffffffff)
5372 fputc ('l', file);
5373 break;
5375 if (value == -1)
5377 fputc ('q', file);
5378 break;
5381 else if (HOST_BITS_PER_WIDE_INT == 32
5382 && GET_CODE (x) == CONST_DOUBLE
5383 && CONST_DOUBLE_LOW (x) == 0xffffffff
5384 && CONST_DOUBLE_HIGH (x) == 0)
5386 fputc ('l', file);
5387 break;
5389 output_operand_lossage ("invalid %%U value");
5390 break;
5392 case 's':
5393 /* Write the constant value divided by 8. */
5394 if (!CONST_INT_P (x)
5395 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5396 || (INTVAL (x) & 7) != 0)
5397 output_operand_lossage ("invalid %%s value");
5399 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5400 break;
5402 case 'S':
5403 /* Same, except compute (64 - c) / 8 */
5405 if (!CONST_INT_P (x)
5406 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5407 || (INTVAL (x) & 7) != 0)
5408 output_operand_lossage ("invalid %%S value");
5410 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5411 break;
5413 case 'C': case 'D': case 'c': case 'd':
5414 /* Write out comparison name. */
5416 enum rtx_code c = GET_CODE (x);
5418 if (!COMPARISON_P (x))
5419 output_operand_lossage ("invalid %%C value");
5421 else if (code == 'D')
5422 c = reverse_condition (c);
5423 else if (code == 'c')
5424 c = swap_condition (c);
5425 else if (code == 'd')
5426 c = swap_condition (reverse_condition (c));
5428 if (c == LEU)
5429 fprintf (file, "ule");
5430 else if (c == LTU)
5431 fprintf (file, "ult");
5432 else if (c == UNORDERED)
5433 fprintf (file, "un");
5434 else
5435 fprintf (file, "%s", GET_RTX_NAME (c));
5437 break;
5439 case 'E':
5440 /* Write the divide or modulus operator. */
5441 switch (GET_CODE (x))
5443 case DIV:
5444 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5445 break;
5446 case UDIV:
5447 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5448 break;
5449 case MOD:
5450 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5451 break;
5452 case UMOD:
5453 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5454 break;
5455 default:
5456 output_operand_lossage ("invalid %%E value");
5457 break;
5459 break;
5461 case 'A':
5462 /* Write "_u" for unaligned access. */
5463 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5464 fprintf (file, "_u");
5465 break;
5467 case 0:
5468 if (REG_P (x))
5469 fprintf (file, "%s", reg_names[REGNO (x)]);
5470 else if (MEM_P (x))
5471 output_address (XEXP (x, 0));
5472 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5474 switch (XINT (XEXP (x, 0), 1))
5476 case UNSPEC_DTPREL:
5477 case UNSPEC_TPREL:
5478 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5479 break;
5480 default:
5481 output_operand_lossage ("unknown relocation unspec");
5482 break;
5485 else
5486 output_addr_const (file, x);
5487 break;
5489 default:
5490 output_operand_lossage ("invalid %%xn code");
5494 void
5495 print_operand_address (FILE *file, rtx addr)
5497 int basereg = 31;
5498 HOST_WIDE_INT offset = 0;
5500 if (GET_CODE (addr) == AND)
5501 addr = XEXP (addr, 0);
5503 if (GET_CODE (addr) == PLUS
5504 && CONST_INT_P (XEXP (addr, 1)))
5506 offset = INTVAL (XEXP (addr, 1));
5507 addr = XEXP (addr, 0);
5510 if (GET_CODE (addr) == LO_SUM)
5512 const char *reloc16, *reloclo;
5513 rtx op1 = XEXP (addr, 1);
5515 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5517 op1 = XEXP (op1, 0);
5518 switch (XINT (op1, 1))
5520 case UNSPEC_DTPREL:
5521 reloc16 = NULL;
5522 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5523 break;
5524 case UNSPEC_TPREL:
5525 reloc16 = NULL;
5526 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5527 break;
5528 default:
5529 output_operand_lossage ("unknown relocation unspec");
5530 return;
5533 output_addr_const (file, XVECEXP (op1, 0, 0));
5535 else
5537 reloc16 = "gprel";
5538 reloclo = "gprellow";
5539 output_addr_const (file, op1);
5542 if (offset)
5543 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5545 addr = XEXP (addr, 0);
5546 switch (GET_CODE (addr))
5548 case REG:
5549 basereg = REGNO (addr);
5550 break;
5552 case SUBREG:
5553 basereg = subreg_regno (addr);
5554 break;
5556 default:
5557 gcc_unreachable ();
5560 fprintf (file, "($%d)\t\t!%s", basereg,
5561 (basereg == 29 ? reloc16 : reloclo));
5562 return;
5565 switch (GET_CODE (addr))
5567 case REG:
5568 basereg = REGNO (addr);
5569 break;
5571 case SUBREG:
5572 basereg = subreg_regno (addr);
5573 break;
5575 case CONST_INT:
5576 offset = INTVAL (addr);
5577 break;
5579 case SYMBOL_REF:
5580 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5581 fprintf (file, "%s", XSTR (addr, 0));
5582 return;
5584 case CONST:
5585 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5586 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5587 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5588 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5589 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5590 INTVAL (XEXP (XEXP (addr, 0), 1)));
5591 return;
5593 default:
5594 output_operand_lossage ("invalid operand address");
5595 return;
5598 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5601 /* Emit RTL insns to initialize the variable parts of a trampoline at
5602 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5603 for the static chain value for the function. */
5605 static void
5606 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5608 rtx fnaddr, mem, word1, word2;
5610 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5612 #ifdef POINTERS_EXTEND_UNSIGNED
5613 fnaddr = convert_memory_address (Pmode, fnaddr);
5614 chain_value = convert_memory_address (Pmode, chain_value);
5615 #endif
5617 if (TARGET_ABI_OPEN_VMS)
5619 const char *fnname;
5620 char *trname;
5622 /* Construct the name of the trampoline entry point. */
5623 fnname = XSTR (fnaddr, 0);
5624 trname = (char *) alloca (strlen (fnname) + 5);
5625 strcpy (trname, fnname);
5626 strcat (trname, "..tr");
5627 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5628 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5630 /* Trampoline (or "bounded") procedure descriptor is constructed from
5631 the function's procedure descriptor with certain fields zeroed in
5632 accordance with the VMS calling standard. This is stored in the first quadword. */
5633 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5634 word1 = expand_and (DImode, word1,
5635 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5636 NULL);
5638 else
5640 /* These 4 instructions are:
5641 ldq $1,24($27)
5642 ldq $27,16($27)
5643 jmp $31,($27),0
5644 nop
5645 We don't bother setting the HINT field of the jump; the nop
5646 is merely there for padding. */
5647 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5648 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5651 /* Store the first two words, as computed above. */
5652 mem = adjust_address (m_tramp, DImode, 0);
5653 emit_move_insn (mem, word1);
5654 mem = adjust_address (m_tramp, DImode, 8);
5655 emit_move_insn (mem, word2);
5657 /* Store function address and static chain value. */
5658 mem = adjust_address (m_tramp, Pmode, 16);
5659 emit_move_insn (mem, fnaddr);
5660 mem = adjust_address (m_tramp, Pmode, 24);
5661 emit_move_insn (mem, chain_value);
5663 if (TARGET_ABI_OSF)
5665 emit_insn (gen_imb ());
5666 #ifdef HAVE_ENABLE_EXECUTE_STACK
5667 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5668 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5669 #endif
5673 /* Determine where to put an argument to a function.
5674 Value is zero to push the argument on the stack,
5675 or a hard register in which to store the argument.
5677 MODE is the argument's machine mode.
5678 TYPE is the data type of the argument (as a tree).
5679 This is null for libcalls where that information may
5680 not be available.
5681 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5682 the preceding args and about the function being called.
5683 NAMED is nonzero if this argument is a named parameter
5684 (otherwise it is an extra parameter matching an ellipsis).
5686 On Alpha the first 6 words of args are normally in registers
5687 and the rest are pushed. */
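/* For example, under the OSF ABI the third named integer argument lands
   in $18 (base register 16 plus the two words already consumed), and the
   first floating-point argument in $f16 -- illustrative cases only.  */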
5689 static rtx
5690 alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
5691 const_tree type, bool named ATTRIBUTE_UNUSED)
5693 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5694 int basereg;
5695 int num_args;
5697 /* Don't get confused and pass small structures in FP registers. */
5698 if (type && AGGREGATE_TYPE_P (type))
5699 basereg = 16;
5700 else
5702 #ifdef ENABLE_CHECKING
5703 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5704 values here. */
5705 gcc_assert (!COMPLEX_MODE_P (mode));
5706 #endif
5708 /* Set up defaults for FP operands passed in FP registers, and
5709 integral operands passed in integer registers. */
5710 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5711 basereg = 32 + 16;
5712 else
5713 basereg = 16;
5716 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5717 the two platforms, so we can't avoid conditional compilation. */
5718 #if TARGET_ABI_OPEN_VMS
5720 if (mode == VOIDmode)
5721 return alpha_arg_info_reg_val (*cum);
5723 num_args = cum->num_args;
5724 if (num_args >= 6
5725 || targetm.calls.must_pass_in_stack (mode, type))
5726 return NULL_RTX;
5728 #elif TARGET_ABI_OSF
5730 if (*cum >= 6)
5731 return NULL_RTX;
5732 num_args = *cum;
5734 /* VOID is passed as a special flag for "last argument". */
5735 if (type == void_type_node)
5736 basereg = 16;
5737 else if (targetm.calls.must_pass_in_stack (mode, type))
5738 return NULL_RTX;
5740 #else
5741 #error Unhandled ABI
5742 #endif
5744 return gen_rtx_REG (mode, num_args + basereg);
5747 /* Update the data in CUM to advance over an argument
5748 of mode MODE and data type TYPE.
5749 (TYPE is null for libcalls where that information may not be available.) */
5751 static void
5752 alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
5753 const_tree type, bool named ATTRIBUTE_UNUSED)
5755 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5756 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5757 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5759 #if TARGET_ABI_OSF
5760 *cum += increment;
5761 #else
5762 if (!onstack && cum->num_args < 6)
5763 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5764 cum->num_args += increment;
5765 #endif
5768 static int
5769 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5770 machine_mode mode ATTRIBUTE_UNUSED,
5771 tree type ATTRIBUTE_UNUSED,
5772 bool named ATTRIBUTE_UNUSED)
5774 int words = 0;
5775 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5777 #if TARGET_ABI_OPEN_VMS
5778 if (cum->num_args < 6
5779 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5780 words = 6 - cum->num_args;
5781 #elif TARGET_ABI_OSF
5782 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5783 words = 6 - *cum;
5784 #else
5785 #error Unhandled ABI
5786 #endif
5788 return words * UNITS_PER_WORD;
5792 /* Return true if TYPE must be returned in memory, instead of in registers. */
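/* Illustrative cases: a 16-byte struct is returned in memory; a
   'double _Complex' comes back in registers, since each 8-byte element
   is judged on its own; any float vector type always goes to memory.  */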
5794 static bool
5795 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5797 machine_mode mode = VOIDmode;
5798 int size;
5800 if (type)
5802 mode = TYPE_MODE (type);
5804 /* All aggregates are returned in memory, except on OpenVMS where
5805 records that fit 64 bits should be returned by immediate value
5806 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5807 if (TARGET_ABI_OPEN_VMS
5808 && TREE_CODE (type) != ARRAY_TYPE
5809 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5810 return false;
5812 if (AGGREGATE_TYPE_P (type))
5813 return true;
5816 size = GET_MODE_SIZE (mode);
5817 switch (GET_MODE_CLASS (mode))
5819 case MODE_VECTOR_FLOAT:
5820 /* Pass all float vectors in memory, like an aggregate. */
5821 return true;
5823 case MODE_COMPLEX_FLOAT:
5824 /* We judge complex floats on the size of their element,
5825 not the size of the whole type. */
5826 size = GET_MODE_UNIT_SIZE (mode);
5827 break;
5829 case MODE_INT:
5830 case MODE_FLOAT:
5831 case MODE_COMPLEX_INT:
5832 case MODE_VECTOR_INT:
5833 break;
5835 default:
5836 /* ??? We get called on all sorts of random stuff from
5837 aggregate_value_p. We must return something, but it's not
5838 clear what's safe to return. Pretend it's a struct I
5839 guess. */
5840 return true;
5843 /* Otherwise types must fit in one register. */
5844 return size > UNITS_PER_WORD;
5847 /* Return true if TYPE should be passed by invisible reference. */
5849 static bool
5850 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5851 machine_mode mode,
5852 const_tree type ATTRIBUTE_UNUSED,
5853 bool named ATTRIBUTE_UNUSED)
5855 return mode == TFmode || mode == TCmode;
5858 /* Define how to find the value returned by a function. VALTYPE is the
5859 data type of the value (as a tree). If the precise function being
5860 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5861 MODE is set instead of VALTYPE for libcalls.
5863 On Alpha the value is found in $0 for integer functions and
5864 $f0 for floating-point functions. */
5867 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5868 machine_mode mode)
5870 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5871 enum mode_class mclass;
5873 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5875 if (valtype)
5876 mode = TYPE_MODE (valtype);
5878 mclass = GET_MODE_CLASS (mode);
5879 switch (mclass)
5881 case MODE_INT:
5882 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5883 where we have them returning both SImode and DImode. */
5884 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5885 PROMOTE_MODE (mode, dummy, valtype);
5886 /* FALLTHRU */
5888 case MODE_COMPLEX_INT:
5889 case MODE_VECTOR_INT:
5890 regnum = 0;
5891 break;
5893 case MODE_FLOAT:
5894 regnum = 32;
5895 break;
5897 case MODE_COMPLEX_FLOAT:
5899 machine_mode cmode = GET_MODE_INNER (mode);
5901 return gen_rtx_PARALLEL
5902 (VOIDmode,
5903 gen_rtvec (2,
5904 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5905 const0_rtx),
5906 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5907 GEN_INT (GET_MODE_SIZE (cmode)))));
5910 case MODE_RANDOM:
5911 /* We should only reach here for BLKmode on VMS. */
5912 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5913 regnum = 0;
5914 break;
5916 default:
5917 gcc_unreachable ();
5920 return gen_rtx_REG (mode, regnum);
5923 /* TCmode complex values are passed by invisible reference. We
5924 should not split these values. */
5926 static bool
5927 alpha_split_complex_arg (const_tree type)
5929 return TYPE_MODE (type) != TCmode;
5932 static tree
5933 alpha_build_builtin_va_list (void)
5935 tree base, ofs, space, record, type_decl;
5937 if (TARGET_ABI_OPEN_VMS)
5938 return ptr_type_node;
5940 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5941 type_decl = build_decl (BUILTINS_LOCATION,
5942 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5943 TYPE_STUB_DECL (record) = type_decl;
5944 TYPE_NAME (record) = type_decl;
5946 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5948 /* Dummy field to prevent alignment warnings. */
5949 space = build_decl (BUILTINS_LOCATION,
5950 FIELD_DECL, NULL_TREE, integer_type_node);
5951 DECL_FIELD_CONTEXT (space) = record;
5952 DECL_ARTIFICIAL (space) = 1;
5953 DECL_IGNORED_P (space) = 1;
5955 ofs = build_decl (BUILTINS_LOCATION,
5956 FIELD_DECL, get_identifier ("__offset"),
5957 integer_type_node);
5958 DECL_FIELD_CONTEXT (ofs) = record;
5959 DECL_CHAIN (ofs) = space;
5960 /* ??? This is a hack, __offset is marked volatile to prevent
5961 DCE that confuses stdarg optimization and results in
5962 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5963 TREE_THIS_VOLATILE (ofs) = 1;
5965 base = build_decl (BUILTINS_LOCATION,
5966 FIELD_DECL, get_identifier ("__base"),
5967 ptr_type_node);
5968 DECL_FIELD_CONTEXT (base) = record;
5969 DECL_CHAIN (base) = ofs;
5971 TYPE_FIELDS (record) = base;
5972 layout_type (record);
5974 va_list_gpr_counter_field = ofs;
5975 return record;
5978 #if TARGET_ABI_OSF
5979 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5980 and constant additions. */
5982 static gimple
5983 va_list_skip_additions (tree lhs)
5985 gimple stmt;
5987 for (;;)
5989 enum tree_code code;
5991 stmt = SSA_NAME_DEF_STMT (lhs);
5993 if (gimple_code (stmt) == GIMPLE_PHI)
5994 return stmt;
5996 if (!is_gimple_assign (stmt)
5997 || gimple_assign_lhs (stmt) != lhs)
5998 return NULL;
6000 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
6001 return stmt;
6002 code = gimple_assign_rhs_code (stmt);
6003 if (!CONVERT_EXPR_CODE_P (code)
6004 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
6005 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
6006 || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
6007 return stmt;
6009 lhs = gimple_assign_rhs1 (stmt);
6013 /* Check if LHS = RHS statement is
6014 LHS = *(ap.__base + ap.__offset + cst)
6016 LHS = *(ap.__base
6017 + ((ap.__offset + cst <= 47)
6018 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
6019 If the former, indicate that GPR registers are needed,
6020 if the latter, indicate that FPR registers are needed.
6022 Also look for LHS = (*ptr).field, where ptr is one of the forms
6023 listed above.
6025 On alpha, cfun->va_list_gpr_size is used as size of the needed
6026 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
6027 registers are needed and bit 1 set if FPR registers are needed.
6028 Return true if va_list references should not be scanned for the
6029 current statement. */
6031 static bool
6032 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
6034 tree base, offset, rhs;
6035 int offset_arg = 1;
6036 gimple base_stmt;
6038 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
6039 != GIMPLE_SINGLE_RHS)
6040 return false;
6042 rhs = gimple_assign_rhs1 (stmt);
6043 while (handled_component_p (rhs))
6044 rhs = TREE_OPERAND (rhs, 0);
6045 if (TREE_CODE (rhs) != MEM_REF
6046 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6047 return false;
6049 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6050 if (stmt == NULL
6051 || !is_gimple_assign (stmt)
6052 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6053 return false;
6055 base = gimple_assign_rhs1 (stmt);
6056 if (TREE_CODE (base) == SSA_NAME)
6058 base_stmt = va_list_skip_additions (base);
6059 if (base_stmt
6060 && is_gimple_assign (base_stmt)
6061 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6062 base = gimple_assign_rhs1 (base_stmt);
6065 if (TREE_CODE (base) != COMPONENT_REF
6066 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6068 base = gimple_assign_rhs2 (stmt);
6069 if (TREE_CODE (base) == SSA_NAME)
6071 base_stmt = va_list_skip_additions (base);
6072 if (base_stmt
6073 && is_gimple_assign (base_stmt)
6074 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6075 base = gimple_assign_rhs1 (base_stmt);
6078 if (TREE_CODE (base) != COMPONENT_REF
6079 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6080 return false;
6082 offset_arg = 0;
6085 base = get_base_address (base);
6086 if (TREE_CODE (base) != VAR_DECL
6087 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
6088 return false;
6090 offset = gimple_op (stmt, 1 + offset_arg);
6091 if (TREE_CODE (offset) == SSA_NAME)
6093 gimple offset_stmt = va_list_skip_additions (offset);
6095 if (offset_stmt
6096 && gimple_code (offset_stmt) == GIMPLE_PHI)
6098 HOST_WIDE_INT sub;
6099 gimple arg1_stmt, arg2_stmt;
6100 tree arg1, arg2;
6101 enum tree_code code1, code2;
6103 if (gimple_phi_num_args (offset_stmt) != 2)
6104 goto escapes;
6106 arg1_stmt
6107 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6108 arg2_stmt
6109 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6110 if (arg1_stmt == NULL
6111 || !is_gimple_assign (arg1_stmt)
6112 || arg2_stmt == NULL
6113 || !is_gimple_assign (arg2_stmt))
6114 goto escapes;
6116 code1 = gimple_assign_rhs_code (arg1_stmt);
6117 code2 = gimple_assign_rhs_code (arg2_stmt);
6118 if (code1 == COMPONENT_REF
6119 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6120 /* Do nothing. */;
6121 else if (code2 == COMPONENT_REF
6122 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6124 gimple tem = arg1_stmt;
6125 code2 = code1;
6126 arg1_stmt = arg2_stmt;
6127 arg2_stmt = tem;
6129 else
6130 goto escapes;
6132 if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
6133 goto escapes;
6135 sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
6136 if (code2 == MINUS_EXPR)
6137 sub = -sub;
6138 if (sub < -48 || sub > -32)
6139 goto escapes;
6141 arg1 = gimple_assign_rhs1 (arg1_stmt);
6142 arg2 = gimple_assign_rhs1 (arg2_stmt);
6143 if (TREE_CODE (arg2) == SSA_NAME)
6145 arg2_stmt = va_list_skip_additions (arg2);
6146 if (arg2_stmt == NULL
6147 || !is_gimple_assign (arg2_stmt)
6148 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6149 goto escapes;
6150 arg2 = gimple_assign_rhs1 (arg2_stmt);
6152 if (arg1 != arg2)
6153 goto escapes;
6155 if (TREE_CODE (arg1) != COMPONENT_REF
6156 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6157 || get_base_address (arg1) != base)
6158 goto escapes;
6160 /* Need floating point regs. */
6161 cfun->va_list_fpr_size |= 2;
6162 return false;
6164 if (offset_stmt
6165 && is_gimple_assign (offset_stmt)
6166 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6167 offset = gimple_assign_rhs1 (offset_stmt);
6169 if (TREE_CODE (offset) != COMPONENT_REF
6170 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6171 || get_base_address (offset) != base)
6172 goto escapes;
6173 else
6174 /* Need general regs. */
6175 cfun->va_list_fpr_size |= 1;
6176 return false;
6178 escapes:
6179 si->va_list_escapes = true;
6180 return false;
6182 #endif
6184 /* Perform any actions needed for a function that is receiving a
6185 variable number of arguments. */
6187 static void
6188 alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
6189 tree type, int *pretend_size, int no_rtl)
6191 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6193 /* Skip the current argument. */
6194 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6195 true);
6197 #if TARGET_ABI_OPEN_VMS
6198 /* For VMS, we allocate space for all 6 arg registers plus a count.
6200 However, if NO registers need to be saved, don't allocate any space.
6201 This is not only because we won't need the space, but because AP
6202 includes the current_pretend_args_size and we don't want to mess up
6203 any ap-relative addresses already made. */
6204 if (cum.num_args < 6)
6206 if (!no_rtl)
6208 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6209 emit_insn (gen_arg_home ());
6211 *pretend_size = 7 * UNITS_PER_WORD;
6213 #else
6214 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6215 only push those that are remaining. However, if NO registers need to
6216 be saved, don't allocate any space. This is not only because we won't
6217 need the space, but because AP includes the current_pretend_args_size
6218 and we don't want to mess up any ap-relative addresses already made.
6220 If we are not to use the floating-point registers, save the integer
6221 registers where we would put the floating-point registers. This is
6222 not the most efficient way to implement varargs with just one register
6223 class, but it isn't worth doing anything more efficient in this rare
6224 case. */
6225 if (cum >= 6)
6226 return;
6228 if (!no_rtl)
6230 int count;
6231 alias_set_type set = get_varargs_alias_set ();
6232 rtx tmp;
6234 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6235 if (count > 6 - cum)
6236 count = 6 - cum;
6238 /* Detect whether integer registers or floating-point registers
6239 are needed by the detected va_arg statements. See above for
6240 how these values are computed. Note that the "escape" value
6241 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6242 these bits set. */
6243 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6245 if (cfun->va_list_fpr_size & 1)
6247 tmp = gen_rtx_MEM (BLKmode,
6248 plus_constant (Pmode, virtual_incoming_args_rtx,
6249 (cum + 6) * UNITS_PER_WORD));
6250 MEM_NOTRAP_P (tmp) = 1;
6251 set_mem_alias_set (tmp, set);
6252 move_block_from_reg (16 + cum, tmp, count);
6255 if (cfun->va_list_fpr_size & 2)
6257 tmp = gen_rtx_MEM (BLKmode,
6258 plus_constant (Pmode, virtual_incoming_args_rtx,
6259 cum * UNITS_PER_WORD));
6260 MEM_NOTRAP_P (tmp) = 1;
6261 set_mem_alias_set (tmp, set);
6262 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6265 *pretend_size = 12 * UNITS_PER_WORD;
6266 #endif
6269 static void
6270 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6272 HOST_WIDE_INT offset;
6273 tree t, offset_field, base_field;
6275 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6276 return;
6278 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6279 up by 48, storing fp arg registers in the first 48 bytes, and the
6280 integer arg registers in the next 48 bytes. This is only done,
6281 however, if any integer registers need to be stored.
6283 If no integer registers need be stored, then we must subtract 48
6284 in order to account for the integer arg registers which are counted
6285 in argsize above, but which are not actually stored on the stack.
6286 Must further be careful here about structures straddling the last
6287 integer argument register; that futzes with pretend_args_size,
6288 which changes the meaning of AP. */
6290 if (NUM_ARGS < 6)
6291 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6292 else
6293 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6295 if (TARGET_ABI_OPEN_VMS)
6297 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6298 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6299 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6300 TREE_SIDE_EFFECTS (t) = 1;
6301 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6303 else
6305 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6306 offset_field = DECL_CHAIN (base_field);
6308 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6309 valist, base_field, NULL_TREE);
6310 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6311 valist, offset_field, NULL_TREE);
6313 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6314 t = fold_build_pointer_plus_hwi (t, offset);
6315 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6316 TREE_SIDE_EFFECTS (t) = 1;
6317 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6319 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6320 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6321 TREE_SIDE_EFFECTS (t) = 1;
6322 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
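/* Illustrative example (the field names below are descriptive only): for an
   OSF function with two named arguments and at least one unnamed register
   argument, the code above initializes

     __base   = incoming-args pointer + 6 * UNITS_PER_WORD   (skips the FP block)
     __offset = 2 * UNITS_PER_WORD                           (named args consumed)

   so that __base + __offset addresses the saved copy of the first unnamed
   integer register, and __base + __offset - 48 the corresponding FP copy,
   which is how alpha_gimplify_va_arg_1 below indexes the save area
   (subtracting 48 for FP arguments still inside the register block).  */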
6326 static tree
6327 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6328 gimple_seq *pre_p)
6330 tree type_size, ptr_type, addend, t, addr;
6331 gimple_seq internal_post;
6333 /* If the type could not be passed in registers, skip the block
6334 reserved for the registers. */
6335 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6337 t = build_int_cst (TREE_TYPE (offset), 6*8);
6338 gimplify_assign (offset,
6339 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6340 pre_p);
6343 addend = offset;
6344 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6346 if (TREE_CODE (type) == COMPLEX_TYPE)
6348 tree real_part, imag_part, real_temp;
6350 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6351 offset, pre_p);
6353 /* Copy the value into a new temporary, lest the formal temporary
6354 be reused out from under us. */
6355 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6357 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6358 offset, pre_p);
6360 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6362 else if (TREE_CODE (type) == REAL_TYPE)
6364 tree fpaddend, cond, fourtyeight;
6366 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6367 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6368 addend, fourtyeight);
6369 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6370 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6371 fpaddend, addend);
6374 /* Build the final address and force that value into a temporary. */
6375 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6376 internal_post = NULL;
6377 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6378 gimple_seq_add_seq (pre_p, internal_post);
6380 /* Update the offset field. */
6381 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6382 if (type_size == NULL || TREE_OVERFLOW (type_size))
6383 t = size_zero_node;
6384 else
6386 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6387 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6388 t = size_binop (MULT_EXPR, t, size_int (8));
6390 t = fold_convert (TREE_TYPE (offset), t);
6391 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6392 pre_p);
6394 return build_va_arg_indirect_ref (addr);
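/* For illustration: after fetching a 12-byte argument type, the code above
   advances OFFSET by (12 + 7) / 8 * 8 = 16 bytes, so the running offset
   stays 8-byte aligned for the next va_arg.  */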
6397 static tree
6398 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6399 gimple_seq *post_p)
6401 tree offset_field, base_field, offset, base, t, r;
6402 bool indirect;
6404 if (TARGET_ABI_OPEN_VMS)
6405 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6407 base_field = TYPE_FIELDS (va_list_type_node);
6408 offset_field = DECL_CHAIN (base_field);
6409 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6410 valist, base_field, NULL_TREE);
6411 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6412 valist, offset_field, NULL_TREE);
6414 /* Pull the fields of the structure out into temporaries. Since we never
6415 modify the base field, we can use a formal temporary. Sign-extend the
6416 offset field so that it's the proper width for pointer arithmetic. */
6417 base = get_formal_tmp_var (base_field, pre_p);
6419 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6420 offset = get_initialized_tmp_var (t, pre_p, NULL);
6422 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6423 if (indirect)
6424 type = build_pointer_type_for_mode (type, ptr_mode, true);
6426 /* Find the value. Note that this will be a stable indirection, or
6427 a composite of stable indirections in the case of complex. */
6428 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6430 /* Stuff the offset temporary back into its field. */
6431 gimplify_assign (unshare_expr (offset_field),
6432 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6434 if (indirect)
6435 r = build_va_arg_indirect_ref (r);
6437 return r;
6440 /* Builtins. */
6442 enum alpha_builtin
6444 ALPHA_BUILTIN_CMPBGE,
6445 ALPHA_BUILTIN_EXTBL,
6446 ALPHA_BUILTIN_EXTWL,
6447 ALPHA_BUILTIN_EXTLL,
6448 ALPHA_BUILTIN_EXTQL,
6449 ALPHA_BUILTIN_EXTWH,
6450 ALPHA_BUILTIN_EXTLH,
6451 ALPHA_BUILTIN_EXTQH,
6452 ALPHA_BUILTIN_INSBL,
6453 ALPHA_BUILTIN_INSWL,
6454 ALPHA_BUILTIN_INSLL,
6455 ALPHA_BUILTIN_INSQL,
6456 ALPHA_BUILTIN_INSWH,
6457 ALPHA_BUILTIN_INSLH,
6458 ALPHA_BUILTIN_INSQH,
6459 ALPHA_BUILTIN_MSKBL,
6460 ALPHA_BUILTIN_MSKWL,
6461 ALPHA_BUILTIN_MSKLL,
6462 ALPHA_BUILTIN_MSKQL,
6463 ALPHA_BUILTIN_MSKWH,
6464 ALPHA_BUILTIN_MSKLH,
6465 ALPHA_BUILTIN_MSKQH,
6466 ALPHA_BUILTIN_UMULH,
6467 ALPHA_BUILTIN_ZAP,
6468 ALPHA_BUILTIN_ZAPNOT,
6469 ALPHA_BUILTIN_AMASK,
6470 ALPHA_BUILTIN_IMPLVER,
6471 ALPHA_BUILTIN_RPCC,
6472 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6473 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6475 /* TARGET_MAX */
6476 ALPHA_BUILTIN_MINUB8,
6477 ALPHA_BUILTIN_MINSB8,
6478 ALPHA_BUILTIN_MINUW4,
6479 ALPHA_BUILTIN_MINSW4,
6480 ALPHA_BUILTIN_MAXUB8,
6481 ALPHA_BUILTIN_MAXSB8,
6482 ALPHA_BUILTIN_MAXUW4,
6483 ALPHA_BUILTIN_MAXSW4,
6484 ALPHA_BUILTIN_PERR,
6485 ALPHA_BUILTIN_PKLB,
6486 ALPHA_BUILTIN_PKWB,
6487 ALPHA_BUILTIN_UNPKBL,
6488 ALPHA_BUILTIN_UNPKBW,
6490 /* TARGET_CIX */
6491 ALPHA_BUILTIN_CTTZ,
6492 ALPHA_BUILTIN_CTLZ,
6493 ALPHA_BUILTIN_CTPOP,
6495 ALPHA_BUILTIN_max
6498 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6499 CODE_FOR_builtin_cmpbge,
6500 CODE_FOR_extbl,
6501 CODE_FOR_extwl,
6502 CODE_FOR_extll,
6503 CODE_FOR_extql,
6504 CODE_FOR_extwh,
6505 CODE_FOR_extlh,
6506 CODE_FOR_extqh,
6507 CODE_FOR_builtin_insbl,
6508 CODE_FOR_builtin_inswl,
6509 CODE_FOR_builtin_insll,
6510 CODE_FOR_insql,
6511 CODE_FOR_inswh,
6512 CODE_FOR_inslh,
6513 CODE_FOR_insqh,
6514 CODE_FOR_mskbl,
6515 CODE_FOR_mskwl,
6516 CODE_FOR_mskll,
6517 CODE_FOR_mskql,
6518 CODE_FOR_mskwh,
6519 CODE_FOR_msklh,
6520 CODE_FOR_mskqh,
6521 CODE_FOR_umuldi3_highpart,
6522 CODE_FOR_builtin_zap,
6523 CODE_FOR_builtin_zapnot,
6524 CODE_FOR_builtin_amask,
6525 CODE_FOR_builtin_implver,
6526 CODE_FOR_builtin_rpcc,
6527 CODE_FOR_builtin_establish_vms_condition_handler,
6528 CODE_FOR_builtin_revert_vms_condition_handler,
6530 /* TARGET_MAX */
6531 CODE_FOR_builtin_minub8,
6532 CODE_FOR_builtin_minsb8,
6533 CODE_FOR_builtin_minuw4,
6534 CODE_FOR_builtin_minsw4,
6535 CODE_FOR_builtin_maxub8,
6536 CODE_FOR_builtin_maxsb8,
6537 CODE_FOR_builtin_maxuw4,
6538 CODE_FOR_builtin_maxsw4,
6539 CODE_FOR_builtin_perr,
6540 CODE_FOR_builtin_pklb,
6541 CODE_FOR_builtin_pkwb,
6542 CODE_FOR_builtin_unpkbl,
6543 CODE_FOR_builtin_unpkbw,
6545 /* TARGET_CIX */
6546 CODE_FOR_ctzdi2,
6547 CODE_FOR_clzdi2,
6548 CODE_FOR_popcountdi2
6551 struct alpha_builtin_def
6553 const char *name;
6554 enum alpha_builtin code;
6555 unsigned int target_mask;
6556 bool is_const;
6559 static struct alpha_builtin_def const zero_arg_builtins[] = {
6560 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6561 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6564 static struct alpha_builtin_def const one_arg_builtins[] = {
6565 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6566 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6567 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6568 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6569 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6570 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6571 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6572 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6575 static struct alpha_builtin_def const two_arg_builtins[] = {
6576 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6577 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6578 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6579 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6580 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6581 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6582 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6583 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6584 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6585 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6586 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6587 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6588 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6589 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6590 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6591 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6592 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6593 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6594 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6595 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6596 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6597 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6598 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6599 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6600 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6601 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6602 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6603 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6604 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6605 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6606 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6607 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6608 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6609 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6612 static GTY(()) tree alpha_dimode_u;
6613 static GTY(()) tree alpha_v8qi_u;
6614 static GTY(()) tree alpha_v8qi_s;
6615 static GTY(()) tree alpha_v4hi_u;
6616 static GTY(()) tree alpha_v4hi_s;
6618 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6620 /* Return the alpha builtin for CODE. */
6622 static tree
6623 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6625 if (code >= ALPHA_BUILTIN_max)
6626 return error_mark_node;
6627 return alpha_builtins[code];
6630 /* Helper function of alpha_init_builtins. Add the built-in specified
6631 by NAME, TYPE, CODE, and ECF. */
6633 static void
6634 alpha_builtin_function (const char *name, tree ftype,
6635 enum alpha_builtin code, unsigned ecf)
6637 tree decl = add_builtin_function (name, ftype, (int) code,
6638 BUILT_IN_MD, NULL, NULL_TREE);
6640 if (ecf & ECF_CONST)
6641 TREE_READONLY (decl) = 1;
6642 if (ecf & ECF_NOTHROW)
6643 TREE_NOTHROW (decl) = 1;
6645 alpha_builtins [(int) code] = decl;
6648 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6649 functions pointed to by P, with function type FTYPE. */
6651 static void
6652 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6653 tree ftype)
6655 size_t i;
6657 for (i = 0; i < count; ++i, ++p)
6658 if ((target_flags & p->target_mask) == p->target_mask)
6659 alpha_builtin_function (p->name, ftype, p->code,
6660 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
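/* Illustrative usage only (user code, not part of this file): once
   registered, these builtins behave as ordinary functions at the C level,
   e.g.

     unsigned long hi = __builtin_alpha_umulh (a, b);       /+ high 64 bits of a*b +/
     unsigned long lo = __builtin_alpha_zapnot (x, 0x0f);   /+ keep bytes 0-3 +/

   (comment markers spelled /+ +/ here only to nest inside this comment).
   Entries carrying MASK_MAX or MASK_CIX in the tables above are only
   registered when the corresponding target flags (-mmax, -mcix) are
   enabled, which is what the target_mask test above implements.  */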
6663 static void
6664 alpha_init_builtins (void)
6666 tree ftype;
6668 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6669 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6670 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6671 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6672 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6674 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6675 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6677 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6678 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6680 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6681 alpha_dimode_u, NULL_TREE);
6682 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6684 if (TARGET_ABI_OPEN_VMS)
6686 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6687 NULL_TREE);
6688 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6689 ftype,
6690 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6693 ftype = build_function_type_list (ptr_type_node, void_type_node,
6694 NULL_TREE);
6695 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6696 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6698 vms_patch_builtins ();
6702 /* Expand an expression EXP that calls a built-in function,
6703 with result going to TARGET if that's convenient
6704 (and in mode MODE if that's convenient).
6705 SUBTARGET may be used as the target for computing one of EXP's operands.
6706 IGNORE is nonzero if the value is to be ignored. */
6708 static rtx
6709 alpha_expand_builtin (tree exp, rtx target,
6710 rtx subtarget ATTRIBUTE_UNUSED,
6711 machine_mode mode ATTRIBUTE_UNUSED,
6712 int ignore ATTRIBUTE_UNUSED)
6714 #define MAX_ARGS 2
6716 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6717 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6718 tree arg;
6719 call_expr_arg_iterator iter;
6720 enum insn_code icode;
6721 rtx op[MAX_ARGS], pat;
6722 int arity;
6723 bool nonvoid;
6725 if (fcode >= ALPHA_BUILTIN_max)
6726 internal_error ("bad builtin fcode");
6727 icode = code_for_builtin[fcode];
6728 if (icode == 0)
6729 internal_error ("bad builtin fcode");
6731 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6733 arity = 0;
6734 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6736 const struct insn_operand_data *insn_op;
6738 if (arg == error_mark_node)
6739 return NULL_RTX;
6740 if (arity > MAX_ARGS)
6741 return NULL_RTX;
6743 insn_op = &insn_data[icode].operand[arity + nonvoid];
6745 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6747 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6748 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6749 arity++;
6752 if (nonvoid)
6754 machine_mode tmode = insn_data[icode].operand[0].mode;
6755 if (!target
6756 || GET_MODE (target) != tmode
6757 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6758 target = gen_reg_rtx (tmode);
6761 switch (arity)
6763 case 0:
6764 pat = GEN_FCN (icode) (target);
6765 break;
6766 case 1:
6767 if (nonvoid)
6768 pat = GEN_FCN (icode) (target, op[0]);
6769 else
6770 pat = GEN_FCN (icode) (op[0]);
6771 break;
6772 case 2:
6773 pat = GEN_FCN (icode) (target, op[0], op[1]);
6774 break;
6775 default:
6776 gcc_unreachable ();
6778 if (!pat)
6779 return NULL_RTX;
6780 emit_insn (pat);
6782 if (nonvoid)
6783 return target;
6784 else
6785 return const0_rtx;
6789 /* Several bits below assume HWI >= 64 bits. This should be enforced
6790 by config.gcc. */
6791 #if HOST_BITS_PER_WIDE_INT < 64
6792 # error "HOST_WIDE_INT too small"
6793 #endif
6795 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6796 with an 8-bit output vector. OPINT contains the integer operands; bit N
6797 of OP_CONST is set if OPINT[N] is valid. */
6799 static tree
6800 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6802 if (op_const == 3)
6804 int i, val;
6805 for (i = 0, val = 0; i < 8; ++i)
6807 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6808 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6809 if (c0 >= c1)
6810 val |= 1 << i;
6812 return build_int_cst (alpha_dimode_u, val);
6814 else if (op_const == 2 && opint[1] == 0)
6815 return build_int_cst (alpha_dimode_u, 0xff);
6816 return NULL;
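/* Worked example (illustrative): with both operands constant, e.g.
   __builtin_alpha_cmpbge (0x1122334455667788, 0x1122334455667790), byte 0
   of the first operand (0x88) is below byte 0 of the second (0x90), so bit 0
   stays clear, while bytes 1-7 compare greater-or-equal and the call folds
   to the constant 0xfe.  With only the second operand known and equal to
   zero, every unsigned byte comparison trivially succeeds, giving 0xff.  */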
6819 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6820 specialized form of an AND operation. Other byte manipulation instructions
6821 are defined in terms of this instruction, so this is also used as a
6822 subroutine for other builtins.
6824 OP contains the tree operands; OPINT contains the extracted integer values.
6825 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6826 OPINT may be considered. */
6828 static tree
6829 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6830 long op_const)
6832 if (op_const & 2)
6834 unsigned HOST_WIDE_INT mask = 0;
6835 int i;
6837 for (i = 0; i < 8; ++i)
6838 if ((opint[1] >> i) & 1)
6839 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6841 if (op_const & 1)
6842 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6844 if (op)
6845 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6846 build_int_cst (alpha_dimode_u, mask));
6848 else if ((op_const & 1) && opint[0] == 0)
6849 return build_int_cst (alpha_dimode_u, 0);
6850 return NULL;
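/* For example, a constant selector of 0x05 keeps bytes 0 and 2 and so
   expands to the byte mask 0x0000000000ff00ff: a constant first operand is
   folded to OPINT[0] & mask, and a non-constant one is rewritten as a
   BIT_AND_EXPR against that mask.  */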
6853 /* Fold the builtins for the EXT family of instructions. */
6855 static tree
6856 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6857 long op_const, unsigned HOST_WIDE_INT bytemask,
6858 bool is_high)
6860 long zap_const = 2;
6861 tree *zap_op = NULL;
6863 if (op_const & 2)
6865 unsigned HOST_WIDE_INT loc;
6867 loc = opint[1] & 7;
6868 loc *= BITS_PER_UNIT;
6870 if (loc != 0)
6872 if (op_const & 1)
6874 unsigned HOST_WIDE_INT temp = opint[0];
6875 if (is_high)
6876 temp <<= loc;
6877 else
6878 temp >>= loc;
6879 opint[0] = temp;
6880 zap_const = 3;
6883 else
6884 zap_op = op;
6887 opint[1] = bytemask;
6888 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
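/* Illustrative: __builtin_alpha_extwl (x, a) fetches the 16-bit field that
   starts at byte (a & 7) of x.  With a constant byte offset the code above
   reduces it to a shift of OPINT[0] plus a ZAPNOT with bytemask 0x03, so
   e.g. extwl (0x0123456789abcdef, 2) folds to (x >> 16) & 0xffff = 0x89ab
   when x is constant as well.  */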
6891 /* Fold the builtins for the INS family of instructions. */
6893 static tree
6894 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6895 long op_const, unsigned HOST_WIDE_INT bytemask,
6896 bool is_high)
6898 if ((op_const & 1) && opint[0] == 0)
6899 return build_int_cst (alpha_dimode_u, 0);
6901 if (op_const & 2)
6903 unsigned HOST_WIDE_INT temp, loc, byteloc;
6904 tree *zap_op = NULL;
6906 loc = opint[1] & 7;
6907 bytemask <<= loc;
6909 temp = opint[0];
6910 if (is_high)
6912 byteloc = (64 - (loc * 8)) & 0x3f;
6913 if (byteloc == 0)
6914 zap_op = op;
6915 else
6916 temp >>= byteloc;
6917 bytemask >>= 8;
6919 else
6921 byteloc = loc * 8;
6922 if (byteloc == 0)
6923 zap_op = op;
6924 else
6925 temp <<= byteloc;
6928 opint[0] = temp;
6929 opint[1] = bytemask;
6930 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6933 return NULL;
6936 static tree
6937 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6938 long op_const, unsigned HOST_WIDE_INT bytemask,
6939 bool is_high)
6941 if (op_const & 2)
6943 unsigned HOST_WIDE_INT loc;
6945 loc = opint[1] & 7;
6946 bytemask <<= loc;
6948 if (is_high)
6949 bytemask >>= 8;
6951 opint[1] = bytemask ^ 0xff;
6954 return alpha_fold_builtin_zapnot (op, opint, op_const);
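/* Illustrative: the MSK forms clear the field instead of extracting it, so
   __builtin_alpha_mskwl (x, 1) with a constant second operand turns
   BYTEMASK 0x03 into the shifted mask 0x06, and the XOR above hands ZAPNOT
   the selector 0xf9, i.e. "keep every byte except bytes 1 and 2".  */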
6957 static tree
6958 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6960 tree op0 = fold_convert (vtype, op[0]);
6961 tree op1 = fold_convert (vtype, op[1]);
6962 tree val = fold_build2 (code, vtype, op0, op1);
6963 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
6966 static tree
6967 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6969 unsigned HOST_WIDE_INT temp = 0;
6970 int i;
6972 if (op_const != 3)
6973 return NULL;
6975 for (i = 0; i < 8; ++i)
6977 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6978 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6979 if (a >= b)
6980 temp += a - b;
6981 else
6982 temp += b - a;
6985 return build_int_cst (alpha_dimode_u, temp);
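/* Worked example: PERR is a sum of absolute byte differences, so folding
   __builtin_alpha_perr (0x0102030405060708, 0x0807060504030201) adds up
   |1-8|, |2-7|, ... per byte lane and yields the constant 32.  */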
6988 static tree
6989 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6991 unsigned HOST_WIDE_INT temp;
6993 if (op_const == 0)
6994 return NULL;
6996 temp = opint[0] & 0xff;
6997 temp |= (opint[0] >> 24) & 0xff00;
6999 return build_int_cst (alpha_dimode_u, temp);
7002 static tree
7003 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
7005 unsigned HOST_WIDE_INT temp;
7007 if (op_const == 0)
7008 return NULL;
7010 temp = opint[0] & 0xff;
7011 temp |= (opint[0] >> 8) & 0xff00;
7012 temp |= (opint[0] >> 16) & 0xff0000;
7013 temp |= (opint[0] >> 24) & 0xff000000;
7015 return build_int_cst (alpha_dimode_u, temp);
7018 static tree
7019 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7021 unsigned HOST_WIDE_INT temp;
7023 if (op_const == 0)
7024 return NULL;
7026 temp = opint[0] & 0xff;
7027 temp |= (opint[0] & 0xff00) << 24;
7029 return build_int_cst (alpha_dimode_u, temp);
7032 static tree
7033 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7035 unsigned HOST_WIDE_INT temp;
7037 if (op_const == 0)
7038 return NULL;
7040 temp = opint[0] & 0xff;
7041 temp |= (opint[0] & 0x0000ff00) << 8;
7042 temp |= (opint[0] & 0x00ff0000) << 16;
7043 temp |= (opint[0] & 0xff000000) << 24;
7045 return build_int_cst (alpha_dimode_u, temp);
7048 static tree
7049 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7051 unsigned HOST_WIDE_INT temp;
7053 if (op_const == 0)
7054 return NULL;
7056 if (opint[0] == 0)
7057 temp = 64;
7058 else
7059 temp = exact_log2 (opint[0] & -opint[0]);
7061 return build_int_cst (alpha_dimode_u, temp);
7064 static tree
7065 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7067 unsigned HOST_WIDE_INT temp;
7069 if (op_const == 0)
7070 return NULL;
7072 if (opint[0] == 0)
7073 temp = 64;
7074 else
7075 temp = 64 - floor_log2 (opint[0]) - 1;
7077 return build_int_cst (alpha_dimode_u, temp);
7080 static tree
7081 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7083 unsigned HOST_WIDE_INT temp, op;
7085 if (op_const == 0)
7086 return NULL;
7088 op = opint[0];
7089 temp = 0;
7090 while (op)
7091 temp++, op &= op - 1;
7093 return build_int_cst (alpha_dimode_u, temp);
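/* Worked examples: for a constant operand of 0x50, the folds above give
   cttz = 4 (the lowest set bit is 1 << 4), ctlz = 57 (64 - floor_log2 - 1),
   and ctpop = 2; a zero operand folds to 64 for both cttz and ctlz and to 0
   for ctpop, matching the hardware CTTZ/CTLZ/CTPOP definitions.  */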
7096 /* Fold one of our builtin functions. */
7098 static tree
7099 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7100 bool ignore ATTRIBUTE_UNUSED)
7102 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7103 long op_const = 0;
7104 int i;
7106 if (n_args > MAX_ARGS)
7107 return NULL;
7109 for (i = 0; i < n_args; i++)
7111 tree arg = op[i];
7112 if (arg == error_mark_node)
7113 return NULL;
7115 opint[i] = 0;
7116 if (TREE_CODE (arg) == INTEGER_CST)
7118 op_const |= 1L << i;
7119 opint[i] = int_cst_value (arg);
7123 switch (DECL_FUNCTION_CODE (fndecl))
7125 case ALPHA_BUILTIN_CMPBGE:
7126 return alpha_fold_builtin_cmpbge (opint, op_const);
7128 case ALPHA_BUILTIN_EXTBL:
7129 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7130 case ALPHA_BUILTIN_EXTWL:
7131 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7132 case ALPHA_BUILTIN_EXTLL:
7133 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7134 case ALPHA_BUILTIN_EXTQL:
7135 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7136 case ALPHA_BUILTIN_EXTWH:
7137 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7138 case ALPHA_BUILTIN_EXTLH:
7139 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7140 case ALPHA_BUILTIN_EXTQH:
7141 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7143 case ALPHA_BUILTIN_INSBL:
7144 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7145 case ALPHA_BUILTIN_INSWL:
7146 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7147 case ALPHA_BUILTIN_INSLL:
7148 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7149 case ALPHA_BUILTIN_INSQL:
7150 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7151 case ALPHA_BUILTIN_INSWH:
7152 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7153 case ALPHA_BUILTIN_INSLH:
7154 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7155 case ALPHA_BUILTIN_INSQH:
7156 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7158 case ALPHA_BUILTIN_MSKBL:
7159 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7160 case ALPHA_BUILTIN_MSKWL:
7161 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7162 case ALPHA_BUILTIN_MSKLL:
7163 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7164 case ALPHA_BUILTIN_MSKQL:
7165 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7166 case ALPHA_BUILTIN_MSKWH:
7167 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7168 case ALPHA_BUILTIN_MSKLH:
7169 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7170 case ALPHA_BUILTIN_MSKQH:
7171 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7173 case ALPHA_BUILTIN_ZAP:
7174 opint[1] ^= 0xff;
7175 /* FALLTHRU */
7176 case ALPHA_BUILTIN_ZAPNOT:
7177 return alpha_fold_builtin_zapnot (op, opint, op_const);
7179 case ALPHA_BUILTIN_MINUB8:
7180 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7181 case ALPHA_BUILTIN_MINSB8:
7182 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7183 case ALPHA_BUILTIN_MINUW4:
7184 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7185 case ALPHA_BUILTIN_MINSW4:
7186 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7187 case ALPHA_BUILTIN_MAXUB8:
7188 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7189 case ALPHA_BUILTIN_MAXSB8:
7190 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7191 case ALPHA_BUILTIN_MAXUW4:
7192 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7193 case ALPHA_BUILTIN_MAXSW4:
7194 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7196 case ALPHA_BUILTIN_PERR:
7197 return alpha_fold_builtin_perr (opint, op_const);
7198 case ALPHA_BUILTIN_PKLB:
7199 return alpha_fold_builtin_pklb (opint, op_const);
7200 case ALPHA_BUILTIN_PKWB:
7201 return alpha_fold_builtin_pkwb (opint, op_const);
7202 case ALPHA_BUILTIN_UNPKBL:
7203 return alpha_fold_builtin_unpkbl (opint, op_const);
7204 case ALPHA_BUILTIN_UNPKBW:
7205 return alpha_fold_builtin_unpkbw (opint, op_const);
7207 case ALPHA_BUILTIN_CTTZ:
7208 return alpha_fold_builtin_cttz (opint, op_const);
7209 case ALPHA_BUILTIN_CTLZ:
7210 return alpha_fold_builtin_ctlz (opint, op_const);
7211 case ALPHA_BUILTIN_CTPOP:
7212 return alpha_fold_builtin_ctpop (opint, op_const);
7214 case ALPHA_BUILTIN_AMASK:
7215 case ALPHA_BUILTIN_IMPLVER:
7216 case ALPHA_BUILTIN_RPCC:
7217 /* None of these are foldable at compile-time. */
7218 default:
7219 return NULL;
7223 bool
7224 alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7226 bool changed = false;
7227 gimple stmt = gsi_stmt (*gsi);
7228 tree call = gimple_call_fn (stmt);
7229 gimple new_stmt = NULL;
7231 if (call)
7233 tree fndecl = gimple_call_fndecl (stmt);
7235 if (fndecl)
7237 tree arg0, arg1;
7239 switch (DECL_FUNCTION_CODE (fndecl))
7241 case ALPHA_BUILTIN_UMULH:
7242 arg0 = gimple_call_arg (stmt, 0);
7243 arg1 = gimple_call_arg (stmt, 1);
7245 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
7246 MULT_HIGHPART_EXPR, arg0, arg1);
7247 break;
7248 default:
7249 break;
7254 if (new_stmt)
7256 gsi_replace (gsi, new_stmt, true);
7257 changed = true;
7260 return changed;
7263 /* This page contains routines that are used to determine what the function
7264 prologue and epilogue code will do and write them out. */
7266 /* Compute the size of the save area in the stack. */
7268 /* These variables are used for communication between the following functions.
7269 They indicate various things about the current function being compiled
7270 that are used to tell what kind of prologue, epilogue and procedure
7271 descriptor to generate. */
7273 /* The kind of procedure (null, register or stack) the current function requires. */
7274 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7275 static enum alpha_procedure_types alpha_procedure_type;
7277 /* Register number (either FP or SP) that is used to unwind the frame. */
7278 static int vms_unwind_regno;
7280 /* Register number used to save FP. We need not have one for RA since
7281 we don't modify it for register procedures. This is only defined
7282 for register frame procedures. */
7283 static int vms_save_fp_regno;
7285 /* Register number used to reference objects off our PV. */
7286 static int vms_base_regno;
7288 /* Compute register masks for saved registers. */
7290 static void
7291 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7293 unsigned long imask = 0;
7294 unsigned long fmask = 0;
7295 unsigned int i;
7297 /* When outputting a thunk, we don't have valid register life info,
7298 but assemble_start_function wants to output .frame and .mask
7299 directives. */
7300 if (cfun->is_thunk)
7302 *imaskP = 0;
7303 *fmaskP = 0;
7304 return;
7307 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7308 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7310 /* One for every register we have to save. */
7311 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7312 if (! fixed_regs[i] && ! call_used_regs[i]
7313 && df_regs_ever_live_p (i) && i != REG_RA)
7315 if (i < 32)
7316 imask |= (1UL << i);
7317 else
7318 fmask |= (1UL << (i - 32));
7321 /* We need to restore these for the handler. */
7322 if (crtl->calls_eh_return)
7324 for (i = 0; ; ++i)
7326 unsigned regno = EH_RETURN_DATA_REGNO (i);
7327 if (regno == INVALID_REGNUM)
7328 break;
7329 imask |= 1UL << regno;
7333 /* If any register spilled, then spill the return address also. */
7334 /* ??? This is required by the Digital stack unwind specification
7335 and isn't needed if we're doing Dwarf2 unwinding. */
7336 if (imask || fmask || alpha_ra_ever_killed ())
7337 imask |= (1UL << REG_RA);
7339 *imaskP = imask;
7340 *fmaskP = fmask;
7344 alpha_sa_size (void)
7346 unsigned long mask[2];
7347 int sa_size = 0;
7348 int i, j;
7350 alpha_sa_mask (&mask[0], &mask[1]);
7352 for (j = 0; j < 2; ++j)
7353 for (i = 0; i < 32; ++i)
7354 if ((mask[j] >> i) & 1)
7355 sa_size++;
7357 if (TARGET_ABI_OPEN_VMS)
7359 /* Start with a stack procedure if we make any calls (REG_RA used), or
7360 need a frame pointer, with a register procedure if we otherwise need
7361 at least a slot, and with a null procedure in other cases. */
7362 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7363 alpha_procedure_type = PT_STACK;
7364 else if (get_frame_size() != 0)
7365 alpha_procedure_type = PT_REGISTER;
7366 else
7367 alpha_procedure_type = PT_NULL;
7369 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7370 made the final decision on stack procedure vs register procedure. */
7371 if (alpha_procedure_type == PT_STACK)
7372 sa_size -= 2;
7374 /* Decide whether to refer to objects off our PV via FP or PV.
7375 If we need FP for something else or if we receive a nonlocal
7376 goto (which expects PV to contain the value), we must use PV.
7377 Otherwise, start by assuming we can use FP. */
7379 vms_base_regno
7380 = (frame_pointer_needed
7381 || cfun->has_nonlocal_label
7382 || alpha_procedure_type == PT_STACK
7383 || crtl->outgoing_args_size)
7384 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7386 /* If we want to copy PV into FP, we need to find some register
7387 in which to save FP. */
7389 vms_save_fp_regno = -1;
7390 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7391 for (i = 0; i < 32; i++)
7392 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7393 vms_save_fp_regno = i;
7395 /* A VMS condition handler requires a stack procedure in our
7396 implementation (it is not required by the calling standard). */
7397 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7398 || cfun->machine->uses_condition_handler)
7399 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7400 else if (alpha_procedure_type == PT_NULL)
7401 vms_base_regno = REG_PV;
7403 /* Stack unwinding should be done via FP unless we use it for PV. */
7404 vms_unwind_regno = (vms_base_regno == REG_PV
7405 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7407 /* If this is a stack procedure, allow space for saving FP, RA and
7408 a condition handler slot if needed. */
7409 if (alpha_procedure_type == PT_STACK)
7410 sa_size += 2 + cfun->machine->uses_condition_handler;
7412 else
7414 /* Our size must be even (multiple of 16 bytes). */
7415 if (sa_size & 1)
7416 sa_size++;
7419 return sa_size * 8;
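/* For example (OSF, illustrative): a function that must save $9, $10 and
   the return address occupies three slots; the must-be-even rule above
   rounds that up to four, so alpha_sa_size returns 32 bytes.  */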
7422 /* Define the offset between two registers, one to be eliminated,
7423 and the other its replacement, at the start of a routine. */
7425 HOST_WIDE_INT
7426 alpha_initial_elimination_offset (unsigned int from,
7427 unsigned int to ATTRIBUTE_UNUSED)
7429 HOST_WIDE_INT ret;
7431 ret = alpha_sa_size ();
7432 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7434 switch (from)
7436 case FRAME_POINTER_REGNUM:
7437 break;
7439 case ARG_POINTER_REGNUM:
7440 ret += (ALPHA_ROUND (get_frame_size ()
7441 + crtl->args.pretend_args_size)
7442 - crtl->args.pretend_args_size);
7443 break;
7445 default:
7446 gcc_unreachable ();
7449 return ret;
7452 #if TARGET_ABI_OPEN_VMS
7454 /* Worker function for TARGET_CAN_ELIMINATE. */
7456 static bool
7457 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7459 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7460 alpha_sa_size ();
7462 switch (alpha_procedure_type)
7464 case PT_NULL:
7465 /* NULL procedures have no frame of their own and we only
7466 know how to resolve from the current stack pointer. */
7467 return to == STACK_POINTER_REGNUM;
7469 case PT_REGISTER:
7470 case PT_STACK:
7471 /* We always eliminate except to the stack pointer if there is no
7472 usable frame pointer at hand. */
7473 return (to != STACK_POINTER_REGNUM
7474 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7477 gcc_unreachable ();
7480 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7481 designates the same location as FROM. */
7483 HOST_WIDE_INT
7484 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7486 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7487 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7488 on the proper computations and will need the register save area size
7489 in most cases. */
7491 HOST_WIDE_INT sa_size = alpha_sa_size ();
7493 /* PT_NULL procedures have no frame of their own and we only allow
7494 elimination to the stack pointer. This is the argument pointer and we
7495 resolve the soft frame pointer to that as well. */
7497 if (alpha_procedure_type == PT_NULL)
7498 return 0;
7500 /* For a PT_STACK procedure the frame layout looks as follows
7502                   -----> decreasing addresses
7504                <             size rounded up to 16       |   likewise   >
7505 --------------#------------------------------+++--------------+++-------#
7506 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7507 --------------#---------------------------------------------------------#
7508                               ^         ^              ^               ^
7509                          ARG_PTR FRAME_PTR HARD_FRAME_PTR       STACK_PTR
7512 PT_REGISTER procedures are similar in that they may have a frame of their
7513 own. They have no regs-sa/pv/outgoing-args area.
7515 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7516 to STACK_PTR if need be. */
7519 HOST_WIDE_INT offset;
7520 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7522 switch (from)
7524 case FRAME_POINTER_REGNUM:
7525 offset = ALPHA_ROUND (sa_size + pv_save_size);
7526 break;
7527 case ARG_POINTER_REGNUM:
7528 offset = (ALPHA_ROUND (sa_size + pv_save_size
7529 + get_frame_size ()
7530 + crtl->args.pretend_args_size)
7531 - crtl->args.pretend_args_size);
7532 break;
7533 default:
7534 gcc_unreachable ();
7537 if (to == STACK_POINTER_REGNUM)
7538 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7540 return offset;
7544 #define COMMON_OBJECT "common_object"
7546 static tree
7547 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7548 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7549 bool *no_add_attrs ATTRIBUTE_UNUSED)
7551 tree decl = *node;
7552 gcc_assert (DECL_P (decl));
7554 DECL_COMMON (decl) = 1;
7555 return NULL_TREE;
7558 static const struct attribute_spec vms_attribute_table[] =
7560 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7561 affects_type_identity } */
7562 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7563 { NULL, 0, 0, false, false, false, NULL, false }
7566 void
7567 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7568 unsigned HOST_WIDE_INT size,
7569 unsigned int align)
7571 tree attr = DECL_ATTRIBUTES (decl);
7572 fprintf (file, "%s", COMMON_ASM_OP);
7573 assemble_name (file, name);
7574 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7575 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7576 fprintf (file, ",%u", align / BITS_PER_UNIT);
7577 if (attr)
7579 attr = lookup_attribute (COMMON_OBJECT, attr);
7580 if (attr)
7581 fprintf (file, ",%s",
7582 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7584 fputc ('\n', file);
7587 #undef COMMON_OBJECT
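/* Output sketch (illustrative only; the exact spelling depends on
   COMMON_ASM_OP and on how the common_object attribute argument is given):
   a 16-byte common DECL named "foo", aligned to 64 bits and carrying a
   common_object attribute naming "bar", is emitted roughly as

     .comm foo,16,8,bar

   with the alignment printed in bytes rather than log units, as noted
   above.  */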
7589 #endif
7591 bool
7592 alpha_find_lo_sum_using_gp (rtx insn)
7594 subrtx_iterator::array_type array;
7595 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7597 const_rtx x = *iter;
7598 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7599 return true;
7601 return false;
7604 static int
7605 alpha_does_function_need_gp (void)
7607 rtx_insn *insn;
7609 /* The GP being variable is an OSF abi thing. */
7610 if (! TARGET_ABI_OSF)
7611 return 0;
7613 /* We need the gp to load the address of __mcount. */
7614 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7615 return 1;
7617 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7618 if (cfun->is_thunk)
7619 return 1;
7621 /* The nonlocal receiver pattern assumes that the gp is valid for
7622 the nested function. Reasonable because it's almost always set
7623 correctly already. For the cases where that's wrong, make sure
7624 the nested function loads its gp on entry. */
7625 if (crtl->has_nonlocal_goto)
7626 return 1;
7628 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7629 Even if we are a static function, we still need to do this in case
7630 our address is taken and passed to something like qsort. */
7632 push_topmost_sequence ();
7633 insn = get_insns ();
7634 pop_topmost_sequence ();
7636 for (; insn; insn = NEXT_INSN (insn))
7637 if (NONDEBUG_INSN_P (insn)
7638 && GET_CODE (PATTERN (insn)) != USE
7639 && GET_CODE (PATTERN (insn)) != CLOBBER
7640 && get_attr_usegp (insn))
7641 return 1;
7643 return 0;
7647 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7648 sequences. */
7650 static rtx_insn *
7651 set_frame_related_p (void)
7653 rtx_insn *seq = get_insns ();
7654 rtx_insn *insn;
7656 end_sequence ();
7658 if (!seq)
7659 return NULL;
7661 if (INSN_P (seq))
7663 insn = seq;
7664 while (insn != NULL_RTX)
7666 RTX_FRAME_RELATED_P (insn) = 1;
7667 insn = NEXT_INSN (insn);
7669 seq = emit_insn (seq);
7671 else
7673 seq = emit_insn (seq);
7674 RTX_FRAME_RELATED_P (seq) = 1;
7676 return seq;
7679 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7681 /* Generates a store with the proper unwind info attached. VALUE is
7682 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7683 contains SP+FRAME_BIAS, and that is the unwind info that should be
7684 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7685 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7687 static void
7688 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7689 HOST_WIDE_INT base_ofs, rtx frame_reg)
7691 rtx addr, mem;
7692 rtx_insn *insn;
7694 addr = plus_constant (Pmode, base_reg, base_ofs);
7695 mem = gen_frame_mem (DImode, addr);
7697 insn = emit_move_insn (mem, value);
7698 RTX_FRAME_RELATED_P (insn) = 1;
7700 if (frame_bias || value != frame_reg)
7702 if (frame_bias)
7704 addr = plus_constant (Pmode, stack_pointer_rtx,
7705 frame_bias + base_ofs);
7706 mem = gen_rtx_MEM (DImode, addr);
7709 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7710 gen_rtx_SET (VOIDmode, mem, frame_reg));
7714 static void
7715 emit_frame_store (unsigned int regno, rtx base_reg,
7716 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7718 rtx reg = gen_rtx_REG (DImode, regno);
7719 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7722 /* Compute the frame size. SIZE is the size of the "naked" frame
7723 and SA_SIZE is the size of the register save area. */
7725 static HOST_WIDE_INT
7726 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7728 if (TARGET_ABI_OPEN_VMS)
7729 return ALPHA_ROUND (sa_size
7730 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7731 + size
7732 + crtl->args.pretend_args_size);
7733 else
7734 return ALPHA_ROUND (crtl->outgoing_args_size)
7735 + sa_size
7736 + ALPHA_ROUND (size
7737 + crtl->args.pretend_args_size);
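/* Worked example (illustrative, OSF): with 24 bytes of outgoing arguments
   (rounded to 32), a 32-byte register save area and a 40-byte local frame
   with no pretend args (rounded to 48), the total frame comes to
   32 + 32 + 48 = 112 bytes, a multiple of 16 as required.  */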
7740 /* Write function prologue. */
7742 /* On VMS we have two kinds of functions:
7744 - stack frame (PROC_STACK)
7745 these are 'normal' functions with local variables which
7746 call other functions
7747 - register frame (PROC_REGISTER)
7748 keeps all data in registers, needs no stack
7750 We must pass this to the assembler so it can generate the
7751 proper pdsc (procedure descriptor).
7752 This is done with the '.pdesc' command.
7754 On non-VMS targets we don't really differentiate between the two, as we
7755 can simply allocate stack without saving registers. */
7757 void
7758 alpha_expand_prologue (void)
7760 /* Registers to save. */
7761 unsigned long imask = 0;
7762 unsigned long fmask = 0;
7763 /* Stack space needed for pushing registers clobbered by us. */
7764 HOST_WIDE_INT sa_size, sa_bias;
7765 /* Complete stack size needed. */
7766 HOST_WIDE_INT frame_size;
7767 /* Probed stack size; it additionally includes the size of
7768 the "reserve region" if any. */
7769 HOST_WIDE_INT probed_size;
7770 /* Offset from base reg to register save area. */
7771 HOST_WIDE_INT reg_offset;
7772 rtx sa_reg;
7773 int i;
7775 sa_size = alpha_sa_size ();
7776 frame_size = compute_frame_size (get_frame_size (), sa_size);
7778 if (flag_stack_usage_info)
7779 current_function_static_stack_size = frame_size;
7781 if (TARGET_ABI_OPEN_VMS)
7782 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7783 else
7784 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7786 alpha_sa_mask (&imask, &fmask);
7788 /* Emit an insn to reload GP, if needed. */
7789 if (TARGET_ABI_OSF)
7791 alpha_function_needs_gp = alpha_does_function_need_gp ();
7792 if (alpha_function_needs_gp)
7793 emit_insn (gen_prologue_ldgp ());
7796 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7797 the call to mcount ourselves, rather than having the linker do it
7798 magically in response to -pg. Since _mcount has special linkage,
7799 don't represent the call as a call. */
7800 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7801 emit_insn (gen_prologue_mcount ());
7803 /* Adjust the stack by the frame size. If the frame size is > 4096
7804 bytes, we need to be sure we probe somewhere in the first and last
7805 4096 bytes (we can probably get away without the latter test) and
7806 every 8192 bytes in between. If the frame size is > 32768, we
7807 do this in a loop. Otherwise, we generate the explicit probe
7808 instructions.
7810 Note that we are only allowed to adjust sp once in the prologue. */
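/* For instance (illustrative numbers): a probed size of 20000 bytes takes
   the <= 32768 path below, emitting probes 4096 and 12288 bytes below the
   incoming SP, plus a final probe at 20000 when no registers are being
   saved (or -fstack-check is in effect), before the single SP adjustment. */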
7812 probed_size = frame_size;
7813 if (flag_stack_check)
7814 probed_size += STACK_CHECK_PROTECT;
7816 if (probed_size <= 32768)
7818 if (probed_size > 4096)
7820 int probed;
7822 for (probed = 4096; probed < probed_size; probed += 8192)
7823 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7825 /* We only have to do this probe if we aren't saving registers or
7826 if we are probing beyond the frame because of -fstack-check. */
7827 if ((sa_size == 0 && probed_size > probed - 4096)
7828 || flag_stack_check)
7829 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7832 if (frame_size != 0)
7833 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7834 GEN_INT (-frame_size))));
7836 else
7838 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7839 number of 8192 byte blocks to probe. We then probe each block
7840 in the loop and then set SP to the proper location. If the
7841 amount remaining is > 4096, we have to do one more probe if we
7842 are not saving any registers or if we are probing beyond the
7843 frame because of -fstack-check. */
7845 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7846 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7847 rtx ptr = gen_rtx_REG (DImode, 22);
7848 rtx count = gen_rtx_REG (DImode, 23);
7849 rtx seq;
7851 emit_move_insn (count, GEN_INT (blocks));
7852 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7854 /* Because of the difficulty in emitting a new basic block this
7855 late in the compilation, generate the loop as a single insn. */
7856 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7858 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7860 rtx last = gen_rtx_MEM (DImode,
7861 plus_constant (Pmode, ptr, -leftover));
7862 MEM_VOLATILE_P (last) = 1;
7863 emit_move_insn (last, const0_rtx);
7866 if (flag_stack_check)
7868 /* If -fstack-check is specified we have to load the entire
7869 constant into a register and subtract from the sp in one go,
7870 because the probed stack size is not equal to the frame size. */
7871 HOST_WIDE_INT lo, hi;
7872 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7873 hi = frame_size - lo;
7875 emit_move_insn (ptr, GEN_INT (hi));
7876 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7877 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7878 ptr));
7880 else
7882 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7883 GEN_INT (-leftover)));
7886 /* This alternative is special, because the DWARF code cannot
7887 possibly intuit the stack adjustment through the loop above. So we
7888 invent this note for it to look at instead. */
7889 RTX_FRAME_RELATED_P (seq) = 1;
7890 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7891 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7892 plus_constant (Pmode, stack_pointer_rtx,
7893 -frame_size)));
7896 /* Cope with very large offsets to the register save area. */
7897 sa_bias = 0;
7898 sa_reg = stack_pointer_rtx;
7899 if (reg_offset + sa_size > 0x8000)
7901 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7902 rtx sa_bias_rtx;
7904 if (low + sa_size <= 0x8000)
7905 sa_bias = reg_offset - low, reg_offset = low;
7906 else
7907 sa_bias = reg_offset, reg_offset = 0;
7909 sa_reg = gen_rtx_REG (DImode, 24);
7910 sa_bias_rtx = GEN_INT (sa_bias);
7912 if (add_operand (sa_bias_rtx, DImode))
7913 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7914 else
7916 emit_move_insn (sa_reg, sa_bias_rtx);
7917 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7921 /* Save regs in stack order. Beginning with VMS PV. */
7922 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7923 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7925 /* Save register RA next. */
7926 if (imask & (1UL << REG_RA))
7928 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7929 imask &= ~(1UL << REG_RA);
7930 reg_offset += 8;
7933 /* Now save any other registers required to be saved. */
7934 for (i = 0; i < 31; i++)
7935 if (imask & (1UL << i))
7937 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7938 reg_offset += 8;
7941 for (i = 0; i < 31; i++)
7942 if (fmask & (1UL << i))
7944 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7945 reg_offset += 8;
7948 if (TARGET_ABI_OPEN_VMS)
7950 /* Register frame procedures save the fp. */
7951 if (alpha_procedure_type == PT_REGISTER)
7953 rtx_insn *insn =
7954 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7955 hard_frame_pointer_rtx);
7956 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7957 RTX_FRAME_RELATED_P (insn) = 1;
7960 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7961 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7962 gen_rtx_REG (DImode, REG_PV)));
7964 if (alpha_procedure_type != PT_NULL
7965 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7966 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7968 /* If we have to allocate space for outgoing args, do it now. */
7969 if (crtl->outgoing_args_size != 0)
7971 rtx_insn *seq
7972 = emit_move_insn (stack_pointer_rtx,
7973 plus_constant
7974 (Pmode, hard_frame_pointer_rtx,
7975 - (ALPHA_ROUND
7976 (crtl->outgoing_args_size))));
7978 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7979 if ! frame_pointer_needed. Setting the bit will change the CFA
7980 computation rule to use sp again, which would be wrong if we had
7981 frame_pointer_needed, as this means sp might move unpredictably
7982 later on.
7984 Also, note that
7985 frame_pointer_needed
7986 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7988 crtl->outgoing_args_size != 0
7989 => alpha_procedure_type != PT_NULL,
7991 so when we are not setting the bit here, we are guaranteed to
7992 have emitted an FRP frame pointer update just before. */
7993 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7996 else
7998 /* If we need a frame pointer, set it from the stack pointer. */
7999 if (frame_pointer_needed)
8001 if (TARGET_CAN_FAULT_IN_PROLOGUE)
8002 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8003 else
8004 /* This must always be the last instruction in the
8005 prologue, thus we emit a special move + clobber. */
8006 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
8007 stack_pointer_rtx, sa_reg)));
8011 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8012 the prologue, for exception handling reasons, we cannot do this for
8013 any insn that might fault. We could prevent this for mems with a
8014 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8015 have to prevent all such scheduling with a blockage.
8017 Linux, on the other hand, never bothered to implement OSF/1's
8018 exception handling, and so doesn't care about such things. Anyone
8019 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8021 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8022 emit_insn (gen_blockage ());
8025 /* Count the number of .file directives, so that .loc is up to date. */
8026 int num_source_filenames = 0;
8028 /* Output the textual info surrounding the prologue. */
8030 void
8031 alpha_start_function (FILE *file, const char *fnname,
8032 tree decl ATTRIBUTE_UNUSED)
8034 unsigned long imask = 0;
8035 unsigned long fmask = 0;
8036 /* Stack space needed for pushing registers clobbered by us. */
8037 HOST_WIDE_INT sa_size;
8038 /* Complete stack size needed. */
8039 unsigned HOST_WIDE_INT frame_size;
8040 /* The maximum debuggable frame size. */
8041 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
8042 /* Offset from base reg to register save area. */
8043 HOST_WIDE_INT reg_offset;
8044 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8045 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8046 int i;
8048 #if TARGET_ABI_OPEN_VMS
8049 vms_start_function (fnname);
8050 #endif
8052 alpha_fnname = fnname;
8053 sa_size = alpha_sa_size ();
8054 frame_size = compute_frame_size (get_frame_size (), sa_size);
8056 if (TARGET_ABI_OPEN_VMS)
8057 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8058 else
8059 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8061 alpha_sa_mask (&imask, &fmask);
8063 /* Issue function start and label. */
8064 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
8066 fputs ("\t.ent ", file);
8067 assemble_name (file, fnname);
8068 putc ('\n', file);
8070 /* If the function needs GP, we'll write the "..ng" label there.
8071 Otherwise, do it here. */
8072 if (TARGET_ABI_OSF
8073 && ! alpha_function_needs_gp
8074 && ! cfun->is_thunk)
8076 putc ('$', file);
8077 assemble_name (file, fnname);
8078 fputs ("..ng:\n", file);
8081 /* Nested functions on VMS that are potentially called via trampoline
8082 get a special transfer entry point that loads the called function's
8083 procedure descriptor and static chain. */
8084 if (TARGET_ABI_OPEN_VMS
8085 && !TREE_PUBLIC (decl)
8086 && DECL_CONTEXT (decl)
8087 && !TYPE_P (DECL_CONTEXT (decl))
8088 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
8090 strcpy (tramp_label, fnname);
8091 strcat (tramp_label, "..tr");
8092 ASM_OUTPUT_LABEL (file, tramp_label);
8093 fprintf (file, "\tldq $1,24($27)\n");
8094 fprintf (file, "\tldq $27,16($27)\n");
8097 strcpy (entry_label, fnname);
8098 if (TARGET_ABI_OPEN_VMS)
8099 strcat (entry_label, "..en");
8101 ASM_OUTPUT_LABEL (file, entry_label);
8102 inside_function = TRUE;
8104 if (TARGET_ABI_OPEN_VMS)
8105 fprintf (file, "\t.base $%d\n", vms_base_regno);
8107 if (TARGET_ABI_OSF
8108 && TARGET_IEEE_CONFORMANT
8109 && !flag_inhibit_size_directive)
8111 /* Set flags in procedure descriptor to request IEEE-conformant
8112 math-library routines. The value we set it to is PDSC_EXC_IEEE
8113 (/usr/include/pdsc.h). */
8114 fputs ("\t.eflag 48\n", file);
8117 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8118 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8119 alpha_arg_offset = -frame_size + 48;
8121 /* Describe our frame. If the frame size is too large to represent in
8122 the directive (see max_frame_size), print it as zero to avoid an
8123 assembler error. We won't be properly describing such a frame, but
8124 that's the best we can do. */
8124 if (TARGET_ABI_OPEN_VMS)
8125 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8126 HOST_WIDE_INT_PRINT_DEC "\n",
8127 vms_unwind_regno,
8128 frame_size >= (1UL << 31) ? 0 : frame_size,
8129 reg_offset);
8130 else if (!flag_inhibit_size_directive)
8131 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8132 (frame_pointer_needed
8133 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8134 frame_size >= max_frame_size ? 0 : frame_size,
8135 crtl->args.pretend_args_size);
8137 /* Describe which registers were spilled. */
8138 if (TARGET_ABI_OPEN_VMS)
8140 if (imask)
8141 /* ??? Does VMS care if mask contains ra? The old code didn't
8142 set it, so I don't set it here. */
8143 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8144 if (fmask)
8145 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8146 if (alpha_procedure_type == PT_REGISTER)
8147 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8149 else if (!flag_inhibit_size_directive)
8151 if (imask)
8153 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8154 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8156 for (i = 0; i < 32; ++i)
8157 if (imask & (1UL << i))
8158 reg_offset += 8;
8161 if (fmask)
8162 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8163 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8166 #if TARGET_ABI_OPEN_VMS
8167 /* If a user condition handler has been installed at some point, emit
8168 the procedure descriptor bits to point the Condition Handling Facility
8169 at the indirection wrapper, and state the fp offset at which the user
8170 handler may be found. */
8171 if (cfun->machine->uses_condition_handler)
8173 fprintf (file, "\t.handler __gcc_shell_handler\n");
8174 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8177 #ifdef TARGET_VMS_CRASH_DEBUG
8178 /* Support of minimal traceback info. */
8179 switch_to_section (readonly_data_section);
8180 fprintf (file, "\t.align 3\n");
8181 assemble_name (file, fnname); fputs ("..na:\n", file);
8182 fputs ("\t.ascii \"", file);
8183 assemble_name (file, fnname);
8184 fputs ("\\0\"\n", file);
8185 switch_to_section (text_section);
8186 #endif
8187 #endif /* TARGET_ABI_OPEN_VMS */
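/* A minimal standalone sketch, not built as part of GCC, of the kind of
   OSF-style unwind directives the fprintf calls above produce.  The frame
   size, saved-register mask and offsets below are made-up example values;
   only the directive shapes mirror the code above ($30 is sp, $26 is ra).  */
#if 0
#include <stdio.h>

int
main (void)
{
  long frame_size = 96;                            /* hypothetical frame size */
  long pretend = 0;                                /* pretend_args_size       */
  long reg_offset = 16;                            /* start of the save area  */
  unsigned long imask = (1UL << 26) | (1UL << 9);  /* $26 and $9 saved        */

  printf ("\t.frame $%d,%ld,$26,%ld\n", 30, frame_size, pretend);
  printf ("\t.mask 0x%lx,%ld\n", imask, reg_offset - frame_size);
  return 0;
}
#endif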
8190 /* Emit the .prologue note at the scheduled end of the prologue. */
8192 static void
8193 alpha_output_function_end_prologue (FILE *file)
8195 if (TARGET_ABI_OPEN_VMS)
8196 fputs ("\t.prologue\n", file);
8197 else if (!flag_inhibit_size_directive)
8198 fprintf (file, "\t.prologue %d\n",
8199 alpha_function_needs_gp || cfun->is_thunk);
8202 /* Write function epilogue. */
8204 void
8205 alpha_expand_epilogue (void)
8207 /* Registers to save. */
8208 unsigned long imask = 0;
8209 unsigned long fmask = 0;
8210 /* Stack space needed for pushing registers clobbered by us. */
8211 HOST_WIDE_INT sa_size;
8212 /* Complete stack size needed. */
8213 HOST_WIDE_INT frame_size;
8214 /* Offset from base reg to register save area. */
8215 HOST_WIDE_INT reg_offset;
8216 int fp_is_frame_pointer, fp_offset;
8217 rtx sa_reg, sa_reg_exp = NULL;
8218 rtx sp_adj1, sp_adj2, mem, reg, insn;
8219 rtx eh_ofs;
8220 rtx cfa_restores = NULL_RTX;
8221 int i;
8223 sa_size = alpha_sa_size ();
8224 frame_size = compute_frame_size (get_frame_size (), sa_size);
8226 if (TARGET_ABI_OPEN_VMS)
8228 if (alpha_procedure_type == PT_STACK)
8229 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8230 else
8231 reg_offset = 0;
8233 else
8234 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8236 alpha_sa_mask (&imask, &fmask);
8238 fp_is_frame_pointer
8239 = (TARGET_ABI_OPEN_VMS
8240 ? alpha_procedure_type == PT_STACK
8241 : frame_pointer_needed);
8242 fp_offset = 0;
8243 sa_reg = stack_pointer_rtx;
8245 if (crtl->calls_eh_return)
8246 eh_ofs = EH_RETURN_STACKADJ_RTX;
8247 else
8248 eh_ofs = NULL_RTX;
8250 if (sa_size)
8252 /* If we have a frame pointer, restore SP from it. */
8253 if (TARGET_ABI_OPEN_VMS
8254 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8255 : frame_pointer_needed)
8256 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8258 /* Cope with very large offsets to the register save area. */
8259 if (reg_offset + sa_size > 0x8000)
8261 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8262 HOST_WIDE_INT bias;
8264 if (low + sa_size <= 0x8000)
8265 bias = reg_offset - low, reg_offset = low;
8266 else
8267 bias = reg_offset, reg_offset = 0;
8269 sa_reg = gen_rtx_REG (DImode, 22);
8270 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8272 emit_move_insn (sa_reg, sa_reg_exp);
8275 /* Restore registers in order, excepting a true frame pointer. */
8277 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8278 reg = gen_rtx_REG (DImode, REG_RA);
8279 emit_move_insn (reg, mem);
8280 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8282 reg_offset += 8;
8283 imask &= ~(1UL << REG_RA);
8285 for (i = 0; i < 31; ++i)
8286 if (imask & (1UL << i))
8288 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8289 fp_offset = reg_offset;
8290 else
8292 mem = gen_frame_mem (DImode,
8293 plus_constant (Pmode, sa_reg,
8294 reg_offset));
8295 reg = gen_rtx_REG (DImode, i);
8296 emit_move_insn (reg, mem);
8297 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8298 cfa_restores);
8300 reg_offset += 8;
8303 for (i = 0; i < 31; ++i)
8304 if (fmask & (1UL << i))
8306 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8307 reg_offset));
8308 reg = gen_rtx_REG (DFmode, i+32);
8309 emit_move_insn (reg, mem);
8310 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8311 reg_offset += 8;
8315 if (frame_size || eh_ofs)
8317 sp_adj1 = stack_pointer_rtx;
8319 if (eh_ofs)
8321 sp_adj1 = gen_rtx_REG (DImode, 23);
8322 emit_move_insn (sp_adj1,
8323 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8326 /* If the stack size is large, begin computation into a temporary
8327 register so as not to interfere with a potential fp restore,
8328 which must be consecutive with an SP restore. */
8329 if (frame_size < 32768 && !cfun->calls_alloca)
8330 sp_adj2 = GEN_INT (frame_size);
8331 else if (frame_size < 0x40007fffL)
8333 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8335 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8336 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8337 sp_adj1 = sa_reg;
8338 else
8340 sp_adj1 = gen_rtx_REG (DImode, 23);
8341 emit_move_insn (sp_adj1, sp_adj2);
8343 sp_adj2 = GEN_INT (low);
8345 else
8347 rtx tmp = gen_rtx_REG (DImode, 23);
8348 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8349 if (!sp_adj2)
8351 /* We can't drop new things to memory this late, afaik,
8352 so build it up by pieces. */
8353 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8354 -(frame_size < 0));
8355 gcc_assert (sp_adj2);
8359 /* From now on, things must be in order. So emit blockages. */
8361 /* Restore the frame pointer. */
8362 if (fp_is_frame_pointer)
8364 emit_insn (gen_blockage ());
8365 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8366 fp_offset));
8367 emit_move_insn (hard_frame_pointer_rtx, mem);
8368 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8369 hard_frame_pointer_rtx, cfa_restores);
8371 else if (TARGET_ABI_OPEN_VMS)
8373 emit_insn (gen_blockage ());
8374 emit_move_insn (hard_frame_pointer_rtx,
8375 gen_rtx_REG (DImode, vms_save_fp_regno));
8376 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8377 hard_frame_pointer_rtx, cfa_restores);
8380 /* Restore the stack pointer. */
8381 emit_insn (gen_blockage ());
8382 if (sp_adj2 == const0_rtx)
8383 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8384 else
8385 insn = emit_move_insn (stack_pointer_rtx,
8386 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8387 REG_NOTES (insn) = cfa_restores;
8388 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8389 RTX_FRAME_RELATED_P (insn) = 1;
8391 else
8393 gcc_assert (cfa_restores == NULL);
8395 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8397 emit_insn (gen_blockage ());
8398 insn = emit_move_insn (hard_frame_pointer_rtx,
8399 gen_rtx_REG (DImode, vms_save_fp_regno));
8400 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8401 RTX_FRAME_RELATED_P (insn) = 1;
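/* The epilogue above repeatedly splits a large constant into a 16-bit
   signed "low" part plus a "high" part with zero low bits, so each piece
   fits the immediate fields of lda/ldah-style additions.  A standalone
   sketch of that decomposition (not built here), with made-up inputs: */
#if 0
#include <assert.h>
#include <stdio.h>

static long
sext16 (long x)
{
  /* Same expression as used above: sign-extend the low 16 bits of X.  */
  return ((x & 0xffff) ^ 0x8000) - 0x8000;
}

int
main (void)
{
  long values[] = { 0x12345678L, 0x1234abcdL, 32767L, -32768L, 65536L };
  unsigned int i;

  for (i = 0; i < sizeof values / sizeof values[0]; i++)
    {
      long c = values[i];
      long low = sext16 (c);
      long high = c - low;

      assert (high + low == c);
      assert ((high & 0xffff) == 0);
      printf ("0x%lx = 0x%lx + %ld\n",
              (unsigned long) c, (unsigned long) high, low);
    }
  return 0;
}
#endif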
8406 /* Output the rest of the textual info surrounding the epilogue. */
8408 void
8409 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8411 rtx_insn *insn;
8413 /* We output a nop after noreturn calls at the very end of the function to
8414 ensure that the return address always remains in the caller's code range,
8415 as not doing so might confuse unwinding engines. */
8416 insn = get_last_insn ();
8417 if (!INSN_P (insn))
8418 insn = prev_active_insn (insn);
8419 if (insn && CALL_P (insn))
8420 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8422 #if TARGET_ABI_OPEN_VMS
8423 /* Write the linkage entries. */
8424 alpha_write_linkage (file, fnname);
8425 #endif
8427 /* End the function. */
8428 if (TARGET_ABI_OPEN_VMS
8429 || !flag_inhibit_size_directive)
8431 fputs ("\t.end ", file);
8432 assemble_name (file, fnname);
8433 putc ('\n', file);
8435 inside_function = FALSE;
8438 #if TARGET_ABI_OSF
8439 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8441 In order to avoid the hordes of differences between generated code
8442 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8443 lots of code loading up large constants, generate rtl and emit it
8444 instead of going straight to text.
8446 Not sure why this idea hasn't been explored before... */
8448 static void
8449 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8450 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8451 tree function)
8453 HOST_WIDE_INT hi, lo;
8454 rtx this_rtx, funexp;
8455 rtx_insn *insn;
8457 /* We always require a valid GP. */
8458 emit_insn (gen_prologue_ldgp ());
8459 emit_note (NOTE_INSN_PROLOGUE_END);
8461 /* Find the "this" pointer. If the function returns a structure,
8462 the structure return pointer is in $16. */
8463 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8464 this_rtx = gen_rtx_REG (Pmode, 17);
8465 else
8466 this_rtx = gen_rtx_REG (Pmode, 16);
8468 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8469 entire constant for the add. */
8470 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8471 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8472 if (hi + lo == delta)
8474 if (hi)
8475 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8476 if (lo)
8477 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8479 else
8481 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8482 delta, -(delta < 0));
8483 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8486 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8487 if (vcall_offset)
8489 rtx tmp, tmp2;
8491 tmp = gen_rtx_REG (Pmode, 0);
8492 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8494 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8495 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8496 if (hi + lo == vcall_offset)
8498 if (hi)
8499 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8501 else
8503 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8504 vcall_offset, -(vcall_offset < 0));
8505 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8506 lo = 0;
8508 if (lo)
8509 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8510 else
8511 tmp2 = tmp;
8512 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8514 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8517 /* Generate a tail call to the target function. */
8518 if (! TREE_USED (function))
8520 assemble_external (function);
8521 TREE_USED (function) = 1;
8523 funexp = XEXP (DECL_RTL (function), 0);
8524 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8525 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8526 SIBLING_CALL_P (insn) = 1;
8528 /* Run just enough of rest_of_compilation to get the insns emitted.
8529 There's not really enough bulk here to make other passes such as
8530 instruction scheduling worthwhile. Note that use_thunk calls
8531 assemble_start_function and assemble_end_function. */
8532 insn = get_insns ();
8533 shorten_branches (insn);
8534 final_start_function (insn, file, 1);
8535 final (insn, file, 1);
8536 final_end_function ();
8538 #endif /* TARGET_ABI_OSF */
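/* In plain C terms, the thunk emitted above adjusts the incoming "this"
   pointer before tail-calling the real method.  A standalone sketch (not
   built here); the function name and types are invented for illustration,
   but the arithmetic mirrors the RTL generated above.  */
#if 0
#include <stdint.h>

static void *
thunk_adjust_this (void *this_ptr, int64_t delta, int64_t vcall_offset)
{
  /* this += delta, done with ldah+lda when the constant allows it.  */
  char *p = (char *) this_ptr + delta;

  /* Then, when VCALL_OFFSET is nonzero, add the 64-bit adjustment found
     in the vtable at *this + vcall_offset.  */
  if (vcall_offset != 0)
    {
      char *vptr = *(char **) p;
      int64_t adj = *(int64_t *) (vptr + vcall_offset);
      p += adj;
    }
  return p;   /* the real function is then reached via a sibcall */
}
#endif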
8540 /* Debugging support. */
8542 #include "gstab.h"
8544 /* Name of the file containing the current function. */
8546 static const char *current_function_file = "";
8548 /* Offsets to alpha virtual arg/local debugging pointers. */
8550 long alpha_arg_offset;
8551 long alpha_auto_offset;
8553 /* Emit a new filename to a stream. */
8555 void
8556 alpha_output_filename (FILE *stream, const char *name)
8558 static int first_time = TRUE;
8560 if (first_time)
8562 first_time = FALSE;
8563 ++num_source_filenames;
8564 current_function_file = name;
8565 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8566 output_quoted_string (stream, name);
8567 fprintf (stream, "\n");
8570 else if (name != current_function_file
8571 && strcmp (name, current_function_file) != 0)
8573 ++num_source_filenames;
8574 current_function_file = name;
8575 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8577 output_quoted_string (stream, name);
8578 fprintf (stream, "\n");
8582 /* Structure to show the current status of registers and memory. */
8584 struct shadow_summary
8586 struct {
8587 unsigned int i : 31; /* Mask of int regs */
8588 unsigned int fp : 31; /* Mask of fp regs */
8589 unsigned int mem : 1; /* mem == imem | fpmem */
8590 } used, defd;
8593 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8594 to the summary structure. SET is nonzero if the insn is setting the
8595 object, otherwise zero. */
8597 static void
8598 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8600 const char *format_ptr;
8601 int i, j;
8603 if (x == 0)
8604 return;
8606 switch (GET_CODE (x))
8608 /* ??? Note that this case would be incorrect if the Alpha had a
8609 ZERO_EXTRACT in SET_DEST. */
8610 case SET:
8611 summarize_insn (SET_SRC (x), sum, 0);
8612 summarize_insn (SET_DEST (x), sum, 1);
8613 break;
8615 case CLOBBER:
8616 summarize_insn (XEXP (x, 0), sum, 1);
8617 break;
8619 case USE:
8620 summarize_insn (XEXP (x, 0), sum, 0);
8621 break;
8623 case ASM_OPERANDS:
8624 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8625 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8626 break;
8628 case PARALLEL:
8629 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8630 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8631 break;
8633 case SUBREG:
8634 summarize_insn (SUBREG_REG (x), sum, 0);
8635 break;
8637 case REG:
8639 int regno = REGNO (x);
8640 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8642 if (regno == 31 || regno == 63)
8643 break;
8645 if (set)
8647 if (regno < 32)
8648 sum->defd.i |= mask;
8649 else
8650 sum->defd.fp |= mask;
8652 else
8654 if (regno < 32)
8655 sum->used.i |= mask;
8656 else
8657 sum->used.fp |= mask;
8660 break;
8662 case MEM:
8663 if (set)
8664 sum->defd.mem = 1;
8665 else
8666 sum->used.mem = 1;
8668 /* Find the regs used in memory address computation: */
8669 summarize_insn (XEXP (x, 0), sum, 0);
8670 break;
8672 case CONST_INT: case CONST_DOUBLE:
8673 case SYMBOL_REF: case LABEL_REF: case CONST:
8674 case SCRATCH: case ASM_INPUT:
8675 break;
8677 /* Handle common unary and binary ops for efficiency. */
8678 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8679 case MOD: case UDIV: case UMOD: case AND: case IOR:
8680 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8681 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8682 case NE: case EQ: case GE: case GT: case LE:
8683 case LT: case GEU: case GTU: case LEU: case LTU:
8684 summarize_insn (XEXP (x, 0), sum, 0);
8685 summarize_insn (XEXP (x, 1), sum, 0);
8686 break;
8688 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8689 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8690 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8691 case SQRT: case FFS:
8692 summarize_insn (XEXP (x, 0), sum, 0);
8693 break;
8695 default:
8696 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8697 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8698 switch (format_ptr[i])
8700 case 'e':
8701 summarize_insn (XEXP (x, i), sum, 0);
8702 break;
8704 case 'E':
8705 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8706 summarize_insn (XVECEXP (x, i, j), sum, 0);
8707 break;
8709 case 'i':
8710 break;
8712 default:
8713 gcc_unreachable ();
8718 /* Ensure a sufficient number of `trapb' insns are in the code when
8719 the user requests code with a trap precision of functions or
8720 instructions.
8722 In naive mode, when the user requests a trap-precision of
8723 "instruction", a trapb is needed after every instruction that may
8724 generate a trap. This ensures that the code is resumption safe but
8725 it is also slow.
8727 When optimizations are turned on, we delay issuing a trapb as long
8728 as possible. In this context, a trap shadow is the sequence of
8729 instructions that starts with a (potentially) trap generating
8730 instruction and extends to the next trapb or call_pal instruction
8731 (but GCC never generates call_pal by itself). We can delay (and
8732 therefore sometimes omit) a trapb subject to the following
8733 conditions:
8735 (a) On entry to the trap shadow, if any Alpha register or memory
8736 location contains a value that is used as an operand value by some
8737 instruction in the trap shadow (live on entry), then no instruction
8738 in the trap shadow may modify the register or memory location.
8740 (b) Within the trap shadow, the computation of the base register
8741 for a memory load or store instruction may not involve using the
8742 result of an instruction that might generate an UNPREDICTABLE
8743 result.
8745 (c) Within the trap shadow, no register may be used more than once
8746 as a destination register. (This is to make life easier for the
8747 trap-handler.)
8749 (d) The trap shadow may not include any branch instructions. */
8751 static void
8752 alpha_handle_trap_shadows (void)
8754 struct shadow_summary shadow;
8755 int trap_pending, exception_nesting;
8756 rtx_insn *i, *n;
8758 trap_pending = 0;
8759 exception_nesting = 0;
8760 shadow.used.i = 0;
8761 shadow.used.fp = 0;
8762 shadow.used.mem = 0;
8763 shadow.defd = shadow.used;
8765 for (i = get_insns (); i ; i = NEXT_INSN (i))
8767 if (NOTE_P (i))
8769 switch (NOTE_KIND (i))
8771 case NOTE_INSN_EH_REGION_BEG:
8772 exception_nesting++;
8773 if (trap_pending)
8774 goto close_shadow;
8775 break;
8777 case NOTE_INSN_EH_REGION_END:
8778 exception_nesting--;
8779 if (trap_pending)
8780 goto close_shadow;
8781 break;
8783 case NOTE_INSN_EPILOGUE_BEG:
8784 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8785 goto close_shadow;
8786 break;
8789 else if (trap_pending)
8791 if (alpha_tp == ALPHA_TP_FUNC)
8793 if (JUMP_P (i)
8794 && GET_CODE (PATTERN (i)) == RETURN)
8795 goto close_shadow;
8797 else if (alpha_tp == ALPHA_TP_INSN)
8799 if (optimize > 0)
8801 struct shadow_summary sum;
8803 sum.used.i = 0;
8804 sum.used.fp = 0;
8805 sum.used.mem = 0;
8806 sum.defd = sum.used;
8808 switch (GET_CODE (i))
8810 case INSN:
8811 /* Annoyingly, get_attr_trap will die on these. */
8812 if (GET_CODE (PATTERN (i)) == USE
8813 || GET_CODE (PATTERN (i)) == CLOBBER)
8814 break;
8816 summarize_insn (PATTERN (i), &sum, 0);
8818 if ((sum.defd.i & shadow.defd.i)
8819 || (sum.defd.fp & shadow.defd.fp))
8821 /* (c) would be violated */
8822 goto close_shadow;
8825 /* Combine shadow with summary of current insn: */
8826 shadow.used.i |= sum.used.i;
8827 shadow.used.fp |= sum.used.fp;
8828 shadow.used.mem |= sum.used.mem;
8829 shadow.defd.i |= sum.defd.i;
8830 shadow.defd.fp |= sum.defd.fp;
8831 shadow.defd.mem |= sum.defd.mem;
8833 if ((sum.defd.i & shadow.used.i)
8834 || (sum.defd.fp & shadow.used.fp)
8835 || (sum.defd.mem & shadow.used.mem))
8837 /* (a) would be violated (also takes care of (b)) */
8838 gcc_assert (get_attr_trap (i) != TRAP_YES
8839 || (!(sum.defd.i & sum.used.i)
8840 && !(sum.defd.fp & sum.used.fp)));
8842 goto close_shadow;
8844 break;
8846 case BARRIER:
8847 /* __builtin_unreachable can expand to no code at all,
8848 leaving (barrier) RTXes in the instruction stream. */
8849 goto close_shadow_notrapb;
8851 case JUMP_INSN:
8852 case CALL_INSN:
8853 case CODE_LABEL:
8854 goto close_shadow;
8856 default:
8857 gcc_unreachable ();
8860 else
8862 close_shadow:
8863 n = emit_insn_before (gen_trapb (), i);
8864 PUT_MODE (n, TImode);
8865 PUT_MODE (i, TImode);
8866 close_shadow_notrapb:
8867 trap_pending = 0;
8868 shadow.used.i = 0;
8869 shadow.used.fp = 0;
8870 shadow.used.mem = 0;
8871 shadow.defd = shadow.used;
8876 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8877 && NONJUMP_INSN_P (i)
8878 && GET_CODE (PATTERN (i)) != USE
8879 && GET_CODE (PATTERN (i)) != CLOBBER
8880 && get_attr_trap (i) == TRAP_YES)
8882 if (optimize && !trap_pending)
8883 summarize_insn (PATTERN (i), &shadow, 0);
8884 trap_pending = 1;
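/* A simplified standalone sketch (not built here) of the mask bookkeeping
   above: "shadow" accumulates the registers used and defined since the
   trapping instruction, and a new instruction forces a trapb when it
   redefines an already-defined register (rule (c)) or clobbers a value the
   shadow still uses (rule (a)).  The memory bit and the exact merge order
   used above are omitted for brevity.  */
#if 0
#include <stdio.h>

struct masks { unsigned int used, defd; };

static int
must_close_shadow (struct masks shadow, struct masks sum)
{
  if (sum.defd & shadow.defd)   /* (c): same destination written twice.  */
    return 1;
  if (sum.defd & shadow.used)   /* (a): clobbers a value live in shadow. */
    return 1;
  return 0;
}

int
main (void)
{
  struct masks shadow = { 1u << 3, 1u << 4 };      /* $3 read, $4 written */
  struct masks writes_r3 = { 0, 1u << 3 };         /* defines $3          */
  struct masks writes_r5 = { 1u << 4, 1u << 5 };   /* reads $4, writes $5 */

  printf ("write $3: close=%d\n", must_close_shadow (shadow, writes_r3));  /* 1 */
  printf ("write $5: close=%d\n", must_close_shadow (shadow, writes_r5));  /* 0 */
  return 0;
}
#endif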
8889 /* Alpha can only issue instruction groups simultaneously if they are
8890 suitably aligned. This is very processor-specific. */
8891 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8892 that are marked "fake". These instructions do not exist on that target,
8893 but it is possible to see these insns with deranged combinations of
8894 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8895 choose a result at random. */
8897 enum alphaev4_pipe {
8898 EV4_STOP = 0,
8899 EV4_IB0 = 1,
8900 EV4_IB1 = 2,
8901 EV4_IBX = 4
8904 enum alphaev5_pipe {
8905 EV5_STOP = 0,
8906 EV5_NONE = 1,
8907 EV5_E01 = 2,
8908 EV5_E0 = 4,
8909 EV5_E1 = 8,
8910 EV5_FAM = 16,
8911 EV5_FA = 32,
8912 EV5_FM = 64
8915 static enum alphaev4_pipe
8916 alphaev4_insn_pipe (rtx_insn *insn)
8918 if (recog_memoized (insn) < 0)
8919 return EV4_STOP;
8920 if (get_attr_length (insn) != 4)
8921 return EV4_STOP;
8923 switch (get_attr_type (insn))
8925 case TYPE_ILD:
8926 case TYPE_LDSYM:
8927 case TYPE_FLD:
8928 case TYPE_LD_L:
8929 return EV4_IBX;
8931 case TYPE_IADD:
8932 case TYPE_ILOG:
8933 case TYPE_ICMOV:
8934 case TYPE_ICMP:
8935 case TYPE_FST:
8936 case TYPE_SHIFT:
8937 case TYPE_IMUL:
8938 case TYPE_FBR:
8939 case TYPE_MVI: /* fake */
8940 return EV4_IB0;
8942 case TYPE_IST:
8943 case TYPE_MISC:
8944 case TYPE_IBR:
8945 case TYPE_JSR:
8946 case TYPE_CALLPAL:
8947 case TYPE_FCPYS:
8948 case TYPE_FCMOV:
8949 case TYPE_FADD:
8950 case TYPE_FDIV:
8951 case TYPE_FMUL:
8952 case TYPE_ST_C:
8953 case TYPE_MB:
8954 case TYPE_FSQRT: /* fake */
8955 case TYPE_FTOI: /* fake */
8956 case TYPE_ITOF: /* fake */
8957 return EV4_IB1;
8959 default:
8960 gcc_unreachable ();
8964 static enum alphaev5_pipe
8965 alphaev5_insn_pipe (rtx_insn *insn)
8967 if (recog_memoized (insn) < 0)
8968 return EV5_STOP;
8969 if (get_attr_length (insn) != 4)
8970 return EV5_STOP;
8972 switch (get_attr_type (insn))
8974 case TYPE_ILD:
8975 case TYPE_FLD:
8976 case TYPE_LDSYM:
8977 case TYPE_IADD:
8978 case TYPE_ILOG:
8979 case TYPE_ICMOV:
8980 case TYPE_ICMP:
8981 return EV5_E01;
8983 case TYPE_IST:
8984 case TYPE_FST:
8985 case TYPE_SHIFT:
8986 case TYPE_IMUL:
8987 case TYPE_MISC:
8988 case TYPE_MVI:
8989 case TYPE_LD_L:
8990 case TYPE_ST_C:
8991 case TYPE_MB:
8992 case TYPE_FTOI: /* fake */
8993 case TYPE_ITOF: /* fake */
8994 return EV5_E0;
8996 case TYPE_IBR:
8997 case TYPE_JSR:
8998 case TYPE_CALLPAL:
8999 return EV5_E1;
9001 case TYPE_FCPYS:
9002 return EV5_FAM;
9004 case TYPE_FBR:
9005 case TYPE_FCMOV:
9006 case TYPE_FADD:
9007 case TYPE_FDIV:
9008 case TYPE_FSQRT: /* fake */
9009 return EV5_FA;
9011 case TYPE_FMUL:
9012 return EV5_FM;
9014 default:
9015 gcc_unreachable ();
9019 /* IN_USE is a mask of the slots currently filled within the insn group.
9020 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9021 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9023 LEN is, of course, the length of the group in bytes. */
9025 static rtx_insn *
9026 alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
9028 int len, in_use;
9030 len = in_use = 0;
9032 if (! INSN_P (insn)
9033 || GET_CODE (PATTERN (insn)) == CLOBBER
9034 || GET_CODE (PATTERN (insn)) == USE)
9035 goto next_and_done;
9037 while (1)
9039 enum alphaev4_pipe pipe;
9041 pipe = alphaev4_insn_pipe (insn);
9042 switch (pipe)
9044 case EV4_STOP:
9045 /* Force complex instructions to start new groups. */
9046 if (in_use)
9047 goto done;
9049 /* If this is a completely unrecognized insn, it's an asm.
9050 We don't know how long it is, so record length as -1 to
9051 signal a needed realignment. */
9052 if (recog_memoized (insn) < 0)
9053 len = -1;
9054 else
9055 len = get_attr_length (insn);
9056 goto next_and_done;
9058 case EV4_IBX:
9059 if (in_use & EV4_IB0)
9061 if (in_use & EV4_IB1)
9062 goto done;
9063 in_use |= EV4_IB1;
9065 else
9066 in_use |= EV4_IB0 | EV4_IBX;
9067 break;
9069 case EV4_IB0:
9070 if (in_use & EV4_IB0)
9072 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9073 goto done;
9074 in_use |= EV4_IB1;
9076 in_use |= EV4_IB0;
9077 break;
9079 case EV4_IB1:
9080 if (in_use & EV4_IB1)
9081 goto done;
9082 in_use |= EV4_IB1;
9083 break;
9085 default:
9086 gcc_unreachable ();
9088 len += 4;
9090 /* Haifa doesn't do well scheduling branches. */
9091 if (JUMP_P (insn))
9092 goto next_and_done;
9094 next:
9095 insn = next_nonnote_insn (insn);
9097 if (!insn || ! INSN_P (insn))
9098 goto done;
9100 /* Let Haifa tell us where it thinks insn group boundaries are. */
9101 if (GET_MODE (insn) == TImode)
9102 goto done;
9104 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9105 goto next;
9108 next_and_done:
9109 insn = next_nonnote_insn (insn);
9111 done:
9112 *plen = len;
9113 *pin_use = in_use;
9114 return insn;
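/* A standalone restatement (not built here) of the EV4 slot-assignment
   rules the switch above implements: IB0 and IB1 are the two issue slots,
   and an IBX instruction may sit in either, so a later IB0 instruction can
   displace it into IB1.  The function name and the 0/1 return convention
   are invented for this sketch; the bit values match alphaev4_pipe.  */
#if 0
enum { IB0 = 1, IB1 = 2, IBX = 4 };

/* Try to add an insn of class PIPE to the group described by *IN_USE.
   Return 1 if it fits, 0 if the group must be closed first.  */
static int
ev4_try_slot (int *in_use, int pipe)
{
  switch (pipe)
    {
    case IBX:
      if (*in_use & IB0)
        {
          if (*in_use & IB1)
            return 0;
          *in_use |= IB1;
        }
      else
        *in_use |= IB0 | IBX;
      return 1;

    case IB0:
      if (*in_use & IB0)
        {
          /* Only fits if an earlier IBX insn can be pushed into IB1.  */
          if (!(*in_use & IBX) || (*in_use & IB1))
            return 0;
          *in_use |= IB1;
        }
      *in_use |= IB0;
      return 1;

    default:   /* IB1 */
      if (*in_use & IB1)
        return 0;
      *in_use |= IB1;
      return 1;
    }
}
#endif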
9117 /* IN_USE is a mask of the slots currently filled within the insn group.
9118 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9119 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9121 LEN is, of course, the length of the group in bytes. */
9123 static rtx_insn *
9124 alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
9126 int len, in_use;
9128 len = in_use = 0;
9130 if (! INSN_P (insn)
9131 || GET_CODE (PATTERN (insn)) == CLOBBER
9132 || GET_CODE (PATTERN (insn)) == USE)
9133 goto next_and_done;
9135 while (1)
9137 enum alphaev5_pipe pipe;
9139 pipe = alphaev5_insn_pipe (insn);
9140 switch (pipe)
9142 case EV5_STOP:
9143 /* Force complex instructions to start new groups. */
9144 if (in_use)
9145 goto done;
9147 /* If this is a completely unrecognized insn, it's an asm.
9148 We don't know how long it is, so record length as -1 to
9149 signal a needed realignment. */
9150 if (recog_memoized (insn) < 0)
9151 len = -1;
9152 else
9153 len = get_attr_length (insn);
9154 goto next_and_done;
9156 /* ??? Most of the places below, we would like to assert never
9157 happen, as it would indicate an error either in Haifa, or
9158 in the scheduling description. Unfortunately, Haifa never
9159 schedules the last instruction of the BB, so we don't have
9160 an accurate TI bit to go off. */
9161 case EV5_E01:
9162 if (in_use & EV5_E0)
9164 if (in_use & EV5_E1)
9165 goto done;
9166 in_use |= EV5_E1;
9168 else
9169 in_use |= EV5_E0 | EV5_E01;
9170 break;
9172 case EV5_E0:
9173 if (in_use & EV5_E0)
9175 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9176 goto done;
9177 in_use |= EV5_E1;
9179 in_use |= EV5_E0;
9180 break;
9182 case EV5_E1:
9183 if (in_use & EV5_E1)
9184 goto done;
9185 in_use |= EV5_E1;
9186 break;
9188 case EV5_FAM:
9189 if (in_use & EV5_FA)
9191 if (in_use & EV5_FM)
9192 goto done;
9193 in_use |= EV5_FM;
9195 else
9196 in_use |= EV5_FA | EV5_FAM;
9197 break;
9199 case EV5_FA:
9200 if (in_use & EV5_FA)
9201 goto done;
9202 in_use |= EV5_FA;
9203 break;
9205 case EV5_FM:
9206 if (in_use & EV5_FM)
9207 goto done;
9208 in_use |= EV5_FM;
9209 break;
9211 case EV5_NONE:
9212 break;
9214 default:
9215 gcc_unreachable ();
9217 len += 4;
9219 /* Haifa doesn't do well scheduling branches. */
9220 /* ??? If this is predicted not-taken, slotting continues, except
9221 that no more IBR, FBR, or JSR insns may be slotted. */
9222 if (JUMP_P (insn))
9223 goto next_and_done;
9225 next:
9226 insn = next_nonnote_insn (insn);
9228 if (!insn || ! INSN_P (insn))
9229 goto done;
9231 /* Let Haifa tell us where it thinks insn group boundaries are. */
9232 if (GET_MODE (insn) == TImode)
9233 goto done;
9235 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9236 goto next;
9239 next_and_done:
9240 insn = next_nonnote_insn (insn);
9242 done:
9243 *plen = len;
9244 *pin_use = in_use;
9245 return insn;
9248 static rtx
9249 alphaev4_next_nop (int *pin_use)
9251 int in_use = *pin_use;
9252 rtx nop;
9254 if (!(in_use & EV4_IB0))
9256 in_use |= EV4_IB0;
9257 nop = gen_nop ();
9259 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9261 in_use |= EV4_IB1;
9262 nop = gen_nop ();
9264 else if (TARGET_FP && !(in_use & EV4_IB1))
9266 in_use |= EV4_IB1;
9267 nop = gen_fnop ();
9269 else
9270 nop = gen_unop ();
9272 *pin_use = in_use;
9273 return nop;
9276 static rtx
9277 alphaev5_next_nop (int *pin_use)
9279 int in_use = *pin_use;
9280 rtx nop;
9282 if (!(in_use & EV5_E1))
9284 in_use |= EV5_E1;
9285 nop = gen_nop ();
9287 else if (TARGET_FP && !(in_use & EV5_FA))
9289 in_use |= EV5_FA;
9290 nop = gen_fnop ();
9292 else if (TARGET_FP && !(in_use & EV5_FM))
9294 in_use |= EV5_FM;
9295 nop = gen_fnop ();
9297 else
9298 nop = gen_unop ();
9300 *pin_use = in_use;
9301 return nop;
9304 /* The instruction group alignment main loop. */
9306 static void
9307 alpha_align_insns_1 (unsigned int max_align,
9308 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
9309 rtx (*next_nop) (int *))
9311 /* ALIGN is the known alignment for the insn group. */
9312 unsigned int align;
9313 /* OFS is the offset of the current insn in the insn group. */
9314 int ofs;
9315 int prev_in_use, in_use, len, ldgp;
9316 rtx_insn *i, *next;
9318 /* Let shorten branches care for assigning alignments to code labels. */
9319 shorten_branches (get_insns ());
9321 if (align_functions < 4)
9322 align = 4;
9323 else if ((unsigned int) align_functions < max_align)
9324 align = align_functions;
9325 else
9326 align = max_align;
9328 ofs = prev_in_use = 0;
9329 i = get_insns ();
9330 if (NOTE_P (i))
9331 i = next_nonnote_insn (i);
9333 ldgp = alpha_function_needs_gp ? 8 : 0;
9335 while (i)
9337 next = (*next_group) (i, &in_use, &len);
9339 /* When we see a label, resync alignment etc. */
9340 if (LABEL_P (i))
9342 unsigned int new_align = 1 << label_to_alignment (i);
9344 if (new_align >= align)
9346 align = new_align < max_align ? new_align : max_align;
9347 ofs = 0;
9350 else if (ofs & (new_align-1))
9351 ofs = (ofs | (new_align-1)) + 1;
9352 gcc_assert (!len);
9355 /* Handle complex instructions specially. */
9356 else if (in_use == 0)
9358 /* Asms will have length < 0. This is a signal that we have
9359 lost alignment knowledge. Assume, however, that the asm
9360 will not mis-align instructions. */
9361 if (len < 0)
9363 ofs = 0;
9364 align = 4;
9365 len = 0;
9369 /* If the known alignment is smaller than the recognized insn group,
9370 realign the output. */
9371 else if ((int) align < len)
9373 unsigned int new_log_align = len > 8 ? 4 : 3;
9374 rtx_insn *prev, *where;
9376 where = prev = prev_nonnote_insn (i);
9377 if (!where || !LABEL_P (where))
9378 where = i;
9380 /* Can't realign between a call and its gp reload. */
9381 if (! (TARGET_EXPLICIT_RELOCS
9382 && prev && CALL_P (prev)))
9384 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9385 align = 1 << new_log_align;
9386 ofs = 0;
9390 /* We may not insert padding inside the initial ldgp sequence. */
9391 else if (ldgp > 0)
9392 ldgp -= len;
9394 /* If the group won't fit in the same INT16 as the previous,
9395 we need to add padding to keep the group together. Rather
9396 than simply leaving the insn filling to the assembler, we
9397 can make use of the knowledge of what sorts of instructions
9398 were issued in the previous group to make sure that all of
9399 the added nops are really free. */
9400 else if (ofs + len > (int) align)
9402 int nop_count = (align - ofs) / 4;
9403 rtx_insn *where;
9405 /* Insert nops before labels, branches, and calls to truly merge
9406 the execution of the nops with the previous instruction group. */
9407 where = prev_nonnote_insn (i);
9408 if (where)
9410 if (LABEL_P (where))
9412 rtx_insn *where2 = prev_nonnote_insn (where);
9413 if (where2 && JUMP_P (where2))
9414 where = where2;
9416 else if (NONJUMP_INSN_P (where))
9417 where = i;
9419 else
9420 where = i;
9422 do
9423 emit_insn_before ((*next_nop)(&prev_in_use), where);
9424 while (--nop_count);
9425 ofs = 0;
9428 ofs = (ofs + len) & (align - 1);
9429 prev_in_use = in_use;
9430 i = next;
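/* A standalone worked example (not built here) of the padding arithmetic in
   the loop above: with 16-byte group alignment, a group that would straddle
   the boundary is pushed into the next group by emitting 4-byte nops.  The
   offsets and lengths below are made-up example values.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int align = 16;   /* known alignment of the insn group    */
  int ofs = 12;     /* current offset within the group      */
  int len = 8;      /* length of the next recognized group  */

  if (ofs + len > align)
    {
      int nop_count = (align - ofs) / 4;
      printf ("emit %d nop(s), restart at offset 0\n", nop_count);  /* 1 nop */
      ofs = 0;
    }
  ofs = (ofs + len) & (align - 1);
  printf ("new offset within group: %d\n", ofs);   /* prints 8 */
  return 0;
}
#endif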
9434 static void
9435 alpha_align_insns (void)
9437 if (alpha_tune == PROCESSOR_EV4)
9438 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9439 else if (alpha_tune == PROCESSOR_EV5)
9440 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9441 else
9442 gcc_unreachable ();
9445 /* Insert an unop between sibcall or noreturn function call and GP load. */
9447 static void
9448 alpha_pad_function_end (void)
9450 rtx_insn *insn, *next;
9452 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9454 if (!CALL_P (insn)
9455 || !(SIBLING_CALL_P (insn)
9456 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9457 continue;
9459 /* Make sure we do not split a call and its corresponding
9460 CALL_ARG_LOCATION note. */
9461 next = NEXT_INSN (insn);
9462 if (next == NULL)
9463 continue;
9464 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9465 insn = next;
9467 next = next_active_insn (insn);
9468 if (next)
9470 rtx pat = PATTERN (next);
9472 if (GET_CODE (pat) == SET
9473 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9474 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9475 emit_insn_after (gen_unop (), insn);
9480 /* Machine dependent reorg pass. */
9482 static void
9483 alpha_reorg (void)
9485 /* Workaround for a linker error that triggers when an exception
9486 handler immediately follows a sibcall or a noreturn function.
9488 In the sibcall case:
9490 The instruction stream from an object file:
9492 1d8: 00 00 fb 6b jmp (t12)
9493 1dc: 00 00 ba 27 ldah gp,0(ra)
9494 1e0: 00 00 bd 23 lda gp,0(gp)
9495 1e4: 00 00 7d a7 ldq t12,0(gp)
9496 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9498 was converted in the final link pass to:
9500 12003aa88: 67 fa ff c3 br 120039428 <...>
9501 12003aa8c: 00 00 fe 2f unop
9502 12003aa90: 00 00 fe 2f unop
9503 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9504 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9506 And in the noreturn case:
9508 The instruction stream from an object file:
9510 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9511 58: 00 00 ba 27 ldah gp,0(ra)
9512 5c: 00 00 bd 23 lda gp,0(gp)
9513 60: 00 00 7d a7 ldq t12,0(gp)
9514 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9516 was converted in the final link pass to:
9518 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9519 fdb28: 00 00 fe 2f unop
9520 fdb2c: 00 00 fe 2f unop
9521 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9522 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9524 GP load instructions were wrongly cleared by the linker relaxation
9525 pass. This workaround prevents removal of GP loads by inserting
9526 an unop instruction between a sibcall or noreturn function call and
9527 exception handler prologue. */
9529 if (current_function_has_exception_handlers ())
9530 alpha_pad_function_end ();
9533 static void
9534 alpha_file_start (void)
9536 default_file_start ();
9538 fputs ("\t.set noreorder\n", asm_out_file);
9539 fputs ("\t.set volatile\n", asm_out_file);
9540 if (TARGET_ABI_OSF)
9541 fputs ("\t.set noat\n", asm_out_file);
9542 if (TARGET_EXPLICIT_RELOCS)
9543 fputs ("\t.set nomacro\n", asm_out_file);
9544 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9546 const char *arch;
9548 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9549 arch = "ev6";
9550 else if (TARGET_MAX)
9551 arch = "pca56";
9552 else if (TARGET_BWX)
9553 arch = "ev56";
9554 else if (alpha_cpu == PROCESSOR_EV5)
9555 arch = "ev5";
9556 else
9557 arch = "ev4";
9559 fprintf (asm_out_file, "\t.arch %s\n", arch);
9563 /* Since we don't have a .dynbss section, we should not allow global
9564 relocations in the .rodata section. */
9566 static int
9567 alpha_elf_reloc_rw_mask (void)
9569 return flag_pic ? 3 : 2;
9572 /* Return a section for X. The only special thing we do here is to
9573 honor small data. */
9575 static section *
9576 alpha_elf_select_rtx_section (machine_mode mode, rtx x,
9577 unsigned HOST_WIDE_INT align)
9579 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9580 /* ??? Consider using mergeable sdata sections. */
9581 return sdata_section;
9582 else
9583 return default_elf_select_rtx_section (mode, x, align);
9586 static unsigned int
9587 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9589 unsigned int flags = 0;
9591 if (strcmp (name, ".sdata") == 0
9592 || strncmp (name, ".sdata.", 7) == 0
9593 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9594 || strcmp (name, ".sbss") == 0
9595 || strncmp (name, ".sbss.", 6) == 0
9596 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9597 flags = SECTION_SMALL;
9599 flags |= default_section_type_flags (decl, name, reloc);
9600 return flags;
9603 /* Structure to collect function names for final output in link section. */
9604 /* Note that items marked with GTY can't be ifdef'ed out. */
9606 enum reloc_kind
9608 KIND_LINKAGE,
9609 KIND_CODEADDR
9612 struct GTY(()) alpha_links
9614 rtx func;
9615 rtx linkage;
9616 enum reloc_kind rkind;
9619 #if TARGET_ABI_OPEN_VMS
9621 /* Return the VMS argument type corresponding to MODE. */
9623 enum avms_arg_type
9624 alpha_arg_type (machine_mode mode)
9626 switch (mode)
9628 case SFmode:
9629 return TARGET_FLOAT_VAX ? FF : FS;
9630 case DFmode:
9631 return TARGET_FLOAT_VAX ? FD : FT;
9632 default:
9633 return I64;
9637 /* Return an rtx for an integer representing the VMS Argument Information
9638 register value. */
9640 rtx
9641 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9643 unsigned HOST_WIDE_INT regval = cum.num_args;
9644 int i;
9646 for (i = 0; i < 6; i++)
9647 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9649 return GEN_INT (regval);
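/* A standalone sketch (not built here) of the layout of the VMS Argument
   Information register value assembled above: the argument count in the low
   bits, then a 3-bit type code per argument starting at bit 8.  The count
   and type codes below are made-up example values, not real avms_arg_type
   encodings.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long regval;
  int num_args = 3;
  int atypes[6] = { 0, 1, 2, 0, 0, 0 };
  int i;

  regval = num_args;
  for (i = 0; i < 6; i++)
    regval |= (unsigned long) atypes[i] << (i * 3 + 8);

  printf ("AI register value: 0x%lx\n", regval);   /* prints 0x8803 */
  return 0;
}
#endif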
9653 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9654 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9655 this is the reference to the linkage pointer value, 0 if this is the
9656 reference to the function entry value. RFLAG is 1 if this a reduced
9657 reference (code address only), 0 if this is a full reference. */
9659 rtx
9660 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9662 struct alpha_links *al = NULL;
9663 const char *name = XSTR (func, 0);
9665 if (cfun->machine->links)
9667 /* Is this name already defined? */
9668 alpha_links *slot = cfun->machine->links->get (name);
9669 if (slot)
9670 al = *slot;
9672 else
9673 cfun->machine->links
9674 = hash_map<const char *, alpha_links *, string_traits>::create_ggc (64);
9676 if (al == NULL)
9678 size_t buf_len;
9679 char *linksym;
9680 tree id;
9682 if (name[0] == '*')
9683 name++;
9685 /* Follow transparent alias, as this is used for CRTL translations. */
9686 id = maybe_get_identifier (name);
9687 if (id)
9689 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9690 id = TREE_CHAIN (id);
9691 name = IDENTIFIER_POINTER (id);
9694 buf_len = strlen (name) + 8 + 9;
9695 linksym = (char *) alloca (buf_len);
9696 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9698 al = ggc_alloc<alpha_links> ();
9699 al->func = func;
9700 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9702 cfun->machine->links->put (ggc_strdup (name), al);
9705 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9707 if (lflag)
9708 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9709 else
9710 return al->linkage;
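/* A standalone sketch (not built here) of the per-function linkage symbol
   name constructed above.  The function name "frobnicate" and function
   number 7 are made-up example values.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const char *name = "frobnicate";
  int funcdef_no = 7;
  char linksym[64];

  snprintf (linksym, sizeof linksym, "$%d..%s..lk", funcdef_no, name);
  printf ("%s\n", linksym);   /* prints "$7..frobnicate..lk" */
  return 0;
}
#endif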
9713 static int
9714 alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
9716 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9717 if (link->rkind == KIND_CODEADDR)
9719 /* External and used, request code address. */
9720 fprintf (stream, "\t.code_address ");
9722 else
9724 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9725 && SYMBOL_REF_LOCAL_P (link->func))
9727 /* Locally defined, build linkage pair. */
9728 fprintf (stream, "\t.quad %s..en\n", name);
9729 fprintf (stream, "\t.quad ");
9731 else
9733 /* External, request linkage pair. */
9734 fprintf (stream, "\t.linkage ");
9737 assemble_name (stream, name);
9738 fputs ("\n", stream);
9740 return 0;
9743 static void
9744 alpha_write_linkage (FILE *stream, const char *funname)
9746 fprintf (stream, "\t.link\n");
9747 fprintf (stream, "\t.align 3\n");
9748 in_section = NULL;
9750 #ifdef TARGET_VMS_CRASH_DEBUG
9751 fputs ("\t.name ", stream);
9752 assemble_name (stream, funname);
9753 fputs ("..na\n", stream);
9754 #endif
9756 ASM_OUTPUT_LABEL (stream, funname);
9757 fprintf (stream, "\t.pdesc ");
9758 assemble_name (stream, funname);
9759 fprintf (stream, "..en,%s\n",
9760 alpha_procedure_type == PT_STACK ? "stack"
9761 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9763 if (cfun->machine->links)
9765 hash_map<const char *, alpha_links *, string_traits>::iterator iter
9766 = cfun->machine->links->begin ();
9767 for (; iter != cfun->machine->links->end (); ++iter)
9768 alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
9772 /* Switch to an arbitrary section NAME with attributes as specified
9773 by FLAGS. ALIGN specifies any known alignment requirements for
9774 the section; 0 if the default should be used. */
9776 static void
9777 vms_asm_named_section (const char *name, unsigned int flags,
9778 tree decl ATTRIBUTE_UNUSED)
9780 fputc ('\n', asm_out_file);
9781 fprintf (asm_out_file, ".section\t%s", name);
9783 if (flags & SECTION_DEBUG)
9784 fprintf (asm_out_file, ",NOWRT");
9786 fputc ('\n', asm_out_file);
9789 /* Record an element in the table of global constructors. SYMBOL is
9790 a SYMBOL_REF of the function to be called; PRIORITY is a number
9791 between 0 and MAX_INIT_PRIORITY.
9793 Differs from default_ctors_section_asm_out_constructor in that the
9794 width of the .ctors entry is always 64 bits, rather than the 32 bits
9795 used by a normal pointer. */
9797 static void
9798 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9800 switch_to_section (ctors_section);
9801 assemble_align (BITS_PER_WORD);
9802 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9805 static void
9806 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9808 switch_to_section (dtors_section);
9809 assemble_align (BITS_PER_WORD);
9810 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9812 #else
9813 rtx
9814 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9815 bool lflag ATTRIBUTE_UNUSED,
9816 bool rflag ATTRIBUTE_UNUSED)
9818 return NULL_RTX;
9821 #endif /* TARGET_ABI_OPEN_VMS */
9823 static void
9824 alpha_init_libfuncs (void)
9826 if (TARGET_ABI_OPEN_VMS)
9828 /* Use the VMS runtime library functions for division and
9829 remainder. */
9830 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9831 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9832 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9833 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9834 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9835 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9836 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9837 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9838 abort_libfunc = init_one_libfunc ("decc$abort");
9839 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9840 #ifdef MEM_LIBFUNCS_INIT
9841 MEM_LIBFUNCS_INIT;
9842 #endif
9846 /* On the Alpha, we use this to disable the floating-point registers
9847 when they don't exist. */
9849 static void
9850 alpha_conditional_register_usage (void)
9852 int i;
9853 if (! TARGET_FPREGS)
9854 for (i = 32; i < 63; i++)
9855 fixed_regs[i] = call_used_regs[i] = 1;
9858 /* Canonicalize a comparison from one we don't have to one we do have. */
9860 static void
9861 alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9862 bool op0_preserve_value)
9864 if (!op0_preserve_value
9865 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9866 && (REG_P (*op1) || *op1 == const0_rtx))
9868 rtx tem = *op0;
9869 *op0 = *op1;
9870 *op1 = tem;
9871 *code = (int)swap_condition ((enum rtx_code)*code);
9874 if ((*code == LT || *code == LTU)
9875 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9877 *code = *code == LT ? LE : LEU;
9878 *op1 = GEN_INT (255);
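/* A standalone illustration (not built here) of the two rewrites performed
   above: GT/GE-style tests become LT/LE tests with the operands swapped,
   and "x < 256" becomes "x <= 255" so the constant fits the 8-bit literal
   field of the Alpha compare instructions (an assumption stated here for
   context, not taken from the code).  */
#if 0
#include <stdio.h>

int
main (void)
{
  long a = 7, b = 3;
  unsigned long x = 200;

  /* (GT a b) with the operands swapped is (LT b a): same truth value.  */
  printf ("%d %d\n", a > b, b < a);        /* prints "1 1" */

  /* (LTU x 256) is equivalent to (LEU x 255).  */
  printf ("%d %d\n", x < 256, x <= 255);   /* prints "1 1" */
  return 0;
}
#endif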
9882 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
9884 static void
9885 alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
9887 const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);
9889 tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
9890 tree new_fenv_var, reload_fenv, restore_fnenv;
9891 tree update_call, atomic_feraiseexcept, hold_fnclex;
9893 /* Assume OSF/1 compatible interfaces. */
9894 if (!TARGET_ABI_OSF)
9895 return;
9897 /* Generate the equivalent of :
9898 unsigned long fenv_var;
9899 fenv_var = __ieee_get_fp_control ();
9901 unsigned long masked_fenv;
9902 masked_fenv = fenv_var & mask;
9904 __ieee_set_fp_control (masked_fenv); */
9906 fenv_var = create_tmp_var (long_unsigned_type_node);
9907 get_fpscr
9908 = build_fn_decl ("__ieee_get_fp_control",
9909 build_function_type_list (long_unsigned_type_node, NULL));
9910 set_fpscr
9911 = build_fn_decl ("__ieee_set_fp_control",
9912 build_function_type_list (void_type_node, NULL));
9913 mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
9914 ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
9915 fenv_var, build_call_expr (get_fpscr, 0));
9916 masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
9917 hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
9918 *hold = build2 (COMPOUND_EXPR, void_type_node,
9919 build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
9920 hold_fnclex);
9922 /* Store the value of masked_fenv to clear the exceptions:
9923 __ieee_set_fp_control (masked_fenv); */
9925 *clear = build_call_expr (set_fpscr, 1, masked_fenv);
9927 /* Generate the equivalent of :
9928 unsigned long new_fenv_var;
9929 new_fenv_var = __ieee_get_fp_control ();
9931 __ieee_set_fp_control (fenv_var);
9933 __atomic_feraiseexcept (new_fenv_var); */
9935 new_fenv_var = create_tmp_var (long_unsigned_type_node);
9936 reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
9937 build_call_expr (get_fpscr, 0));
9938 restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
9939 atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
9940 update_call
9941 = build_call_expr (atomic_feraiseexcept, 1,
9942 fold_convert (integer_type_node, new_fenv_var));
9943 *update = build2 (COMPOUND_EXPR, void_type_node,
9944 build2 (COMPOUND_EXPR, void_type_node,
9945 reload_fenv, restore_fnenv), update_call);
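/* The trees built above implement the usual hold/clear/update protocol for
   C11 atomic compound assignment on floating-point types.  A plain-C sketch
   of that sequence (not built here), using the OSF interfaces named above;
   whether __atomic_feraiseexcept can be called directly like this is an
   assumption of the sketch.  */
#if 0
extern unsigned long __ieee_get_fp_control (void);
extern void __ieee_set_fp_control (unsigned long);
extern void __atomic_feraiseexcept (int);

#define SWCR_STATUS_MASK (0x3fUL << 17)   /* same status mask as above */

static unsigned long saved_fenv;

static void
hold (void)
{
  /* Save the FP control word and clear the accumulated status bits.  */
  saved_fenv = __ieee_get_fp_control ();
  __ieee_set_fp_control (saved_fenv & ~SWCR_STATUS_MASK);
}

static void
clear (void)
{
  /* Clear any exceptions raised while the update was attempted.  */
  __ieee_set_fp_control (saved_fenv & ~SWCR_STATUS_MASK);
}

static void
update (void)
{
  /* Re-read the status, restore the saved environment, and re-raise
     whatever was set during the atomic operation.  */
  unsigned long new_fenv = __ieee_get_fp_control ();
  __ieee_set_fp_control (saved_fenv);
  __atomic_feraiseexcept ((int) new_fenv);
}
#endif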
9948 /* Initialize the GCC target structure. */
9949 #if TARGET_ABI_OPEN_VMS
9950 # undef TARGET_ATTRIBUTE_TABLE
9951 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9952 # undef TARGET_CAN_ELIMINATE
9953 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9954 #endif
9956 #undef TARGET_IN_SMALL_DATA_P
9957 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9959 #undef TARGET_ASM_ALIGNED_HI_OP
9960 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9961 #undef TARGET_ASM_ALIGNED_DI_OP
9962 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9964 /* Default unaligned ops are provided for ELF systems. To get unaligned
9965 data for non-ELF systems, we have to turn off auto alignment. */
9966 #if TARGET_ABI_OPEN_VMS
9967 #undef TARGET_ASM_UNALIGNED_HI_OP
9968 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9969 #undef TARGET_ASM_UNALIGNED_SI_OP
9970 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9971 #undef TARGET_ASM_UNALIGNED_DI_OP
9972 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9973 #endif
9975 #undef TARGET_ASM_RELOC_RW_MASK
9976 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9977 #undef TARGET_ASM_SELECT_RTX_SECTION
9978 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9979 #undef TARGET_SECTION_TYPE_FLAGS
9980 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9982 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9983 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9985 #undef TARGET_INIT_LIBFUNCS
9986 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9988 #undef TARGET_LEGITIMIZE_ADDRESS
9989 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9990 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
9991 #define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
9993 #undef TARGET_ASM_FILE_START
9994 #define TARGET_ASM_FILE_START alpha_file_start
9996 #undef TARGET_SCHED_ADJUST_COST
9997 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9998 #undef TARGET_SCHED_ISSUE_RATE
9999 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10000 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10001 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10002 alpha_multipass_dfa_lookahead
10004 #undef TARGET_HAVE_TLS
10005 #define TARGET_HAVE_TLS HAVE_AS_TLS
10007 #undef TARGET_BUILTIN_DECL
10008 #define TARGET_BUILTIN_DECL alpha_builtin_decl
10009 #undef TARGET_INIT_BUILTINS
10010 #define TARGET_INIT_BUILTINS alpha_init_builtins
10011 #undef TARGET_EXPAND_BUILTIN
10012 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10013 #undef TARGET_FOLD_BUILTIN
10014 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10015 #undef TARGET_GIMPLE_FOLD_BUILTIN
10016 #define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin
10018 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10019 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10020 #undef TARGET_CANNOT_COPY_INSN_P
10021 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10022 #undef TARGET_LEGITIMATE_CONSTANT_P
10023 #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
10024 #undef TARGET_CANNOT_FORCE_CONST_MEM
10025 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10027 #if TARGET_ABI_OSF
10028 #undef TARGET_ASM_OUTPUT_MI_THUNK
10029 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10030 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10031 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10032 #undef TARGET_STDARG_OPTIMIZE_HOOK
10033 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10034 #endif
10036 /* Use 16-bit anchors. */
10037 #undef TARGET_MIN_ANCHOR_OFFSET
10038 #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
10039 #undef TARGET_MAX_ANCHOR_OFFSET
10040 #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
10041 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
10042 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
10044 #undef TARGET_RTX_COSTS
10045 #define TARGET_RTX_COSTS alpha_rtx_costs
10046 #undef TARGET_ADDRESS_COST
10047 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
10049 #undef TARGET_MACHINE_DEPENDENT_REORG
10050 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10052 #undef TARGET_PROMOTE_FUNCTION_MODE
10053 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
10054 #undef TARGET_PROMOTE_PROTOTYPES
10055 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10056 #undef TARGET_RETURN_IN_MEMORY
10057 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10058 #undef TARGET_PASS_BY_REFERENCE
10059 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10060 #undef TARGET_SETUP_INCOMING_VARARGS
10061 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10062 #undef TARGET_STRICT_ARGUMENT_NAMING
10063 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10064 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10065 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10066 #undef TARGET_SPLIT_COMPLEX_ARG
10067 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10068 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10069 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10070 #undef TARGET_ARG_PARTIAL_BYTES
10071 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10072 #undef TARGET_FUNCTION_ARG
10073 #define TARGET_FUNCTION_ARG alpha_function_arg
10074 #undef TARGET_FUNCTION_ARG_ADVANCE
10075 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
10076 #undef TARGET_TRAMPOLINE_INIT
10077 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
10079 #undef TARGET_INSTANTIATE_DECLS
10080 #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
10082 #undef TARGET_SECONDARY_RELOAD
10083 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10085 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10086 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10087 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10088 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10090 #undef TARGET_BUILD_BUILTIN_VA_LIST
10091 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10093 #undef TARGET_EXPAND_BUILTIN_VA_START
10094 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10096 /* The Alpha architecture does not require sequential consistency. See
10097 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10098 for an example of how it can be violated in practice. */
10099 #undef TARGET_RELAXED_ORDERING
10100 #define TARGET_RELAXED_ORDERING true
10102 #undef TARGET_OPTION_OVERRIDE
10103 #define TARGET_OPTION_OVERRIDE alpha_option_override
10105 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10106 #undef TARGET_MANGLE_TYPE
10107 #define TARGET_MANGLE_TYPE alpha_mangle_type
10108 #endif
10110 #undef TARGET_LEGITIMATE_ADDRESS_P
10111 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10113 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10114 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
10116 #undef TARGET_CANONICALIZE_COMPARISON
10117 #define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
10119 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
10120 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv
10122 struct gcc_target targetm = TARGET_INITIALIZER;
10125 #include "gt-alpha.h"