gcc/config/alpha/alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "varasm.h"
41 #include "regs.h"
42 #include "hard-reg-set.h"
43 #include "insn-config.h"
44 #include "conditions.h"
45 #include "output.h"
46 #include "insn-attr.h"
47 #include "flags.h"
48 #include "recog.h"
49 #include "expr.h"
50 #include "insn-codes.h"
51 #include "optabs.h"
52 #include "reload.h"
53 #include "obstack.h"
54 #include "except.h"
55 #include "input.h"
56 #include "function.h"
57 #include "diagnostic-core.h"
58 #include "ggc.h"
59 #include "tm_p.h"
60 #include "target.h"
61 #include "target-def.h"
62 #include "common/common-target.h"
63 #include "debug.h"
64 #include "langhooks.h"
65 #include "hash-map.h"
66 #include "hash-table.h"
67 #include "predict.h"
68 #include "dominance.h"
69 #include "cfg.h"
70 #include "cfgrtl.h"
71 #include "cfganal.h"
72 #include "lcm.h"
73 #include "cfgbuild.h"
74 #include "cfgcleanup.h"
75 #include "basic-block.h"
76 #include "tree-ssa-alias.h"
77 #include "internal-fn.h"
78 #include "gimple-fold.h"
79 #include "tree-eh.h"
80 #include "gimple-expr.h"
81 #include "is-a.h"
82 #include "gimple.h"
83 #include "tree-pass.h"
84 #include "context.h"
85 #include "pass_manager.h"
86 #include "gimple-iterator.h"
87 #include "gimplify.h"
88 #include "gimple-ssa.h"
89 #include "stringpool.h"
90 #include "tree-ssanames.h"
91 #include "tree-stdarg.h"
92 #include "tm-constrs.h"
93 #include "df.h"
94 #include "libfuncs.h"
95 #include "opts.h"
96 #include "params.h"
97 #include "builtins.h"
98 #include "rtl-iter.h"
100 /* Specify which cpu to schedule for. */
101 enum processor_type alpha_tune;
103 /* Which cpu we're generating code for. */
104 enum processor_type alpha_cpu;
106 static const char * const alpha_cpu_name[] =
108 "ev4", "ev5", "ev6"
111 /* Specify how accurate floating-point traps need to be. */
113 enum alpha_trap_precision alpha_tp;
115 /* Specify the floating-point rounding mode. */
117 enum alpha_fp_rounding_mode alpha_fprm;
119 /* Specify which things cause traps. */
121 enum alpha_fp_trap_mode alpha_fptm;
123 /* Nonzero if inside of a function, because the Alpha asm can't
124 handle .files inside of functions. */
126 static int inside_function = FALSE;
128 /* The number of cycles of latency we should assume on memory reads. */
130 int alpha_memory_latency = 3;
132 /* Whether the function needs the GP. */
134 static int alpha_function_needs_gp;
136 /* The assembler name of the current function. */
138 static const char *alpha_fnname;
140 /* The next explicit relocation sequence number. */
141 extern GTY(()) int alpha_next_sequence_number;
142 int alpha_next_sequence_number = 1;
144 /* The literal and gpdisp sequence numbers for this insn, as printed
145 by %# and %* respectively. */
146 extern GTY(()) int alpha_this_literal_sequence_number;
147 extern GTY(()) int alpha_this_gpdisp_sequence_number;
148 int alpha_this_literal_sequence_number;
149 int alpha_this_gpdisp_sequence_number;
151 /* Costs of various operations on the different architectures. */
153 struct alpha_rtx_cost_data
155 unsigned char fp_add;
156 unsigned char fp_mult;
157 unsigned char fp_div_sf;
158 unsigned char fp_div_df;
159 unsigned char int_mult_si;
160 unsigned char int_mult_di;
161 unsigned char int_shift;
162 unsigned char int_cmov;
163 unsigned short int_div;
166 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
168 { /* EV4 */
169 COSTS_N_INSNS (6), /* fp_add */
170 COSTS_N_INSNS (6), /* fp_mult */
171 COSTS_N_INSNS (34), /* fp_div_sf */
172 COSTS_N_INSNS (63), /* fp_div_df */
173 COSTS_N_INSNS (23), /* int_mult_si */
174 COSTS_N_INSNS (23), /* int_mult_di */
175 COSTS_N_INSNS (2), /* int_shift */
176 COSTS_N_INSNS (2), /* int_cmov */
177 COSTS_N_INSNS (97), /* int_div */
179 { /* EV5 */
180 COSTS_N_INSNS (4), /* fp_add */
181 COSTS_N_INSNS (4), /* fp_mult */
182 COSTS_N_INSNS (15), /* fp_div_sf */
183 COSTS_N_INSNS (22), /* fp_div_df */
184 COSTS_N_INSNS (8), /* int_mult_si */
185 COSTS_N_INSNS (12), /* int_mult_di */
186 COSTS_N_INSNS (1) + 1, /* int_shift */
187 COSTS_N_INSNS (1), /* int_cmov */
188 COSTS_N_INSNS (83), /* int_div */
190 { /* EV6 */
191 COSTS_N_INSNS (4), /* fp_add */
192 COSTS_N_INSNS (4), /* fp_mult */
193 COSTS_N_INSNS (12), /* fp_div_sf */
194 COSTS_N_INSNS (15), /* fp_div_df */
195 COSTS_N_INSNS (7), /* int_mult_si */
196 COSTS_N_INSNS (7), /* int_mult_di */
197 COSTS_N_INSNS (1), /* int_shift */
198 COSTS_N_INSNS (2), /* int_cmov */
199 COSTS_N_INSNS (86), /* int_div */
203 /* Similar but tuned for code size instead of execution latency. The
204 extra +N is fractional cost tuning based on latency. It's used to
205 encourage use of cheaper insns like shift, but only if there's just
206 one of them. */
208 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
210 COSTS_N_INSNS (1), /* fp_add */
211 COSTS_N_INSNS (1), /* fp_mult */
212 COSTS_N_INSNS (1), /* fp_div_sf */
213 COSTS_N_INSNS (1) + 1, /* fp_div_df */
214 COSTS_N_INSNS (1) + 1, /* int_mult_si */
215 COSTS_N_INSNS (1) + 2, /* int_mult_di */
216 COSTS_N_INSNS (1), /* int_shift */
217 COSTS_N_INSNS (1), /* int_cmov */
218 COSTS_N_INSNS (6), /* int_div */
221 /* Get the number of args of a function in one of two ways. */
222 #if TARGET_ABI_OPEN_VMS
223 #define NUM_ARGS crtl->args.info.num_args
224 #else
225 #define NUM_ARGS crtl->args.info
226 #endif
228 #define REG_PV 27
229 #define REG_RA 26
231 /* Declarations of static functions. */
232 static struct machine_function *alpha_init_machine_status (void);
233 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
234 static void alpha_handle_trap_shadows (void);
235 static void alpha_align_insns (void);
237 #if TARGET_ABI_OPEN_VMS
238 static void alpha_write_linkage (FILE *, const char *);
239 static bool vms_valid_pointer_mode (machine_mode);
240 #else
241 #define vms_patch_builtins() gcc_unreachable()
242 #endif
244 static unsigned int
245 rest_of_handle_trap_shadows (void)
247 alpha_handle_trap_shadows ();
248 return 0;
251 namespace {
253 const pass_data pass_data_handle_trap_shadows =
255 RTL_PASS,
256 "trap_shadows", /* name */
257 OPTGROUP_NONE, /* optinfo_flags */
258 TV_NONE, /* tv_id */
259 0, /* properties_required */
260 0, /* properties_provided */
261 0, /* properties_destroyed */
262 0, /* todo_flags_start */
263 TODO_df_finish, /* todo_flags_finish */
266 class pass_handle_trap_shadows : public rtl_opt_pass
268 public:
269 pass_handle_trap_shadows(gcc::context *ctxt)
270 : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
273 /* opt_pass methods: */
274 virtual bool gate (function *)
276 return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
279 virtual unsigned int execute (function *)
281 return rest_of_handle_trap_shadows ();
284 }; // class pass_handle_trap_shadows
286 } // anon namespace
288 rtl_opt_pass *
289 make_pass_handle_trap_shadows (gcc::context *ctxt)
291 return new pass_handle_trap_shadows (ctxt);
294 static unsigned int
295 rest_of_align_insns (void)
297 alpha_align_insns ();
298 return 0;
301 namespace {
303 const pass_data pass_data_align_insns =
305 RTL_PASS,
306 "align_insns", /* name */
307 OPTGROUP_NONE, /* optinfo_flags */
308 TV_NONE, /* tv_id */
309 0, /* properties_required */
310 0, /* properties_provided */
311 0, /* properties_destroyed */
312 0, /* todo_flags_start */
313 TODO_df_finish, /* todo_flags_finish */
316 class pass_align_insns : public rtl_opt_pass
318 public:
319 pass_align_insns(gcc::context *ctxt)
320 : rtl_opt_pass(pass_data_align_insns, ctxt)
323 /* opt_pass methods: */
324 virtual bool gate (function *)
326 /* Due to the number of extra trapb insns, don't bother fixing up
327 alignment when trap precision is instruction. Moreover, we can
328 only do our job when sched2 is run. */
329 return ((alpha_tune == PROCESSOR_EV4
330 || alpha_tune == PROCESSOR_EV5)
331 && optimize && !optimize_size
332 && alpha_tp != ALPHA_TP_INSN
333 && flag_schedule_insns_after_reload);
336 virtual unsigned int execute (function *)
338 return rest_of_align_insns ();
341 }; // class pass_align_insns
343 } // anon namespace
345 rtl_opt_pass *
346 make_pass_align_insns (gcc::context *ctxt)
348 return new pass_align_insns (ctxt);
351 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
352 /* Implement TARGET_MANGLE_TYPE. */
354 static const char *
355 alpha_mangle_type (const_tree type)
357 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
358 && TARGET_LONG_DOUBLE_128)
359 return "g";
361 /* For all other types, use normal C++ mangling. */
362 return NULL;
364 #endif
366 /* Parse target option strings. */
368 static void
369 alpha_option_override (void)
371 static const struct cpu_table {
372 const char *const name;
373 const enum processor_type processor;
374 const int flags;
375 const unsigned short line_size; /* in bytes */
376 const unsigned short l1_size; /* in kb. */
377 const unsigned short l2_size; /* in kb. */
378 } cpu_table[] = {
379 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
380 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
381 had 64k to 8M 8-byte direct Bcache. */
382 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
383 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
384 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
386 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
387 and 1M to 16M 64 byte L3 (not modeled).
388 PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
389 PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
390 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
391 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
392 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
393 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
394 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
395 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
396 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
398 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
399 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
400 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
401 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
402 64, 64, 16*1024 },
403 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
404 64, 64, 16*1024 }
407 opt_pass *pass_handle_trap_shadows = make_pass_handle_trap_shadows (g);
408 struct register_pass_info handle_trap_shadows_info
409 = { pass_handle_trap_shadows, "eh_ranges",
410 1, PASS_POS_INSERT_AFTER
413 opt_pass *pass_align_insns = make_pass_align_insns (g);
414 struct register_pass_info align_insns_info
415 = { pass_align_insns, "shorten",
416 1, PASS_POS_INSERT_BEFORE
419 int const ct_size = ARRAY_SIZE (cpu_table);
420 int line_size = 0, l1_size = 0, l2_size = 0;
421 int i;
423 #ifdef SUBTARGET_OVERRIDE_OPTIONS
424 SUBTARGET_OVERRIDE_OPTIONS;
425 #endif
427 /* Default to full IEEE compliance mode for Go language. */
428 if (strcmp (lang_hooks.name, "GNU Go") == 0
429 && !(target_flags_explicit & MASK_IEEE))
430 target_flags |= MASK_IEEE;
432 alpha_fprm = ALPHA_FPRM_NORM;
433 alpha_tp = ALPHA_TP_PROG;
434 alpha_fptm = ALPHA_FPTM_N;
436 if (TARGET_IEEE)
438 alpha_tp = ALPHA_TP_INSN;
439 alpha_fptm = ALPHA_FPTM_SU;
441 if (TARGET_IEEE_WITH_INEXACT)
443 alpha_tp = ALPHA_TP_INSN;
444 alpha_fptm = ALPHA_FPTM_SUI;
447 if (alpha_tp_string)
449 if (! strcmp (alpha_tp_string, "p"))
450 alpha_tp = ALPHA_TP_PROG;
451 else if (! strcmp (alpha_tp_string, "f"))
452 alpha_tp = ALPHA_TP_FUNC;
453 else if (! strcmp (alpha_tp_string, "i"))
454 alpha_tp = ALPHA_TP_INSN;
455 else
456 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
459 if (alpha_fprm_string)
461 if (! strcmp (alpha_fprm_string, "n"))
462 alpha_fprm = ALPHA_FPRM_NORM;
463 else if (! strcmp (alpha_fprm_string, "m"))
464 alpha_fprm = ALPHA_FPRM_MINF;
465 else if (! strcmp (alpha_fprm_string, "c"))
466 alpha_fprm = ALPHA_FPRM_CHOP;
467 else if (! strcmp (alpha_fprm_string,"d"))
468 alpha_fprm = ALPHA_FPRM_DYN;
469 else
470 error ("bad value %qs for -mfp-rounding-mode switch",
471 alpha_fprm_string);
474 if (alpha_fptm_string)
476 if (strcmp (alpha_fptm_string, "n") == 0)
477 alpha_fptm = ALPHA_FPTM_N;
478 else if (strcmp (alpha_fptm_string, "u") == 0)
479 alpha_fptm = ALPHA_FPTM_U;
480 else if (strcmp (alpha_fptm_string, "su") == 0)
481 alpha_fptm = ALPHA_FPTM_SU;
482 else if (strcmp (alpha_fptm_string, "sui") == 0)
483 alpha_fptm = ALPHA_FPTM_SUI;
484 else
485 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
488 if (alpha_cpu_string)
490 for (i = 0; i < ct_size; i++)
491 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
493 alpha_tune = alpha_cpu = cpu_table[i].processor;
494 line_size = cpu_table[i].line_size;
495 l1_size = cpu_table[i].l1_size;
496 l2_size = cpu_table[i].l2_size;
497 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
498 target_flags |= cpu_table[i].flags;
499 break;
501 if (i == ct_size)
502 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
505 if (alpha_tune_string)
507 for (i = 0; i < ct_size; i++)
508 if (! strcmp (alpha_tune_string, cpu_table [i].name))
510 alpha_tune = cpu_table[i].processor;
511 line_size = cpu_table[i].line_size;
512 l1_size = cpu_table[i].l1_size;
513 l2_size = cpu_table[i].l2_size;
514 break;
516 if (i == ct_size)
517 error ("bad value %qs for -mtune switch", alpha_tune_string);
520 if (line_size)
521 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
522 global_options.x_param_values,
523 global_options_set.x_param_values);
524 if (l1_size)
525 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
526 global_options.x_param_values,
527 global_options_set.x_param_values);
528 if (l2_size)
529 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
530 global_options.x_param_values,
531 global_options_set.x_param_values);
533 /* Do some sanity checks on the above options. */
535 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
536 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
538 warning (0, "fp software completion requires -mtrap-precision=i");
539 alpha_tp = ALPHA_TP_INSN;
542 if (alpha_cpu == PROCESSOR_EV6)
544 /* Except for EV6 pass 1 (not released), we always have precise
545 arithmetic traps. Which means we can do software completion
546 without minding trap shadows. */
547 alpha_tp = ALPHA_TP_PROG;
550 if (TARGET_FLOAT_VAX)
552 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
554 warning (0, "rounding mode not supported for VAX floats");
555 alpha_fprm = ALPHA_FPRM_NORM;
557 if (alpha_fptm == ALPHA_FPTM_SUI)
559 warning (0, "trap mode not supported for VAX floats");
560 alpha_fptm = ALPHA_FPTM_SU;
562 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
563 warning (0, "128-bit long double not supported for VAX floats");
564 target_flags &= ~MASK_LONG_DOUBLE_128;
568 char *end;
569 int lat;
571 if (!alpha_mlat_string)
572 alpha_mlat_string = "L1";
574 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
575 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
577 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
578 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
579 && alpha_mlat_string[2] == '\0')
581 static int const cache_latency[][4] =
583 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
584 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
585 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
588 lat = alpha_mlat_string[1] - '0';
589 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
591 warning (0, "L%d cache latency unknown for %s",
592 lat, alpha_cpu_name[alpha_tune]);
593 lat = 3;
595 else
596 lat = cache_latency[alpha_tune][lat-1];
598 else if (! strcmp (alpha_mlat_string, "main"))
600 /* Most current memories have about 370ns latency. This is
601 a reasonable guess for a fast cpu. */
602 lat = 150;
604 else
606 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
607 lat = 3;
610 alpha_memory_latency = lat;
613 /* Default the definition of "small data" to 8 bytes. */
614 if (!global_options_set.x_g_switch_value)
615 g_switch_value = 8;
617 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
618 if (flag_pic == 1)
619 target_flags |= MASK_SMALL_DATA;
620 else if (flag_pic == 2)
621 target_flags &= ~MASK_SMALL_DATA;
623 /* Align labels and loops for optimal branching. */
624 /* ??? Kludge these by not doing anything if we don't optimize. */
625 if (optimize > 0)
627 if (align_loops <= 0)
628 align_loops = 16;
629 if (align_jumps <= 0)
630 align_jumps = 16;
632 if (align_functions <= 0)
633 align_functions = 16;
635 /* Register variables and functions with the garbage collector. */
637 /* Set up function hooks. */
638 init_machine_status = alpha_init_machine_status;
640 /* Tell the compiler when we're using VAX floating point. */
641 if (TARGET_FLOAT_VAX)
643 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
644 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
645 REAL_MODE_FORMAT (TFmode) = NULL;
648 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
649 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
650 target_flags |= MASK_LONG_DOUBLE_128;
651 #endif
653 /* This needs to be done at start up. It's convenient to do it here. */
654 register_pass (&handle_trap_shadows_info);
655 register_pass (&align_insns_info);
658 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
661 zap_mask (HOST_WIDE_INT value)
663 int i;
665 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
666 i++, value >>= 8)
667 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
668 return 0;
670 return 1;
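/* Illustrative sketch (not part of the build): a standalone restatement of
   the byte-mask test above, with a few worked values.  zap_mask accepts
   exactly those 64-bit constants whose individual bytes are all 0x00 or all
   0xff -- the AND masks that a single ZAP/ZAPNOT instruction can realize.  */
#if 0
static int
zap_mask_sketch (unsigned long long value)
{
  int i;
  for (i = 0; i < 8; i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;
  return 1;
}

/* zap_mask_sketch (0x00000000ffffffffULL) == 1   keep the low four bytes
   zap_mask_sketch (0xff00ff00ff00ff00ULL) == 1   alternating whole bytes
   zap_mask_sketch (0x000000000000ff0fULL) == 0   0x0f is a partial byte  */
#endif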
673 /* Return true if OP is valid for a particular TLS relocation.
674 We are already guaranteed that OP is a CONST. */
677 tls_symbolic_operand_1 (rtx op, int size, int unspec)
679 op = XEXP (op, 0);
681 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
682 return 0;
683 op = XVECEXP (op, 0, 0);
685 if (GET_CODE (op) != SYMBOL_REF)
686 return 0;
688 switch (SYMBOL_REF_TLS_MODEL (op))
690 case TLS_MODEL_LOCAL_DYNAMIC:
691 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
692 case TLS_MODEL_INITIAL_EXEC:
693 return unspec == UNSPEC_TPREL && size == 64;
694 case TLS_MODEL_LOCAL_EXEC:
695 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
696 default:
697 gcc_unreachable ();
701 /* Used by aligned_memory_operand and unaligned_memory_operand to
702 resolve what reload is going to do with OP if it's a register. */
705 resolve_reload_operand (rtx op)
707 if (reload_in_progress)
709 rtx tmp = op;
710 if (GET_CODE (tmp) == SUBREG)
711 tmp = SUBREG_REG (tmp);
712 if (REG_P (tmp)
713 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
715 op = reg_equiv_memory_loc (REGNO (tmp));
716 if (op == 0)
717 return 0;
720 return op;
723 /* The scalar modes supported differ from the default check-what-c-supports
724 version in that sometimes TFmode is available even when long double
725 indicates only DFmode. */
727 static bool
728 alpha_scalar_mode_supported_p (machine_mode mode)
730 switch (mode)
732 case QImode:
733 case HImode:
734 case SImode:
735 case DImode:
736 case TImode: /* via optabs.c */
737 return true;
739 case SFmode:
740 case DFmode:
741 return true;
743 case TFmode:
744 return TARGET_HAS_XFLOATING_LIBS;
746 default:
747 return false;
751 /* Alpha implements a couple of integer vector mode operations when
752 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
753 which allows the vectorizer to operate on e.g. move instructions,
754 or when expand_vector_operations can do something useful. */
756 static bool
757 alpha_vector_mode_supported_p (machine_mode mode)
759 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
762 /* Return 1 if this function can directly return via $26. */
765 direct_return (void)
767 return (TARGET_ABI_OSF
768 && reload_completed
769 && alpha_sa_size () == 0
770 && get_frame_size () == 0
771 && crtl->outgoing_args_size == 0
772 && crtl->args.pretend_args_size == 0);
775 /* Return the TLS model to use for SYMBOL. */
777 static enum tls_model
778 tls_symbolic_operand_type (rtx symbol)
780 enum tls_model model;
782 if (GET_CODE (symbol) != SYMBOL_REF)
783 return TLS_MODEL_NONE;
784 model = SYMBOL_REF_TLS_MODEL (symbol);
786 /* Local-exec with a 64-bit size is the same code as initial-exec. */
787 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
788 model = TLS_MODEL_INITIAL_EXEC;
790 return model;
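/* Illustrative sketch (not compiled): how source-level TLS declarations
   usually map onto the models distinguished above.  The actual choice also
   depends on -fpic/-fPIC and -ftls-model, so treat this as the common
   default rather than a guarantee made by this file.  */
#if 0
__thread int t_def;            /* non-PIC: local-exec; note that with
                                  alpha_tls_size == 64 it is handled exactly
                                  like initial-exec, per the comment above.  */
extern __thread int t_ext;     /* non-PIC executable: initial-exec.  */
/* With -fPIC (shared library), references typically become global-dynamic,
   and static/hidden definitions local-dynamic.  */
#endif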
793 /* Return true if the function DECL will share the same GP as any
794 function in the current unit of translation. */
796 static bool
797 decl_has_samegp (const_tree decl)
799 /* Functions that are not local can be overridden, and thus may
800 not share the same gp. */
801 if (!(*targetm.binds_local_p) (decl))
802 return false;
804 /* If -msmall-data is in effect, assume that there is only one GP
805 for the module, and so any local symbol has this property. We
806 need explicit relocations to be able to enforce this for symbols
807 not defined in this unit of translation, however. */
808 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
809 return true;
811 /* Functions that are not external are defined in this UoT. */
812 /* ??? Irritatingly, static functions not yet emitted are still
813 marked "external". Apply this to non-static functions only. */
814 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
817 /* Return true if EXP should be placed in the small data section. */
819 static bool
820 alpha_in_small_data_p (const_tree exp)
822 /* We want to merge strings, so we never consider them small data. */
823 if (TREE_CODE (exp) == STRING_CST)
824 return false;
826 /* Functions are never in the small data area. Duh. */
827 if (TREE_CODE (exp) == FUNCTION_DECL)
828 return false;
830 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
832 const char *section = DECL_SECTION_NAME (exp);
833 if (strcmp (section, ".sdata") == 0
834 || strcmp (section, ".sbss") == 0)
835 return true;
837 else
839 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
841 /* If this is an incomplete type with size 0, then we can't put it
842 in sdata because it might be too big when completed. */
843 if (size > 0 && size <= g_switch_value)
844 return true;
847 return false;
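/* Illustrative sketch (not compiled): what the size cutoff above means for
   ordinary globals once small-data sections are in use (the default
   g_switch_value is 8; see alpha_option_override).  */
#if 0
int counter;               /* 4 bytes  <= 8: eligible for .sbss            */
long long ticks = 1;       /* 8 bytes  <= 8: eligible for .sdata           */
char buffer[64];           /* 64 bytes >  8: stays in ordinary .bss        */
const char *msg = "hi";    /* the 8-byte pointer is small data; the string
                              constant itself never is (see above).        */
#endif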
850 #if TARGET_ABI_OPEN_VMS
851 static bool
852 vms_valid_pointer_mode (machine_mode mode)
854 return (mode == SImode || mode == DImode);
857 static bool
858 alpha_linkage_symbol_p (const char *symname)
860 int symlen = strlen (symname);
862 if (symlen > 4)
863 return strcmp (&symname [symlen - 4], "..lk") == 0;
865 return false;
868 #define LINKAGE_SYMBOL_REF_P(X) \
869 ((GET_CODE (X) == SYMBOL_REF \
870 && alpha_linkage_symbol_p (XSTR (X, 0))) \
871 || (GET_CODE (X) == CONST \
872 && GET_CODE (XEXP (X, 0)) == PLUS \
873 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
874 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
875 #endif
877 /* legitimate_address_p recognizes an RTL expression that is a valid
878 memory address for an instruction. The MODE argument is the
879 machine mode for the MEM expression that wants to use this address.
881 For Alpha, we have either a constant address or the sum of a
882 register and a constant address, or just a register. For DImode,
883 any of those forms can be surrounded with an AND that clears the
884 low-order three bits; this is an "unaligned" access. */
886 static bool
887 alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
889 /* If this is an ldq_u type address, discard the outer AND. */
890 if (mode == DImode
891 && GET_CODE (x) == AND
892 && CONST_INT_P (XEXP (x, 1))
893 && INTVAL (XEXP (x, 1)) == -8)
894 x = XEXP (x, 0);
896 /* Discard non-paradoxical subregs. */
897 if (GET_CODE (x) == SUBREG
898 && (GET_MODE_SIZE (GET_MODE (x))
899 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
900 x = SUBREG_REG (x);
902 /* Unadorned general registers are valid. */
903 if (REG_P (x)
904 && (strict
905 ? STRICT_REG_OK_FOR_BASE_P (x)
906 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
907 return true;
909 /* Constant addresses (i.e. +/- 32k) are valid. */
910 if (CONSTANT_ADDRESS_P (x))
911 return true;
913 #if TARGET_ABI_OPEN_VMS
914 if (LINKAGE_SYMBOL_REF_P (x))
915 return true;
916 #endif
918 /* Register plus a small constant offset is valid. */
919 if (GET_CODE (x) == PLUS)
921 rtx ofs = XEXP (x, 1);
922 x = XEXP (x, 0);
924 /* Discard non-paradoxical subregs. */
925 if (GET_CODE (x) == SUBREG
926 && (GET_MODE_SIZE (GET_MODE (x))
927 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
928 x = SUBREG_REG (x);
930 if (REG_P (x))
932 if (! strict
933 && NONSTRICT_REG_OK_FP_BASE_P (x)
934 && CONST_INT_P (ofs))
935 return true;
936 if ((strict
937 ? STRICT_REG_OK_FOR_BASE_P (x)
938 : NONSTRICT_REG_OK_FOR_BASE_P (x))
939 && CONSTANT_ADDRESS_P (ofs))
940 return true;
944 /* If we're managing explicit relocations, LO_SUM is valid, as are small
945 data symbols. Avoid explicit relocations of modes larger than word
946 mode, since e.g. $LC0+8($1) can fold around a +/- 32k offset. */
947 else if (TARGET_EXPLICIT_RELOCS
948 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
950 if (small_symbolic_operand (x, Pmode))
951 return true;
953 if (GET_CODE (x) == LO_SUM)
955 rtx ofs = XEXP (x, 1);
956 x = XEXP (x, 0);
958 /* Discard non-paradoxical subregs. */
959 if (GET_CODE (x) == SUBREG
960 && (GET_MODE_SIZE (GET_MODE (x))
961 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
962 x = SUBREG_REG (x);
964 /* Must have a valid base register. */
965 if (! (REG_P (x)
966 && (strict
967 ? STRICT_REG_OK_FOR_BASE_P (x)
968 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
969 return false;
971 /* The symbol must be local. */
972 if (local_symbolic_operand (ofs, Pmode)
973 || dtp32_symbolic_operand (ofs, Pmode)
974 || tp32_symbolic_operand (ofs, Pmode))
975 return true;
979 return false;
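/* Illustrative sketch (not compiled): the address shapes accepted above,
   written as RTL-ish patterns, plus the signed 16-bit displacement test
   that the "+/- 32k" remark refers to.  The helper is a standalone
   illustration, not the CONSTANT_ADDRESS_P macro itself.  */
#if 0
/*   (reg $16)                                plain base register
     (plus (reg $16) (const_int 4096))        base + 16-bit displacement
     (const_int -32768)                       constant address within +/- 32k
     (and (plus (reg $16) (const_int 11))
          (const_int -8))                     DImode only: ldq_u-style access
     (lo_sum (reg $29) (symbol_ref "x"))      explicit-relocs local symbol   */

static int
fits_mem_displacement_sketch (long disp)
{
  /* Alpha memory-format instructions carry a signed 16-bit displacement.  */
  return disp >= -32768 && disp <= 32767;
}
#endif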
982 /* Build the SYMBOL_REF for __tls_get_addr. */
984 static GTY(()) rtx tls_get_addr_libfunc;
986 static rtx
987 get_tls_get_addr (void)
989 if (!tls_get_addr_libfunc)
990 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
991 return tls_get_addr_libfunc;
994 /* Try machine-dependent ways of modifying an illegitimate address
995 to be legitimate. If we find one, return the new, valid address. */
997 static rtx
998 alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
1000 HOST_WIDE_INT addend;
1002 /* If the address is (plus reg const_int) and the CONST_INT is not a
1003 valid offset, compute the high part of the constant and add it to
1004 the register. Then our address is (plus temp low-part-const). */
1005 if (GET_CODE (x) == PLUS
1006 && REG_P (XEXP (x, 0))
1007 && CONST_INT_P (XEXP (x, 1))
1008 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1010 addend = INTVAL (XEXP (x, 1));
1011 x = XEXP (x, 0);
1012 goto split_addend;
1015 /* If the address is (const (plus FOO const_int)), find the low-order
1016 part of the CONST_INT. Then load FOO plus any high-order part of the
1017 CONST_INT into a register. Our address is (plus reg low-part-const).
1018 This is done to reduce the number of GOT entries. */
1019 if (can_create_pseudo_p ()
1020 && GET_CODE (x) == CONST
1021 && GET_CODE (XEXP (x, 0)) == PLUS
1022 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
1024 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1025 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1026 goto split_addend;
1029 /* If we have a (plus reg const), emit the load as in (2), then add
1030 the two registers, and finally generate (plus reg low-part-const) as
1031 our address. */
1032 if (can_create_pseudo_p ()
1033 && GET_CODE (x) == PLUS
1034 && REG_P (XEXP (x, 0))
1035 && GET_CODE (XEXP (x, 1)) == CONST
1036 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1037 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
1039 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1040 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1041 XEXP (XEXP (XEXP (x, 1), 0), 0),
1042 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1043 goto split_addend;
1046 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
1047 Avoid modes larger than word mode, since e.g. $LC0+8($1) can fold
1048 around +/- 32k offset. */
1049 if (TARGET_EXPLICIT_RELOCS
1050 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1051 && symbolic_operand (x, Pmode))
1053 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1055 switch (tls_symbolic_operand_type (x))
1057 case TLS_MODEL_NONE:
1058 break;
1060 case TLS_MODEL_GLOBAL_DYNAMIC:
1061 start_sequence ();
1063 r0 = gen_rtx_REG (Pmode, 0);
1064 r16 = gen_rtx_REG (Pmode, 16);
1065 tga = get_tls_get_addr ();
1066 dest = gen_reg_rtx (Pmode);
1067 seq = GEN_INT (alpha_next_sequence_number++);
1069 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1070 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1071 insn = emit_call_insn (insn);
1072 RTL_CONST_CALL_P (insn) = 1;
1073 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1075 insn = get_insns ();
1076 end_sequence ();
1078 emit_libcall_block (insn, dest, r0, x);
1079 return dest;
1081 case TLS_MODEL_LOCAL_DYNAMIC:
1082 start_sequence ();
1084 r0 = gen_rtx_REG (Pmode, 0);
1085 r16 = gen_rtx_REG (Pmode, 16);
1086 tga = get_tls_get_addr ();
1087 scratch = gen_reg_rtx (Pmode);
1088 seq = GEN_INT (alpha_next_sequence_number++);
1090 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1091 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1092 insn = emit_call_insn (insn);
1093 RTL_CONST_CALL_P (insn) = 1;
1094 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1096 insn = get_insns ();
1097 end_sequence ();
1099 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1100 UNSPEC_TLSLDM_CALL);
1101 emit_libcall_block (insn, scratch, r0, eqv);
1103 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1104 eqv = gen_rtx_CONST (Pmode, eqv);
1106 if (alpha_tls_size == 64)
1108 dest = gen_reg_rtx (Pmode);
1109 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1110 emit_insn (gen_adddi3 (dest, dest, scratch));
1111 return dest;
1113 if (alpha_tls_size == 32)
1115 insn = gen_rtx_HIGH (Pmode, eqv);
1116 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1117 scratch = gen_reg_rtx (Pmode);
1118 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1120 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1122 case TLS_MODEL_INITIAL_EXEC:
1123 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1124 eqv = gen_rtx_CONST (Pmode, eqv);
1125 tp = gen_reg_rtx (Pmode);
1126 scratch = gen_reg_rtx (Pmode);
1127 dest = gen_reg_rtx (Pmode);
1129 emit_insn (gen_get_thread_pointerdi (tp));
1130 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1131 emit_insn (gen_adddi3 (dest, tp, scratch));
1132 return dest;
1134 case TLS_MODEL_LOCAL_EXEC:
1135 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1136 eqv = gen_rtx_CONST (Pmode, eqv);
1137 tp = gen_reg_rtx (Pmode);
1139 emit_insn (gen_get_thread_pointerdi (tp));
1140 if (alpha_tls_size == 32)
1142 insn = gen_rtx_HIGH (Pmode, eqv);
1143 insn = gen_rtx_PLUS (Pmode, tp, insn);
1144 tp = gen_reg_rtx (Pmode);
1145 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1147 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1149 default:
1150 gcc_unreachable ();
1153 if (local_symbolic_operand (x, Pmode))
1155 if (small_symbolic_operand (x, Pmode))
1156 return x;
1157 else
1159 if (can_create_pseudo_p ())
1160 scratch = gen_reg_rtx (Pmode);
1161 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1162 gen_rtx_HIGH (Pmode, x)));
1163 return gen_rtx_LO_SUM (Pmode, scratch, x);
1168 return NULL;
1170 split_addend:
1172 HOST_WIDE_INT low, high;
1174 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1175 addend -= low;
1176 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1177 addend -= high;
1179 if (addend)
1180 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1181 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1182 1, OPTAB_LIB_WIDEN);
1183 if (high)
1184 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1185 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1186 1, OPTAB_LIB_WIDEN);
1188 return plus_constant (Pmode, x, low);
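/* Illustrative sketch (not compiled): the sign-extending 16-bit/32-bit
   split performed at split_addend above, as standalone arithmetic with one
   worked value.  */
#if 0
static void
split_addend_sketch (long long addend, long long *plow, long long *phigh)
{
  long long low = ((addend & 0xffff) ^ 0x8000) - 0x8000;  /* sext low 16   */
  addend -= low;
  long long high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
  *plow = low;                   /* becomes the in-insn displacement       */
  *phigh = high;                 /* already shifted: a multiple of 0x10000 */
}

/* For addend = 0x12349876:
     low  = -0x678a   (0x9876 sign-extended)
     high =  0x12350000
   i.e. one "ldah tmp, 0x1235(base)" plus a -26506 byte displacement on the
   memory reference itself.  */
#endif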
1193 /* Try machine-dependent ways of modifying an illegitimate address
1194 to be legitimate. Return X or the new, valid address. */
1196 static rtx
1197 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1198 machine_mode mode)
1200 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1201 return new_x ? new_x : x;
1204 /* Return true if ADDR has an effect that depends on the machine mode it
1205 is used for. On the Alpha this is true only for the unaligned modes.
1206 We can simplify the test since we know that the address must be valid. */
1208 static bool
1209 alpha_mode_dependent_address_p (const_rtx addr,
1210 addr_space_t as ATTRIBUTE_UNUSED)
1212 return GET_CODE (addr) == AND;
1215 /* Primarily this is required for TLS symbols, but given that our move
1216 patterns *ought* to be able to handle any symbol at any time, we
1217 should never be spilling symbolic operands to the constant pool, ever. */
1219 static bool
1220 alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1222 enum rtx_code code = GET_CODE (x);
1223 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1226 /* We do not allow indirect calls to be optimized into sibling calls, nor
1227 can we allow a call to a function with a different GP to be optimized
1228 into a sibcall. */
1230 static bool
1231 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1233 /* Can't do indirect tail calls, since we don't know if the target
1234 uses the same GP. */
1235 if (!decl)
1236 return false;
1238 /* Otherwise, we can make a tail call if the target function shares
1239 the same GP. */
1240 return decl_has_samegp (decl);
1243 bool
1244 some_small_symbolic_operand_int (rtx x)
1246 subrtx_var_iterator::array_type array;
1247 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
1249 rtx x = *iter;
1250 /* Don't re-split. */
1251 if (GET_CODE (x) == LO_SUM)
1252 iter.skip_subrtxes ();
1253 else if (small_symbolic_operand (x, Pmode))
1254 return true;
1256 return false;
1260 split_small_symbolic_operand (rtx x)
1262 x = copy_insn (x);
1263 subrtx_ptr_iterator::array_type array;
1264 FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
1266 rtx *ptr = *iter;
1267 rtx x = *ptr;
1268 /* Don't re-split. */
1269 if (GET_CODE (x) == LO_SUM)
1270 iter.skip_subrtxes ();
1271 else if (small_symbolic_operand (x, Pmode))
1273 *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1274 iter.skip_subrtxes ();
1277 return x;
1280 /* Indicate that INSN cannot be duplicated. This is true for any insn
1281 that we've marked with gpdisp relocs, since those have to stay in
1282 1-1 correspondence with one another.
1284 Technically we could copy them if we could set up a mapping from one
1285 sequence number to another, across the set of insns to be duplicated.
1286 This seems overly complicated and error-prone since interblock motion
1287 from sched-ebb could move one of the pair of insns to a different block.
1289 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1290 then they'll be in a different block from their ldgp. Which could lead
1291 the bb reorder code to think that it would be ok to copy just the block
1292 containing the call and branch to the block containing the ldgp. */
1294 static bool
1295 alpha_cannot_copy_insn_p (rtx_insn *insn)
1297 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1298 return false;
1299 if (recog_memoized (insn) >= 0)
1300 return get_attr_cannot_copy (insn);
1301 else
1302 return false;
1306 /* Try a machine-dependent way of reloading an illegitimate address
1307 operand. If we find one, push the reload and return the new rtx. */
1310 alpha_legitimize_reload_address (rtx x,
1311 machine_mode mode ATTRIBUTE_UNUSED,
1312 int opnum, int type,
1313 int ind_levels ATTRIBUTE_UNUSED)
1315 /* We must recognize output that we have already generated ourselves. */
1316 if (GET_CODE (x) == PLUS
1317 && GET_CODE (XEXP (x, 0)) == PLUS
1318 && REG_P (XEXP (XEXP (x, 0), 0))
1319 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1320 && CONST_INT_P (XEXP (x, 1)))
1322 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1323 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1324 opnum, (enum reload_type) type);
1325 return x;
1328 /* We wish to handle large displacements off a base register by
1329 splitting the addend across an ldah and the mem insn. This
1330 cuts the number of extra insns needed from 3 to 1. */
1331 if (GET_CODE (x) == PLUS
1332 && REG_P (XEXP (x, 0))
1333 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1334 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1335 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1337 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1338 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1339 HOST_WIDE_INT high
1340 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1342 /* Check for 32-bit overflow. */
1343 if (high + low != val)
1344 return NULL_RTX;
1346 /* Reload the high part into a base reg; leave the low part
1347 in the mem directly. */
1348 x = gen_rtx_PLUS (GET_MODE (x),
1349 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1350 GEN_INT (high)),
1351 GEN_INT (low));
1353 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1354 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1355 opnum, (enum reload_type) type);
1356 return x;
1359 return NULL_RTX;
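/* Illustrative sketch (not compiled): the 32-bit overflow check above.  The
   ldah + displacement split can only express offsets whose sign-extended
   32-bit reconstruction equals the original value; everything else falls
   through to the generic reload handling.  */
#if 0
/* val = 0x7fffffff:  low = 0x7fff,  high = 0x7fff8000,  high + low == val
                      -> reload the high part, keep 0x7fff in the mem.
   val = 0x80000000:  low = 0,       high = -0x80000000, high + low != val
                      -> return NULL_RTX and let common code handle it.     */
#endif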
1362 /* Compute a (partial) cost for rtx X. Return true if the complete
1363 cost has been computed, and false if subexpressions should be
1364 scanned. In either case, *TOTAL contains the cost result. */
1366 static bool
1367 alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
1368 bool speed)
1370 machine_mode mode = GET_MODE (x);
1371 bool float_mode_p = FLOAT_MODE_P (mode);
1372 const struct alpha_rtx_cost_data *cost_data;
1374 if (!speed)
1375 cost_data = &alpha_rtx_cost_size;
1376 else
1377 cost_data = &alpha_rtx_cost_data[alpha_tune];
1379 switch (code)
1381 case CONST_INT:
1382 /* If this is an 8-bit constant, return zero since it can be used
1383 nearly anywhere with no cost. If it is a valid operand for an
1384 ADD or AND, likewise return 0 if we know it will be used in that
1385 context. Otherwise, return 2 since it might be used there later.
1386 All other constants take at least two insns. */
1387 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1389 *total = 0;
1390 return true;
1392 /* FALLTHRU */
1394 case CONST_DOUBLE:
1395 if (x == CONST0_RTX (mode))
1396 *total = 0;
1397 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1398 || (outer_code == AND && and_operand (x, VOIDmode)))
1399 *total = 0;
1400 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1401 *total = 2;
1402 else
1403 *total = COSTS_N_INSNS (2);
1404 return true;
1406 case CONST:
1407 case SYMBOL_REF:
1408 case LABEL_REF:
1409 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1410 *total = COSTS_N_INSNS (outer_code != MEM);
1411 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1412 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1413 else if (tls_symbolic_operand_type (x))
1414 /* Estimate of cost for call_pal rduniq. */
1415 /* ??? How many insns do we emit here? More than one... */
1416 *total = COSTS_N_INSNS (15);
1417 else
1418 /* Otherwise we do a load from the GOT. */
1419 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1420 return true;
1422 case HIGH:
1423 /* This is effectively an add_operand. */
1424 *total = 2;
1425 return true;
1427 case PLUS:
1428 case MINUS:
1429 if (float_mode_p)
1430 *total = cost_data->fp_add;
1431 else if (GET_CODE (XEXP (x, 0)) == MULT
1432 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1434 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1435 (enum rtx_code) outer_code, opno, speed)
1436 + rtx_cost (XEXP (x, 1),
1437 (enum rtx_code) outer_code, opno, speed)
1438 + COSTS_N_INSNS (1));
1439 return true;
1441 return false;
1443 case MULT:
1444 if (float_mode_p)
1445 *total = cost_data->fp_mult;
1446 else if (mode == DImode)
1447 *total = cost_data->int_mult_di;
1448 else
1449 *total = cost_data->int_mult_si;
1450 return false;
1452 case ASHIFT:
1453 if (CONST_INT_P (XEXP (x, 1))
1454 && INTVAL (XEXP (x, 1)) <= 3)
1456 *total = COSTS_N_INSNS (1);
1457 return false;
1459 /* FALLTHRU */
1461 case ASHIFTRT:
1462 case LSHIFTRT:
1463 *total = cost_data->int_shift;
1464 return false;
1466 case IF_THEN_ELSE:
1467 if (float_mode_p)
1468 *total = cost_data->fp_add;
1469 else
1470 *total = cost_data->int_cmov;
1471 return false;
1473 case DIV:
1474 case UDIV:
1475 case MOD:
1476 case UMOD:
1477 if (!float_mode_p)
1478 *total = cost_data->int_div;
1479 else if (mode == SFmode)
1480 *total = cost_data->fp_div_sf;
1481 else
1482 *total = cost_data->fp_div_df;
1483 return false;
1485 case MEM:
1486 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1487 return true;
1489 case NEG:
1490 if (! float_mode_p)
1492 *total = COSTS_N_INSNS (1);
1493 return false;
1495 /* FALLTHRU */
1497 case ABS:
1498 if (! float_mode_p)
1500 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1501 return false;
1503 /* FALLTHRU */
1505 case FLOAT:
1506 case UNSIGNED_FLOAT:
1507 case FIX:
1508 case UNSIGNED_FIX:
1509 case FLOAT_TRUNCATE:
1510 *total = cost_data->fp_add;
1511 return false;
1513 case FLOAT_EXTEND:
1514 if (MEM_P (XEXP (x, 0)))
1515 *total = 0;
1516 else
1517 *total = cost_data->fp_add;
1518 return false;
1520 default:
1521 return false;
1525 /* REF is an alignable memory location. Place an aligned SImode
1526 reference into *PALIGNED_MEM and the number of bits to shift into
1527 *PBITNUM. */
1530 void
1531 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1533 rtx base;
1534 HOST_WIDE_INT disp, offset;
1536 gcc_assert (MEM_P (ref));
1538 if (reload_in_progress
1539 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1541 base = find_replacement (&XEXP (ref, 0));
1542 gcc_assert (memory_address_p (GET_MODE (ref), base));
1544 else
1545 base = XEXP (ref, 0);
1547 if (GET_CODE (base) == PLUS)
1548 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1549 else
1550 disp = 0;
1552 /* Find the byte offset within an aligned word. If the memory itself is
1553 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1554 will have examined the base register and determined it is aligned, and
1555 thus displacements from it are naturally alignable. */
1556 if (MEM_ALIGN (ref) >= 32)
1557 offset = 0;
1558 else
1559 offset = disp & 3;
1561 /* The location should not cross aligned word boundary. */
1562 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1563 <= GET_MODE_SIZE (SImode));
1565 /* Access the entire aligned word. */
1566 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1568 /* Convert the byte offset within the word to a bit offset. */
1569 offset *= BITS_PER_UNIT;
1570 *pbitnum = GEN_INT (offset);
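/* Illustrative sketch (not compiled): the offset arithmetic above for a
   HImode reference at displacement 6 from a base not known to be 32-bit
   aligned.  */
#if 0
static void
aligned_mem_offsets_sketch (long disp, long *pwordpos, long *pbitnum)
{
  long offset = disp & 3;      /* byte offset inside the aligned word      */
  *pwordpos = disp - offset;   /* where the widened SImode access lands    */
  *pbitnum = offset * 8;       /* shift count handed back in *PBITNUM      */
}

/* disp = 6  ->  wordpos = 4, bitnum = 16; the assertion above guarantees
   offset + access size <= 4, so the halfword never straddles the word.    */
#endif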
1573 /* Similar, but just get the address. Handle the two reload cases. */
1577 get_unaligned_address (rtx ref)
1579 rtx base;
1580 HOST_WIDE_INT offset = 0;
1582 gcc_assert (MEM_P (ref));
1584 if (reload_in_progress
1585 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1587 base = find_replacement (&XEXP (ref, 0));
1589 gcc_assert (memory_address_p (GET_MODE (ref), base));
1591 else
1592 base = XEXP (ref, 0);
1594 if (GET_CODE (base) == PLUS)
1595 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1597 return plus_constant (Pmode, base, offset);
1600 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1601 X is always returned in a register. */
1604 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1606 if (GET_CODE (addr) == PLUS)
1608 ofs += INTVAL (XEXP (addr, 1));
1609 addr = XEXP (addr, 0);
1612 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1613 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1616 /* On the Alpha, all (non-symbolic) constants except zero go into
1617 a floating-point register via memory. Note that we cannot
1618 return anything that is not a subset of RCLASS, and that some
1619 symbolic constants cannot be dropped to memory. */
1621 enum reg_class
1622 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1624 /* Zero is present in any register class. */
1625 if (x == CONST0_RTX (GET_MODE (x)))
1626 return rclass;
1628 /* These sorts of constants we can easily drop to memory. */
1629 if (CONST_INT_P (x)
1630 || GET_CODE (x) == CONST_DOUBLE
1631 || GET_CODE (x) == CONST_VECTOR)
1633 if (rclass == FLOAT_REGS)
1634 return NO_REGS;
1635 if (rclass == ALL_REGS)
1636 return GENERAL_REGS;
1637 return rclass;
1640 /* All other kinds of constants should not (and in the case of HIGH
1641 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1642 secondary reload. */
1643 if (CONSTANT_P (x))
1644 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1646 return rclass;
1649 /* Inform reload about cases where moving X with a mode MODE to a register in
1650 RCLASS requires an extra scratch or immediate register. Return the class
1651 needed for the immediate register. */
1653 static reg_class_t
1654 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1655 machine_mode mode, secondary_reload_info *sri)
1657 enum reg_class rclass = (enum reg_class) rclass_i;
1659 /* Loading and storing HImode or QImode values to and from memory
1660 usually requires a scratch register. */
1661 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1663 if (any_memory_operand (x, mode))
1665 if (in_p)
1667 if (!aligned_memory_operand (x, mode))
1668 sri->icode = direct_optab_handler (reload_in_optab, mode);
1670 else
1671 sri->icode = direct_optab_handler (reload_out_optab, mode);
1672 return NO_REGS;
1676 /* We also cannot do integral arithmetic into FP regs, as might result
1677 from register elimination into a DImode fp register. */
1678 if (rclass == FLOAT_REGS)
1680 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1681 return GENERAL_REGS;
1682 if (in_p && INTEGRAL_MODE_P (mode)
1683 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1684 return GENERAL_REGS;
1687 return NO_REGS;
1690 /* Given SEQ, which is an INSN list, look for any MEMs in either
1691 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1692 volatile flags from REF into each of the MEMs found. If REF is not
1693 a MEM, don't do anything. */
1695 void
1696 alpha_set_memflags (rtx seq, rtx ref)
1698 rtx_insn *insn;
1700 if (!MEM_P (ref))
1701 return;
1703 /* This is only called from alpha.md, after having had something
1704 generated from one of the insn patterns. So if everything is
1705 zero, the pattern is already up-to-date. */
1706 if (!MEM_VOLATILE_P (ref)
1707 && !MEM_NOTRAP_P (ref)
1708 && !MEM_READONLY_P (ref))
1709 return;
1711 subrtx_var_iterator::array_type array;
1712 for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
1713 if (INSN_P (insn))
1714 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1716 rtx x = *iter;
1717 if (MEM_P (x))
1719 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
1720 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
1721 MEM_READONLY_P (x) = MEM_READONLY_P (ref);
1722 /* Sadly, we cannot use alias sets because the extra
1723 aliasing produced by the AND interferes. Given that
1724 two-byte quantities are the only thing we would be
1725 able to differentiate anyway, there does not seem to
1726 be any point in convoluting the early out of the
1727 alias check. */
1728 iter.skip_subrtxes ();
1731 else
1732 gcc_unreachable ();
1735 static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
1736 int, bool);
1738 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1739 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1740 and return pc_rtx if successful. */
1742 static rtx
1743 alpha_emit_set_const_1 (rtx target, machine_mode mode,
1744 HOST_WIDE_INT c, int n, bool no_output)
1746 HOST_WIDE_INT new_const;
1747 int i, bits;
1748 /* Use a pseudo if highly optimizing and still generating RTL. */
1749 rtx subtarget
1750 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1751 rtx temp, insn;
1753 /* If this is a sign-extended 32-bit constant, we can do this in at most
1754 three insns, so do it if we have enough insns left. We always have
1755 a sign-extended 32-bit constant when compiling on a narrow machine. */
1757 if (HOST_BITS_PER_WIDE_INT != 64
1758 || c >> 31 == -1 || c >> 31 == 0)
1760 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1761 HOST_WIDE_INT tmp1 = c - low;
1762 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1763 HOST_WIDE_INT extra = 0;
1765 /* If HIGH will be interpreted as negative but the constant is
1766 positive, we must adjust it to do two ldha insns. */
1768 if ((high & 0x8000) != 0 && c >= 0)
1770 extra = 0x4000;
1771 tmp1 -= 0x40000000;
1772 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1775 if (c == low || (low == 0 && extra == 0))
1777 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1778 but that meant that we can't handle INT_MIN on 32-bit machines
1779 (like NT/Alpha), because we recurse indefinitely through
1780 emit_move_insn to gen_movdi. So instead, since we know exactly
1781 what we want, create it explicitly. */
1783 if (no_output)
1784 return pc_rtx;
1785 if (target == NULL)
1786 target = gen_reg_rtx (mode);
1787 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1788 return target;
1790 else if (n >= 2 + (extra != 0))
1792 if (no_output)
1793 return pc_rtx;
1794 if (!can_create_pseudo_p ())
1796 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1797 temp = target;
1799 else
1800 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1801 subtarget, mode);
1803 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1804 This means that if we go through expand_binop, we'll try to
1805 generate extensions, etc, which will require new pseudos, which
1806 will fail during some split phases. The SImode add patterns
1807 still exist, but are not named. So build the insns by hand. */
1809 if (extra != 0)
1811 if (! subtarget)
1812 subtarget = gen_reg_rtx (mode);
1813 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1814 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1815 emit_insn (insn);
1816 temp = subtarget;
1819 if (target == NULL)
1820 target = gen_reg_rtx (mode);
1821 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1822 insn = gen_rtx_SET (VOIDmode, target, insn);
1823 emit_insn (insn);
1824 return target;
1828 /* If we couldn't do it that way, try some other methods. But if we have
1829 no instructions left, don't bother. Likewise, if this is SImode and
1830 we can't make pseudos, we can't do anything since the expand_binop
1831 and expand_unop calls will widen and try to make pseudos. */
1833 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1834 return 0;
1836 /* Next, see if we can load a related constant and then shift and possibly
1837 negate it to get the constant we want. Try this once each increasing
1838 numbers of insns. */
1840 for (i = 1; i < n; i++)
1842 /* First, see if minus some low bits, we've an easy load of
1843 high bits. */
1845 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1846 if (new_const != 0)
1848 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1849 if (temp)
1851 if (no_output)
1852 return temp;
1853 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1854 target, 0, OPTAB_WIDEN);
1858 /* Next try complementing. */
1859 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1860 if (temp)
1862 if (no_output)
1863 return temp;
1864 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1867 /* Next try to form a constant and do a left shift. We can do this
1868 if some low-order bits are zero; the exact_log2 call below tells
1869 us that information. The bits we are shifting out could be any
1870 value, but here we'll just try the 0- and sign-extended forms of
1871 the constant. To try to increase the chance of having the same
1872 constant in more than one insn, start at the highest number of
1873 bits to shift, but try all possibilities in case a ZAPNOT will
1874 be useful. */
1876 bits = exact_log2 (c & -c);
1877 if (bits > 0)
1878 for (; bits > 0; bits--)
1880 new_const = c >> bits;
1881 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1882 if (!temp && c < 0)
1884 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1885 temp = alpha_emit_set_const (subtarget, mode, new_const,
1886 i, no_output);
1888 if (temp)
1890 if (no_output)
1891 return temp;
1892 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1893 target, 0, OPTAB_WIDEN);
1897 /* Now try high-order zero bits. Here we try the shifted-in bits as
1898 all zero and all ones. Be careful to avoid shifting outside the
1899 mode and to avoid shifting outside the host wide int size. */
1900 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1901 confuse the recursive call and set all of the high 32 bits. */
1903 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1904 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1905 if (bits > 0)
1906 for (; bits > 0; bits--)
1908 new_const = c << bits;
1909 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1910 if (!temp)
1912 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1913 temp = alpha_emit_set_const (subtarget, mode, new_const,
1914 i, no_output);
1916 if (temp)
1918 if (no_output)
1919 return temp;
1920 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1921 target, 1, OPTAB_WIDEN);
1925 /* Now try high-order 1 bits. We get that with a sign-extension.
1926 But one bit isn't enough here. Be careful to avoid shifting outside
1927 the mode and to avoid shifting outside the host wide int size. */
1929 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1930 - floor_log2 (~ c) - 2);
1931 if (bits > 0)
1932 for (; bits > 0; bits--)
1934 new_const = c << bits;
1935 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1936 if (!temp)
1938 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1939 temp = alpha_emit_set_const (subtarget, mode, new_const,
1940 i, no_output);
1942 if (temp)
1944 if (no_output)
1945 return temp;
1946 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1947 target, 0, OPTAB_WIDEN);
1952 #if HOST_BITS_PER_WIDE_INT == 64
1953 /* Finally, see if can load a value into the target that is the same as the
1954 constant except that all bytes that are 0 are changed to be 0xff. If we
1955 can, then we can do a ZAPNOT to obtain the desired constant. */
1957 new_const = c;
1958 for (i = 0; i < 64; i += 8)
1959 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1960 new_const |= (HOST_WIDE_INT) 0xff << i;
1962 /* We are only called for SImode and DImode. If this is SImode, ensure that
1963 we are sign extended to a full word. */
1965 if (mode == SImode)
1966 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1968 if (new_const != c)
1970 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1971 if (temp)
1973 if (no_output)
1974 return temp;
1975 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1976 target, 0, OPTAB_WIDEN);
1979 #endif
1981 return 0;
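/* Illustrative sketch (not compiled): the lda/ldah decomposition used for a
   sign-extended 32-bit constant above, including the extra 0x4000 ldah that
   fires when HIGH would be read as negative although C is positive.  */
#if 0
static void
set_const_split_sketch (long long c, long long *plow,
                        long long *phigh, long long *pextra)
{
  long long low = ((c & 0xffff) ^ 0x8000) - 0x8000;
  long long tmp1 = c - low;
  long long high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
  long long extra = 0;

  if ((high & 0x8000) != 0 && c >= 0)
    {
      extra = 0x4000;
      tmp1 -= 0x40000000;
      high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
    }
  *plow = low, *phigh = high, *pextra = extra;
}

/* For c = 0x7fff8000:  low = -0x8000, high = 0x4000, extra = 0x4000,
   corresponding to the three-insn sequence
       ldah $t, 0x4000($31)
       ldah $t, 0x4000($t)
       lda  $t, -32768($t)         ; $t = 0x7fff8000                        */
#endif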
1984 /* Try to output insns to set TARGET equal to the constant C if it can be
1985 done in N or fewer insns.  Do all computations in MODE.  Returns the place
1986 where the output has been placed if it can be done and the insns have been
1987 emitted.  If it would take more than N insns, zero is returned and no
1988 insns are emitted.  */
1990 static rtx
1991 alpha_emit_set_const (rtx target, machine_mode mode,
1992 HOST_WIDE_INT c, int n, bool no_output)
1994 machine_mode orig_mode = mode;
1995 rtx orig_target = target;
1996 rtx result = 0;
1997 int i;
1999 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
2000 can't load this constant in one insn, do this in DImode.  */
2001 if (!can_create_pseudo_p () && mode == SImode
2002 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
2004 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
2005 if (result)
2006 return result;
2008 target = no_output ? NULL : gen_lowpart (DImode, target);
2009 mode = DImode;
2011 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
2013 target = no_output ? NULL : gen_lowpart (DImode, target);
2014 mode = DImode;
2017 /* Try 1 insn, then 2, then up to N. */
2018 for (i = 1; i <= n; i++)
2020 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
2021 if (result)
2023 rtx_insn *insn;
2024 rtx set;
2026 if (no_output)
2027 return result;
2029 insn = get_last_insn ();
2030 set = single_set (insn);
2031 if (! CONSTANT_P (SET_SRC (set)))
2032 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2033 break;
2037 /* Allow for the case where we changed the mode of TARGET. */
2038 if (result)
2040 if (result == target)
2041 result = orig_target;
2042 else if (mode != orig_mode)
2043 result = gen_lowpart (orig_mode, result);
2046 return result;
2049 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2050 fall back to a straightforward decomposition.  We do this to avoid
2051 exponential run times encountered when looking for longer sequences
2052 with alpha_emit_set_const. */
2054 static rtx
2055 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2057 HOST_WIDE_INT d1, d2, d3, d4;
2059 /* Decompose the entire word */
2060 #if HOST_BITS_PER_WIDE_INT >= 64
2061 gcc_assert (c2 == -(c1 < 0));
2062 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2063 c1 -= d1;
2064 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2065 c1 = (c1 - d2) >> 32;
2066 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2067 c1 -= d3;
2068 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2069 gcc_assert (c1 == d4);
2070 #else
2071 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2072 c1 -= d1;
2073 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2074 gcc_assert (c1 == d2);
2075 c2 += (d2 < 0);
2076 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2077 c2 -= d3;
2078 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2079 gcc_assert (c2 == d4);
2080 #endif
2082 /* Construct the high word */
2083 if (d4)
2085 emit_move_insn (target, GEN_INT (d4));
2086 if (d3)
2087 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2089 else
2090 emit_move_insn (target, GEN_INT (d3));
2092 /* Shift it into place */
2093 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2095 /* Add in the low bits. */
2096 if (d2)
2097 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2098 if (d1)
2099 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2101 return target;
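/* The following is a stand-alone, deliberately disabled sketch of the same
   decomposition on host integers, added purely for exposition; it is not
   part of the port and assumes a 64-bit HOST_WIDE_INT.  */
#if 0
static HOST_WIDE_INT
alpha_long_const_decompose_example (HOST_WIDE_INT c)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Peel off sign-extended 16- and 32-bit pieces, exactly as above.  */
  d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d1;
  d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c = (c - d2) >> 32;
  d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d3;
  d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;

  /* Rebuild the value the way the emitted insns do.  For example,
     0x123456789abcdef0 decomposes into d1 = -0x2110, d2 = -0x65430000,
     d3 = 0x5679, d4 = 0x12340000.  */
  return (HOST_WIDE_INT)
    (((unsigned HOST_WIDE_INT) (d4 + d3) << 32) + d2 + d1);
}
#endif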
2104 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2105 the low 64 bits. */
2107 static void
2108 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2110 HOST_WIDE_INT i0, i1;
2112 if (GET_CODE (x) == CONST_VECTOR)
2113 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2116 if (CONST_INT_P (x))
2118 i0 = INTVAL (x);
2119 i1 = -(i0 < 0);
2121 else if (HOST_BITS_PER_WIDE_INT >= 64)
2123 i0 = CONST_DOUBLE_LOW (x);
2124 i1 = -(i0 < 0);
2126 else
2128 i0 = CONST_DOUBLE_LOW (x);
2129 i1 = CONST_DOUBLE_HIGH (x);
2132 *p0 = i0;
2133 *p1 = i1;
2136 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
2137 we are willing to load the value into a register via a move pattern.
2138 Normally this is all symbolic constants, integral constants that
2139 take three or fewer instructions, and floating-point zero. */
2141 bool
2142 alpha_legitimate_constant_p (machine_mode mode, rtx x)
2144 HOST_WIDE_INT i0, i1;
2146 switch (GET_CODE (x))
2148 case LABEL_REF:
2149 case HIGH:
2150 return true;
2152 case CONST:
2153 if (GET_CODE (XEXP (x, 0)) == PLUS
2154 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2155 x = XEXP (XEXP (x, 0), 0);
2156 else
2157 return true;
2159 if (GET_CODE (x) != SYMBOL_REF)
2160 return true;
2162 /* FALLTHRU */
2164 case SYMBOL_REF:
2165 /* TLS symbols are never valid. */
2166 return SYMBOL_REF_TLS_MODEL (x) == 0;
2168 case CONST_DOUBLE:
2169 if (x == CONST0_RTX (mode))
2170 return true;
2171 if (FLOAT_MODE_P (mode))
2172 return false;
2173 goto do_integer;
2175 case CONST_VECTOR:
2176 if (x == CONST0_RTX (mode))
2177 return true;
2178 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2179 return false;
2180 if (GET_MODE_SIZE (mode) != 8)
2181 return false;
2182 goto do_integer;
2184 case CONST_INT:
2185 do_integer:
2186 if (TARGET_BUILD_CONSTANTS)
2187 return true;
2188 alpha_extract_integer (x, &i0, &i1);
2189 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2190 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2191 return false;
2193 default:
2194 return false;
2198 /* Operand 1 is known to be a constant, and should require more than one
2199 instruction to load. Emit that multi-part load. */
2201 bool
2202 alpha_split_const_mov (machine_mode mode, rtx *operands)
2204 HOST_WIDE_INT i0, i1;
2205 rtx temp = NULL_RTX;
2207 alpha_extract_integer (operands[1], &i0, &i1);
2209 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2210 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2212 if (!temp && TARGET_BUILD_CONSTANTS)
2213 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2215 if (temp)
2217 if (!rtx_equal_p (operands[0], temp))
2218 emit_move_insn (operands[0], temp);
2219 return true;
2222 return false;
2225 /* Expand a move instruction; return true if all work is done.
2226 We don't handle non-bwx subword loads here. */
2228 bool
2229 alpha_expand_mov (machine_mode mode, rtx *operands)
2231 rtx tmp;
2233 /* If the output is not a register, the input must be. */
2234 if (MEM_P (operands[0])
2235 && ! reg_or_0_operand (operands[1], mode))
2236 operands[1] = force_reg (mode, operands[1]);
2238 /* Allow legitimize_address to perform some simplifications. */
2239 if (mode == Pmode && symbolic_operand (operands[1], mode))
2241 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2242 if (tmp)
2244 if (tmp == operands[0])
2245 return true;
2246 operands[1] = tmp;
2247 return false;
2251 /* Early out for non-constants and valid constants. */
2252 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2253 return false;
2255 /* Split large integers. */
2256 if (CONST_INT_P (operands[1])
2257 || GET_CODE (operands[1]) == CONST_DOUBLE
2258 || GET_CODE (operands[1]) == CONST_VECTOR)
2260 if (alpha_split_const_mov (mode, operands))
2261 return true;
2264 /* Otherwise we've nothing left but to drop the thing to memory. */
2265 tmp = force_const_mem (mode, operands[1]);
2267 if (tmp == NULL_RTX)
2268 return false;
2270 if (reload_in_progress)
2272 emit_move_insn (operands[0], XEXP (tmp, 0));
2273 operands[1] = replace_equiv_address (tmp, operands[0]);
2275 else
2276 operands[1] = validize_mem (tmp);
2277 return false;
2280 /* Expand a non-bwx QImode or HImode move instruction;
2281 return true if all work is done. */
2283 bool
2284 alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
2286 rtx seq;
2288 /* If the output is not a register, the input must be. */
2289 if (MEM_P (operands[0]))
2290 operands[1] = force_reg (mode, operands[1]);
2292 /* Handle four memory cases, unaligned and aligned for either the input
2293 or the output. The only case where we can be called during reload is
2294 for aligned loads; all other cases require temporaries. */
2296 if (any_memory_operand (operands[1], mode))
2298 if (aligned_memory_operand (operands[1], mode))
2300 if (reload_in_progress)
2302 if (mode == QImode)
2303 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2304 else
2305 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2306 emit_insn (seq);
2308 else
2310 rtx aligned_mem, bitnum;
2311 rtx scratch = gen_reg_rtx (SImode);
2312 rtx subtarget;
2313 bool copyout;
2315 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2317 subtarget = operands[0];
2318 if (REG_P (subtarget))
2319 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2320 else
2321 subtarget = gen_reg_rtx (DImode), copyout = true;
2323 if (mode == QImode)
2324 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2325 bitnum, scratch);
2326 else
2327 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2328 bitnum, scratch);
2329 emit_insn (seq);
2331 if (copyout)
2332 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2335 else
2337 /* Don't pass these as parameters since that makes the generated
2338 code depend on parameter evaluation order which will cause
2339 bootstrap failures. */
2341 rtx temp1, temp2, subtarget, ua;
2342 bool copyout;
2344 temp1 = gen_reg_rtx (DImode);
2345 temp2 = gen_reg_rtx (DImode);
2347 subtarget = operands[0];
2348 if (REG_P (subtarget))
2349 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2350 else
2351 subtarget = gen_reg_rtx (DImode), copyout = true;
2353 ua = get_unaligned_address (operands[1]);
2354 if (mode == QImode)
2355 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2356 else
2357 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2359 alpha_set_memflags (seq, operands[1]);
2360 emit_insn (seq);
2362 if (copyout)
2363 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2365 return true;
2368 if (any_memory_operand (operands[0], mode))
2370 if (aligned_memory_operand (operands[0], mode))
2372 rtx aligned_mem, bitnum;
2373 rtx temp1 = gen_reg_rtx (SImode);
2374 rtx temp2 = gen_reg_rtx (SImode);
2376 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2378 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2379 temp1, temp2));
2381 else
2383 rtx temp1 = gen_reg_rtx (DImode);
2384 rtx temp2 = gen_reg_rtx (DImode);
2385 rtx temp3 = gen_reg_rtx (DImode);
2386 rtx ua = get_unaligned_address (operands[0]);
2388 if (mode == QImode)
2389 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2390 else
2391 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2393 alpha_set_memflags (seq, operands[0]);
2394 emit_insn (seq);
2396 return true;
2399 return false;
2402 /* Implement the movmisalign patterns. One of the operands is a memory
2403 that is not naturally aligned. Emit instructions to load it. */
2405 void
2406 alpha_expand_movmisalign (machine_mode mode, rtx *operands)
2408 /* Honor misaligned loads, for those we promised to do so. */
2409 if (MEM_P (operands[1]))
2411 rtx tmp;
2413 if (register_operand (operands[0], mode))
2414 tmp = operands[0];
2415 else
2416 tmp = gen_reg_rtx (mode);
2418 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2419 if (tmp != operands[0])
2420 emit_move_insn (operands[0], tmp);
2422 else if (MEM_P (operands[0]))
2424 if (!reg_or_0_operand (operands[1], mode))
2425 operands[1] = force_reg (mode, operands[1]);
2426 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2428 else
2429 gcc_unreachable ();
2432 /* Generate an unsigned DImode to FP conversion. This is the same code
2433 optabs would emit if we didn't have TFmode patterns.
2435 For SFmode, this is the only construction I've found that can pass
2436 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2437 intermediates will work, because you'll get intermediate rounding
2438 that ruins the end result. Some of this could be fixed by turning
2439 on round-to-positive-infinity, but that requires diddling the fpsr,
2440 which kills performance. I tried turning this around and converting
2441 to a negative number, so that I could turn on /m, but either I did
2442 it wrong or there's something else, because I wound up with the exact
2443 same single-bit error. There is a branch-less form of this same code:
2445 srl $16,1,$1
2446 and $16,1,$2
2447 cmplt $16,0,$3
2448 or $1,$2,$2
2449 cmovge $16,$16,$2
2450 itoft $3,$f10
2451 itoft $2,$f11
2452 cvtqs $f11,$f11
2453 adds $f11,$f11,$f0
2454 fcmoveq $f10,$f11,$f0
2456 I'm not using it because it's the same number of instructions as
2457 this branch-full form, and it has more serialized long latency
2458 instructions on the critical path.
2460 For DFmode, we can avoid rounding errors by breaking up the word
2461 into two pieces, converting them separately, and adding them back:
2463 LC0: .long 0,0x5f800000
2465 itoft $16,$f11
2466 lda $2,LC0
2467 cmplt $16,0,$1
2468 cpyse $f11,$f31,$f10
2469 cpyse $f31,$f11,$f11
2470 s4addq $1,$2,$1
2471 lds $f12,0($1)
2472 cvtqt $f10,$f10
2473 cvtqt $f11,$f11
2474 addt $f12,$f10,$f0
2475 addt $f0,$f11,$f0
2477 This doesn't seem to be a clear-cut win over the optabs form.
2478 It probably all depends on the distribution of numbers being
2479 converted -- in the optabs form, all but high-bit-set has a
2480 much lower minimum execution time. */
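/* At the C level, the branch-full sequence emitted below behaves roughly
   like the following illustrative sketch (an addition here, assuming a
   64-bit long and round-to-nearest; it is not part of the port):

       double floatuns (unsigned long x)
       {
         if ((long) x >= 0)
           return (double) (long) x;
         long half = (x >> 1) | (x & 1);   // fold the low bit in: round to odd
         return 2.0 * (double) half;       // doubling is exact: one rounding total
       }

   Folding the low bit into the halved value is what avoids the double
   rounding described above.  */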
2482 void
2483 alpha_emit_floatuns (rtx operands[2])
2485 rtx neglab, donelab, i0, i1, f0, in, out;
2486 machine_mode mode;
2488 out = operands[0];
2489 in = force_reg (DImode, operands[1]);
2490 mode = GET_MODE (out);
2491 neglab = gen_label_rtx ();
2492 donelab = gen_label_rtx ();
2493 i0 = gen_reg_rtx (DImode);
2494 i1 = gen_reg_rtx (DImode);
2495 f0 = gen_reg_rtx (mode);
2497 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2499 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2500 emit_jump_insn (gen_jump (donelab));
2501 emit_barrier ();
2503 emit_label (neglab);
2505 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2506 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2507 emit_insn (gen_iordi3 (i0, i0, i1));
2508 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2509 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2511 emit_label (donelab);
2514 /* Generate the comparison for a conditional branch. */
2516 void
2517 alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
2519 enum rtx_code cmp_code, branch_code;
2520 machine_mode branch_mode = VOIDmode;
2521 enum rtx_code code = GET_CODE (operands[0]);
2522 rtx op0 = operands[1], op1 = operands[2];
2523 rtx tem;
2525 if (cmp_mode == TFmode)
2527 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2528 op1 = const0_rtx;
2529 cmp_mode = DImode;
2532 /* The general case: fold the comparison code to the types of compares
2533 that we have, choosing the branch as necessary. */
2534 switch (code)
2536 case EQ: case LE: case LT: case LEU: case LTU:
2537 case UNORDERED:
2538 /* We have these compares. */
2539 cmp_code = code, branch_code = NE;
2540 break;
2542 case NE:
2543 case ORDERED:
2544 /* These must be reversed. */
2545 cmp_code = reverse_condition (code), branch_code = EQ;
2546 break;
2548 case GE: case GT: case GEU: case GTU:
2549 /* For FP, we swap them, for INT, we reverse them. */
2550 if (cmp_mode == DFmode)
2552 cmp_code = swap_condition (code);
2553 branch_code = NE;
2554 std::swap (op0, op1);
2556 else
2558 cmp_code = reverse_condition (code);
2559 branch_code = EQ;
2561 break;
2563 default:
2564 gcc_unreachable ();
2567 if (cmp_mode == DFmode)
2569 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2571 /* When we are not as concerned about non-finite values, and we
2572 are comparing against zero, we can branch directly. */
2573 if (op1 == CONST0_RTX (DFmode))
2574 cmp_code = UNKNOWN, branch_code = code;
2575 else if (op0 == CONST0_RTX (DFmode))
2577 /* Undo the swap we probably did just above. */
2578 std::swap (op0, op1);
2579 branch_code = swap_condition (cmp_code);
2580 cmp_code = UNKNOWN;
2583 else
2585 /* ??? We mark the branch mode to be CCmode to prevent the
2586 compare and branch from being combined, since the compare
2587 insn follows IEEE rules that the branch does not. */
2588 branch_mode = CCmode;
2591 else
2593 /* The following optimizations are only for signed compares. */
2594 if (code != LEU && code != LTU && code != GEU && code != GTU)
2596 /* Whee. Compare and branch against 0 directly. */
2597 if (op1 == const0_rtx)
2598 cmp_code = UNKNOWN, branch_code = code;
2600 /* If the constant doesn't fit into an immediate, but can
2601 be generated by lda/ldah, we adjust the argument and
2602 compare against zero, so we can use beq/bne directly. */
2603 /* ??? Don't do this when comparing against symbols, otherwise
2604 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2605 be declared false out of hand (at least for non-weak). */
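	  /* For instance (illustrative): for "x == 0x12340000" the constant
	     is not an 8-bit immediate, but -0x12340000 is a single ldah, so
	     we emit t = x + (-0x12340000) and branch on t == 0.  */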
2606 else if (CONST_INT_P (op1)
2607 && (code == EQ || code == NE)
2608 && !(symbolic_operand (op0, VOIDmode)
2609 || (REG_P (op0) && REG_POINTER (op0))))
2611 rtx n_op1 = GEN_INT (-INTVAL (op1));
2613 if (! satisfies_constraint_I (op1)
2614 && (satisfies_constraint_K (n_op1)
2615 || satisfies_constraint_L (n_op1)))
2616 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2620 if (!reg_or_0_operand (op0, DImode))
2621 op0 = force_reg (DImode, op0);
2622 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2623 op1 = force_reg (DImode, op1);
2626 /* Emit an initial compare instruction, if necessary. */
2627 tem = op0;
2628 if (cmp_code != UNKNOWN)
2630 tem = gen_reg_rtx (cmp_mode);
2631 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2634 /* Emit the branch instruction. */
2635 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2636 gen_rtx_IF_THEN_ELSE (VOIDmode,
2637 gen_rtx_fmt_ee (branch_code,
2638 branch_mode, tem,
2639 CONST0_RTX (cmp_mode)),
2640 gen_rtx_LABEL_REF (VOIDmode,
2641 operands[3]),
2642 pc_rtx));
2643 emit_jump_insn (tem);
2646 /* Certain simplifications can be done to make invalid setcc operations
2647 valid.  Return true if successful, false if we can't work.  */
2649 bool
2650 alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
2652 enum rtx_code cmp_code;
2653 enum rtx_code code = GET_CODE (operands[1]);
2654 rtx op0 = operands[2], op1 = operands[3];
2655 rtx tmp;
2657 if (cmp_mode == TFmode)
2659 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2660 op1 = const0_rtx;
2661 cmp_mode = DImode;
2664 if (cmp_mode == DFmode && !TARGET_FIX)
2665 return 0;
2667 /* The general case: fold the comparison code to the types of compares
2668 that we have, choosing the branch as necessary. */
2670 cmp_code = UNKNOWN;
2671 switch (code)
2673 case EQ: case LE: case LT: case LEU: case LTU:
2674 case UNORDERED:
2675 /* We have these compares. */
2676 if (cmp_mode == DFmode)
2677 cmp_code = code, code = NE;
2678 break;
2680 case NE:
2681 if (cmp_mode == DImode && op1 == const0_rtx)
2682 break;
2683 /* FALLTHRU */
2685 case ORDERED:
2686 cmp_code = reverse_condition (code);
2687 code = EQ;
2688 break;
2690 case GE: case GT: case GEU: case GTU:
2691 /* These normally need swapping, but for integer zero we have
2692 special patterns that recognize swapped operands. */
2693 if (cmp_mode == DImode && op1 == const0_rtx)
2694 break;
2695 code = swap_condition (code);
2696 if (cmp_mode == DFmode)
2697 cmp_code = code, code = NE;
2698 std::swap (op0, op1);
2699 break;
2701 default:
2702 gcc_unreachable ();
2705 if (cmp_mode == DImode)
2707 if (!register_operand (op0, DImode))
2708 op0 = force_reg (DImode, op0);
2709 if (!reg_or_8bit_operand (op1, DImode))
2710 op1 = force_reg (DImode, op1);
2713 /* Emit an initial compare instruction, if necessary. */
2714 if (cmp_code != UNKNOWN)
2716 tmp = gen_reg_rtx (cmp_mode);
2717 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2718 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2720 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2721 op1 = const0_rtx;
2724 /* Emit the setcc instruction. */
2725 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2726 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2727 return true;
2731 /* Rewrite a comparison against zero CMP of the form
2732 (CODE (cc0) (const_int 0)) so it can be written validly in
2733 a conditional move (if_then_else CMP ...).
2734 If both of the operands that set cc0 are nonzero we must emit
2735 an insn to perform the compare (it can't be done within
2736 the conditional move). */
2738 rtx
2739 alpha_emit_conditional_move (rtx cmp, machine_mode mode)
2741 enum rtx_code code = GET_CODE (cmp);
2742 enum rtx_code cmov_code = NE;
2743 rtx op0 = XEXP (cmp, 0);
2744 rtx op1 = XEXP (cmp, 1);
2745 machine_mode cmp_mode
2746 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2747 machine_mode cmov_mode = VOIDmode;
2748 int local_fast_math = flag_unsafe_math_optimizations;
2749 rtx tem;
2751 if (cmp_mode == TFmode)
2753 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2754 op1 = const0_rtx;
2755 cmp_mode = DImode;
2758 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2760 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2762 enum rtx_code cmp_code;
2764 if (! TARGET_FIX)
2765 return 0;
2767 /* If we have fp<->int register move instructions, do a cmov by
2768 performing the comparison in fp registers, and move the
2769 zero/nonzero value to integer registers, where we can then
2770 use a normal cmov, or vice-versa. */
2772 switch (code)
2774 case EQ: case LE: case LT: case LEU: case LTU:
2775 case UNORDERED:
2776 /* We have these compares. */
2777 cmp_code = code, code = NE;
2778 break;
2780 case NE:
2781 case ORDERED:
2782 /* These must be reversed. */
2783 cmp_code = reverse_condition (code), code = EQ;
2784 break;
2786 case GE: case GT: case GEU: case GTU:
2787 /* These normally need swapping, but for integer zero we have
2788 special patterns that recognize swapped operands. */
2789 if (cmp_mode == DImode && op1 == const0_rtx)
2790 cmp_code = code, code = NE;
2791 else
2793 cmp_code = swap_condition (code);
2794 code = NE;
2795 std::swap (op0, op1);
2797 break;
2799 default:
2800 gcc_unreachable ();
2803 if (cmp_mode == DImode)
2805 if (!reg_or_0_operand (op0, DImode))
2806 op0 = force_reg (DImode, op0);
2807 if (!reg_or_8bit_operand (op1, DImode))
2808 op1 = force_reg (DImode, op1);
2811 tem = gen_reg_rtx (cmp_mode);
2812 emit_insn (gen_rtx_SET (VOIDmode, tem,
2813 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2814 op0, op1)));
2816 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2817 op0 = gen_lowpart (cmp_mode, tem);
2818 op1 = CONST0_RTX (cmp_mode);
2819 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2820 local_fast_math = 1;
2823 if (cmp_mode == DImode)
2825 if (!reg_or_0_operand (op0, DImode))
2826 op0 = force_reg (DImode, op0);
2827 if (!reg_or_8bit_operand (op1, DImode))
2828 op1 = force_reg (DImode, op1);
2831 /* We may be able to use a conditional move directly.
2832 This avoids emitting spurious compares. */
2833 if (signed_comparison_operator (cmp, VOIDmode)
2834 && (cmp_mode == DImode || local_fast_math)
2835 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2836 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2838 /* We can't put the comparison inside the conditional move;
2839 emit a compare instruction and put that inside the
2840 conditional move. Make sure we emit only comparisons we have;
2841 swap or reverse as necessary. */
2843 if (!can_create_pseudo_p ())
2844 return NULL_RTX;
2846 switch (code)
2848 case EQ: case LE: case LT: case LEU: case LTU:
2849 case UNORDERED:
2850 /* We have these compares: */
2851 break;
2853 case NE:
2854 case ORDERED:
2855 /* These must be reversed. */
2856 code = reverse_condition (code);
2857 cmov_code = EQ;
2858 break;
2860 case GE: case GT: case GEU: case GTU:
2861 /* These normally need swapping, but for integer zero we have
2862 special patterns that recognize swapped operands. */
2863 if (cmp_mode == DImode && op1 == const0_rtx)
2864 break;
2865 code = swap_condition (code);
2866 std::swap (op0, op1);
2867 break;
2869 default:
2870 gcc_unreachable ();
2873 if (cmp_mode == DImode)
2875 if (!reg_or_0_operand (op0, DImode))
2876 op0 = force_reg (DImode, op0);
2877 if (!reg_or_8bit_operand (op1, DImode))
2878 op1 = force_reg (DImode, op1);
2881 /* ??? We mark the branch mode to be CCmode to prevent the compare
2882 and cmov from being combined, since the compare insn follows IEEE
2883 rules that the cmov does not. */
2884 if (cmp_mode == DFmode && !local_fast_math)
2885 cmov_mode = CCmode;
2887 tem = gen_reg_rtx (cmp_mode);
2888 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2889 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2892 /* Simplify a conditional move of two constants into a setcc with
2893 arithmetic. This is done with a splitter since combine would
2894 just undo the work if done during code generation. It also catches
2895 cases we wouldn't have before cse. */
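/* For example (illustrative): "x < y ? 8 : 0" becomes t = (x < y); t << 3,
   "x < y ? 5 : 4" becomes t = (x < y); t + 4, and "x < y ? -1 : 0" becomes
   the negation of the setcc result -- the cases handled below.  */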
2897 int
2898 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2899 rtx t_rtx, rtx f_rtx)
2901 HOST_WIDE_INT t, f, diff;
2902 machine_mode mode;
2903 rtx target, subtarget, tmp;
2905 mode = GET_MODE (dest);
2906 t = INTVAL (t_rtx);
2907 f = INTVAL (f_rtx);
2908 diff = t - f;
2910 if (((code == NE || code == EQ) && diff < 0)
2911 || (code == GE || code == GT))
2913 code = reverse_condition (code);
2914 diff = t, t = f, f = diff;
2915 diff = t - f;
2918 subtarget = target = dest;
2919 if (mode != DImode)
2921 target = gen_lowpart (DImode, dest);
2922 if (can_create_pseudo_p ())
2923 subtarget = gen_reg_rtx (DImode);
2924 else
2925 subtarget = target;
2927 /* Below, we must be careful to use copy_rtx on target and subtarget
2928 in intermediate insns, as they may be a subreg rtx, which may not
2929 be shared. */
2931 if (f == 0 && exact_log2 (diff) > 0
2932 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2933 viable over a longer latency cmove. On EV5, the E0 slot is a
2934 scarce resource, and on EV4 shift has the same latency as a cmove. */
2935 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2937 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2938 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2940 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2941 GEN_INT (exact_log2 (t)));
2942 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2944 else if (f == 0 && t == -1)
2946 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2947 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2949 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2951 else if (diff == 1 || diff == 4 || diff == 8)
2953 rtx add_op;
2955 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2956 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2958 if (diff == 1)
2959 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2960 else
2962 add_op = GEN_INT (f);
2963 if (sext_add_operand (add_op, mode))
2965 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2966 GEN_INT (diff));
2967 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2968 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2970 else
2971 return 0;
2974 else
2975 return 0;
2977 return 1;
2980 /* Look up the X_floating library function name for the
2981 given operation. */
2983 struct GTY(()) xfloating_op
2985 const enum rtx_code code;
2986 const char *const GTY((skip)) osf_func;
2987 const char *const GTY((skip)) vms_func;
2988 rtx libcall;
2991 static GTY(()) struct xfloating_op xfloating_ops[] =
2993 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2994 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2995 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2996 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2997 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2998 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2999 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
3000 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
3001 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
3002 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
3003 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
3004 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
3005 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
3006 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
3007 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
3010 static GTY(()) struct xfloating_op vax_cvt_ops[] =
3012 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
3013 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
3016 static rtx
3017 alpha_lookup_xfloating_lib_func (enum rtx_code code)
3019 struct xfloating_op *ops = xfloating_ops;
3020 long n = ARRAY_SIZE (xfloating_ops);
3021 long i;
3023 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
3025 /* How irritating. Nothing to key off for the main table. */
3026 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3028 ops = vax_cvt_ops;
3029 n = ARRAY_SIZE (vax_cvt_ops);
3032 for (i = 0; i < n; ++i, ++ops)
3033 if (ops->code == code)
3035 rtx func = ops->libcall;
3036 if (!func)
3038 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3039 ? ops->vms_func : ops->osf_func);
3040 ops->libcall = func;
3042 return func;
3045 gcc_unreachable ();
3048 /* Most X_floating operations take the rounding mode as an argument.
3049 Compute that here. */
3051 static int
3052 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3053 enum alpha_fp_rounding_mode round)
3055 int mode;
3057 switch (round)
3059 case ALPHA_FPRM_NORM:
3060 mode = 2;
3061 break;
3062 case ALPHA_FPRM_MINF:
3063 mode = 1;
3064 break;
3065 case ALPHA_FPRM_CHOP:
3066 mode = 0;
3067 break;
3068 case ALPHA_FPRM_DYN:
3069 mode = 4;
3070 break;
3071 default:
3072 gcc_unreachable ();
3074 /* XXX For reference, round to +inf is mode = 3. */
3077 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3078 mode |= 0x10000;
3080 return mode;
3083 /* Emit an X_floating library function call.
3085 Note that these functions do not follow normal calling conventions:
3086 TFmode arguments are passed in two integer registers (as opposed to
3087 indirect); TFmode return values appear in R16+R17.
3089 FUNC is the function to call.
3090 TARGET is where the output belongs.
3091 OPERANDS are the inputs.
3092 NOPERANDS is the count of inputs.
3093 EQUIV is the expression equivalent for the function.
3096 static void
3097 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3098 int noperands, rtx equiv)
3100 rtx usage = NULL_RTX, tmp, reg;
3101 int regno = 16, i;
3103 start_sequence ();
3105 for (i = 0; i < noperands; ++i)
3107 switch (GET_MODE (operands[i]))
3109 case TFmode:
3110 reg = gen_rtx_REG (TFmode, regno);
3111 regno += 2;
3112 break;
3114 case DFmode:
3115 reg = gen_rtx_REG (DFmode, regno + 32);
3116 regno += 1;
3117 break;
3119 case VOIDmode:
3120 gcc_assert (CONST_INT_P (operands[i]));
3121 /* FALLTHRU */
3122 case DImode:
3123 reg = gen_rtx_REG (DImode, regno);
3124 regno += 1;
3125 break;
3127 default:
3128 gcc_unreachable ();
3131 emit_move_insn (reg, operands[i]);
3132 use_reg (&usage, reg);
3135 switch (GET_MODE (target))
3137 case TFmode:
3138 reg = gen_rtx_REG (TFmode, 16);
3139 break;
3140 case DFmode:
3141 reg = gen_rtx_REG (DFmode, 32);
3142 break;
3143 case DImode:
3144 reg = gen_rtx_REG (DImode, 0);
3145 break;
3146 default:
3147 gcc_unreachable ();
3150 tmp = gen_rtx_MEM (QImode, func);
3151 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3152 const0_rtx, const0_rtx));
3153 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3154 RTL_CONST_CALL_P (tmp) = 1;
3156 tmp = get_insns ();
3157 end_sequence ();
3159 emit_libcall_block (tmp, target, reg, equiv);
3162 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3164 void
3165 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3167 rtx func;
3168 int mode;
3169 rtx out_operands[3];
3171 func = alpha_lookup_xfloating_lib_func (code);
3172 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3174 out_operands[0] = operands[1];
3175 out_operands[1] = operands[2];
3176 out_operands[2] = GEN_INT (mode);
3177 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3178 gen_rtx_fmt_ee (code, TFmode, operands[1],
3179 operands[2]));
3182 /* Emit an X_floating library function call for a comparison. */
3184 static rtx
3185 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3187 enum rtx_code cmp_code, res_code;
3188 rtx func, out, operands[2], note;
3190 /* X_floating library comparison functions return
3191 -1 unordered
3192 0 false
3193 1 true
3194 Convert the compare against the raw return value. */
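   /* For example (illustrative): UNORDERED is implemented by calling the
      EQ routine and testing "result < 0" (the -1 return), while LE calls
      the LE routine and tests "result > 0".  */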
3196 cmp_code = *pcode;
3197 switch (cmp_code)
3199 case UNORDERED:
3200 cmp_code = EQ;
3201 res_code = LT;
3202 break;
3203 case ORDERED:
3204 cmp_code = EQ;
3205 res_code = GE;
3206 break;
3207 case NE:
3208 res_code = NE;
3209 break;
3210 case EQ:
3211 case LT:
3212 case GT:
3213 case LE:
3214 case GE:
3215 res_code = GT;
3216 break;
3217 default:
3218 gcc_unreachable ();
3220 *pcode = res_code;
3222 func = alpha_lookup_xfloating_lib_func (cmp_code);
3224 operands[0] = op0;
3225 operands[1] = op1;
3226 out = gen_reg_rtx (DImode);
3228 /* What's actually returned is -1,0,1, not a proper boolean value. */
3229 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3230 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
3231 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3233 return out;
3236 /* Emit an X_floating library function call for a conversion. */
3238 void
3239 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3241 int noperands = 1, mode;
3242 rtx out_operands[2];
3243 rtx func;
3244 enum rtx_code code = orig_code;
3246 if (code == UNSIGNED_FIX)
3247 code = FIX;
3249 func = alpha_lookup_xfloating_lib_func (code);
3251 out_operands[0] = operands[1];
3253 switch (code)
3255 case FIX:
3256 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3257 out_operands[1] = GEN_INT (mode);
3258 noperands = 2;
3259 break;
3260 case FLOAT_TRUNCATE:
3261 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3262 out_operands[1] = GEN_INT (mode);
3263 noperands = 2;
3264 break;
3265 default:
3266 break;
3269 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3270 gen_rtx_fmt_e (orig_code,
3271 GET_MODE (operands[0]),
3272 operands[1]));
3275 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3276 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3277 guarantee that the sequence
3278 set (OP[0] OP[2])
3279 set (OP[1] OP[3])
3280 is valid. Naturally, output operand ordering is little-endian.
3281 This is used by *movtf_internal and *movti_internal. */
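   /* For example (illustrative): moving the register pair $1:$2 into
      $2:$3 would have the first DImode set clobber $2 while the second
      set still needs it; with FIXUP_OVERLAP the two sets are swapped so
      that the high halves are moved first.  */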
3283 void
3284 alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
3285 bool fixup_overlap)
3287 switch (GET_CODE (operands[1]))
3289 case REG:
3290 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3291 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3292 break;
3294 case MEM:
3295 operands[3] = adjust_address (operands[1], DImode, 8);
3296 operands[2] = adjust_address (operands[1], DImode, 0);
3297 break;
3299 case CONST_INT:
3300 case CONST_DOUBLE:
3301 gcc_assert (operands[1] == CONST0_RTX (mode));
3302 operands[2] = operands[3] = const0_rtx;
3303 break;
3305 default:
3306 gcc_unreachable ();
3309 switch (GET_CODE (operands[0]))
3311 case REG:
3312 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3313 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3314 break;
3316 case MEM:
3317 operands[1] = adjust_address (operands[0], DImode, 8);
3318 operands[0] = adjust_address (operands[0], DImode, 0);
3319 break;
3321 default:
3322 gcc_unreachable ();
3325 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3327 std::swap (operands[0], operands[1]);
3328 std::swap (operands[2], operands[3]);
3332 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3333 op2 is a register containing the sign bit, operation is the
3334 logical operation to be performed. */
3336 void
3337 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3339 rtx high_bit = operands[2];
3340 rtx scratch;
3341 int move;
3343 alpha_split_tmode_pair (operands, TFmode, false);
3345 /* Detect three flavors of operand overlap. */
3346 move = 1;
3347 if (rtx_equal_p (operands[0], operands[2]))
3348 move = 0;
3349 else if (rtx_equal_p (operands[1], operands[2]))
3351 if (rtx_equal_p (operands[0], high_bit))
3352 move = 2;
3353 else
3354 move = -1;
3357 if (move < 0)
3358 emit_move_insn (operands[0], operands[2]);
3360 /* ??? If the destination overlaps both source tf and high_bit, then
3361 assume source tf is dead in its entirety and use the other half
3362 for a scratch register. Otherwise "scratch" is just the proper
3363 destination register. */
3364 scratch = operands[move < 2 ? 1 : 3];
3366 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3368 if (move > 0)
3370 emit_move_insn (operands[0], operands[2]);
3371 if (move > 1)
3372 emit_move_insn (operands[1], scratch);
3376 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3377 unaligned data:
3379 unsigned: signed:
3380 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3381 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3382 lda r3,X(r11) lda r3,X+2(r11)
3383 extwl r1,r3,r1 extql r1,r3,r1
3384 extwh r2,r3,r2 extqh r2,r3,r2
3385 or r1,r2,r1 or r1,r2,r1
3386 sra r1,48,r1
3388 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3389 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3390 lda r3,X(r11) lda r3,X(r11)
3391 extll r1,r3,r1 extll r1,r3,r1
3392 extlh r2,r3,r2 extlh r2,r3,r2
3393 or r1,r2,r1 addl r1,r2,r1
3395 quad: ldq_u r1,X(r11)
3396 ldq_u r2,X+7(r11)
3397 lda r3,X(r11)
3398 extql r1,r3,r1
3399 extqh r2,r3,r2
3400 or r1,r2,r1
3403 void
3404 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3405 HOST_WIDE_INT ofs, int sign)
3407 rtx meml, memh, addr, extl, exth, tmp, mema;
3408 machine_mode mode;
3410 if (TARGET_BWX && size == 2)
3412 meml = adjust_address (mem, QImode, ofs);
3413 memh = adjust_address (mem, QImode, ofs+1);
3414 extl = gen_reg_rtx (DImode);
3415 exth = gen_reg_rtx (DImode);
3416 emit_insn (gen_zero_extendqidi2 (extl, meml));
3417 emit_insn (gen_zero_extendqidi2 (exth, memh));
3418 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3419 NULL, 1, OPTAB_LIB_WIDEN);
3420 addr = expand_simple_binop (DImode, IOR, extl, exth,
3421 NULL, 1, OPTAB_LIB_WIDEN);
3423 if (sign && GET_MODE (tgt) != HImode)
3425 addr = gen_lowpart (HImode, addr);
3426 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3428 else
3430 if (GET_MODE (tgt) != DImode)
3431 addr = gen_lowpart (GET_MODE (tgt), addr);
3432 emit_move_insn (tgt, addr);
3434 return;
3437 meml = gen_reg_rtx (DImode);
3438 memh = gen_reg_rtx (DImode);
3439 addr = gen_reg_rtx (DImode);
3440 extl = gen_reg_rtx (DImode);
3441 exth = gen_reg_rtx (DImode);
3443 mema = XEXP (mem, 0);
3444 if (GET_CODE (mema) == LO_SUM)
3445 mema = force_reg (Pmode, mema);
3447 /* AND addresses cannot be in any alias set, since they may implicitly
3448 alias surrounding code. Ideally we'd have some alias set that
3449 covered all types except those with alignment 8 or higher. */
3451 tmp = change_address (mem, DImode,
3452 gen_rtx_AND (DImode,
3453 plus_constant (DImode, mema, ofs),
3454 GEN_INT (-8)));
3455 set_mem_alias_set (tmp, 0);
3456 emit_move_insn (meml, tmp);
3458 tmp = change_address (mem, DImode,
3459 gen_rtx_AND (DImode,
3460 plus_constant (DImode, mema,
3461 ofs + size - 1),
3462 GEN_INT (-8)));
3463 set_mem_alias_set (tmp, 0);
3464 emit_move_insn (memh, tmp);
3466 if (sign && size == 2)
3468 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3470 emit_insn (gen_extql (extl, meml, addr));
3471 emit_insn (gen_extqh (exth, memh, addr));
3473 /* We must use tgt here for the target. Alpha-vms port fails if we use
3474 addr for the target, because addr is marked as a pointer and combine
3475 knows that pointers are always sign-extended 32-bit values. */
3476 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3477 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3478 addr, 1, OPTAB_WIDEN);
3480 else
3482 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3483 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3484 switch ((int) size)
3486 case 2:
3487 emit_insn (gen_extwh (exth, memh, addr));
3488 mode = HImode;
3489 break;
3490 case 4:
3491 emit_insn (gen_extlh (exth, memh, addr));
3492 mode = SImode;
3493 break;
3494 case 8:
3495 emit_insn (gen_extqh (exth, memh, addr));
3496 mode = DImode;
3497 break;
3498 default:
3499 gcc_unreachable ();
3502 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3503 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3504 sign, OPTAB_WIDEN);
3507 if (addr != tgt)
3508 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3511 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3513 void
3514 alpha_expand_unaligned_store (rtx dst, rtx src,
3515 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3517 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3519 if (TARGET_BWX && size == 2)
3521 if (src != const0_rtx)
3523 dstl = gen_lowpart (QImode, src);
3524 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3525 NULL, 1, OPTAB_LIB_WIDEN);
3526 dsth = gen_lowpart (QImode, dsth);
3528 else
3529 dstl = dsth = const0_rtx;
3531 meml = adjust_address (dst, QImode, ofs);
3532 memh = adjust_address (dst, QImode, ofs+1);
3534 emit_move_insn (meml, dstl);
3535 emit_move_insn (memh, dsth);
3536 return;
3539 dstl = gen_reg_rtx (DImode);
3540 dsth = gen_reg_rtx (DImode);
3541 insl = gen_reg_rtx (DImode);
3542 insh = gen_reg_rtx (DImode);
3544 dsta = XEXP (dst, 0);
3545 if (GET_CODE (dsta) == LO_SUM)
3546 dsta = force_reg (Pmode, dsta);
3548 /* AND addresses cannot be in any alias set, since they may implicitly
3549 alias surrounding code. Ideally we'd have some alias set that
3550 covered all types except those with alignment 8 or higher. */
3552 meml = change_address (dst, DImode,
3553 gen_rtx_AND (DImode,
3554 plus_constant (DImode, dsta, ofs),
3555 GEN_INT (-8)));
3556 set_mem_alias_set (meml, 0);
3558 memh = change_address (dst, DImode,
3559 gen_rtx_AND (DImode,
3560 plus_constant (DImode, dsta,
3561 ofs + size - 1),
3562 GEN_INT (-8)));
3563 set_mem_alias_set (memh, 0);
3565 emit_move_insn (dsth, memh);
3566 emit_move_insn (dstl, meml);
3568 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3570 if (src != CONST0_RTX (GET_MODE (src)))
3572 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3573 GEN_INT (size*8), addr));
3575 switch ((int) size)
3577 case 2:
3578 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3579 break;
3580 case 4:
3581 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3582 break;
3583 case 8:
3584 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3585 break;
3586 default:
3587 gcc_unreachable ();
3591 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3593 switch ((int) size)
3595 case 2:
3596 emit_insn (gen_mskwl (dstl, dstl, addr));
3597 break;
3598 case 4:
3599 emit_insn (gen_mskll (dstl, dstl, addr));
3600 break;
3601 case 8:
3602 emit_insn (gen_mskql (dstl, dstl, addr));
3603 break;
3604 default:
3605 gcc_unreachable ();
3608 if (src != CONST0_RTX (GET_MODE (src)))
3610 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3611 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3614 /* Must store high before low for degenerate case of aligned. */
3615 emit_move_insn (memh, dsth);
3616 emit_move_insn (meml, dstl);
3619 /* The block move code tries to maximize speed by separating loads and
3620 stores at the expense of register pressure: we load all of the data
3621 before we store it back out.  There are two secondary effects worth
3622 mentioning: this speeds copying to/from aligned and unaligned
3623 buffers, and it makes the code significantly easier to write.  */
3625 #define MAX_MOVE_WORDS 8
3627 /* Load an integral number of consecutive unaligned quadwords. */
3629 static void
3630 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3631 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3633 rtx const im8 = GEN_INT (-8);
3634 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3635 rtx sreg, areg, tmp, smema;
3636 HOST_WIDE_INT i;
3638 smema = XEXP (smem, 0);
3639 if (GET_CODE (smema) == LO_SUM)
3640 smema = force_reg (Pmode, smema);
3642 /* Generate all the tmp registers we need. */
3643 for (i = 0; i < words; ++i)
3645 data_regs[i] = out_regs[i];
3646 ext_tmps[i] = gen_reg_rtx (DImode);
3648 data_regs[words] = gen_reg_rtx (DImode);
3650 if (ofs != 0)
3651 smem = adjust_address (smem, GET_MODE (smem), ofs);
3653 /* Load up all of the source data. */
3654 for (i = 0; i < words; ++i)
3656 tmp = change_address (smem, DImode,
3657 gen_rtx_AND (DImode,
3658 plus_constant (DImode, smema, 8*i),
3659 im8));
3660 set_mem_alias_set (tmp, 0);
3661 emit_move_insn (data_regs[i], tmp);
3664 tmp = change_address (smem, DImode,
3665 gen_rtx_AND (DImode,
3666 plus_constant (DImode, smema,
3667 8*words - 1),
3668 im8));
3669 set_mem_alias_set (tmp, 0);
3670 emit_move_insn (data_regs[words], tmp);
3672 /* Extract the half-word fragments. Unfortunately DEC decided to make
3673 extxh with offset zero a noop instead of zeroing the register, so
3674 we must take care of that edge condition ourselves with cmov. */
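   /* Concretely (illustrative): if the source happens to be 8-byte
      aligned, areg is zero and extqh leaves ext_tmps[i] holding the
      following word rather than zero; the cmov below forces it back to
      zero so that the OR merges in zero rather than stray bytes.  */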
3676 sreg = copy_addr_to_reg (smema);
3677 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3678 1, OPTAB_WIDEN);
3679 for (i = 0; i < words; ++i)
3681 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3682 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3683 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3684 gen_rtx_IF_THEN_ELSE (DImode,
3685 gen_rtx_EQ (DImode, areg,
3686 const0_rtx),
3687 const0_rtx, ext_tmps[i])));
3690 /* Merge the half-words into whole words. */
3691 for (i = 0; i < words; ++i)
3693 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3694 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3698 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3699 may be NULL to store zeros. */
3701 static void
3702 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3703 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3705 rtx const im8 = GEN_INT (-8);
3706 rtx ins_tmps[MAX_MOVE_WORDS];
3707 rtx st_tmp_1, st_tmp_2, dreg;
3708 rtx st_addr_1, st_addr_2, dmema;
3709 HOST_WIDE_INT i;
3711 dmema = XEXP (dmem, 0);
3712 if (GET_CODE (dmema) == LO_SUM)
3713 dmema = force_reg (Pmode, dmema);
3715 /* Generate all the tmp registers we need. */
3716 if (data_regs != NULL)
3717 for (i = 0; i < words; ++i)
3718 ins_tmps[i] = gen_reg_rtx(DImode);
3719 st_tmp_1 = gen_reg_rtx(DImode);
3720 st_tmp_2 = gen_reg_rtx(DImode);
3722 if (ofs != 0)
3723 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3725 st_addr_2 = change_address (dmem, DImode,
3726 gen_rtx_AND (DImode,
3727 plus_constant (DImode, dmema,
3728 words*8 - 1),
3729 im8));
3730 set_mem_alias_set (st_addr_2, 0);
3732 st_addr_1 = change_address (dmem, DImode,
3733 gen_rtx_AND (DImode, dmema, im8));
3734 set_mem_alias_set (st_addr_1, 0);
3736 /* Load up the destination end bits. */
3737 emit_move_insn (st_tmp_2, st_addr_2);
3738 emit_move_insn (st_tmp_1, st_addr_1);
3740 /* Shift the input data into place. */
3741 dreg = copy_addr_to_reg (dmema);
3742 if (data_regs != NULL)
3744 for (i = words-1; i >= 0; --i)
3746 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3747 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3749 for (i = words-1; i > 0; --i)
3751 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3752 ins_tmps[i-1], ins_tmps[i-1], 1,
3753 OPTAB_WIDEN);
3757 /* Split and merge the ends with the destination data. */
3758 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3759 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3761 if (data_regs != NULL)
3763 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3764 st_tmp_2, 1, OPTAB_WIDEN);
3765 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3766 st_tmp_1, 1, OPTAB_WIDEN);
3769 /* Store it all. */
3770 emit_move_insn (st_addr_2, st_tmp_2);
3771 for (i = words-1; i > 0; --i)
3773 rtx tmp = change_address (dmem, DImode,
3774 gen_rtx_AND (DImode,
3775 plus_constant (DImode,
3776 dmema, i*8),
3777 im8));
3778 set_mem_alias_set (tmp, 0);
3779 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3781 emit_move_insn (st_addr_1, st_tmp_1);
3785 /* Expand string/block move operations.
3787 operands[0] is the pointer to the destination.
3788 operands[1] is the pointer to the source.
3789 operands[2] is the number of bytes to move.
3790 operands[3] is the alignment. */
3792 int
3793 alpha_expand_block_move (rtx operands[])
3795 rtx bytes_rtx = operands[2];
3796 rtx align_rtx = operands[3];
3797 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3798 HOST_WIDE_INT bytes = orig_bytes;
3799 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3800 HOST_WIDE_INT dst_align = src_align;
3801 rtx orig_src = operands[1];
3802 rtx orig_dst = operands[0];
3803 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3804 rtx tmp;
3805 unsigned int i, words, ofs, nregs = 0;
3807 if (orig_bytes <= 0)
3808 return 1;
3809 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3810 return 0;
3812 /* Look for additional alignment information from recorded register info. */
3814 tmp = XEXP (orig_src, 0);
3815 if (REG_P (tmp))
3816 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3817 else if (GET_CODE (tmp) == PLUS
3818 && REG_P (XEXP (tmp, 0))
3819 && CONST_INT_P (XEXP (tmp, 1)))
3821 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3822 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3824 if (a > src_align)
3826 if (a >= 64 && c % 8 == 0)
3827 src_align = 64;
3828 else if (a >= 32 && c % 4 == 0)
3829 src_align = 32;
3830 else if (a >= 16 && c % 2 == 0)
3831 src_align = 16;
3835 tmp = XEXP (orig_dst, 0);
3836 if (REG_P (tmp))
3837 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3838 else if (GET_CODE (tmp) == PLUS
3839 && REG_P (XEXP (tmp, 0))
3840 && CONST_INT_P (XEXP (tmp, 1)))
3842 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3843 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3845 if (a > dst_align)
3847 if (a >= 64 && c % 8 == 0)
3848 dst_align = 64;
3849 else if (a >= 32 && c % 4 == 0)
3850 dst_align = 32;
3851 else if (a >= 16 && c % 2 == 0)
3852 dst_align = 16;
3856 ofs = 0;
3857 if (src_align >= 64 && bytes >= 8)
3859 words = bytes / 8;
3861 for (i = 0; i < words; ++i)
3862 data_regs[nregs + i] = gen_reg_rtx (DImode);
3864 for (i = 0; i < words; ++i)
3865 emit_move_insn (data_regs[nregs + i],
3866 adjust_address (orig_src, DImode, ofs + i * 8));
3868 nregs += words;
3869 bytes -= words * 8;
3870 ofs += words * 8;
3873 if (src_align >= 32 && bytes >= 4)
3875 words = bytes / 4;
3877 for (i = 0; i < words; ++i)
3878 data_regs[nregs + i] = gen_reg_rtx (SImode);
3880 for (i = 0; i < words; ++i)
3881 emit_move_insn (data_regs[nregs + i],
3882 adjust_address (orig_src, SImode, ofs + i * 4));
3884 nregs += words;
3885 bytes -= words * 4;
3886 ofs += words * 4;
3889 if (bytes >= 8)
3891 words = bytes / 8;
3893 for (i = 0; i < words+1; ++i)
3894 data_regs[nregs + i] = gen_reg_rtx (DImode);
3896 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3897 words, ofs);
3899 nregs += words;
3900 bytes -= words * 8;
3901 ofs += words * 8;
3904 if (! TARGET_BWX && bytes >= 4)
3906 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3907 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3908 bytes -= 4;
3909 ofs += 4;
3912 if (bytes >= 2)
3914 if (src_align >= 16)
3916 do {
3917 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3918 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3919 bytes -= 2;
3920 ofs += 2;
3921 } while (bytes >= 2);
3923 else if (! TARGET_BWX)
3925 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3926 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3927 bytes -= 2;
3928 ofs += 2;
3932 while (bytes > 0)
3934 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3935 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3936 bytes -= 1;
3937 ofs += 1;
3940 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3942 /* Now save it back out again. */
3944 i = 0, ofs = 0;
3946 /* Write out the data in whatever chunks reading the source allowed. */
3947 if (dst_align >= 64)
3949 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3951 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3952 data_regs[i]);
3953 ofs += 8;
3954 i++;
3958 if (dst_align >= 32)
3960 /* If the source has remaining DImode regs, write them out in
3961 two pieces. */
3962 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3964 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3965 NULL_RTX, 1, OPTAB_WIDEN);
3967 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3968 gen_lowpart (SImode, data_regs[i]));
3969 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3970 gen_lowpart (SImode, tmp));
3971 ofs += 8;
3972 i++;
3975 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3977 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3978 data_regs[i]);
3979 ofs += 4;
3980 i++;
3984 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3986 /* Write out a remaining block of words using unaligned methods. */
3988 for (words = 1; i + words < nregs; words++)
3989 if (GET_MODE (data_regs[i + words]) != DImode)
3990 break;
3992 if (words == 1)
3993 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3994 else
3995 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3996 words, ofs);
3998 i += words;
3999 ofs += words * 8;
4002 /* Due to the above, this won't be aligned. */
4003 /* ??? If we have more than one of these, consider constructing full
4004 words in registers and using alpha_expand_unaligned_store_words. */
4005 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4008 ofs += 4;
4009 i++;
4012 if (dst_align >= 16)
4013 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4015 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4016 i++;
4017 ofs += 2;
4019 else
4020 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4022 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4023 i++;
4024 ofs += 2;
4027 /* The remainder must be byte copies. */
4028 while (i < nregs)
4030 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4031 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4032 i++;
4033 ofs += 1;
4036 return 1;
4039 int
4040 alpha_expand_block_clear (rtx operands[])
4042 rtx bytes_rtx = operands[1];
4043 rtx align_rtx = operands[3];
4044 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4045 HOST_WIDE_INT bytes = orig_bytes;
4046 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4047 HOST_WIDE_INT alignofs = 0;
4048 rtx orig_dst = operands[0];
4049 rtx tmp;
4050 int i, words, ofs = 0;
4052 if (orig_bytes <= 0)
4053 return 1;
4054 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4055 return 0;
4057 /* Look for stricter alignment. */
4058 tmp = XEXP (orig_dst, 0);
4059 if (REG_P (tmp))
4060 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4061 else if (GET_CODE (tmp) == PLUS
4062 && REG_P (XEXP (tmp, 0))
4063 && CONST_INT_P (XEXP (tmp, 1)))
4065 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4066 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4068 if (a > align)
4070 if (a >= 64)
4071 align = a, alignofs = 8 - c % 8;
4072 else if (a >= 32)
4073 align = a, alignofs = 4 - c % 4;
4074 else if (a >= 16)
4075 align = a, alignofs = 2 - c % 2;
4079 /* Handle an unaligned prefix first. */
4081 if (alignofs > 0)
4083 #if HOST_BITS_PER_WIDE_INT >= 64
4084 /* Given that alignofs is bounded by align, the only time BWX could
4085 generate three stores is for a 7 byte fill. Prefer two individual
4086 stores over a load/mask/store sequence. */
4087 if ((!TARGET_BWX || alignofs == 7)
4088 && align >= 32
4089 && !(alignofs == 4 && bytes >= 4))
4091 machine_mode mode = (align >= 64 ? DImode : SImode);
4092 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4093 rtx mem, tmp;
4094 HOST_WIDE_INT mask;
4096 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4097 set_mem_alias_set (mem, 0);
4099 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4100 if (bytes < alignofs)
4102 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4103 ofs += bytes;
4104 bytes = 0;
4106 else
4108 bytes -= alignofs;
4109 ofs += alignofs;
4111 alignofs = 0;
4113 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4114 NULL_RTX, 1, OPTAB_WIDEN);
4116 emit_move_insn (mem, tmp);
4118 #endif
4120 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4122 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4123 bytes -= 1;
4124 ofs += 1;
4125 alignofs -= 1;
4127 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4129 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4130 bytes -= 2;
4131 ofs += 2;
4132 alignofs -= 2;
4134 if (alignofs == 4 && bytes >= 4)
4136 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4137 bytes -= 4;
4138 ofs += 4;
4139 alignofs = 0;
4142 /* If we've not used the extra lead alignment information by now,
4143 we won't be able to. Downgrade align to match what's left over. */
4144 if (alignofs > 0)
4146 alignofs = alignofs & -alignofs;
4147 align = MIN (align, alignofs * BITS_PER_UNIT);
4151 /* Handle a block of contiguous long-words. */
4153 if (align >= 64 && bytes >= 8)
4155 words = bytes / 8;
4157 for (i = 0; i < words; ++i)
4158 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4159 const0_rtx);
4161 bytes -= words * 8;
4162 ofs += words * 8;
4165 /* If the block is large and appropriately aligned, emit a single
4166 store followed by a sequence of stq_u insns. */
4168 if (align >= 32 && bytes > 16)
4170 rtx orig_dsta;
4172 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4173 bytes -= 4;
4174 ofs += 4;
4176 orig_dsta = XEXP (orig_dst, 0);
4177 if (GET_CODE (orig_dsta) == LO_SUM)
4178 orig_dsta = force_reg (Pmode, orig_dsta);
4180 words = bytes / 8;
4181 for (i = 0; i < words; ++i)
4183 rtx mem
4184 = change_address (orig_dst, DImode,
4185 gen_rtx_AND (DImode,
4186 plus_constant (DImode, orig_dsta,
4187 ofs + i*8),
4188 GEN_INT (-8)));
4189 set_mem_alias_set (mem, 0);
4190 emit_move_insn (mem, const0_rtx);
4193 /* Depending on the alignment, the first stq_u may have overlapped
4194 with the initial stl, which means that the last stq_u didn't
4195 write as much as it would appear. Leave those questionable bytes
4196 unaccounted for. */
4197 bytes -= words * 8 - 4;
4198 ofs += words * 8 - 4;
4201 /* Handle a smaller block of aligned words. */
4203 if ((align >= 64 && bytes == 4)
4204 || (align == 32 && bytes >= 4))
4206 words = bytes / 4;
4208 for (i = 0; i < words; ++i)
4209 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4210 const0_rtx);
4212 bytes -= words * 4;
4213 ofs += words * 4;
4216 /* An unaligned block uses stq_u stores for as many as possible. */
4218 if (bytes >= 8)
4220 words = bytes / 8;
4222 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4224 bytes -= words * 8;
4225 ofs += words * 8;
4228 /* Next clean up any trailing pieces. */
4230 #if HOST_BITS_PER_WIDE_INT >= 64
4231 /* Count the number of bits in BYTES for which aligned stores could
4232 be emitted. */
4233 words = 0;
4234 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4235 if (bytes & i)
4236 words += 1;
4238 /* If we have appropriate alignment (and it wouldn't take too many
4239 instructions otherwise), mask out the bytes we need. */
4240 if (TARGET_BWX ? words > 2 : bytes > 0)
4242 if (align >= 64)
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4247 mem = adjust_address (orig_dst, DImode, ofs);
4248 set_mem_alias_set (mem, 0);
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4252 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4255 emit_move_insn (mem, tmp);
4256 return 1;
4258 else if (align >= 32 && bytes < 4)
4260 rtx mem, tmp;
4261 HOST_WIDE_INT mask;
4263 mem = adjust_address (orig_dst, SImode, ofs);
4264 set_mem_alias_set (mem, 0);
4266 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4268 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4269 NULL_RTX, 1, OPTAB_WIDEN);
4271 emit_move_insn (mem, tmp);
4272 return 1;
4275 #endif
4277 if (!TARGET_BWX && bytes >= 4)
4279 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4280 bytes -= 4;
4281 ofs += 4;
4284 if (bytes >= 2)
4286 if (align >= 16)
4288 do {
4289 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4290 const0_rtx);
4291 bytes -= 2;
4292 ofs += 2;
4293 } while (bytes >= 2);
4295 else if (! TARGET_BWX)
4297 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4298 bytes -= 2;
4299 ofs += 2;
4303 while (bytes > 0)
4305 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4306 bytes -= 1;
4307 ofs += 1;
4310 return 1;
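/* A worked example of the expansion above (editor's sketch; the exact
   sequence depends on TARGET_BWX and the known alignment): clearing 10
   bytes at a destination known to be 4-byte aligned, with BWX enabled,
   becomes two aligned longword stores followed by one word store:

       stl $31,0(dst)
       stl $31,4(dst)
       stw $31,8(dst)

   No unaligned stq_u sequence is needed because every piece can be
   written with a naturally aligned store.  */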
4313 /* Returns a mask so that zap(x, value) == x & mask. */
4316 alpha_expand_zap_mask (HOST_WIDE_INT value)
4318 rtx result;
4319 int i;
4321 if (HOST_BITS_PER_WIDE_INT >= 64)
4323 HOST_WIDE_INT mask = 0;
4325 for (i = 7; i >= 0; --i)
4327 mask <<= 8;
4328 if (!((value >> i) & 1))
4329 mask |= 0xff;
4332 result = gen_int_mode (mask, DImode);
4334 else
4336 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4338 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4340 for (i = 7; i >= 4; --i)
4342 mask_hi <<= 8;
4343 if (!((value >> i) & 1))
4344 mask_hi |= 0xff;
4347 for (i = 3; i >= 0; --i)
4349 mask_lo <<= 8;
4350 if (!((value >> i) & 1))
4351 mask_lo |= 0xff;
4354 result = immed_double_const (mask_lo, mask_hi, DImode);
4357 return result;
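/* A host-side model of the mapping implemented above, assuming a 64-bit
   HOST_WIDE_INT (editor's sketch).  Each of the low eight bits of VALUE
   selects one byte of the result: a clear bit keeps the byte (0xff in the
   mask), a set bit zaps it, so zap(x, value) == x & mask holds.  */

static unsigned long long
zap_mask_model (unsigned long long value)
{
  unsigned long long mask = 0;
  int i;

  for (i = 7; i >= 0; --i)
    {
      mask <<= 8;
      if (!((value >> i) & 1))
        mask |= 0xffULL;
    }

  /* E.g. value 0x0f (zap the low four bytes) yields 0xffffffff00000000.  */
  return mask;
}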
4360 void
4361 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4362 machine_mode mode,
4363 rtx op0, rtx op1, rtx op2)
4365 op0 = gen_lowpart (mode, op0);
4367 if (op1 == const0_rtx)
4368 op1 = CONST0_RTX (mode);
4369 else
4370 op1 = gen_lowpart (mode, op1);
4372 if (op2 == const0_rtx)
4373 op2 = CONST0_RTX (mode);
4374 else
4375 op2 = gen_lowpart (mode, op2);
4377 emit_insn ((*gen) (op0, op1, op2));
4380 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4381 COND is true. Mark the jump as unlikely to be taken. */
4383 static void
4384 emit_unlikely_jump (rtx cond, rtx label)
4386 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
4387 rtx x;
4389 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4390 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4391 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
4394 /* A subroutine of the atomic operation splitters. Emit a load-locked
4395 instruction in MODE. */
4397 static void
4398 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
4400 rtx (*fn) (rtx, rtx) = NULL;
4401 if (mode == SImode)
4402 fn = gen_load_locked_si;
4403 else if (mode == DImode)
4404 fn = gen_load_locked_di;
4405 emit_insn (fn (reg, mem));
4408 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4409 instruction in MODE. */
4411 static void
4412 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
4414 rtx (*fn) (rtx, rtx, rtx) = NULL;
4415 if (mode == SImode)
4416 fn = gen_store_conditional_si;
4417 else if (mode == DImode)
4418 fn = gen_store_conditional_di;
4419 emit_insn (fn (res, mem, val));
4422 /* Subroutines of the atomic operation splitters. Emit barriers
4423 as needed for the memory MODEL. */
4425 static void
4426 alpha_pre_atomic_barrier (enum memmodel model)
4428 if (need_atomic_barrier_p (model, true))
4429 emit_insn (gen_memory_barrier ());
4432 static void
4433 alpha_post_atomic_barrier (enum memmodel model)
4435 if (need_atomic_barrier_p (model, false))
4436 emit_insn (gen_memory_barrier ());
4439 /* A subroutine of the atomic operation splitters. Emit an insxl
4440 instruction in MODE. */
4442 static rtx
4443 emit_insxl (machine_mode mode, rtx op1, rtx op2)
4445 rtx ret = gen_reg_rtx (DImode);
4446 rtx (*fn) (rtx, rtx, rtx);
4448 switch (mode)
4450 case QImode:
4451 fn = gen_insbl;
4452 break;
4453 case HImode:
4454 fn = gen_inswl;
4455 break;
4456 case SImode:
4457 fn = gen_insll;
4458 break;
4459 case DImode:
4460 fn = gen_insql;
4461 break;
4462 default:
4463 gcc_unreachable ();
4466 op1 = force_reg (mode, op1);
4467 emit_insn (fn (ret, op1, op2));
4469 return ret;
4472 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4473 to perform. MEM is the memory on which to operate. VAL is the second
4474 operand of the binary operator. BEFORE and AFTER are optional locations to
4475 return the value of MEM either before or after the operation. SCRATCH is
4476 a scratch register. */
4478 void
4479 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4480 rtx after, rtx scratch, enum memmodel model)
4482 machine_mode mode = GET_MODE (mem);
4483 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4485 alpha_pre_atomic_barrier (model);
4487 label = gen_label_rtx ();
4488 emit_label (label);
4489 label = gen_rtx_LABEL_REF (DImode, label);
4491 if (before == NULL)
4492 before = scratch;
4493 emit_load_locked (mode, before, mem);
4495 if (code == NOT)
4497 x = gen_rtx_AND (mode, before, val);
4498 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4500 x = gen_rtx_NOT (mode, val);
4502 else
4503 x = gen_rtx_fmt_ee (code, mode, before, val);
4504 if (after)
4505 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4506 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4508 emit_store_conditional (mode, cond, mem, scratch);
4510 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4511 emit_unlikely_jump (x, label);
4513 alpha_post_atomic_barrier (model);
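/* The splitter above produces the canonical Alpha load-locked /
   store-conditional retry loop.  For a 64-bit fetch-and-add with a
   sequentially consistent model the emitted sequence looks roughly like
   this (editor's sketch; barrier placement follows
   alpha_pre/post_atomic_barrier):

       mb
   1:  ldq_l   $t,0($mem)          # BEFORE value
       addq    $t,$val,$s          # apply CODE, result in SCRATCH
       stq_c   $s,0($mem)          # $s becomes 0 if the store fails
       beq     $s,1b               # unlikely branch, retry
       mb                                                               */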
4516 /* Expand a compare and swap operation. */
4518 void
4519 alpha_split_compare_and_swap (rtx operands[])
4521 rtx cond, retval, mem, oldval, newval;
4522 bool is_weak;
4523 enum memmodel mod_s, mod_f;
4524 machine_mode mode;
4525 rtx label1, label2, x;
4527 cond = operands[0];
4528 retval = operands[1];
4529 mem = operands[2];
4530 oldval = operands[3];
4531 newval = operands[4];
4532 is_weak = (operands[5] != const0_rtx);
4533 mod_s = (enum memmodel) INTVAL (operands[6]);
4534 mod_f = (enum memmodel) INTVAL (operands[7]);
4535 mode = GET_MODE (mem);
4537 alpha_pre_atomic_barrier (mod_s);
4539 label1 = NULL_RTX;
4540 if (!is_weak)
4542 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4543 emit_label (XEXP (label1, 0));
4545 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4547 emit_load_locked (mode, retval, mem);
4549 x = gen_lowpart (DImode, retval);
4550 if (oldval == const0_rtx)
4552 emit_move_insn (cond, const0_rtx);
4553 x = gen_rtx_NE (DImode, x, const0_rtx);
4555 else
4557 x = gen_rtx_EQ (DImode, x, oldval);
4558 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4559 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4561 emit_unlikely_jump (x, label2);
4563 emit_move_insn (cond, newval);
4564 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4566 if (!is_weak)
4568 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4569 emit_unlikely_jump (x, label1);
4572 if (mod_f != MEMMODEL_RELAXED)
4573 emit_label (XEXP (label2, 0));
4575 alpha_post_atomic_barrier (mod_s);
4577 if (mod_f == MEMMODEL_RELAXED)
4578 emit_label (XEXP (label2, 0));
4581 void
4582 alpha_expand_compare_and_swap_12 (rtx operands[])
4584 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4585 machine_mode mode;
4586 rtx addr, align, wdst;
4587 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4589 cond = operands[0];
4590 dst = operands[1];
4591 mem = operands[2];
4592 oldval = operands[3];
4593 newval = operands[4];
4594 is_weak = operands[5];
4595 mod_s = operands[6];
4596 mod_f = operands[7];
4597 mode = GET_MODE (mem);
4599 /* We forced the address into a register via mem_noofs_operand. */
4600 addr = XEXP (mem, 0);
4601 gcc_assert (register_operand (addr, DImode));
4603 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4604 NULL_RTX, 1, OPTAB_DIRECT);
4606 oldval = convert_modes (DImode, mode, oldval, 1);
4608 if (newval != const0_rtx)
4609 newval = emit_insxl (mode, newval, addr);
4611 wdst = gen_reg_rtx (DImode);
4612 if (mode == QImode)
4613 gen = gen_atomic_compare_and_swapqi_1;
4614 else
4615 gen = gen_atomic_compare_and_swaphi_1;
4616 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4617 is_weak, mod_s, mod_f));
4619 emit_move_insn (dst, gen_lowpart (mode, wdst));
4622 void
4623 alpha_split_compare_and_swap_12 (rtx operands[])
4625 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4626 machine_mode mode;
4627 bool is_weak;
4628 enum memmodel mod_s, mod_f;
4629 rtx label1, label2, mem, addr, width, mask, x;
4631 cond = operands[0];
4632 dest = operands[1];
4633 orig_mem = operands[2];
4634 oldval = operands[3];
4635 newval = operands[4];
4636 align = operands[5];
4637 is_weak = (operands[6] != const0_rtx);
4638 mod_s = (enum memmodel) INTVAL (operands[7]);
4639 mod_f = (enum memmodel) INTVAL (operands[8]);
4640 scratch = operands[9];
4641 mode = GET_MODE (orig_mem);
4642 addr = XEXP (orig_mem, 0);
4644 mem = gen_rtx_MEM (DImode, align);
4645 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4646 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4647 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4649 alpha_pre_atomic_barrier (mod_s);
4651 label1 = NULL_RTX;
4652 if (!is_weak)
4654 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4655 emit_label (XEXP (label1, 0));
4657 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4659 emit_load_locked (DImode, scratch, mem);
4661 width = GEN_INT (GET_MODE_BITSIZE (mode));
4662 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4663 emit_insn (gen_extxl (dest, scratch, width, addr));
4665 if (oldval == const0_rtx)
4667 emit_move_insn (cond, const0_rtx);
4668 x = gen_rtx_NE (DImode, dest, const0_rtx);
4670 else
4672 x = gen_rtx_EQ (DImode, dest, oldval);
4673 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4674 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4676 emit_unlikely_jump (x, label2);
4678 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4680 if (newval != const0_rtx)
4681 emit_insn (gen_iordi3 (cond, cond, newval));
4683 emit_store_conditional (DImode, cond, mem, cond);
4685 if (!is_weak)
4687 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4688 emit_unlikely_jump (x, label1);
4691 if (mod_f != MEMMODEL_RELAXED)
4692 emit_label (XEXP (label2, 0));
4694 alpha_post_atomic_barrier (mod_s);
4696 if (mod_f == MEMMODEL_RELAXED)
4697 emit_label (XEXP (label2, 0));
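/* A C-level model of the narrow compare-and-swap above (editor's sketch
   with illustrative helper names; the real code keeps everything inside a
   single ldq_l/stq_c retry loop on the containing aligned quadword):

       quad = *aligned8 (addr);                    // ldq_l
       old  = extract_narrow (quad, addr);         // ext[bw]l into DEST
       if (old != oldval)
         goto done;                                // branch to label2
       quad = clear_narrow (quad, addr);           // msk[bw]l
       quad |= newval_shifted_into_place;          // ins[bw]l, done earlier
       if (!store_conditional (aligned8 (addr), quad))
         retry;                                    // beq back to label1
     done:                                                               */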
4700 /* Expand an atomic exchange operation. */
4702 void
4703 alpha_split_atomic_exchange (rtx operands[])
4705 rtx retval, mem, val, scratch;
4706 enum memmodel model;
4707 machine_mode mode;
4708 rtx label, x, cond;
4710 retval = operands[0];
4711 mem = operands[1];
4712 val = operands[2];
4713 model = (enum memmodel) INTVAL (operands[3]);
4714 scratch = operands[4];
4715 mode = GET_MODE (mem);
4716 cond = gen_lowpart (DImode, scratch);
4718 alpha_pre_atomic_barrier (model);
4720 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4721 emit_label (XEXP (label, 0));
4723 emit_load_locked (mode, retval, mem);
4724 emit_move_insn (scratch, val);
4725 emit_store_conditional (mode, cond, mem, scratch);
4727 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4728 emit_unlikely_jump (x, label);
4730 alpha_post_atomic_barrier (model);
4733 void
4734 alpha_expand_atomic_exchange_12 (rtx operands[])
4736 rtx dst, mem, val, model;
4737 machine_mode mode;
4738 rtx addr, align, wdst;
4739 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4741 dst = operands[0];
4742 mem = operands[1];
4743 val = operands[2];
4744 model = operands[3];
4745 mode = GET_MODE (mem);
4747 /* We forced the address into a register via mem_noofs_operand. */
4748 addr = XEXP (mem, 0);
4749 gcc_assert (register_operand (addr, DImode));
4751 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4752 NULL_RTX, 1, OPTAB_DIRECT);
4754 /* Insert val into the correct byte location within the word. */
4755 if (val != const0_rtx)
4756 val = emit_insxl (mode, val, addr);
4758 wdst = gen_reg_rtx (DImode);
4759 if (mode == QImode)
4760 gen = gen_atomic_exchangeqi_1;
4761 else
4762 gen = gen_atomic_exchangehi_1;
4763 emit_insn (gen (wdst, mem, val, align, model));
4765 emit_move_insn (dst, gen_lowpart (mode, wdst));
4768 void
4769 alpha_split_atomic_exchange_12 (rtx operands[])
4771 rtx dest, orig_mem, addr, val, align, scratch;
4772 rtx label, mem, width, mask, x;
4773 machine_mode mode;
4774 enum memmodel model;
4776 dest = operands[0];
4777 orig_mem = operands[1];
4778 val = operands[2];
4779 align = operands[3];
4780 model = (enum memmodel) INTVAL (operands[4]);
4781 scratch = operands[5];
4782 mode = GET_MODE (orig_mem);
4783 addr = XEXP (orig_mem, 0);
4785 mem = gen_rtx_MEM (DImode, align);
4786 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4787 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4788 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4790 alpha_pre_atomic_barrier (model);
4792 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4793 emit_label (XEXP (label, 0));
4795 emit_load_locked (DImode, scratch, mem);
4797 width = GEN_INT (GET_MODE_BITSIZE (mode));
4798 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4799 emit_insn (gen_extxl (dest, scratch, width, addr));
4800 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4801 if (val != const0_rtx)
4802 emit_insn (gen_iordi3 (scratch, scratch, val));
4804 emit_store_conditional (DImode, scratch, mem, scratch);
4806 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4807 emit_unlikely_jump (x, label);
4809 alpha_post_atomic_barrier (model);
4812 /* Adjust the cost of a scheduling dependency. Return the new cost of
4813 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4815 static int
4816 alpha_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4818 enum attr_type dep_insn_type;
4820 /* If the dependence is an anti-dependence, there is no cost. For an
4821 output dependence, there is sometimes a cost, but it doesn't seem
4822 worth handling those few cases. */
4823 if (REG_NOTE_KIND (link) != 0)
4824 return cost;
4826 /* If we can't recognize the insns, we can't really do anything. */
4827 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4828 return cost;
4830 dep_insn_type = get_attr_type (dep_insn);
4832 /* Bring in the user-defined memory latency. */
4833 if (dep_insn_type == TYPE_ILD
4834 || dep_insn_type == TYPE_FLD
4835 || dep_insn_type == TYPE_LDSYM)
4836 cost += alpha_memory_latency-1;
4838 /* Everything else handled in DFA bypasses now. */
4840 return cost;
4843 /* The number of instructions that can be issued per cycle. */
4845 static int
4846 alpha_issue_rate (void)
4848 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4851 /* How many alternative schedules to try. This should be as wide as the
4852 scheduling freedom in the DFA, but no wider. Making this value too
4853 large results in extra work for the scheduler.
4855 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4856 alternative schedules. For EV5, we can choose between E0/E1 and
4857 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4859 static int
4860 alpha_multipass_dfa_lookahead (void)
4862 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4865 /* Machine-specific function data. */
4867 struct GTY(()) alpha_links;
4869 struct string_traits : default_hashmap_traits
4871 static bool equal_keys (const char *const &a, const char *const &b)
4873 return strcmp (a, b) == 0;
4877 struct GTY(()) machine_function
4879 /* For flag_reorder_blocks_and_partition. */
4880 rtx gp_save_rtx;
4882 /* For VMS condition handlers. */
4883 bool uses_condition_handler;
4885 /* Linkage entries. */
4886 hash_map<const char *, alpha_links *, string_traits> *links;
4889 /* How to allocate a 'struct machine_function'. */
4891 static struct machine_function *
4892 alpha_init_machine_status (void)
4894 return ggc_cleared_alloc<machine_function> ();
4897 /* Support for frame based VMS condition handlers. */
4899 /* A VMS condition handler may be established for a function with a call to
4900 __builtin_establish_vms_condition_handler, and cancelled with a call to
4901 __builtin_revert_vms_condition_handler.
4903 The VMS Condition Handling Facility knows about the existence of a handler
4904 from the procedure descriptor .handler field. As with the VMS native compilers,
4905 we store the user-specified handler's address at a fixed location in the
4906 stack frame and point the procedure descriptor at a common wrapper which
4907 fetches the real handler's address and issues an indirect call.
4909 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4911 We force the procedure kind to PT_STACK, and the fixed frame location is
4912 fp+8, just before the register save area. We use the handler_data field in
4913 the procedure descriptor to state the fp offset at which the installed
4914 handler address can be found. */
4916 #define VMS_COND_HANDLER_FP_OFFSET 8
4918 /* Expand code to store the currently installed user VMS condition handler
4919 into TARGET and install HANDLER as the new condition handler. */
4921 void
4922 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4924 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4925 VMS_COND_HANDLER_FP_OFFSET);
4927 rtx handler_slot
4928 = gen_rtx_MEM (DImode, handler_slot_address);
4930 emit_move_insn (target, handler_slot);
4931 emit_move_insn (handler_slot, handler);
4933 /* Notify the start/prologue/epilogue emitters that the condition handler
4934 slot is needed. In addition to reserving the slot space, this will force
4935 the procedure kind to PT_STACK to ensure that the hard_frame_pointer_rtx
4936 use above is correct. */
4937 cfun->machine->uses_condition_handler = true;
4940 /* Expand code to store the current VMS condition handler into TARGET and
4941 nullify it. */
4943 void
4944 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4946 /* We implement this by establishing a null condition handler, with the tiny
4947 side effect of setting uses_condition_handler. This is slightly
4948 pessimistic if no actual builtin_establish call is ever issued, but that is
4949 not a real problem and is not expected to happen anyway. */
4951 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4954 /* Functions to save and restore alpha_return_addr_rtx. */
4956 /* Start the ball rolling with RETURN_ADDR_RTX. */
4959 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4961 if (count != 0)
4962 return const0_rtx;
4964 return get_hard_reg_initial_val (Pmode, REG_RA);
4967 /* Return or create a memory slot containing the gp value for the current
4968 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4971 alpha_gp_save_rtx (void)
4973 rtx_insn *seq;
4974 rtx m = cfun->machine->gp_save_rtx;
4976 if (m == NULL)
4978 start_sequence ();
4980 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4981 m = validize_mem (m);
4982 emit_move_insn (m, pic_offset_table_rtx);
4984 seq = get_insns ();
4985 end_sequence ();
4987 /* We used to simply emit the sequence after entry_of_function.
4988 However this breaks the CFG if the first instruction in the
4989 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4990 label. Emit the sequence properly on the edge. We are only
4991 invoked from dw2_build_landing_pads and finish_eh_generation
4992 will call commit_edge_insertions thanks to a kludge. */
4993 insert_insn_on_edge (seq,
4994 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4996 cfun->machine->gp_save_rtx = m;
4999 return m;
5002 static void
5003 alpha_instantiate_decls (void)
5005 if (cfun->machine->gp_save_rtx != NULL_RTX)
5006 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
5009 static int
5010 alpha_ra_ever_killed (void)
5012 rtx_insn *top;
5014 if (!has_hard_reg_initial_val (Pmode, REG_RA))
5015 return (int)df_regs_ever_live_p (REG_RA);
5017 push_topmost_sequence ();
5018 top = get_insns ();
5019 pop_topmost_sequence ();
5021 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
5025 /* Return the trap mode suffix applicable to the current
5026 instruction, or NULL. */
5028 static const char *
5029 get_trap_mode_suffix (void)
5031 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
5033 switch (s)
5035 case TRAP_SUFFIX_NONE:
5036 return NULL;
5038 case TRAP_SUFFIX_SU:
5039 if (alpha_fptm >= ALPHA_FPTM_SU)
5040 return "su";
5041 return NULL;
5043 case TRAP_SUFFIX_SUI:
5044 if (alpha_fptm >= ALPHA_FPTM_SUI)
5045 return "sui";
5046 return NULL;
5048 case TRAP_SUFFIX_V_SV:
5049 switch (alpha_fptm)
5051 case ALPHA_FPTM_N:
5052 return NULL;
5053 case ALPHA_FPTM_U:
5054 return "v";
5055 case ALPHA_FPTM_SU:
5056 case ALPHA_FPTM_SUI:
5057 return "sv";
5058 default:
5059 gcc_unreachable ();
5062 case TRAP_SUFFIX_V_SV_SVI:
5063 switch (alpha_fptm)
5065 case ALPHA_FPTM_N:
5066 return NULL;
5067 case ALPHA_FPTM_U:
5068 return "v";
5069 case ALPHA_FPTM_SU:
5070 return "sv";
5071 case ALPHA_FPTM_SUI:
5072 return "svi";
5073 default:
5074 gcc_unreachable ();
5076 break;
5078 case TRAP_SUFFIX_U_SU_SUI:
5079 switch (alpha_fptm)
5081 case ALPHA_FPTM_N:
5082 return NULL;
5083 case ALPHA_FPTM_U:
5084 return "u";
5085 case ALPHA_FPTM_SU:
5086 return "su";
5087 case ALPHA_FPTM_SUI:
5088 return "sui";
5089 default:
5090 gcc_unreachable ();
5092 break;
5094 default:
5095 gcc_unreachable ();
5097 gcc_unreachable ();
5100 /* Return the rounding mode suffix applicable to the current
5101 instruction, or NULL. */
5103 static const char *
5104 get_round_mode_suffix (void)
5106 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5108 switch (s)
5110 case ROUND_SUFFIX_NONE:
5111 return NULL;
5112 case ROUND_SUFFIX_NORMAL:
5113 switch (alpha_fprm)
5115 case ALPHA_FPRM_NORM:
5116 return NULL;
5117 case ALPHA_FPRM_MINF:
5118 return "m";
5119 case ALPHA_FPRM_CHOP:
5120 return "c";
5121 case ALPHA_FPRM_DYN:
5122 return "d";
5123 default:
5124 gcc_unreachable ();
5126 break;
5128 case ROUND_SUFFIX_C:
5129 return "c";
5131 default:
5132 gcc_unreachable ();
5134 gcc_unreachable ();
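/* Worked example of how the two helpers above combine in print_operand's
   '/' case: with -mfp-trap-mode=su and -mfp-rounding-mode=d, an addt with
   both suffix attributes is printed as "addt/sud", the trap-mode suffix
   first and the rounding-mode suffix second.  */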
5137 /* Print an operand. Recognize special options, documented below. */
5139 void
5140 print_operand (FILE *file, rtx x, int code)
5142 int i;
5144 switch (code)
5146 case '~':
5147 /* Print the assembler name of the current function. */
5148 assemble_name (file, alpha_fnname);
5149 break;
5151 case '&':
5152 if (const char *name = get_some_local_dynamic_name ())
5153 assemble_name (file, name);
5154 else
5155 output_operand_lossage ("'%%&' used without any "
5156 "local dynamic TLS references");
5157 break;
5159 case '/':
5161 const char *trap = get_trap_mode_suffix ();
5162 const char *round = get_round_mode_suffix ();
5164 if (trap || round)
5165 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5166 break;
5169 case ',':
5170 /* Generates single precision instruction suffix. */
5171 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5172 break;
5174 case '-':
5175 /* Generates double precision instruction suffix. */
5176 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5177 break;
5179 case '#':
5180 if (alpha_this_literal_sequence_number == 0)
5181 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5182 fprintf (file, "%d", alpha_this_literal_sequence_number);
5183 break;
5185 case '*':
5186 if (alpha_this_gpdisp_sequence_number == 0)
5187 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5188 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5189 break;
5191 case 'H':
5192 if (GET_CODE (x) == HIGH)
5193 output_addr_const (file, XEXP (x, 0));
5194 else
5195 output_operand_lossage ("invalid %%H value");
5196 break;
5198 case 'J':
5200 const char *lituse;
5202 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5204 x = XVECEXP (x, 0, 0);
5205 lituse = "lituse_tlsgd";
5207 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5209 x = XVECEXP (x, 0, 0);
5210 lituse = "lituse_tlsldm";
5212 else if (CONST_INT_P (x))
5213 lituse = "lituse_jsr";
5214 else
5216 output_operand_lossage ("invalid %%J value");
5217 break;
5220 if (x != const0_rtx)
5221 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5223 break;
5225 case 'j':
5227 const char *lituse;
5229 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5230 lituse = "lituse_jsrdirect";
5231 #else
5232 lituse = "lituse_jsr";
5233 #endif
5235 gcc_assert (INTVAL (x) != 0);
5236 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5238 break;
5239 case 'r':
5240 /* If this operand is the constant zero, write it as "$31". */
5241 if (REG_P (x))
5242 fprintf (file, "%s", reg_names[REGNO (x)]);
5243 else if (x == CONST0_RTX (GET_MODE (x)))
5244 fprintf (file, "$31");
5245 else
5246 output_operand_lossage ("invalid %%r value");
5247 break;
5249 case 'R':
5250 /* Similar, but for floating-point. */
5251 if (REG_P (x))
5252 fprintf (file, "%s", reg_names[REGNO (x)]);
5253 else if (x == CONST0_RTX (GET_MODE (x)))
5254 fprintf (file, "$f31");
5255 else
5256 output_operand_lossage ("invalid %%R value");
5257 break;
5259 case 'N':
5260 /* Write the 1's complement of a constant. */
5261 if (!CONST_INT_P (x))
5262 output_operand_lossage ("invalid %%N value");
5264 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5265 break;
5267 case 'P':
5268 /* Write 1 << C, for a constant C. */
5269 if (!CONST_INT_P (x))
5270 output_operand_lossage ("invalid %%P value");
5272 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5273 break;
5275 case 'h':
5276 /* Write the high-order 16 bits of a constant, sign-extended. */
5277 if (!CONST_INT_P (x))
5278 output_operand_lossage ("invalid %%h value");
5280 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5281 break;
5283 case 'L':
5284 /* Write the low-order 16 bits of a constant, sign-extended. */
5285 if (!CONST_INT_P (x))
5286 output_operand_lossage ("invalid %%L value");
5288 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5289 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5290 break;
5292 case 'm':
5293 /* Write mask for ZAP insn. */
5294 if (GET_CODE (x) == CONST_DOUBLE)
5296 HOST_WIDE_INT mask = 0;
5297 HOST_WIDE_INT value;
5299 value = CONST_DOUBLE_LOW (x);
5300 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5301 i++, value >>= 8)
5302 if (value & 0xff)
5303 mask |= (1 << i);
5305 value = CONST_DOUBLE_HIGH (x);
5306 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5307 i++, value >>= 8)
5308 if (value & 0xff)
5309 mask |= (1 << (i + sizeof (int)));
5311 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5314 else if (CONST_INT_P (x))
5316 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5318 for (i = 0; i < 8; i++, value >>= 8)
5319 if (value & 0xff)
5320 mask |= (1 << i);
5322 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5324 else
5325 output_operand_lossage ("invalid %%m value");
5326 break;
5328 case 'M':
5329 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5330 if (!CONST_INT_P (x)
5331 || (INTVAL (x) != 8 && INTVAL (x) != 16
5332 && INTVAL (x) != 32 && INTVAL (x) != 64))
5333 output_operand_lossage ("invalid %%M value");
5335 fprintf (file, "%s",
5336 (INTVAL (x) == 8 ? "b"
5337 : INTVAL (x) == 16 ? "w"
5338 : INTVAL (x) == 32 ? "l"
5339 : "q"));
5340 break;
5342 case 'U':
5343 /* Similar, except do it from the mask. */
5344 if (CONST_INT_P (x))
5346 HOST_WIDE_INT value = INTVAL (x);
5348 if (value == 0xff)
5350 fputc ('b', file);
5351 break;
5353 if (value == 0xffff)
5355 fputc ('w', file);
5356 break;
5358 if (value == 0xffffffff)
5360 fputc ('l', file);
5361 break;
5363 if (value == -1)
5365 fputc ('q', file);
5366 break;
5369 else if (HOST_BITS_PER_WIDE_INT == 32
5370 && GET_CODE (x) == CONST_DOUBLE
5371 && CONST_DOUBLE_LOW (x) == 0xffffffff
5372 && CONST_DOUBLE_HIGH (x) == 0)
5374 fputc ('l', file);
5375 break;
5377 output_operand_lossage ("invalid %%U value");
5378 break;
5380 case 's':
5381 /* Write the constant value divided by 8. */
5382 if (!CONST_INT_P (x)
5383 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5384 || (INTVAL (x) & 7) != 0)
5385 output_operand_lossage ("invalid %%s value");
5387 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5388 break;
5390 case 'S':
5391 /* Same, except compute (64 - c) / 8. */
5393 if (!CONST_INT_P (x)
5394 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5395 || (INTVAL (x) & 7) != 0)
5396 output_operand_lossage ("invalid %%S value");
5398 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5399 break;
5401 case 'C': case 'D': case 'c': case 'd':
5402 /* Write out comparison name. */
5404 enum rtx_code c = GET_CODE (x);
5406 if (!COMPARISON_P (x))
5407 output_operand_lossage ("invalid %%C value");
5409 else if (code == 'D')
5410 c = reverse_condition (c);
5411 else if (code == 'c')
5412 c = swap_condition (c);
5413 else if (code == 'd')
5414 c = swap_condition (reverse_condition (c));
5416 if (c == LEU)
5417 fprintf (file, "ule");
5418 else if (c == LTU)
5419 fprintf (file, "ult");
5420 else if (c == UNORDERED)
5421 fprintf (file, "un");
5422 else
5423 fprintf (file, "%s", GET_RTX_NAME (c));
5425 break;
5427 case 'E':
5428 /* Write the divide or modulus operator. */
5429 switch (GET_CODE (x))
5431 case DIV:
5432 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5433 break;
5434 case UDIV:
5435 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5436 break;
5437 case MOD:
5438 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5439 break;
5440 case UMOD:
5441 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5442 break;
5443 default:
5444 output_operand_lossage ("invalid %%E value");
5445 break;
5447 break;
5449 case 'A':
5450 /* Write "_u" for unaligned access. */
5451 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5452 fprintf (file, "_u");
5453 break;
5455 case 0:
5456 if (REG_P (x))
5457 fprintf (file, "%s", reg_names[REGNO (x)]);
5458 else if (MEM_P (x))
5459 output_address (XEXP (x, 0));
5460 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5462 switch (XINT (XEXP (x, 0), 1))
5464 case UNSPEC_DTPREL:
5465 case UNSPEC_TPREL:
5466 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5467 break;
5468 default:
5469 output_operand_lossage ("unknown relocation unspec");
5470 break;
5473 else
5474 output_addr_const (file, x);
5475 break;
5477 default:
5478 output_operand_lossage ("invalid %%xn code");
5482 void
5483 print_operand_address (FILE *file, rtx addr)
5485 int basereg = 31;
5486 HOST_WIDE_INT offset = 0;
5488 if (GET_CODE (addr) == AND)
5489 addr = XEXP (addr, 0);
5491 if (GET_CODE (addr) == PLUS
5492 && CONST_INT_P (XEXP (addr, 1)))
5494 offset = INTVAL (XEXP (addr, 1));
5495 addr = XEXP (addr, 0);
5498 if (GET_CODE (addr) == LO_SUM)
5500 const char *reloc16, *reloclo;
5501 rtx op1 = XEXP (addr, 1);
5503 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5505 op1 = XEXP (op1, 0);
5506 switch (XINT (op1, 1))
5508 case UNSPEC_DTPREL:
5509 reloc16 = NULL;
5510 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5511 break;
5512 case UNSPEC_TPREL:
5513 reloc16 = NULL;
5514 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5515 break;
5516 default:
5517 output_operand_lossage ("unknown relocation unspec");
5518 return;
5521 output_addr_const (file, XVECEXP (op1, 0, 0));
5523 else
5525 reloc16 = "gprel";
5526 reloclo = "gprellow";
5527 output_addr_const (file, op1);
5530 if (offset)
5531 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5533 addr = XEXP (addr, 0);
5534 switch (GET_CODE (addr))
5536 case REG:
5537 basereg = REGNO (addr);
5538 break;
5540 case SUBREG:
5541 basereg = subreg_regno (addr);
5542 break;
5544 default:
5545 gcc_unreachable ();
5548 fprintf (file, "($%d)\t\t!%s", basereg,
5549 (basereg == 29 ? reloc16 : reloclo));
5550 return;
5553 switch (GET_CODE (addr))
5555 case REG:
5556 basereg = REGNO (addr);
5557 break;
5559 case SUBREG:
5560 basereg = subreg_regno (addr);
5561 break;
5563 case CONST_INT:
5564 offset = INTVAL (addr);
5565 break;
5567 case SYMBOL_REF:
5568 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5569 fprintf (file, "%s", XSTR (addr, 0));
5570 return;
5572 case CONST:
5573 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5574 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5575 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5576 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5577 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5578 INTVAL (XEXP (XEXP (addr, 0), 1)));
5579 return;
5581 default:
5582 output_operand_lossage ("invalid operand address");
5583 return;
5586 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
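/* A host-side model of the high/low split behind the %h and %L codes
   (editor's sketch).  Because lda sign-extends its 16-bit displacement,
   the low part is taken as a signed value and the high part compensates,
   so (hi << 16) + lo reproduces the value for constants the backend has
   split for an ldah/lda pair.  */

static void
split_hi_lo_model (long long value, long long *hi, long long *lo)
{
  *lo = (value & 0xffff) - 2 * (value & 0x8000);  /* what %L prints  */
  *hi = (value - *lo) >> 16;                      /* the ldah displacement  */
}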
5589 /* Emit RTL insns to initialize the variable parts of a trampoline at
5590 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5591 for the static chain value for the function. */
5593 static void
5594 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5596 rtx fnaddr, mem, word1, word2;
5598 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5600 #ifdef POINTERS_EXTEND_UNSIGNED
5601 fnaddr = convert_memory_address (Pmode, fnaddr);
5602 chain_value = convert_memory_address (Pmode, chain_value);
5603 #endif
5605 if (TARGET_ABI_OPEN_VMS)
5607 const char *fnname;
5608 char *trname;
5610 /* Construct the name of the trampoline entry point. */
5611 fnname = XSTR (fnaddr, 0);
5612 trname = (char *) alloca (strlen (fnname) + 5);
5613 strcpy (trname, fnname);
5614 strcat (trname, "..tr");
5615 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5616 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5618 /* Trampoline (or "bounded") procedure descriptor is constructed from
5619 the function's procedure descriptor with certain fields zeroed IAW
5620 the VMS calling standard. This is stored in the first quadword. */
5621 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5622 word1 = expand_and (DImode, word1,
5623 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5624 NULL);
5626 else
5628 /* These 4 instructions are:
5629 ldq $1,24($27)
5630 ldq $27,16($27)
5631 jmp $31,($27),0
5632 nop
5633 We don't bother setting the HINT field of the jump; the nop
5634 is merely there for padding. */
5635 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5636 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5639 /* Store the first two words, as computed above. */
5640 mem = adjust_address (m_tramp, DImode, 0);
5641 emit_move_insn (mem, word1);
5642 mem = adjust_address (m_tramp, DImode, 8);
5643 emit_move_insn (mem, word2);
5645 /* Store function address and static chain value. */
5646 mem = adjust_address (m_tramp, Pmode, 16);
5647 emit_move_insn (mem, fnaddr);
5648 mem = adjust_address (m_tramp, Pmode, 24);
5649 emit_move_insn (mem, chain_value);
5651 if (TARGET_ABI_OSF)
5653 emit_insn (gen_imb ());
5654 #ifdef HAVE_ENABLE_EXECUTE_STACK
5655 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5656 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5657 #endif
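/* Editor's sketch of the 32-byte OSF trampoline image written above; all
   four fields are little-endian quadwords at offsets 0, 8, 16 and 24:  */

struct alpha_osf_trampoline_image
{
  unsigned long long insn_pair_1;   /* ldq $1,24($27); ldq $27,16($27)  */
  unsigned long long insn_pair_2;   /* jmp $31,($27),0; nop  */
  unsigned long long func_addr;     /* loaded into $27 by the second ldq  */
  unsigned long long static_chain;  /* loaded into $1 by the first ldq  */
};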
5661 /* Determine where to put an argument to a function.
5662 Value is zero to push the argument on the stack,
5663 or a hard register in which to store the argument.
5665 MODE is the argument's machine mode.
5666 TYPE is the data type of the argument (as a tree).
5667 This is null for libcalls where that information may
5668 not be available.
5669 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5670 the preceding args and about the function being called.
5671 NAMED is nonzero if this argument is a named parameter
5672 (otherwise it is an extra parameter matching an ellipsis).
5674 On Alpha the first 6 words of args are normally in registers
5675 and the rest are pushed. */
5677 static rtx
5678 alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
5679 const_tree type, bool named ATTRIBUTE_UNUSED)
5681 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5682 int basereg;
5683 int num_args;
5685 /* Don't get confused and pass small structures in FP registers. */
5686 if (type && AGGREGATE_TYPE_P (type))
5687 basereg = 16;
5688 else
5690 #ifdef ENABLE_CHECKING
5691 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5692 values here. */
5693 gcc_assert (!COMPLEX_MODE_P (mode));
5694 #endif
5696 /* Set up defaults for FP operands passed in FP registers, and
5697 integral operands passed in integer registers. */
5698 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5699 basereg = 32 + 16;
5700 else
5701 basereg = 16;
5704 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5705 the two platforms, so we can't avoid conditional compilation. */
5706 #if TARGET_ABI_OPEN_VMS
5708 if (mode == VOIDmode)
5709 return alpha_arg_info_reg_val (*cum);
5711 num_args = cum->num_args;
5712 if (num_args >= 6
5713 || targetm.calls.must_pass_in_stack (mode, type))
5714 return NULL_RTX;
5716 #elif TARGET_ABI_OSF
5718 if (*cum >= 6)
5719 return NULL_RTX;
5720 num_args = *cum;
5722 /* VOID is passed as a special flag for "last argument". */
5723 if (type == void_type_node)
5724 basereg = 16;
5725 else if (targetm.calls.must_pass_in_stack (mode, type))
5726 return NULL_RTX;
5728 #else
5729 #error Unhandled ABI
5730 #endif
5732 return gen_rtx_REG (mode, num_args + basereg);
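/* Worked example for the OSF ABI with FP registers enabled (editor's
   illustration): given

       double f (int a, double b, long c);

   argument slot 0 puts A in $16, slot 1 puts B in $f17 (basereg 48 plus
   the slot number), and slot 2 puts C in $18.  A seventh slot would get
   NULL_RTX here and be passed on the stack.  */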
5735 /* Update the data in CUM to advance over an argument
5736 of mode MODE and data type TYPE.
5737 (TYPE is null for libcalls where that information may not be available.) */
5739 static void
5740 alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
5741 const_tree type, bool named ATTRIBUTE_UNUSED)
5743 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5744 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5745 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5747 #if TARGET_ABI_OSF
5748 *cum += increment;
5749 #else
5750 if (!onstack && cum->num_args < 6)
5751 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5752 cum->num_args += increment;
5753 #endif
5756 static int
5757 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5758 machine_mode mode ATTRIBUTE_UNUSED,
5759 tree type ATTRIBUTE_UNUSED,
5760 bool named ATTRIBUTE_UNUSED)
5762 int words = 0;
5763 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5765 #if TARGET_ABI_OPEN_VMS
5766 if (cum->num_args < 6
5767 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5768 words = 6 - cum->num_args;
5769 #elif TARGET_ABI_OSF
5770 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5771 words = 6 - *cum;
5772 #else
5773 #error Unhandled ABI
5774 #endif
5776 return words * UNITS_PER_WORD;
5780 /* Return true if TYPE must be returned in memory, instead of in registers. */
5782 static bool
5783 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5785 machine_mode mode = VOIDmode;
5786 int size;
5788 if (type)
5790 mode = TYPE_MODE (type);
5792 /* All aggregates are returned in memory, except on OpenVMS where
5793 records that fit 64 bits should be returned by immediate value
5794 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5795 if (TARGET_ABI_OPEN_VMS
5796 && TREE_CODE (type) != ARRAY_TYPE
5797 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5798 return false;
5800 if (AGGREGATE_TYPE_P (type))
5801 return true;
5804 size = GET_MODE_SIZE (mode);
5805 switch (GET_MODE_CLASS (mode))
5807 case MODE_VECTOR_FLOAT:
5808 /* Pass all float vectors in memory, like an aggregate. */
5809 return true;
5811 case MODE_COMPLEX_FLOAT:
5812 /* We judge complex floats on the size of their element,
5813 not the size of the whole type. */
5814 size = GET_MODE_UNIT_SIZE (mode);
5815 break;
5817 case MODE_INT:
5818 case MODE_FLOAT:
5819 case MODE_COMPLEX_INT:
5820 case MODE_VECTOR_INT:
5821 break;
5823 default:
5824 /* ??? We get called on all sorts of random stuff from
5825 aggregate_value_p. We must return something, but it's not
5826 clear what's safe to return. Pretend it's a struct I
5827 guess. */
5828 return true;
5831 /* Otherwise types must fit in one register. */
5832 return size > UNITS_PER_WORD;
5835 /* Return true if TYPE should be passed by invisible reference. */
5837 static bool
5838 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5839 machine_mode mode,
5840 const_tree type ATTRIBUTE_UNUSED,
5841 bool named ATTRIBUTE_UNUSED)
5843 return mode == TFmode || mode == TCmode;
5846 /* Define how to find the value returned by a function. VALTYPE is the
5847 data type of the value (as a tree). If the precise function being
5848 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5849 MODE is set instead of VALTYPE for libcalls.
5851 On Alpha the value is found in $0 for integer functions and
5852 $f0 for floating-point functions. */
5855 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5856 machine_mode mode)
5858 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5859 enum mode_class mclass;
5861 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5863 if (valtype)
5864 mode = TYPE_MODE (valtype);
5866 mclass = GET_MODE_CLASS (mode);
5867 switch (mclass)
5869 case MODE_INT:
5870 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5871 where we have them returning both SImode and DImode. */
5872 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5873 PROMOTE_MODE (mode, dummy, valtype);
5874 /* FALLTHRU */
5876 case MODE_COMPLEX_INT:
5877 case MODE_VECTOR_INT:
5878 regnum = 0;
5879 break;
5881 case MODE_FLOAT:
5882 regnum = 32;
5883 break;
5885 case MODE_COMPLEX_FLOAT:
5887 machine_mode cmode = GET_MODE_INNER (mode);
5889 return gen_rtx_PARALLEL
5890 (VOIDmode,
5891 gen_rtvec (2,
5892 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5893 const0_rtx),
5894 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5895 GEN_INT (GET_MODE_SIZE (cmode)))));
5898 case MODE_RANDOM:
5899 /* We should only reach here for BLKmode on VMS. */
5900 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5901 regnum = 0;
5902 break;
5904 default:
5905 gcc_unreachable ();
5908 return gen_rtx_REG (mode, regnum);
5911 /* TCmode complex values are passed by invisible reference. We
5912 should not split these values. */
5914 static bool
5915 alpha_split_complex_arg (const_tree type)
5917 return TYPE_MODE (type) != TCmode;
5920 static tree
5921 alpha_build_builtin_va_list (void)
5923 tree base, ofs, space, record, type_decl;
5925 if (TARGET_ABI_OPEN_VMS)
5926 return ptr_type_node;
5928 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5929 type_decl = build_decl (BUILTINS_LOCATION,
5930 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5931 TYPE_STUB_DECL (record) = type_decl;
5932 TYPE_NAME (record) = type_decl;
5934 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5936 /* Dummy field to prevent alignment warnings. */
5937 space = build_decl (BUILTINS_LOCATION,
5938 FIELD_DECL, NULL_TREE, integer_type_node);
5939 DECL_FIELD_CONTEXT (space) = record;
5940 DECL_ARTIFICIAL (space) = 1;
5941 DECL_IGNORED_P (space) = 1;
5943 ofs = build_decl (BUILTINS_LOCATION,
5944 FIELD_DECL, get_identifier ("__offset"),
5945 integer_type_node);
5946 DECL_FIELD_CONTEXT (ofs) = record;
5947 DECL_CHAIN (ofs) = space;
5948 /* ??? This is a hack, __offset is marked volatile to prevent
5949 DCE that confuses stdarg optimization and results in
5950 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5951 TREE_THIS_VOLATILE (ofs) = 1;
5953 base = build_decl (BUILTINS_LOCATION,
5954 FIELD_DECL, get_identifier ("__base"),
5955 ptr_type_node);
5956 DECL_FIELD_CONTEXT (base) = record;
5957 DECL_CHAIN (base) = ofs;
5959 TYPE_FIELDS (record) = base;
5960 layout_type (record);
5962 va_list_gpr_counter_field = ofs;
5963 return record;
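/* The record built above corresponds roughly to this C declaration
   (editor's sketch; the real third field is unnamed and exists only as
   padding, here called __pad for illustration):  */

struct alpha_va_list_model
{
  void *__base;   /* address that __offset is applied to  */
  int __offset;   /* byte offset of the next argument  */
  int __pad;      /* artificial dummy field  */
};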
5966 #if TARGET_ABI_OSF
5967 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5968 and constant additions. */
5970 static gimple
5971 va_list_skip_additions (tree lhs)
5973 gimple stmt;
5975 for (;;)
5977 enum tree_code code;
5979 stmt = SSA_NAME_DEF_STMT (lhs);
5981 if (gimple_code (stmt) == GIMPLE_PHI)
5982 return stmt;
5984 if (!is_gimple_assign (stmt)
5985 || gimple_assign_lhs (stmt) != lhs)
5986 return NULL;
5988 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5989 return stmt;
5990 code = gimple_assign_rhs_code (stmt);
5991 if (!CONVERT_EXPR_CODE_P (code)
5992 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5993 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5994 || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
5995 return stmt;
5997 lhs = gimple_assign_rhs1 (stmt);
6001 /* Check if LHS = RHS statement is
6002 LHS = *(ap.__base + ap.__offset + cst)
6003 or
6004 LHS = *(ap.__base
6005 + ((ap.__offset + cst <= 47)
6006 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
6007 If the former, indicate that GPR registers are needed,
6008 if the latter, indicate that FPR registers are needed.
6010 Also look for LHS = (*ptr).field, where ptr is one of the forms
6011 listed above.
6013 On alpha, cfun->va_list_gpr_size is used as size of the needed
6014 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
6015 registers are needed and bit 1 set if FPR registers are needed.
6016 Return true if va_list references should not be scanned for the
6017 current statement. */
6019 static bool
6020 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
6022 tree base, offset, rhs;
6023 int offset_arg = 1;
6024 gimple base_stmt;
6026 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
6027 != GIMPLE_SINGLE_RHS)
6028 return false;
6030 rhs = gimple_assign_rhs1 (stmt);
6031 while (handled_component_p (rhs))
6032 rhs = TREE_OPERAND (rhs, 0);
6033 if (TREE_CODE (rhs) != MEM_REF
6034 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6035 return false;
6037 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6038 if (stmt == NULL
6039 || !is_gimple_assign (stmt)
6040 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6041 return false;
6043 base = gimple_assign_rhs1 (stmt);
6044 if (TREE_CODE (base) == SSA_NAME)
6046 base_stmt = va_list_skip_additions (base);
6047 if (base_stmt
6048 && is_gimple_assign (base_stmt)
6049 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6050 base = gimple_assign_rhs1 (base_stmt);
6053 if (TREE_CODE (base) != COMPONENT_REF
6054 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6056 base = gimple_assign_rhs2 (stmt);
6057 if (TREE_CODE (base) == SSA_NAME)
6059 base_stmt = va_list_skip_additions (base);
6060 if (base_stmt
6061 && is_gimple_assign (base_stmt)
6062 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6063 base = gimple_assign_rhs1 (base_stmt);
6066 if (TREE_CODE (base) != COMPONENT_REF
6067 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6068 return false;
6070 offset_arg = 0;
6073 base = get_base_address (base);
6074 if (TREE_CODE (base) != VAR_DECL
6075 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
6076 return false;
6078 offset = gimple_op (stmt, 1 + offset_arg);
6079 if (TREE_CODE (offset) == SSA_NAME)
6081 gimple offset_stmt = va_list_skip_additions (offset);
6083 if (offset_stmt
6084 && gimple_code (offset_stmt) == GIMPLE_PHI)
6086 HOST_WIDE_INT sub;
6087 gimple arg1_stmt, arg2_stmt;
6088 tree arg1, arg2;
6089 enum tree_code code1, code2;
6091 if (gimple_phi_num_args (offset_stmt) != 2)
6092 goto escapes;
6094 arg1_stmt
6095 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6096 arg2_stmt
6097 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6098 if (arg1_stmt == NULL
6099 || !is_gimple_assign (arg1_stmt)
6100 || arg2_stmt == NULL
6101 || !is_gimple_assign (arg2_stmt))
6102 goto escapes;
6104 code1 = gimple_assign_rhs_code (arg1_stmt);
6105 code2 = gimple_assign_rhs_code (arg2_stmt);
6106 if (code1 == COMPONENT_REF
6107 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6108 /* Do nothing. */;
6109 else if (code2 == COMPONENT_REF
6110 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6112 gimple tem = arg1_stmt;
6113 code2 = code1;
6114 arg1_stmt = arg2_stmt;
6115 arg2_stmt = tem;
6117 else
6118 goto escapes;
6120 if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
6121 goto escapes;
6123 sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
6124 if (code2 == MINUS_EXPR)
6125 sub = -sub;
6126 if (sub < -48 || sub > -32)
6127 goto escapes;
6129 arg1 = gimple_assign_rhs1 (arg1_stmt);
6130 arg2 = gimple_assign_rhs1 (arg2_stmt);
6131 if (TREE_CODE (arg2) == SSA_NAME)
6133 arg2_stmt = va_list_skip_additions (arg2);
6134 if (arg2_stmt == NULL
6135 || !is_gimple_assign (arg2_stmt)
6136 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6137 goto escapes;
6138 arg2 = gimple_assign_rhs1 (arg2_stmt);
6140 if (arg1 != arg2)
6141 goto escapes;
6143 if (TREE_CODE (arg1) != COMPONENT_REF
6144 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6145 || get_base_address (arg1) != base)
6146 goto escapes;
6148 /* Need floating point regs. */
6149 cfun->va_list_fpr_size |= 2;
6150 return false;
6152 if (offset_stmt
6153 && is_gimple_assign (offset_stmt)
6154 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6155 offset = gimple_assign_rhs1 (offset_stmt);
6157 if (TREE_CODE (offset) != COMPONENT_REF
6158 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6159 || get_base_address (offset) != base)
6160 goto escapes;
6161 else
6162 /* Need general regs. */
6163 cfun->va_list_fpr_size |= 1;
6164 return false;
6166 escapes:
6167 si->va_list_escapes = true;
6168 return false;
6170 #endif
6172 /* Perform any actions needed for a function that is receiving a
6173 variable number of arguments. */
6175 static void
6176 alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
6177 tree type, int *pretend_size, int no_rtl)
6179 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6181 /* Skip the current argument. */
6182 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6183 true);
6185 #if TARGET_ABI_OPEN_VMS
6186 /* For VMS, we allocate space for all 6 arg registers plus a count.
6188 However, if NO registers need to be saved, don't allocate any space.
6189 This is not only because we won't need the space, but because AP
6190 includes the current_pretend_args_size and we don't want to mess up
6191 any ap-relative addresses already made. */
6192 if (cum.num_args < 6)
6194 if (!no_rtl)
6196 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6197 emit_insn (gen_arg_home ());
6199 *pretend_size = 7 * UNITS_PER_WORD;
6201 #else
6202 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6203 only push those that are remaining. However, if NO registers need to
6204 be saved, don't allocate any space. This is not only because we won't
6205 need the space, but because AP includes the current_pretend_args_size
6206 and we don't want to mess up any ap-relative addresses already made.
6208 If we are not to use the floating-point registers, save the integer
6209 registers where we would put the floating-point registers. This is
6210 not the most efficient way to implement varargs with just one register
6211 class, but it isn't worth doing anything more efficient in this rare
6212 case. */
6213 if (cum >= 6)
6214 return;
6216 if (!no_rtl)
6218 int count;
6219 alias_set_type set = get_varargs_alias_set ();
6220 rtx tmp;
6222 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6223 if (count > 6 - cum)
6224 count = 6 - cum;
6226 /* Detect whether integer registers or floating-point registers
6227 are needed by the detected va_arg statements. See above for
6228 how these values are computed. Note that the "escape" value
6229 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6230 these bits set. */
6231 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6233 if (cfun->va_list_fpr_size & 1)
6235 tmp = gen_rtx_MEM (BLKmode,
6236 plus_constant (Pmode, virtual_incoming_args_rtx,
6237 (cum + 6) * UNITS_PER_WORD));
6238 MEM_NOTRAP_P (tmp) = 1;
6239 set_mem_alias_set (tmp, set);
6240 move_block_from_reg (16 + cum, tmp, count);
6243 if (cfun->va_list_fpr_size & 2)
6245 tmp = gen_rtx_MEM (BLKmode,
6246 plus_constant (Pmode, virtual_incoming_args_rtx,
6247 cum * UNITS_PER_WORD));
6248 MEM_NOTRAP_P (tmp) = 1;
6249 set_mem_alias_set (tmp, set);
6250 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6253 *pretend_size = 12 * UNITS_PER_WORD;
6254 #endif
6257 static void
6258 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6260 HOST_WIDE_INT offset;
6261 tree t, offset_field, base_field;
6263 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6264 return;
6266 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6267 up by 48, storing fp arg registers in the first 48 bytes, and the
6268 integer arg registers in the next 48 bytes. This is only done,
6269 however, if any integer registers need to be stored.
6271 If no integer registers need be stored, then we must subtract 48
6272 in order to account for the integer arg registers which are counted
6273 in argsize above, but which are not actually stored on the stack.
6274 Must further be careful here about structures straddling the last
6275 integer argument register; that futzes with pretend_args_size,
6276 which changes the meaning of AP. */
6278 if (NUM_ARGS < 6)
6279 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6280 else
6281 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6283 if (TARGET_ABI_OPEN_VMS)
6285 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6286 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6287 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6288 TREE_SIDE_EFFECTS (t) = 1;
6289 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6291 else
6293 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6294 offset_field = DECL_CHAIN (base_field);
6296 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6297 valist, base_field, NULL_TREE);
6298 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6299 valist, offset_field, NULL_TREE);
6301 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6302 t = fold_build_pointer_plus_hwi (t, offset);
6303 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6304 TREE_SIDE_EFFECTS (t) = 1;
6305 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6307 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6308 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6309 TREE_SIDE_EFFECTS (t) = 1;
6310 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6314 static tree
6315 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6316 gimple_seq *pre_p)
6318 tree type_size, ptr_type, addend, t, addr;
6319 gimple_seq internal_post;
6321 /* If the type could not be passed in registers, skip the block
6322 reserved for the registers. */
6323 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6325 t = build_int_cst (TREE_TYPE (offset), 6*8);
6326 gimplify_assign (offset,
6327 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6328 pre_p);
6331 addend = offset;
6332 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6334 if (TREE_CODE (type) == COMPLEX_TYPE)
6336 tree real_part, imag_part, real_temp;
6338 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6339 offset, pre_p);
6341 /* Copy the value into a new temporary, lest the formal temporary
6342 be reused out from under us. */
6343 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6345 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6346 offset, pre_p);
6348 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6350 else if (TREE_CODE (type) == REAL_TYPE)
6352 tree fpaddend, cond, fourtyeight;
6354 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6355 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6356 addend, fourtyeight);
6357 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6358 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6359 fpaddend, addend);
6362 /* Build the final address and force that value into a temporary. */
6363 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6364 internal_post = NULL;
6365 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6366 gimple_seq_add_seq (pre_p, internal_post);
6368 /* Update the offset field. */
6369 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6370 if (type_size == NULL || TREE_OVERFLOW (type_size))
6371 t = size_zero_node;
6372 else
6374 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6375 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6376 t = size_binop (MULT_EXPR, t, size_int (8));
6378 t = fold_convert (TREE_TYPE (offset), t);
6379 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6380 pre_p);
6382 return build_va_arg_indirect_ref (addr);
6385 static tree
6386 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6387 gimple_seq *post_p)
6389 tree offset_field, base_field, offset, base, t, r;
6390 bool indirect;
6392 if (TARGET_ABI_OPEN_VMS)
6393 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6395 base_field = TYPE_FIELDS (va_list_type_node);
6396 offset_field = DECL_CHAIN (base_field);
6397 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6398 valist, base_field, NULL_TREE);
6399 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6400 valist, offset_field, NULL_TREE);
6402 /* Pull the fields of the structure out into temporaries. Since we never
6403 modify the base field, we can use a formal temporary. Sign-extend the
6404 offset field so that it's the proper width for pointer arithmetic. */
6405 base = get_formal_tmp_var (base_field, pre_p);
6407 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6408 offset = get_initialized_tmp_var (t, pre_p, NULL);
6410 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6411 if (indirect)
6412 type = build_pointer_type_for_mode (type, ptr_mode, true);
6414 /* Find the value. Note that this will be a stable indirection, or
6415 a composite of stable indirections in the case of complex. */
6416 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6418 /* Stuff the offset temporary back into its field. */
6419 gimplify_assign (unshare_expr (offset_field),
6420 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6422 if (indirect)
6423 r = build_va_arg_indirect_ref (r);
6425 return r;
6428 /* Builtins. */
6430 enum alpha_builtin
6432 ALPHA_BUILTIN_CMPBGE,
6433 ALPHA_BUILTIN_EXTBL,
6434 ALPHA_BUILTIN_EXTWL,
6435 ALPHA_BUILTIN_EXTLL,
6436 ALPHA_BUILTIN_EXTQL,
6437 ALPHA_BUILTIN_EXTWH,
6438 ALPHA_BUILTIN_EXTLH,
6439 ALPHA_BUILTIN_EXTQH,
6440 ALPHA_BUILTIN_INSBL,
6441 ALPHA_BUILTIN_INSWL,
6442 ALPHA_BUILTIN_INSLL,
6443 ALPHA_BUILTIN_INSQL,
6444 ALPHA_BUILTIN_INSWH,
6445 ALPHA_BUILTIN_INSLH,
6446 ALPHA_BUILTIN_INSQH,
6447 ALPHA_BUILTIN_MSKBL,
6448 ALPHA_BUILTIN_MSKWL,
6449 ALPHA_BUILTIN_MSKLL,
6450 ALPHA_BUILTIN_MSKQL,
6451 ALPHA_BUILTIN_MSKWH,
6452 ALPHA_BUILTIN_MSKLH,
6453 ALPHA_BUILTIN_MSKQH,
6454 ALPHA_BUILTIN_UMULH,
6455 ALPHA_BUILTIN_ZAP,
6456 ALPHA_BUILTIN_ZAPNOT,
6457 ALPHA_BUILTIN_AMASK,
6458 ALPHA_BUILTIN_IMPLVER,
6459 ALPHA_BUILTIN_RPCC,
6460 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6461 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6463 /* TARGET_MAX */
6464 ALPHA_BUILTIN_MINUB8,
6465 ALPHA_BUILTIN_MINSB8,
6466 ALPHA_BUILTIN_MINUW4,
6467 ALPHA_BUILTIN_MINSW4,
6468 ALPHA_BUILTIN_MAXUB8,
6469 ALPHA_BUILTIN_MAXSB8,
6470 ALPHA_BUILTIN_MAXUW4,
6471 ALPHA_BUILTIN_MAXSW4,
6472 ALPHA_BUILTIN_PERR,
6473 ALPHA_BUILTIN_PKLB,
6474 ALPHA_BUILTIN_PKWB,
6475 ALPHA_BUILTIN_UNPKBL,
6476 ALPHA_BUILTIN_UNPKBW,
6478 /* TARGET_CIX */
6479 ALPHA_BUILTIN_CTTZ,
6480 ALPHA_BUILTIN_CTLZ,
6481 ALPHA_BUILTIN_CTPOP,
6483 ALPHA_BUILTIN_max
6486 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6487 CODE_FOR_builtin_cmpbge,
6488 CODE_FOR_extbl,
6489 CODE_FOR_extwl,
6490 CODE_FOR_extll,
6491 CODE_FOR_extql,
6492 CODE_FOR_extwh,
6493 CODE_FOR_extlh,
6494 CODE_FOR_extqh,
6495 CODE_FOR_builtin_insbl,
6496 CODE_FOR_builtin_inswl,
6497 CODE_FOR_builtin_insll,
6498 CODE_FOR_insql,
6499 CODE_FOR_inswh,
6500 CODE_FOR_inslh,
6501 CODE_FOR_insqh,
6502 CODE_FOR_mskbl,
6503 CODE_FOR_mskwl,
6504 CODE_FOR_mskll,
6505 CODE_FOR_mskql,
6506 CODE_FOR_mskwh,
6507 CODE_FOR_msklh,
6508 CODE_FOR_mskqh,
6509 CODE_FOR_umuldi3_highpart,
6510 CODE_FOR_builtin_zap,
6511 CODE_FOR_builtin_zapnot,
6512 CODE_FOR_builtin_amask,
6513 CODE_FOR_builtin_implver,
6514 CODE_FOR_builtin_rpcc,
6515 CODE_FOR_builtin_establish_vms_condition_handler,
6516 CODE_FOR_builtin_revert_vms_condition_handler,
6518 /* TARGET_MAX */
6519 CODE_FOR_builtin_minub8,
6520 CODE_FOR_builtin_minsb8,
6521 CODE_FOR_builtin_minuw4,
6522 CODE_FOR_builtin_minsw4,
6523 CODE_FOR_builtin_maxub8,
6524 CODE_FOR_builtin_maxsb8,
6525 CODE_FOR_builtin_maxuw4,
6526 CODE_FOR_builtin_maxsw4,
6527 CODE_FOR_builtin_perr,
6528 CODE_FOR_builtin_pklb,
6529 CODE_FOR_builtin_pkwb,
6530 CODE_FOR_builtin_unpkbl,
6531 CODE_FOR_builtin_unpkbw,
6533 /* TARGET_CIX */
6534 CODE_FOR_ctzdi2,
6535 CODE_FOR_clzdi2,
6536 CODE_FOR_popcountdi2
6539 struct alpha_builtin_def
6541 const char *name;
6542 enum alpha_builtin code;
6543 unsigned int target_mask;
6544 bool is_const;
6547 static struct alpha_builtin_def const zero_arg_builtins[] = {
6548 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6549 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6552 static struct alpha_builtin_def const one_arg_builtins[] = {
6553 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6554 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6555 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6556 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6557 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6558 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6559 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6560 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6563 static struct alpha_builtin_def const two_arg_builtins[] = {
6564 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6565 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6566 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6567 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6568 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6569 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6570 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6571 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6572 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6573 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6574 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6575 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6576 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6577 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6578 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6579 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6580 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6581 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6582 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6583 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6584 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6585 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6586 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6587 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6588 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6589 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6590 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6591 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6592 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6593 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6594 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6595 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6596 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6597 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6600 static GTY(()) tree alpha_dimode_u;
6601 static GTY(()) tree alpha_v8qi_u;
6602 static GTY(()) tree alpha_v8qi_s;
6603 static GTY(()) tree alpha_v4hi_u;
6604 static GTY(()) tree alpha_v4hi_s;
6606 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6608 /* Return the alpha builtin for CODE. */
6610 static tree
6611 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6613 if (code >= ALPHA_BUILTIN_max)
6614 return error_mark_node;
6615 return alpha_builtins[code];
6618 /* Helper function of alpha_init_builtins. Add the built-in specified
6619 by NAME, TYPE, CODE, and ECF. */
6621 static void
6622 alpha_builtin_function (const char *name, tree ftype,
6623 enum alpha_builtin code, unsigned ecf)
6625 tree decl = add_builtin_function (name, ftype, (int) code,
6626 BUILT_IN_MD, NULL, NULL_TREE);
6628 if (ecf & ECF_CONST)
6629 TREE_READONLY (decl) = 1;
6630 if (ecf & ECF_NOTHROW)
6631 TREE_NOTHROW (decl) = 1;
6633 alpha_builtins [(int) code] = decl;
6636 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6637 functions pointed to by P, with function type FTYPE. */
6639 static void
6640 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6641 tree ftype)
6643 size_t i;
6645 for (i = 0; i < count; ++i, ++p)
6646 if ((target_flags & p->target_mask) == p->target_mask)
6647 alpha_builtin_function (p->name, ftype, p->code,
6648 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6651 static void
6652 alpha_init_builtins (void)
6654 tree ftype;
6656 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6657 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6658 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6659 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6660 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6662 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6663 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6665 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6666 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6668 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6669 alpha_dimode_u, NULL_TREE);
6670 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6672 if (TARGET_ABI_OPEN_VMS)
6674 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6675 NULL_TREE);
6676 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6677 ftype,
6678 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6681 ftype = build_function_type_list (ptr_type_node, void_type_node,
6682 NULL_TREE);
6683 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6684 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6686 vms_patch_builtins ();
6690 /* Expand an expression EXP that calls a built-in function,
6691 with result going to TARGET if that's convenient
6692 (and in mode MODE if that's convenient).
6693 SUBTARGET may be used as the target for computing one of EXP's operands.
6694 IGNORE is nonzero if the value is to be ignored. */
6696 static rtx
6697 alpha_expand_builtin (tree exp, rtx target,
6698 rtx subtarget ATTRIBUTE_UNUSED,
6699 machine_mode mode ATTRIBUTE_UNUSED,
6700 int ignore ATTRIBUTE_UNUSED)
6702 #define MAX_ARGS 2
6704 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6705 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6706 tree arg;
6707 call_expr_arg_iterator iter;
6708 enum insn_code icode;
6709 rtx op[MAX_ARGS], pat;
6710 int arity;
6711 bool nonvoid;
6713 if (fcode >= ALPHA_BUILTIN_max)
6714 internal_error ("bad builtin fcode");
6715 icode = code_for_builtin[fcode];
6716 if (icode == 0)
6717 internal_error ("bad builtin fcode");
6719 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6721 arity = 0;
6722 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6724 const struct insn_operand_data *insn_op;
6726 if (arg == error_mark_node)
6727 return NULL_RTX;
6728 if (arity > MAX_ARGS)
6729 return NULL_RTX;
6731 insn_op = &insn_data[icode].operand[arity + nonvoid];
6733 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6735 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6736 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6737 arity++;
6740 if (nonvoid)
6742 machine_mode tmode = insn_data[icode].operand[0].mode;
6743 if (!target
6744 || GET_MODE (target) != tmode
6745 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6746 target = gen_reg_rtx (tmode);
6749 switch (arity)
6751 case 0:
6752 pat = GEN_FCN (icode) (target);
6753 break;
6754 case 1:
6755 if (nonvoid)
6756 pat = GEN_FCN (icode) (target, op[0]);
6757 else
6758 pat = GEN_FCN (icode) (op[0]);
6759 break;
6760 case 2:
6761 pat = GEN_FCN (icode) (target, op[0], op[1]);
6762 break;
6763 default:
6764 gcc_unreachable ();
6766 if (!pat)
6767 return NULL_RTX;
6768 emit_insn (pat);
6770 if (nonvoid)
6771 return target;
6772 else
6773 return const0_rtx;
6777 /* Several bits below assume HWI >= 64 bits. This should be enforced
6778 by config.gcc. */
6779 #if HOST_BITS_PER_WIDE_INT < 64
6780 # error "HOST_WIDE_INT too small"
6781 #endif
6783 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6784 with an 8-bit output vector. OPINT contains the integer operands; bit N
6785 of OP_CONST is set if OPINT[N] is valid. */
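/* Worked example (illustrative values): folding
   __builtin_alpha_cmpbge (0x0102030405060708, 0x1000000000000000)
   gives 0x7f.  Bytes 0 through 6 of the first operand compare >= the
   corresponding zero bytes of the second, so bits 0-6 are set, while
   byte 7 (0x01) is less than 0x10, so bit 7 stays clear.  */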
6787 static tree
6788 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6790 if (op_const == 3)
6792 int i, val;
6793 for (i = 0, val = 0; i < 8; ++i)
6795 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6796 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6797 if (c0 >= c1)
6798 val |= 1 << i;
6800 return build_int_cst (alpha_dimode_u, val);
6802 else if (op_const == 2 && opint[1] == 0)
6803 return build_int_cst (alpha_dimode_u, 0xff);
6804 return NULL;
6807 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6808 specialized form of an AND operation. Other byte manipulation instructions
6809 are defined in terms of this instruction, so this is also used as a
6810 subroutine for other builtins.
6812 OP contains the tree operands; OPINT contains the extracted integer values.
6813 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6814 OPINT may be considered. */
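/* Worked example (illustrative values): for
   __builtin_alpha_zapnot (x, 0x0f), bits 0-3 of the byte selector are
   set, so MASK below becomes 0x00000000ffffffff and the call folds to
   x & 0x00000000ffffffff, keeping the low four bytes of X.  ZAP is the
   complement: the dispatcher flips the selector first, so
   __builtin_alpha_zap (x, 0x0f) clears those same bytes instead.  */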
6816 static tree
6817 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6818 long op_const)
6820 if (op_const & 2)
6822 unsigned HOST_WIDE_INT mask = 0;
6823 int i;
6825 for (i = 0; i < 8; ++i)
6826 if ((opint[1] >> i) & 1)
6827 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6829 if (op_const & 1)
6830 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6832 if (op)
6833 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6834 build_int_cst (alpha_dimode_u, mask));
6836 else if ((op_const & 1) && opint[0] == 0)
6837 return build_int_cst (alpha_dimode_u, 0);
6838 return NULL;
6841 /* Fold the builtins for the EXT family of instructions. */
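/* Worked example (illustrative values): EXTWL extracts a word from a
   byte position, so __builtin_alpha_extwl (0x1122334455667788, 2)
   folds below to a right shift by 8 * (2 & 7) followed by a ZAPNOT
   with bytemask 0x03, i.e. 0x5566.  */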
6843 static tree
6844 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6845 long op_const, unsigned HOST_WIDE_INT bytemask,
6846 bool is_high)
6848 long zap_const = 2;
6849 tree *zap_op = NULL;
6851 if (op_const & 2)
6853 unsigned HOST_WIDE_INT loc;
6855 loc = opint[1] & 7;
6856 loc *= BITS_PER_UNIT;
6858 if (loc != 0)
6860 if (op_const & 1)
6862 unsigned HOST_WIDE_INT temp = opint[0];
6863 if (is_high)
6864 temp <<= loc;
6865 else
6866 temp >>= loc;
6867 opint[0] = temp;
6868 zap_const = 3;
6871 else
6872 zap_op = op;
6875 opint[1] = bytemask;
6876 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6879 /* Fold the builtins for the INS family of instructions. */
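/* Worked example (illustrative values): INSWL places a word at a byte
   position, so __builtin_alpha_inswl (0x1234, 3) folds below to
   0x1234 << 24 == 0x0000001234000000, with the shifted bytemask 0x18
   selecting the two destination bytes.  */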
6881 static tree
6882 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6883 long op_const, unsigned HOST_WIDE_INT bytemask,
6884 bool is_high)
6886 if ((op_const & 1) && opint[0] == 0)
6887 return build_int_cst (alpha_dimode_u, 0);
6889 if (op_const & 2)
6891 unsigned HOST_WIDE_INT temp, loc, byteloc;
6892 tree *zap_op = NULL;
6894 loc = opint[1] & 7;
6895 bytemask <<= loc;
6897 temp = opint[0];
6898 if (is_high)
6900 byteloc = (64 - (loc * 8)) & 0x3f;
6901 if (byteloc == 0)
6902 zap_op = op;
6903 else
6904 temp >>= byteloc;
6905 bytemask >>= 8;
6907 else
6909 byteloc = loc * 8;
6910 if (byteloc == 0)
6911 zap_op = op;
6912 else
6913 temp <<= byteloc;
6916 opint[0] = temp;
6917 opint[1] = bytemask;
6918 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6921 return NULL;
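/* Fold the builtins for the MSK family of instructions.  These clear
   the BYTEMASK bytes at the byte position given by the second operand;
   the result is expressed as a ZAPNOT with the complemented mask.  */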
6924 static tree
6925 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6926 long op_const, unsigned HOST_WIDE_INT bytemask,
6927 bool is_high)
6929 if (op_const & 2)
6931 unsigned HOST_WIDE_INT loc;
6933 loc = opint[1] & 7;
6934 bytemask <<= loc;
6936 if (is_high)
6937 bytemask >>= 8;
6939 opint[1] = bytemask ^ 0xff;
6942 return alpha_fold_builtin_zapnot (op, opint, op_const);
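/* Fold one of the vector MIN/MAX builtins by applying CODE element-wise
   to the operands viewed as vectors of type VTYPE.  */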
6945 static tree
6946 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6948 tree op0 = fold_convert (vtype, op[0]);
6949 tree op1 = fold_convert (vtype, op[1]);
6950 tree val = fold_build2 (code, vtype, op0, op1);
6951 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
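/* Fold the builtin for the PERR instruction: the sum of the absolute
   differences of the eight unsigned byte pairs of the operands.  */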
6954 static tree
6955 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6957 unsigned HOST_WIDE_INT temp = 0;
6958 int i;
6960 if (op_const != 3)
6961 return NULL;
6963 for (i = 0; i < 8; ++i)
6965 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6966 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6967 if (a >= b)
6968 temp += a - b;
6969 else
6970 temp += b - a;
6973 return build_int_cst (alpha_dimode_u, temp);
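/* Fold the builtin for the PKLB instruction: pack the low byte of each
   of the two longwords into the low two bytes of the result.  */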
6976 static tree
6977 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6979 unsigned HOST_WIDE_INT temp;
6981 if (op_const == 0)
6982 return NULL;
6984 temp = opint[0] & 0xff;
6985 temp |= (opint[0] >> 24) & 0xff00;
6987 return build_int_cst (alpha_dimode_u, temp);
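/* Fold the builtin for the PKWB instruction: pack the low byte of each
   of the four words into the low four bytes of the result.  */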
6990 static tree
6991 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6993 unsigned HOST_WIDE_INT temp;
6995 if (op_const == 0)
6996 return NULL;
6998 temp = opint[0] & 0xff;
6999 temp |= (opint[0] >> 8) & 0xff00;
7000 temp |= (opint[0] >> 16) & 0xff0000;
7001 temp |= (opint[0] >> 24) & 0xff000000;
7003 return build_int_cst (alpha_dimode_u, temp);
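/* Fold the builtin for the UNPKBL instruction: unpack the two low bytes
   into the low bytes of two longwords.  */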
7006 static tree
7007 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7009 unsigned HOST_WIDE_INT temp;
7011 if (op_const == 0)
7012 return NULL;
7014 temp = opint[0] & 0xff;
7015 temp |= (opint[0] & 0xff00) << 24;
7017 return build_int_cst (alpha_dimode_u, temp);
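/* Fold the builtin for the UNPKBW instruction: unpack the four low bytes
   into the low bytes of four words.  */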
7020 static tree
7021 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7023 unsigned HOST_WIDE_INT temp;
7025 if (op_const == 0)
7026 return NULL;
7028 temp = opint[0] & 0xff;
7029 temp |= (opint[0] & 0x0000ff00) << 8;
7030 temp |= (opint[0] & 0x00ff0000) << 16;
7031 temp |= (opint[0] & 0xff000000) << 24;
7033 return build_int_cst (alpha_dimode_u, temp);
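/* Fold the builtin for the CTTZ instruction: count trailing zero bits,
   yielding 64 for a zero operand.  */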
7036 static tree
7037 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7039 unsigned HOST_WIDE_INT temp;
7041 if (op_const == 0)
7042 return NULL;
7044 if (opint[0] == 0)
7045 temp = 64;
7046 else
7047 temp = exact_log2 (opint[0] & -opint[0]);
7049 return build_int_cst (alpha_dimode_u, temp);
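/* Fold the builtin for the CTLZ instruction: count leading zero bits,
   yielding 64 for a zero operand.  */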
7052 static tree
7053 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7055 unsigned HOST_WIDE_INT temp;
7057 if (op_const == 0)
7058 return NULL;
7060 if (opint[0] == 0)
7061 temp = 64;
7062 else
7063 temp = 64 - floor_log2 (opint[0]) - 1;
7065 return build_int_cst (alpha_dimode_u, temp);
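/* Fold the builtin for the CTPOP instruction: count the number of set
   bits in the operand.  */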
7068 static tree
7069 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7071 unsigned HOST_WIDE_INT temp, op;
7073 if (op_const == 0)
7074 return NULL;
7076 op = opint[0];
7077 temp = 0;
7078 while (op)
7079 temp++, op &= op - 1;
7081 return build_int_cst (alpha_dimode_u, temp);
7084 /* Fold one of our builtin functions. */
7086 static tree
7087 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7088 bool ignore ATTRIBUTE_UNUSED)
7090 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7091 long op_const = 0;
7092 int i;
7094 if (n_args > MAX_ARGS)
7095 return NULL;
7097 for (i = 0; i < n_args; i++)
7099 tree arg = op[i];
7100 if (arg == error_mark_node)
7101 return NULL;
7103 opint[i] = 0;
7104 if (TREE_CODE (arg) == INTEGER_CST)
7106 op_const |= 1L << i;
7107 opint[i] = int_cst_value (arg);
7111 switch (DECL_FUNCTION_CODE (fndecl))
7113 case ALPHA_BUILTIN_CMPBGE:
7114 return alpha_fold_builtin_cmpbge (opint, op_const);
7116 case ALPHA_BUILTIN_EXTBL:
7117 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7118 case ALPHA_BUILTIN_EXTWL:
7119 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7120 case ALPHA_BUILTIN_EXTLL:
7121 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7122 case ALPHA_BUILTIN_EXTQL:
7123 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7124 case ALPHA_BUILTIN_EXTWH:
7125 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7126 case ALPHA_BUILTIN_EXTLH:
7127 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7128 case ALPHA_BUILTIN_EXTQH:
7129 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7131 case ALPHA_BUILTIN_INSBL:
7132 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7133 case ALPHA_BUILTIN_INSWL:
7134 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7135 case ALPHA_BUILTIN_INSLL:
7136 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7137 case ALPHA_BUILTIN_INSQL:
7138 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7139 case ALPHA_BUILTIN_INSWH:
7140 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7141 case ALPHA_BUILTIN_INSLH:
7142 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7143 case ALPHA_BUILTIN_INSQH:
7144 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7146 case ALPHA_BUILTIN_MSKBL:
7147 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7148 case ALPHA_BUILTIN_MSKWL:
7149 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7150 case ALPHA_BUILTIN_MSKLL:
7151 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7152 case ALPHA_BUILTIN_MSKQL:
7153 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7154 case ALPHA_BUILTIN_MSKWH:
7155 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7156 case ALPHA_BUILTIN_MSKLH:
7157 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7158 case ALPHA_BUILTIN_MSKQH:
7159 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7161 case ALPHA_BUILTIN_ZAP:
7162 opint[1] ^= 0xff;
7163 /* FALLTHRU */
7164 case ALPHA_BUILTIN_ZAPNOT:
7165 return alpha_fold_builtin_zapnot (op, opint, op_const);
7167 case ALPHA_BUILTIN_MINUB8:
7168 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7169 case ALPHA_BUILTIN_MINSB8:
7170 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7171 case ALPHA_BUILTIN_MINUW4:
7172 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7173 case ALPHA_BUILTIN_MINSW4:
7174 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7175 case ALPHA_BUILTIN_MAXUB8:
7176 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7177 case ALPHA_BUILTIN_MAXSB8:
7178 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7179 case ALPHA_BUILTIN_MAXUW4:
7180 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7181 case ALPHA_BUILTIN_MAXSW4:
7182 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7184 case ALPHA_BUILTIN_PERR:
7185 return alpha_fold_builtin_perr (opint, op_const);
7186 case ALPHA_BUILTIN_PKLB:
7187 return alpha_fold_builtin_pklb (opint, op_const);
7188 case ALPHA_BUILTIN_PKWB:
7189 return alpha_fold_builtin_pkwb (opint, op_const);
7190 case ALPHA_BUILTIN_UNPKBL:
7191 return alpha_fold_builtin_unpkbl (opint, op_const);
7192 case ALPHA_BUILTIN_UNPKBW:
7193 return alpha_fold_builtin_unpkbw (opint, op_const);
7195 case ALPHA_BUILTIN_CTTZ:
7196 return alpha_fold_builtin_cttz (opint, op_const);
7197 case ALPHA_BUILTIN_CTLZ:
7198 return alpha_fold_builtin_ctlz (opint, op_const);
7199 case ALPHA_BUILTIN_CTPOP:
7200 return alpha_fold_builtin_ctpop (opint, op_const);
7202 case ALPHA_BUILTIN_AMASK:
7203 case ALPHA_BUILTIN_IMPLVER:
7204 case ALPHA_BUILTIN_RPCC:
7205 /* None of these are foldable at compile-time. */
7206 default:
7207 return NULL;
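/* Fold a builtin call at GIMPLE level.  Currently only UMULH is handled:
   the call is replaced by an equivalent MULT_HIGHPART_EXPR assignment.
   Returns true if the statement was changed.  */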
7211 bool
7212 alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7214 bool changed = false;
7215 gimple stmt = gsi_stmt (*gsi);
7216 tree call = gimple_call_fn (stmt);
7217 gimple new_stmt = NULL;
7219 if (call)
7221 tree fndecl = gimple_call_fndecl (stmt);
7223 if (fndecl)
7225 tree arg0, arg1;
7227 switch (DECL_FUNCTION_CODE (fndecl))
7229 case ALPHA_BUILTIN_UMULH:
7230 arg0 = gimple_call_arg (stmt, 0);
7231 arg1 = gimple_call_arg (stmt, 1);
7233 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
7234 MULT_HIGHPART_EXPR, arg0, arg1);
7235 break;
7236 default:
7237 break;
7242 if (new_stmt)
7244 gsi_replace (gsi, new_stmt, true);
7245 changed = true;
7248 return changed;
7251 /* This page contains routines that are used to determine what the function
7252 prologue and epilogue code will do and write them out. */
7254 /* Compute the size of the save area in the stack. */
7256 /* These variables are used for communication between the following functions.
7257 They indicate various things about the current function being compiled
7258 that are used to tell what kind of prologue, epilogue and procedure
7259 descriptor to generate. */
7261 /* Nonzero if we need a stack procedure. */
7262 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7263 static enum alpha_procedure_types alpha_procedure_type;
7265 /* Register number (either FP or SP) that is used to unwind the frame. */
7266 static int vms_unwind_regno;
7268 /* Register number used to save FP. We need not have one for RA since
7269 we don't modify it for register procedures. This is only defined
7270 for register frame procedures. */
7271 static int vms_save_fp_regno;
7273 /* Register number used to reference objects off our PV. */
7274 static int vms_base_regno;
7276 /* Compute register masks for saved registers. */
7278 static void
7279 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7281 unsigned long imask = 0;
7282 unsigned long fmask = 0;
7283 unsigned int i;
7285 /* When outputting a thunk, we don't have valid register life info,
7286 but assemble_start_function wants to output .frame and .mask
7287 directives. */
7288 if (cfun->is_thunk)
7290 *imaskP = 0;
7291 *fmaskP = 0;
7292 return;
7295 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7296 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7298 /* One for every register we have to save. */
7299 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7300 if (! fixed_regs[i] && ! call_used_regs[i]
7301 && df_regs_ever_live_p (i) && i != REG_RA)
7303 if (i < 32)
7304 imask |= (1UL << i);
7305 else
7306 fmask |= (1UL << (i - 32));
7309 /* We need to restore these for the handler. */
7310 if (crtl->calls_eh_return)
7312 for (i = 0; ; ++i)
7314 unsigned regno = EH_RETURN_DATA_REGNO (i);
7315 if (regno == INVALID_REGNUM)
7316 break;
7317 imask |= 1UL << regno;
7321 /* If any register spilled, then spill the return address also. */
7322 /* ??? This is required by the Digital stack unwind specification
7323 and isn't needed if we're doing Dwarf2 unwinding. */
7324 if (imask || fmask || alpha_ra_ever_killed ())
7325 imask |= (1UL << REG_RA);
7327 *imaskP = imask;
7328 *fmaskP = fmask;
7332 alpha_sa_size (void)
7334 unsigned long mask[2];
7335 int sa_size = 0;
7336 int i, j;
7338 alpha_sa_mask (&mask[0], &mask[1]);
7340 for (j = 0; j < 2; ++j)
7341 for (i = 0; i < 32; ++i)
7342 if ((mask[j] >> i) & 1)
7343 sa_size++;
7345 if (TARGET_ABI_OPEN_VMS)
7347 /* Start with a stack procedure if we make any calls (REG_RA used), or
7348 need a frame pointer, with a register procedure if we otherwise need
7349 at least a slot, and with a null procedure in other cases. */
7350 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7351 alpha_procedure_type = PT_STACK;
7352 else if (get_frame_size() != 0)
7353 alpha_procedure_type = PT_REGISTER;
7354 else
7355 alpha_procedure_type = PT_NULL;
7357 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7358 made the final decision on stack procedure vs register procedure. */
7359 if (alpha_procedure_type == PT_STACK)
7360 sa_size -= 2;
7362 /* Decide whether to refer to objects off our PV via FP or PV.
7363 If we need FP for something else or if we receive a nonlocal
7364 goto (which expects PV to contain the value), we must use PV.
7365 Otherwise, start by assuming we can use FP. */
7367 vms_base_regno
7368 = (frame_pointer_needed
7369 || cfun->has_nonlocal_label
7370 || alpha_procedure_type == PT_STACK
7371 || crtl->outgoing_args_size)
7372 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7374 /* If we want to copy PV into FP, we need to find some register
7375 in which to save FP. */
7377 vms_save_fp_regno = -1;
7378 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7379 for (i = 0; i < 32; i++)
7380 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7381 vms_save_fp_regno = i;
7383 /* A VMS condition handler requires a stack procedure in our
7384 implementation (this is not required by the calling standard).  */
7385 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7386 || cfun->machine->uses_condition_handler)
7387 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7388 else if (alpha_procedure_type == PT_NULL)
7389 vms_base_regno = REG_PV;
7391 /* Stack unwinding should be done via FP unless we use it for PV. */
7392 vms_unwind_regno = (vms_base_regno == REG_PV
7393 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7395 /* If this is a stack procedure, allow space for saving FP, RA and
7396 a condition handler slot if needed. */
7397 if (alpha_procedure_type == PT_STACK)
7398 sa_size += 2 + cfun->machine->uses_condition_handler;
7400 else
7402 /* Our size must be even (multiple of 16 bytes). */
7403 if (sa_size & 1)
7404 sa_size++;
7407 return sa_size * 8;
7410 /* Define the offset between two registers, one to be eliminated,
7411 and the other its replacement, at the start of a routine. */
7413 HOST_WIDE_INT
7414 alpha_initial_elimination_offset (unsigned int from,
7415 unsigned int to ATTRIBUTE_UNUSED)
7417 HOST_WIDE_INT ret;
7419 ret = alpha_sa_size ();
7420 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7422 switch (from)
7424 case FRAME_POINTER_REGNUM:
7425 break;
7427 case ARG_POINTER_REGNUM:
7428 ret += (ALPHA_ROUND (get_frame_size ()
7429 + crtl->args.pretend_args_size)
7430 - crtl->args.pretend_args_size);
7431 break;
7433 default:
7434 gcc_unreachable ();
7437 return ret;
7440 #if TARGET_ABI_OPEN_VMS
7442 /* Worker function for TARGET_CAN_ELIMINATE. */
7444 static bool
7445 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7447 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7448 alpha_sa_size ();
7450 switch (alpha_procedure_type)
7452 case PT_NULL:
7453 /* NULL procedures have no frame of their own and we only
7454 know how to resolve from the current stack pointer. */
7455 return to == STACK_POINTER_REGNUM;
7457 case PT_REGISTER:
7458 case PT_STACK:
7459 /* We always eliminate except to the stack pointer if there is no
7460 usable frame pointer at hand. */
7461 return (to != STACK_POINTER_REGNUM
7462 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7465 gcc_unreachable ();
7468 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7469 designates the same location as FROM. */
7471 HOST_WIDE_INT
7472 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7474 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7475 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7476 on the proper computations and will need the register save area size
7477 in most cases. */
7479 HOST_WIDE_INT sa_size = alpha_sa_size ();
7481 /* PT_NULL procedures have no frame of their own and we only allow
7482 elimination to the stack pointer. This is the argument pointer and we
7483 resolve the soft frame pointer to that as well. */
7485 if (alpha_procedure_type == PT_NULL)
7486 return 0;
7488 /* For a PT_STACK procedure the frame layout looks as follows
7490 -----> decreasing addresses
7492                <         size rounded up to 16          |   likewise    >
7493 --------------#-------------------------+++------------+++--------------#
7494 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7495 --------------#---------------------------------------------------------#
7496               ^                          ^              ^               ^
7497            ARG_PTR                   FRAME_PTR   HARD_FRAME_PTR     STACK_PTR
7500 PT_REGISTER procedures are similar in that they may have a frame of their
7501 own. They have no regs-sa/pv/outgoing-args area.
7503 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7504 to STACK_PTR if need be. */
7507 HOST_WIDE_INT offset;
7508 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7510 switch (from)
7512 case FRAME_POINTER_REGNUM:
7513 offset = ALPHA_ROUND (sa_size + pv_save_size);
7514 break;
7515 case ARG_POINTER_REGNUM:
7516 offset = (ALPHA_ROUND (sa_size + pv_save_size
7517 + get_frame_size ()
7518 + crtl->args.pretend_args_size)
7519 - crtl->args.pretend_args_size);
7520 break;
7521 default:
7522 gcc_unreachable ();
7525 if (to == STACK_POINTER_REGNUM)
7526 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7528 return offset;
7532 #define COMMON_OBJECT "common_object"
7534 static tree
7535 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7536 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7537 bool *no_add_attrs ATTRIBUTE_UNUSED)
7539 tree decl = *node;
7540 gcc_assert (DECL_P (decl));
7542 DECL_COMMON (decl) = 1;
7543 return NULL_TREE;
7546 static const struct attribute_spec vms_attribute_table[] =
7548 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7549 affects_type_identity } */
7550 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7551 { NULL, 0, 0, false, false, false, NULL, false }
7554 void
7555 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7556 unsigned HOST_WIDE_INT size,
7557 unsigned int align)
7559 tree attr = DECL_ATTRIBUTES (decl);
7560 fprintf (file, "%s", COMMON_ASM_OP);
7561 assemble_name (file, name);
7562 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7563 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7564 fprintf (file, ",%u", align / BITS_PER_UNIT);
7565 if (attr)
7567 attr = lookup_attribute (COMMON_OBJECT, attr);
7568 if (attr)
7569 fprintf (file, ",%s",
7570 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7572 fputc ('\n', file);
7575 #undef COMMON_OBJECT
7577 #endif
7579 bool
7580 alpha_find_lo_sum_using_gp (rtx insn)
7582 subrtx_iterator::array_type array;
7583 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7585 const_rtx x = *iter;
7586 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7587 return true;
7589 return false;
7592 static int
7593 alpha_does_function_need_gp (void)
7595 rtx_insn *insn;
7597 /* The GP being variable is an OSF abi thing. */
7598 if (! TARGET_ABI_OSF)
7599 return 0;
7601 /* We need the gp to load the address of __mcount. */
7602 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7603 return 1;
7605 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7606 if (cfun->is_thunk)
7607 return 1;
7609 /* The nonlocal receiver pattern assumes that the gp is valid for
7610 the nested function. Reasonable because it's almost always set
7611 correctly already. For the cases where that's wrong, make sure
7612 the nested function loads its gp on entry. */
7613 if (crtl->has_nonlocal_goto)
7614 return 1;
7616 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7617 Even if we are a static function, we still need to do this in case
7618 our address is taken and passed to something like qsort. */
7620 push_topmost_sequence ();
7621 insn = get_insns ();
7622 pop_topmost_sequence ();
7624 for (; insn; insn = NEXT_INSN (insn))
7625 if (NONDEBUG_INSN_P (insn)
7626 && GET_CODE (PATTERN (insn)) != USE
7627 && GET_CODE (PATTERN (insn)) != CLOBBER
7628 && get_attr_usegp (insn))
7629 return 1;
7631 return 0;
7635 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7636 sequences. */
7638 static rtx_insn *
7639 set_frame_related_p (void)
7641 rtx_insn *seq = get_insns ();
7642 rtx_insn *insn;
7644 end_sequence ();
7646 if (!seq)
7647 return NULL;
7649 if (INSN_P (seq))
7651 insn = seq;
7652 while (insn != NULL_RTX)
7654 RTX_FRAME_RELATED_P (insn) = 1;
7655 insn = NEXT_INSN (insn);
7657 seq = emit_insn (seq);
7659 else
7661 seq = emit_insn (seq);
7662 RTX_FRAME_RELATED_P (seq) = 1;
7664 return seq;
7667 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7669 /* Generates a store with the proper unwind info attached. VALUE is
7670 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7671 contains SP+FRAME_BIAS, and that is the unwind info that should be
7672 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7673 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7675 static void
7676 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7677 HOST_WIDE_INT base_ofs, rtx frame_reg)
7679 rtx addr, mem;
7680 rtx_insn *insn;
7682 addr = plus_constant (Pmode, base_reg, base_ofs);
7683 mem = gen_frame_mem (DImode, addr);
7685 insn = emit_move_insn (mem, value);
7686 RTX_FRAME_RELATED_P (insn) = 1;
7688 if (frame_bias || value != frame_reg)
7690 if (frame_bias)
7692 addr = plus_constant (Pmode, stack_pointer_rtx,
7693 frame_bias + base_ofs);
7694 mem = gen_rtx_MEM (DImode, addr);
7697 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7698 gen_rtx_SET (VOIDmode, mem, frame_reg));
7702 static void
7703 emit_frame_store (unsigned int regno, rtx base_reg,
7704 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7706 rtx reg = gen_rtx_REG (DImode, regno);
7707 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7710 /* Compute the frame size. SIZE is the size of the "naked" frame
7711 and SA_SIZE is the size of the register save area. */
7713 static HOST_WIDE_INT
7714 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7716 if (TARGET_ABI_OPEN_VMS)
7717 return ALPHA_ROUND (sa_size
7718 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7719 + size
7720 + crtl->args.pretend_args_size);
7721 else
7722 return ALPHA_ROUND (crtl->outgoing_args_size)
7723 + sa_size
7724 + ALPHA_ROUND (size
7725 + crtl->args.pretend_args_size);
7728 /* Write function prologue. */
7730 /* On vms we have two kinds of functions:
7732 - stack frame (PROC_STACK)
7733 these are 'normal' functions with local vars and which are
7734 calling other functions
7735 - register frame (PROC_REGISTER)
7736 keeps all data in registers, needs no stack
7738 We must pass this to the assembler so it can generate the
7739 proper pdsc (procedure descriptor)
7740 This is done with the '.pdesc' command.
7742 On not-vms, we don't really differentiate between the two, as we can
7743 simply allocate stack without saving registers. */
7745 void
7746 alpha_expand_prologue (void)
7748 /* Registers to save. */
7749 unsigned long imask = 0;
7750 unsigned long fmask = 0;
7751 /* Stack space needed for pushing registers clobbered by us. */
7752 HOST_WIDE_INT sa_size, sa_bias;
7753 /* Complete stack size needed. */
7754 HOST_WIDE_INT frame_size;
7755 /* Probed stack size; it additionally includes the size of
7756 the "reserve region" if any. */
7757 HOST_WIDE_INT probed_size;
7758 /* Offset from base reg to register save area. */
7759 HOST_WIDE_INT reg_offset;
7760 rtx sa_reg;
7761 int i;
7763 sa_size = alpha_sa_size ();
7764 frame_size = compute_frame_size (get_frame_size (), sa_size);
7766 if (flag_stack_usage_info)
7767 current_function_static_stack_size = frame_size;
7769 if (TARGET_ABI_OPEN_VMS)
7770 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7771 else
7772 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7774 alpha_sa_mask (&imask, &fmask);
7776 /* Emit an insn to reload GP, if needed. */
7777 if (TARGET_ABI_OSF)
7779 alpha_function_needs_gp = alpha_does_function_need_gp ();
7780 if (alpha_function_needs_gp)
7781 emit_insn (gen_prologue_ldgp ());
7784 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7785 the call to mcount ourselves, rather than having the linker do it
7786 magically in response to -pg. Since _mcount has special linkage,
7787 don't represent the call as a call. */
7788 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7789 emit_insn (gen_prologue_mcount ());
7791 /* Adjust the stack by the frame size. If the frame size is > 4096
7792 bytes, we need to be sure we probe somewhere in the first and last
7793 4096 bytes (we can probably get away without the latter test) and
7794 every 8192 bytes in between. If the frame size is > 32768, we
7795 do this in a loop. Otherwise, we generate the explicit probe
7796 instructions.
7798 Note that we are only allowed to adjust sp once in the prologue. */
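/* Illustrative walk-through (example numbers): with frame_size == 20000
   and -fstack-check not enabled, probed_size is 20000, so the code below
   emits probes at SP-4096 and SP-12288; if no registers are being saved
   (sa_size == 0) a final probe at SP-20000 is emitted as well, and SP is
   then decremented by 20000 in a single adjustment.  */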
7800 probed_size = frame_size;
7801 if (flag_stack_check)
7802 probed_size += STACK_CHECK_PROTECT;
7804 if (probed_size <= 32768)
7806 if (probed_size > 4096)
7808 int probed;
7810 for (probed = 4096; probed < probed_size; probed += 8192)
7811 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7813 /* We only have to do this probe if we aren't saving registers or
7814 if we are probing beyond the frame because of -fstack-check. */
7815 if ((sa_size == 0 && probed_size > probed - 4096)
7816 || flag_stack_check)
7817 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7820 if (frame_size != 0)
7821 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7822 GEN_INT (-frame_size))));
7824 else
7826 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7827 number of 8192 byte blocks to probe. We then probe each block
7828 in the loop and then set SP to the proper location. If the
7829 amount remaining is > 4096, we have to do one more probe if we
7830 are not saving any registers or if we are probing beyond the
7831 frame because of -fstack-check. */
7833 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7834 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7835 rtx ptr = gen_rtx_REG (DImode, 22);
7836 rtx count = gen_rtx_REG (DImode, 23);
7837 rtx seq;
7839 emit_move_insn (count, GEN_INT (blocks));
7840 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7842 /* Because of the difficulty in emitting a new basic block this
7843 late in the compilation, generate the loop as a single insn. */
7844 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7846 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7848 rtx last = gen_rtx_MEM (DImode,
7849 plus_constant (Pmode, ptr, -leftover));
7850 MEM_VOLATILE_P (last) = 1;
7851 emit_move_insn (last, const0_rtx);
7854 if (flag_stack_check)
7856 /* If -fstack-check is specified we have to load the entire
7857 constant into a register and subtract from the sp in one go,
7858 because the probed stack size is not equal to the frame size. */
7859 HOST_WIDE_INT lo, hi;
7860 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7861 hi = frame_size - lo;
7863 emit_move_insn (ptr, GEN_INT (hi));
7864 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7865 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7866 ptr));
7868 else
7870 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7871 GEN_INT (-leftover)));
7874 /* This alternative is special, because the DWARF code cannot
7875 possibly intuit through the loop above. So we invent this
7876 note for it to look at instead.  */
7877 RTX_FRAME_RELATED_P (seq) = 1;
7878 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7879 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7880 plus_constant (Pmode, stack_pointer_rtx,
7881 -frame_size)));
7884 /* Cope with very large offsets to the register save area. */
7885 sa_bias = 0;
7886 sa_reg = stack_pointer_rtx;
7887 if (reg_offset + sa_size > 0x8000)
7889 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7890 rtx sa_bias_rtx;
7892 if (low + sa_size <= 0x8000)
7893 sa_bias = reg_offset - low, reg_offset = low;
7894 else
7895 sa_bias = reg_offset, reg_offset = 0;
7897 sa_reg = gen_rtx_REG (DImode, 24);
7898 sa_bias_rtx = GEN_INT (sa_bias);
7900 if (add_operand (sa_bias_rtx, DImode))
7901 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7902 else
7904 emit_move_insn (sa_reg, sa_bias_rtx);
7905 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7909 /* Save regs in stack order. Beginning with VMS PV. */
7910 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7911 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7913 /* Save register RA next. */
7914 if (imask & (1UL << REG_RA))
7916 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7917 imask &= ~(1UL << REG_RA);
7918 reg_offset += 8;
7921 /* Now save any other registers required to be saved. */
7922 for (i = 0; i < 31; i++)
7923 if (imask & (1UL << i))
7925 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7926 reg_offset += 8;
7929 for (i = 0; i < 31; i++)
7930 if (fmask & (1UL << i))
7932 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7933 reg_offset += 8;
7936 if (TARGET_ABI_OPEN_VMS)
7938 /* Register frame procedures save the fp. */
7939 if (alpha_procedure_type == PT_REGISTER)
7941 rtx_insn *insn =
7942 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7943 hard_frame_pointer_rtx);
7944 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7945 RTX_FRAME_RELATED_P (insn) = 1;
7948 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7949 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7950 gen_rtx_REG (DImode, REG_PV)));
7952 if (alpha_procedure_type != PT_NULL
7953 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7954 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7956 /* If we have to allocate space for outgoing args, do it now. */
7957 if (crtl->outgoing_args_size != 0)
7959 rtx_insn *seq
7960 = emit_move_insn (stack_pointer_rtx,
7961 plus_constant
7962 (Pmode, hard_frame_pointer_rtx,
7963 - (ALPHA_ROUND
7964 (crtl->outgoing_args_size))));
7966 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7967 if ! frame_pointer_needed. Setting the bit will change the CFA
7968 computation rule to use sp again, which would be wrong if we had
7969 frame_pointer_needed, as this means sp might move unpredictably
7970 later on.
7972 Also, note that
7973 frame_pointer_needed
7974 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7976 crtl->outgoing_args_size != 0
7977 => alpha_procedure_type != PT_NULL,
7979 so when we are not setting the bit here, we are guaranteed to
7980 have emitted an FRP frame pointer update just before. */
7981 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7984 else
7986 /* If we need a frame pointer, set it from the stack pointer. */
7987 if (frame_pointer_needed)
7989 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7990 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7991 else
7992 /* This must always be the last instruction in the
7993 prologue, thus we emit a special move + clobber. */
7994 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7995 stack_pointer_rtx, sa_reg)));
7999 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8000 the prologue, for exception handling reasons, we cannot do this for
8001 any insn that might fault. We could prevent this for mems with a
8002 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8003 have to prevent all such scheduling with a blockage.
8005 Linux, on the other hand, never bothered to implement OSF/1's
8006 exception handling, and so doesn't care about such things. Anyone
8007 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8009 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8010 emit_insn (gen_blockage ());
8013 /* Count the number of .file directives, so that .loc is up to date. */
8014 int num_source_filenames = 0;
8016 /* Output the textual info surrounding the prologue. */
8018 void
8019 alpha_start_function (FILE *file, const char *fnname,
8020 tree decl ATTRIBUTE_UNUSED)
8022 unsigned long imask = 0;
8023 unsigned long fmask = 0;
8024 /* Stack space needed for pushing registers clobbered by us. */
8025 HOST_WIDE_INT sa_size;
8026 /* Complete stack size needed. */
8027 unsigned HOST_WIDE_INT frame_size;
8028 /* The maximum debuggable frame size. */
8029 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
8030 /* Offset from base reg to register save area. */
8031 HOST_WIDE_INT reg_offset;
8032 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8033 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8034 int i;
8036 #if TARGET_ABI_OPEN_VMS
8037 vms_start_function (fnname);
8038 #endif
8040 alpha_fnname = fnname;
8041 sa_size = alpha_sa_size ();
8042 frame_size = compute_frame_size (get_frame_size (), sa_size);
8044 if (TARGET_ABI_OPEN_VMS)
8045 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8046 else
8047 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8049 alpha_sa_mask (&imask, &fmask);
8051 /* Issue function start and label. */
8052 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
8054 fputs ("\t.ent ", file);
8055 assemble_name (file, fnname);
8056 putc ('\n', file);
8058 /* If the function needs GP, we'll write the "..ng" label there.
8059 Otherwise, do it here. */
8060 if (TARGET_ABI_OSF
8061 && ! alpha_function_needs_gp
8062 && ! cfun->is_thunk)
8064 putc ('$', file);
8065 assemble_name (file, fnname);
8066 fputs ("..ng:\n", file);
8069 /* Nested functions on VMS that are potentially called via trampoline
8070 get a special transfer entry point that loads the called functions
8071 procedure descriptor and static chain. */
8072 if (TARGET_ABI_OPEN_VMS
8073 && !TREE_PUBLIC (decl)
8074 && DECL_CONTEXT (decl)
8075 && !TYPE_P (DECL_CONTEXT (decl))
8076 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
8078 strcpy (tramp_label, fnname);
8079 strcat (tramp_label, "..tr");
8080 ASM_OUTPUT_LABEL (file, tramp_label);
8081 fprintf (file, "\tldq $1,24($27)\n");
8082 fprintf (file, "\tldq $27,16($27)\n");
8085 strcpy (entry_label, fnname);
8086 if (TARGET_ABI_OPEN_VMS)
8087 strcat (entry_label, "..en");
8089 ASM_OUTPUT_LABEL (file, entry_label);
8090 inside_function = TRUE;
8092 if (TARGET_ABI_OPEN_VMS)
8093 fprintf (file, "\t.base $%d\n", vms_base_regno);
8095 if (TARGET_ABI_OSF
8096 && TARGET_IEEE_CONFORMANT
8097 && !flag_inhibit_size_directive)
8099 /* Set flags in procedure descriptor to request IEEE-conformant
8100 math-library routines. The value we set it to is PDSC_EXC_IEEE
8101 (/usr/include/pdsc.h). */
8102 fputs ("\t.eflag 48\n", file);
8105 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8106 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8107 alpha_arg_offset = -frame_size + 48;
8109 /* Describe our frame. If the frame size is larger than an integer,
8110 print it as zero to avoid an assembler error. We won't be
8111 properly describing such a frame, but that's the best we can do. */
8112 if (TARGET_ABI_OPEN_VMS)
8113 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8114 HOST_WIDE_INT_PRINT_DEC "\n",
8115 vms_unwind_regno,
8116 frame_size >= (1UL << 31) ? 0 : frame_size,
8117 reg_offset);
8118 else if (!flag_inhibit_size_directive)
8119 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8120 (frame_pointer_needed
8121 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8122 frame_size >= max_frame_size ? 0 : frame_size,
8123 crtl->args.pretend_args_size);
8125 /* Describe which registers were spilled. */
8126 if (TARGET_ABI_OPEN_VMS)
8128 if (imask)
8129 /* ??? Does VMS care if mask contains ra? The old code didn't
8130 set it, so I don't set it here.  */
8131 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8132 if (fmask)
8133 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8134 if (alpha_procedure_type == PT_REGISTER)
8135 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8137 else if (!flag_inhibit_size_directive)
8139 if (imask)
8141 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8142 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8144 for (i = 0; i < 32; ++i)
8145 if (imask & (1UL << i))
8146 reg_offset += 8;
8149 if (fmask)
8150 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8151 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8154 #if TARGET_ABI_OPEN_VMS
8155 /* If a user condition handler has been installed at some point, emit
8156 the procedure descriptor bits to point the Condition Handling Facility
8157 at the indirection wrapper, and state the fp offset at which the user
8158 handler may be found. */
8159 if (cfun->machine->uses_condition_handler)
8161 fprintf (file, "\t.handler __gcc_shell_handler\n");
8162 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8165 #ifdef TARGET_VMS_CRASH_DEBUG
8166 /* Support of minimal traceback info. */
8167 switch_to_section (readonly_data_section);
8168 fprintf (file, "\t.align 3\n");
8169 assemble_name (file, fnname); fputs ("..na:\n", file);
8170 fputs ("\t.ascii \"", file);
8171 assemble_name (file, fnname);
8172 fputs ("\\0\"\n", file);
8173 switch_to_section (text_section);
8174 #endif
8175 #endif /* TARGET_ABI_OPEN_VMS */
8178 /* Emit the .prologue note at the scheduled end of the prologue. */
8180 static void
8181 alpha_output_function_end_prologue (FILE *file)
8183 if (TARGET_ABI_OPEN_VMS)
8184 fputs ("\t.prologue\n", file);
8185 else if (!flag_inhibit_size_directive)
8186 fprintf (file, "\t.prologue %d\n",
8187 alpha_function_needs_gp || cfun->is_thunk);
8190 /* Write function epilogue. */
8192 void
8193 alpha_expand_epilogue (void)
8195 /* Registers to save. */
8196 unsigned long imask = 0;
8197 unsigned long fmask = 0;
8198 /* Stack space needed for pushing registers clobbered by us. */
8199 HOST_WIDE_INT sa_size;
8200 /* Complete stack size needed. */
8201 HOST_WIDE_INT frame_size;
8202 /* Offset from base reg to register save area. */
8203 HOST_WIDE_INT reg_offset;
8204 int fp_is_frame_pointer, fp_offset;
8205 rtx sa_reg, sa_reg_exp = NULL;
8206 rtx sp_adj1, sp_adj2, mem, reg, insn;
8207 rtx eh_ofs;
8208 rtx cfa_restores = NULL_RTX;
8209 int i;
8211 sa_size = alpha_sa_size ();
8212 frame_size = compute_frame_size (get_frame_size (), sa_size);
8214 if (TARGET_ABI_OPEN_VMS)
8216 if (alpha_procedure_type == PT_STACK)
8217 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8218 else
8219 reg_offset = 0;
8221 else
8222 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8224 alpha_sa_mask (&imask, &fmask);
8226 fp_is_frame_pointer
8227 = (TARGET_ABI_OPEN_VMS
8228 ? alpha_procedure_type == PT_STACK
8229 : frame_pointer_needed);
8230 fp_offset = 0;
8231 sa_reg = stack_pointer_rtx;
8233 if (crtl->calls_eh_return)
8234 eh_ofs = EH_RETURN_STACKADJ_RTX;
8235 else
8236 eh_ofs = NULL_RTX;
8238 if (sa_size)
8240 /* If we have a frame pointer, restore SP from it. */
8241 if (TARGET_ABI_OPEN_VMS
8242 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8243 : frame_pointer_needed)
8244 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8246 /* Cope with very large offsets to the register save area. */
8247 if (reg_offset + sa_size > 0x8000)
8249 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8250 HOST_WIDE_INT bias;
8252 if (low + sa_size <= 0x8000)
8253 bias = reg_offset - low, reg_offset = low;
8254 else
8255 bias = reg_offset, reg_offset = 0;
8257 sa_reg = gen_rtx_REG (DImode, 22);
8258 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8260 emit_move_insn (sa_reg, sa_reg_exp);
8263 /* Restore registers in order, excepting a true frame pointer. */
8265 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8266 reg = gen_rtx_REG (DImode, REG_RA);
8267 emit_move_insn (reg, mem);
8268 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8270 reg_offset += 8;
8271 imask &= ~(1UL << REG_RA);
8273 for (i = 0; i < 31; ++i)
8274 if (imask & (1UL << i))
8276 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8277 fp_offset = reg_offset;
8278 else
8280 mem = gen_frame_mem (DImode,
8281 plus_constant (Pmode, sa_reg,
8282 reg_offset));
8283 reg = gen_rtx_REG (DImode, i);
8284 emit_move_insn (reg, mem);
8285 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8286 cfa_restores);
8288 reg_offset += 8;
8291 for (i = 0; i < 31; ++i)
8292 if (fmask & (1UL << i))
8294 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8295 reg_offset));
8296 reg = gen_rtx_REG (DFmode, i+32);
8297 emit_move_insn (reg, mem);
8298 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8299 reg_offset += 8;
8303 if (frame_size || eh_ofs)
8305 sp_adj1 = stack_pointer_rtx;
8307 if (eh_ofs)
8309 sp_adj1 = gen_rtx_REG (DImode, 23);
8310 emit_move_insn (sp_adj1,
8311 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8314 /* If the stack size is large, begin computation into a temporary
8315 register so as not to interfere with a potential fp restore,
8316 which must be consecutive with an SP restore. */
8317 if (frame_size < 32768 && !cfun->calls_alloca)
8318 sp_adj2 = GEN_INT (frame_size);
8319 else if (frame_size < 0x40007fffL)
8321 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8323 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8324 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8325 sp_adj1 = sa_reg;
8326 else
8328 sp_adj1 = gen_rtx_REG (DImode, 23);
8329 emit_move_insn (sp_adj1, sp_adj2);
8331 sp_adj2 = GEN_INT (low);
8333 else
8335 rtx tmp = gen_rtx_REG (DImode, 23);
8336 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8337 if (!sp_adj2)
8339 /* We can't drop new things to memory this late, afaik,
8340 so build it up by pieces. */
8341 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8342 -(frame_size < 0));
8343 gcc_assert (sp_adj2);
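      /* To recap the three cases above (roughly): a constant frame that
         fits in a signed 16-bit immediate (and no alloca) is popped with
         a single add to SP; a frame below 0x40007fff is split into a
         high part (folded into $23 or the already-biased save-area
         register) plus a 16-bit low immediate; anything larger is
         materialized into $23 piecewise and then added.  */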
8347 /* From now on, things must be in order. So emit blockages. */
8349 /* Restore the frame pointer. */
8350 if (fp_is_frame_pointer)
8352 emit_insn (gen_blockage ());
8353 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8354 fp_offset));
8355 emit_move_insn (hard_frame_pointer_rtx, mem);
8356 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8357 hard_frame_pointer_rtx, cfa_restores);
8359 else if (TARGET_ABI_OPEN_VMS)
8361 emit_insn (gen_blockage ());
8362 emit_move_insn (hard_frame_pointer_rtx,
8363 gen_rtx_REG (DImode, vms_save_fp_regno));
8364 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8365 hard_frame_pointer_rtx, cfa_restores);
8368 /* Restore the stack pointer. */
8369 emit_insn (gen_blockage ());
8370 if (sp_adj2 == const0_rtx)
8371 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8372 else
8373 insn = emit_move_insn (stack_pointer_rtx,
8374 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8375 REG_NOTES (insn) = cfa_restores;
8376 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8377 RTX_FRAME_RELATED_P (insn) = 1;
8379 else
8381 gcc_assert (cfa_restores == NULL);
8383 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8385 emit_insn (gen_blockage ());
8386 insn = emit_move_insn (hard_frame_pointer_rtx,
8387 gen_rtx_REG (DImode, vms_save_fp_regno));
8388 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8389 RTX_FRAME_RELATED_P (insn) = 1;
8394 /* Output the rest of the textual info surrounding the epilogue. */
8396 void
8397 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8399 rtx_insn *insn;
8401 /* We output a nop after noreturn calls at the very end of the function to
8402 ensure that the return address always remains in the caller's code range,
8403 as not doing so might confuse unwinding engines. */
8404 insn = get_last_insn ();
8405 if (!INSN_P (insn))
8406 insn = prev_active_insn (insn);
8407 if (insn && CALL_P (insn))
8408 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8410 #if TARGET_ABI_OPEN_VMS
8411 /* Write the linkage entries. */
8412 alpha_write_linkage (file, fnname);
8413 #endif
8415 /* End the function. */
8416 if (TARGET_ABI_OPEN_VMS
8417 || !flag_inhibit_size_directive)
8419 fputs ("\t.end ", file);
8420 assemble_name (file, fnname);
8421 putc ('\n', file);
8423 inside_function = FALSE;
8426 #if TARGET_ABI_OSF
8427 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8429 In order to avoid the hordes of differences between generated code
8430 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8431 lots of code loading up large constants, generate rtl and emit it
8432 instead of going straight to text.
8434 Not sure why this idea hasn't been explored before... */
8436 static void
8437 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8438 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8439 tree function)
8441 HOST_WIDE_INT hi, lo;
8442 rtx this_rtx, funexp;
8443 rtx_insn *insn;
8445 /* We always require a valid GP. */
8446 emit_insn (gen_prologue_ldgp ());
8447 emit_note (NOTE_INSN_PROLOGUE_END);
8449 /* Find the "this" pointer. If the function returns a structure,
8450 the structure return pointer is in $16. */
8451 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8452 this_rtx = gen_rtx_REG (Pmode, 17);
8453 else
8454 this_rtx = gen_rtx_REG (Pmode, 16);
8456 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8457 entire constant for the add. */
8458 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8459 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8460 if (hi + lo == delta)
8462 if (hi)
8463 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8464 if (lo)
8465 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8467 else
8469 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8470 delta, -(delta < 0));
8471 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
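  /* Worked example of the HI/LO split above, with an illustrative
     value: for delta = 0x12348000, lo = -0x8000 and hi = 0x12350000,
     so hi + lo == delta and the two adds can be emitted as an ldah
     (hi is a multiple of 65536) followed by an lda.  */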
8474 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8475 if (vcall_offset)
8477 rtx tmp, tmp2;
8479 tmp = gen_rtx_REG (Pmode, 0);
8480 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8482 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8483 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8484 if (hi + lo == vcall_offset)
8486 if (hi)
8487 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8489 else
8491 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8492 vcall_offset, -(vcall_offset < 0));
8493 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8494 lo = 0;
8496 if (lo)
8497 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8498 else
8499 tmp2 = tmp;
8500 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8502 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
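  /* The sequence above is roughly equivalent to
         this += *(long *) (*(long *) this + vcall_offset);
     i.e. load the vtable pointer from the object, fetch the adjustment
     stored at VCALL_OFFSET within the vtable, and add it to the
     this pointer.  */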
8505 /* Generate a tail call to the target function. */
8506 if (! TREE_USED (function))
8508 assemble_external (function);
8509 TREE_USED (function) = 1;
8511 funexp = XEXP (DECL_RTL (function), 0);
8512 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8513 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8514 SIBLING_CALL_P (insn) = 1;
8516 /* Run just enough of rest_of_compilation to get the insns emitted.
8517 There's not really enough bulk here to make other passes such as
8518 instruction scheduling worthwhile. Note that use_thunk calls
8519 assemble_start_function and assemble_end_function. */
8520 insn = get_insns ();
8521 shorten_branches (insn);
8522 final_start_function (insn, file, 1);
8523 final (insn, file, 1);
8524 final_end_function ();
8526 #endif /* TARGET_ABI_OSF */
8528 /* Debugging support. */
8530 #include "gstab.h"
8532 /* Name of the file containing the current function. */
8534 static const char *current_function_file = "";
8536 /* Offsets to alpha virtual arg/local debugging pointers. */
8538 long alpha_arg_offset;
8539 long alpha_auto_offset;
8541 /* Emit a new filename to a stream. */
8543 void
8544 alpha_output_filename (FILE *stream, const char *name)
8546 static int first_time = TRUE;
8548 if (first_time)
8550 first_time = FALSE;
8551 ++num_source_filenames;
8552 current_function_file = name;
8553 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8554 output_quoted_string (stream, name);
8555 fprintf (stream, "\n");
8558 else if (name != current_function_file
8559 && strcmp (name, current_function_file) != 0)
8561 ++num_source_filenames;
8562 current_function_file = name;
8563 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8565 output_quoted_string (stream, name);
8566 fprintf (stream, "\n");
8570 /* Structure to show the current status of registers and memory. */
8572 struct shadow_summary
8574 struct {
8575 unsigned int i : 31; /* Mask of int regs */
8576 unsigned int fp : 31; /* Mask of fp regs */
8577 unsigned int mem : 1; /* mem == imem | fpmem */
8578 } used, defd;
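  /* 31 bits per register bank suffice because $31 and $f31 are
     hard-wired to zero and are never tracked; see the REG case in
     summarize_insn below.  */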
8581 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8582 to the summary structure. SET is nonzero if the insn is setting the
8583 object, otherwise zero. */
8585 static void
8586 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8588 const char *format_ptr;
8589 int i, j;
8591 if (x == 0)
8592 return;
8594 switch (GET_CODE (x))
8596 /* ??? Note that this case would be incorrect if the Alpha had a
8597 ZERO_EXTRACT in SET_DEST. */
8598 case SET:
8599 summarize_insn (SET_SRC (x), sum, 0);
8600 summarize_insn (SET_DEST (x), sum, 1);
8601 break;
8603 case CLOBBER:
8604 summarize_insn (XEXP (x, 0), sum, 1);
8605 break;
8607 case USE:
8608 summarize_insn (XEXP (x, 0), sum, 0);
8609 break;
8611 case ASM_OPERANDS:
8612 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8613 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8614 break;
8616 case PARALLEL:
8617 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8618 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8619 break;
8621 case SUBREG:
8622 summarize_insn (SUBREG_REG (x), sum, 0);
8623 break;
8625 case REG:
8627 int regno = REGNO (x);
8628 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8630 if (regno == 31 || regno == 63)
8631 break;
8633 if (set)
8635 if (regno < 32)
8636 sum->defd.i |= mask;
8637 else
8638 sum->defd.fp |= mask;
8640 else
8642 if (regno < 32)
8643 sum->used.i |= mask;
8644 else
8645 sum->used.fp |= mask;
8648 break;
8650 case MEM:
8651 if (set)
8652 sum->defd.mem = 1;
8653 else
8654 sum->used.mem = 1;
8656 /* Find the regs used in memory address computation: */
8657 summarize_insn (XEXP (x, 0), sum, 0);
8658 break;
8660 case CONST_INT: case CONST_DOUBLE:
8661 case SYMBOL_REF: case LABEL_REF: case CONST:
8662 case SCRATCH: case ASM_INPUT:
8663 break;
8665 /* Handle common unary and binary ops for efficiency. */
8666 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8667 case MOD: case UDIV: case UMOD: case AND: case IOR:
8668 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8669 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8670 case NE: case EQ: case GE: case GT: case LE:
8671 case LT: case GEU: case GTU: case LEU: case LTU:
8672 summarize_insn (XEXP (x, 0), sum, 0);
8673 summarize_insn (XEXP (x, 1), sum, 0);
8674 break;
8676 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8677 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8678 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8679 case SQRT: case FFS:
8680 summarize_insn (XEXP (x, 0), sum, 0);
8681 break;
8683 default:
8684 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8685 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8686 switch (format_ptr[i])
8688 case 'e':
8689 summarize_insn (XEXP (x, i), sum, 0);
8690 break;
8692 case 'E':
8693 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8694 summarize_insn (XVECEXP (x, i, j), sum, 0);
8695 break;
8697 case 'i':
8698 break;
8700 default:
8701 gcc_unreachable ();
8706 /* Ensure a sufficient number of `trapb' insns are in the code when
8707 the user requests code with a trap precision of functions or
8708 instructions.
8710 In naive mode, when the user requests a trap-precision of
8711 "instruction", a trapb is needed after every instruction that may
8712 generate a trap. This ensures that the code is resumption safe but
8713 it is also slow.
8715 When optimizations are turned on, we delay issuing a trapb as long
8716 as possible. In this context, a trap shadow is the sequence of
8717 instructions that starts with a (potentially) trap generating
8718 instruction and extends to the next trapb or call_pal instruction
8719 (but GCC never generates call_pal by itself). We can delay (and
8720 therefore sometimes omit) a trapb subject to the following
8721 conditions:
8723 (a) On entry to the trap shadow, if any Alpha register or memory
8724 location contains a value that is used as an operand value by some
8725 instruction in the trap shadow (live on entry), then no instruction
8726 in the trap shadow may modify the register or memory location.
8728 (b) Within the trap shadow, the computation of the base register
8729 for a memory load or store instruction may not involve using the
8730 result of an instruction that might generate an UNPREDICTABLE
8731 result.
8733 (c) Within the trap shadow, no register may be used more than once
8734 as a destination register. (This is to make life easier for the
8735 trap-handler.)
8737 (d) The trap shadow may not include any branch instructions. */
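/* As an illustrative, hand-written example (not generated output):
   with -mtrap-precision=i and optimization enabled,

        addt $f1,$f2,$f3        # may trap: opens a trap shadow
        ldq  $4,0($16)          # fine: no conflict with the shadow
        addt $f5,$f6,$f3        # would redefine $f3 -- condition (c)

   the second addt forces a trapb to be emitted before it, closing the
   first shadow.  */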
8739 static void
8740 alpha_handle_trap_shadows (void)
8742 struct shadow_summary shadow;
8743 int trap_pending, exception_nesting;
8744 rtx_insn *i, *n;
8746 trap_pending = 0;
8747 exception_nesting = 0;
8748 shadow.used.i = 0;
8749 shadow.used.fp = 0;
8750 shadow.used.mem = 0;
8751 shadow.defd = shadow.used;
8753 for (i = get_insns (); i ; i = NEXT_INSN (i))
8755 if (NOTE_P (i))
8757 switch (NOTE_KIND (i))
8759 case NOTE_INSN_EH_REGION_BEG:
8760 exception_nesting++;
8761 if (trap_pending)
8762 goto close_shadow;
8763 break;
8765 case NOTE_INSN_EH_REGION_END:
8766 exception_nesting--;
8767 if (trap_pending)
8768 goto close_shadow;
8769 break;
8771 case NOTE_INSN_EPILOGUE_BEG:
8772 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8773 goto close_shadow;
8774 break;
8777 else if (trap_pending)
8779 if (alpha_tp == ALPHA_TP_FUNC)
8781 if (JUMP_P (i)
8782 && GET_CODE (PATTERN (i)) == RETURN)
8783 goto close_shadow;
8785 else if (alpha_tp == ALPHA_TP_INSN)
8787 if (optimize > 0)
8789 struct shadow_summary sum;
8791 sum.used.i = 0;
8792 sum.used.fp = 0;
8793 sum.used.mem = 0;
8794 sum.defd = sum.used;
8796 switch (GET_CODE (i))
8798 case INSN:
8799 /* Annoyingly, get_attr_trap will die on these. */
8800 if (GET_CODE (PATTERN (i)) == USE
8801 || GET_CODE (PATTERN (i)) == CLOBBER)
8802 break;
8804 summarize_insn (PATTERN (i), &sum, 0);
8806 if ((sum.defd.i & shadow.defd.i)
8807 || (sum.defd.fp & shadow.defd.fp))
8809 /* (c) would be violated */
8810 goto close_shadow;
8813 /* Combine shadow with summary of current insn: */
8814 shadow.used.i |= sum.used.i;
8815 shadow.used.fp |= sum.used.fp;
8816 shadow.used.mem |= sum.used.mem;
8817 shadow.defd.i |= sum.defd.i;
8818 shadow.defd.fp |= sum.defd.fp;
8819 shadow.defd.mem |= sum.defd.mem;
8821 if ((sum.defd.i & shadow.used.i)
8822 || (sum.defd.fp & shadow.used.fp)
8823 || (sum.defd.mem & shadow.used.mem))
8825 /* (a) would be violated (also takes care of (b)) */
8826 gcc_assert (get_attr_trap (i) != TRAP_YES
8827 || (!(sum.defd.i & sum.used.i)
8828 && !(sum.defd.fp & sum.used.fp)));
8830 goto close_shadow;
8832 break;
8834 case BARRIER:
8835 /* __builtin_unreachable can expand to no code at all,
8836 leaving (barrier) RTXes in the instruction stream. */
8837 goto close_shadow_notrapb;
8839 case JUMP_INSN:
8840 case CALL_INSN:
8841 case CODE_LABEL:
8842 goto close_shadow;
8844 default:
8845 gcc_unreachable ();
8848 else
8850 close_shadow:
8851 n = emit_insn_before (gen_trapb (), i);
8852 PUT_MODE (n, TImode);
8853 PUT_MODE (i, TImode);
8854 close_shadow_notrapb:
8855 trap_pending = 0;
8856 shadow.used.i = 0;
8857 shadow.used.fp = 0;
8858 shadow.used.mem = 0;
8859 shadow.defd = shadow.used;
8864 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8865 && NONJUMP_INSN_P (i)
8866 && GET_CODE (PATTERN (i)) != USE
8867 && GET_CODE (PATTERN (i)) != CLOBBER
8868 && get_attr_trap (i) == TRAP_YES)
8870 if (optimize && !trap_pending)
8871 summarize_insn (PATTERN (i), &shadow, 0);
8872 trap_pending = 1;
8877 /* Alpha can only issue instruction groups simultaneously if they are
8878 suitably aligned. This is very processor-specific. */
8879 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8880 that are marked "fake". These instructions do not exist on that target,
8881 but it is possible to see these insns with deranged combinations of
8882 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8883 choose a result at random. */
8885 enum alphaev4_pipe {
8886 EV4_STOP = 0,
8887 EV4_IB0 = 1,
8888 EV4_IB1 = 2,
8889 EV4_IBX = 4
8892 enum alphaev5_pipe {
8893 EV5_STOP = 0,
8894 EV5_NONE = 1,
8895 EV5_E01 = 2,
8896 EV5_E0 = 4,
8897 EV5_E1 = 8,
8898 EV5_FAM = 16,
8899 EV5_FA = 32,
8900 EV5_FM = 64
8903 static enum alphaev4_pipe
8904 alphaev4_insn_pipe (rtx_insn *insn)
8906 if (recog_memoized (insn) < 0)
8907 return EV4_STOP;
8908 if (get_attr_length (insn) != 4)
8909 return EV4_STOP;
8911 switch (get_attr_type (insn))
8913 case TYPE_ILD:
8914 case TYPE_LDSYM:
8915 case TYPE_FLD:
8916 case TYPE_LD_L:
8917 return EV4_IBX;
8919 case TYPE_IADD:
8920 case TYPE_ILOG:
8921 case TYPE_ICMOV:
8922 case TYPE_ICMP:
8923 case TYPE_FST:
8924 case TYPE_SHIFT:
8925 case TYPE_IMUL:
8926 case TYPE_FBR:
8927 case TYPE_MVI: /* fake */
8928 return EV4_IB0;
8930 case TYPE_IST:
8931 case TYPE_MISC:
8932 case TYPE_IBR:
8933 case TYPE_JSR:
8934 case TYPE_CALLPAL:
8935 case TYPE_FCPYS:
8936 case TYPE_FCMOV:
8937 case TYPE_FADD:
8938 case TYPE_FDIV:
8939 case TYPE_FMUL:
8940 case TYPE_ST_C:
8941 case TYPE_MB:
8942 case TYPE_FSQRT: /* fake */
8943 case TYPE_FTOI: /* fake */
8944 case TYPE_ITOF: /* fake */
8945 return EV4_IB1;
8947 default:
8948 gcc_unreachable ();
8952 static enum alphaev5_pipe
8953 alphaev5_insn_pipe (rtx_insn *insn)
8955 if (recog_memoized (insn) < 0)
8956 return EV5_STOP;
8957 if (get_attr_length (insn) != 4)
8958 return EV5_STOP;
8960 switch (get_attr_type (insn))
8962 case TYPE_ILD:
8963 case TYPE_FLD:
8964 case TYPE_LDSYM:
8965 case TYPE_IADD:
8966 case TYPE_ILOG:
8967 case TYPE_ICMOV:
8968 case TYPE_ICMP:
8969 return EV5_E01;
8971 case TYPE_IST:
8972 case TYPE_FST:
8973 case TYPE_SHIFT:
8974 case TYPE_IMUL:
8975 case TYPE_MISC:
8976 case TYPE_MVI:
8977 case TYPE_LD_L:
8978 case TYPE_ST_C:
8979 case TYPE_MB:
8980 case TYPE_FTOI: /* fake */
8981 case TYPE_ITOF: /* fake */
8982 return EV5_E0;
8984 case TYPE_IBR:
8985 case TYPE_JSR:
8986 case TYPE_CALLPAL:
8987 return EV5_E1;
8989 case TYPE_FCPYS:
8990 return EV5_FAM;
8992 case TYPE_FBR:
8993 case TYPE_FCMOV:
8994 case TYPE_FADD:
8995 case TYPE_FDIV:
8996 case TYPE_FSQRT: /* fake */
8997 return EV5_FA;
8999 case TYPE_FMUL:
9000 return EV5_FM;
9002 default:
9003 gcc_unreachable ();
9007 /* IN_USE is a mask of the slots currently filled within the insn group.
9008 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9009 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9011 LEN is, of course, the length of the group in bytes. */
9013 static rtx_insn *
9014 alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
9016 int len, in_use;
9018 len = in_use = 0;
9020 if (! INSN_P (insn)
9021 || GET_CODE (PATTERN (insn)) == CLOBBER
9022 || GET_CODE (PATTERN (insn)) == USE)
9023 goto next_and_done;
9025 while (1)
9027 enum alphaev4_pipe pipe;
9029 pipe = alphaev4_insn_pipe (insn);
9030 switch (pipe)
9032 case EV4_STOP:
9033 /* Force complex instructions to start new groups. */
9034 if (in_use)
9035 goto done;
9037 /* If this is a completely unrecognized insn, it's an asm.
9038 We don't know how long it is, so record length as -1 to
9039 signal a needed realignment. */
9040 if (recog_memoized (insn) < 0)
9041 len = -1;
9042 else
9043 len = get_attr_length (insn);
9044 goto next_and_done;
9046 case EV4_IBX:
9047 if (in_use & EV4_IB0)
9049 if (in_use & EV4_IB1)
9050 goto done;
9051 in_use |= EV4_IB1;
9053 else
9054 in_use |= EV4_IB0 | EV4_IBX;
9055 break;
9057 case EV4_IB0:
9058 if (in_use & EV4_IB0)
9060 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9061 goto done;
9062 in_use |= EV4_IB1;
9064 in_use |= EV4_IB0;
9065 break;
9067 case EV4_IB1:
9068 if (in_use & EV4_IB1)
9069 goto done;
9070 in_use |= EV4_IB1;
9071 break;
9073 default:
9074 gcc_unreachable ();
9076 len += 4;
9078 /* Haifa doesn't do well scheduling branches. */
9079 if (JUMP_P (insn))
9080 goto next_and_done;
9082 next:
9083 insn = next_nonnote_insn (insn);
9085 if (!insn || ! INSN_P (insn))
9086 goto done;
9088 /* Let Haifa tell us where it thinks insn group boundaries are. */
9089 if (GET_MODE (insn) == TImode)
9090 goto done;
9092 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9093 goto next;
9096 next_and_done:
9097 insn = next_nonnote_insn (insn);
9099 done:
9100 *plen = len;
9101 *pin_use = in_use;
9102 return insn;
9105 /* IN_USE is a mask of the slots currently filled within the insn group.
9106 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9107 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9109 LEN is, of course, the length of the group in bytes. */
9111 static rtx_insn *
9112 alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
9114 int len, in_use;
9116 len = in_use = 0;
9118 if (! INSN_P (insn)
9119 || GET_CODE (PATTERN (insn)) == CLOBBER
9120 || GET_CODE (PATTERN (insn)) == USE)
9121 goto next_and_done;
9123 while (1)
9125 enum alphaev5_pipe pipe;
9127 pipe = alphaev5_insn_pipe (insn);
9128 switch (pipe)
9130 case EV5_STOP:
9131 /* Force complex instructions to start new groups. */
9132 if (in_use)
9133 goto done;
9135 /* If this is a completely unrecognized insn, it's an asm.
9136 We don't know how long it is, so record length as -1 to
9137 signal a needed realignment. */
9138 if (recog_memoized (insn) < 0)
9139 len = -1;
9140 else
9141 len = get_attr_length (insn);
9142 goto next_and_done;
9144 /* ??? Most of the cases below are ones we would like to assert can
9145 never happen, as they would indicate an error either in Haifa or
9146 in the scheduling description. Unfortunately, Haifa never
9147 schedules the last instruction of the BB, so we don't have
9148 an accurate TI bit to go by. */
9149 case EV5_E01:
9150 if (in_use & EV5_E0)
9152 if (in_use & EV5_E1)
9153 goto done;
9154 in_use |= EV5_E1;
9156 else
9157 in_use |= EV5_E0 | EV5_E01;
9158 break;
9160 case EV5_E0:
9161 if (in_use & EV5_E0)
9163 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9164 goto done;
9165 in_use |= EV5_E1;
9167 in_use |= EV5_E0;
9168 break;
9170 case EV5_E1:
9171 if (in_use & EV5_E1)
9172 goto done;
9173 in_use |= EV5_E1;
9174 break;
9176 case EV5_FAM:
9177 if (in_use & EV5_FA)
9179 if (in_use & EV5_FM)
9180 goto done;
9181 in_use |= EV5_FM;
9183 else
9184 in_use |= EV5_FA | EV5_FAM;
9185 break;
9187 case EV5_FA:
9188 if (in_use & EV5_FA)
9189 goto done;
9190 in_use |= EV5_FA;
9191 break;
9193 case EV5_FM:
9194 if (in_use & EV5_FM)
9195 goto done;
9196 in_use |= EV5_FM;
9197 break;
9199 case EV5_NONE:
9200 break;
9202 default:
9203 gcc_unreachable ();
9205 len += 4;
9207 /* Haifa doesn't do well scheduling branches. */
9208 /* ??? If this is predicted not-taken, slotting continues, except
9209 that no more IBR, FBR, or JSR insns may be slotted. */
9210 if (JUMP_P (insn))
9211 goto next_and_done;
9213 next:
9214 insn = next_nonnote_insn (insn);
9216 if (!insn || ! INSN_P (insn))
9217 goto done;
9219 /* Let Haifa tell us where it thinks insn group boundaries are. */
9220 if (GET_MODE (insn) == TImode)
9221 goto done;
9223 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9224 goto next;
9227 next_and_done:
9228 insn = next_nonnote_insn (insn);
9230 done:
9231 *plen = len;
9232 *pin_use = in_use;
9233 return insn;
9236 static rtx
9237 alphaev4_next_nop (int *pin_use)
9239 int in_use = *pin_use;
9240 rtx nop;
9242 if (!(in_use & EV4_IB0))
9244 in_use |= EV4_IB0;
9245 nop = gen_nop ();
9247 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9249 in_use |= EV4_IB1;
9250 nop = gen_nop ();
9252 else if (TARGET_FP && !(in_use & EV4_IB1))
9254 in_use |= EV4_IB1;
9255 nop = gen_fnop ();
9257 else
9258 nop = gen_unop ();
9260 *pin_use = in_use;
9261 return nop;
9264 static rtx
9265 alphaev5_next_nop (int *pin_use)
9267 int in_use = *pin_use;
9268 rtx nop;
9270 if (!(in_use & EV5_E1))
9272 in_use |= EV5_E1;
9273 nop = gen_nop ();
9275 else if (TARGET_FP && !(in_use & EV5_FA))
9277 in_use |= EV5_FA;
9278 nop = gen_fnop ();
9280 else if (TARGET_FP && !(in_use & EV5_FM))
9282 in_use |= EV5_FM;
9283 nop = gen_fnop ();
9285 else
9286 nop = gen_unop ();
9288 *pin_use = in_use;
9289 return nop;
9292 /* The instruction group alignment main loop. */
9294 static void
9295 alpha_align_insns_1 (unsigned int max_align,
9296 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
9297 rtx (*next_nop) (int *))
9299 /* ALIGN is the known alignment for the insn group. */
9300 unsigned int align;
9301 /* OFS is the offset of the current insn in the insn group. */
9302 int ofs;
9303 int prev_in_use, in_use, len, ldgp;
9304 rtx_insn *i, *next;
9306 /* Let shorten branches care for assigning alignments to code labels. */
9307 shorten_branches (get_insns ());
9309 if (align_functions < 4)
9310 align = 4;
9311 else if ((unsigned int) align_functions < max_align)
9312 align = align_functions;
9313 else
9314 align = max_align;
9316 ofs = prev_in_use = 0;
9317 i = get_insns ();
9318 if (NOTE_P (i))
9319 i = next_nonnote_insn (i);
9321 ldgp = alpha_function_needs_gp ? 8 : 0;
9323 while (i)
9325 next = (*next_group) (i, &in_use, &len);
9327 /* When we see a label, resync alignment etc. */
9328 if (LABEL_P (i))
9330 unsigned int new_align = 1 << label_to_alignment (i);
9332 if (new_align >= align)
9334 align = new_align < max_align ? new_align : max_align;
9335 ofs = 0;
9338 else if (ofs & (new_align-1))
9339 ofs = (ofs | (new_align-1)) + 1;
9340 gcc_assert (!len);
9343 /* Handle complex instructions specially. */
9344 else if (in_use == 0)
9346 /* Asms will have length < 0. This is a signal that we have
9347 lost alignment knowledge. Assume, however, that the asm
9348 will not mis-align instructions. */
9349 if (len < 0)
9351 ofs = 0;
9352 align = 4;
9353 len = 0;
9357 /* If the known alignment is smaller than the recognized insn group,
9358 realign the output. */
9359 else if ((int) align < len)
9361 unsigned int new_log_align = len > 8 ? 4 : 3;
9362 rtx_insn *prev, *where;
9364 where = prev = prev_nonnote_insn (i);
9365 if (!where || !LABEL_P (where))
9366 where = i;
9368 /* Can't realign between a call and its gp reload. */
9369 if (! (TARGET_EXPLICIT_RELOCS
9370 && prev && CALL_P (prev)))
9372 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9373 align = 1 << new_log_align;
9374 ofs = 0;
9378 /* We may not insert padding inside the initial ldgp sequence. */
9379 else if (ldgp > 0)
9380 ldgp -= len;
9382 /* If the group won't fit in the same INT16 as the previous,
9383 we need to add padding to keep the group together. Rather
9384 than simply leaving the insn filling to the assembler, we
9385 can make use of the knowledge of what sorts of instructions
9386 were issued in the previous group to make sure that all of
9387 the added nops are really free. */
9388 else if (ofs + len > (int) align)
9390 int nop_count = (align - ofs) / 4;
9391 rtx_insn *where;
9393 /* Insert nops before labels, branches, and calls to truly merge
9394 the execution of the nops with the previous instruction group. */
9395 where = prev_nonnote_insn (i);
9396 if (where)
9398 if (LABEL_P (where))
9400 rtx_insn *where2 = prev_nonnote_insn (where);
9401 if (where2 && JUMP_P (where2))
9402 where = where2;
9404 else if (NONJUMP_INSN_P (where))
9405 where = i;
9407 else
9408 where = i;
9411 emit_insn_before ((*next_nop)(&prev_in_use), where);
9412 while (--nop_count);
9413 ofs = 0;
9416 ofs = (ofs + len) & (align - 1);
9417 prev_in_use = in_use;
9418 i = next;
9422 static void
9423 alpha_align_insns (void)
9425 if (alpha_tune == PROCESSOR_EV4)
9426 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9427 else if (alpha_tune == PROCESSOR_EV5)
9428 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9429 else
9430 gcc_unreachable ();
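/* The MAX_ALIGN arguments above (8 for EV4, 16 for EV5) reflect the
   aligned fetch block from which each core issues: an 8-byte pair on
   EV4 and a 16-byte block of four instructions on EV5.  */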
9433 /* Insert an unop between sibcall or noreturn function call and GP load. */
9435 static void
9436 alpha_pad_function_end (void)
9438 rtx_insn *insn, *next;
9440 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9442 if (!CALL_P (insn)
9443 || !(SIBLING_CALL_P (insn)
9444 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9445 continue;
9447 /* Make sure we do not split a call and its corresponding
9448 CALL_ARG_LOCATION note. */
9449 next = NEXT_INSN (insn);
9450 if (next == NULL)
9451 continue;
9452 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9453 insn = next;
9455 next = next_active_insn (insn);
9456 if (next)
9458 rtx pat = PATTERN (next);
9460 if (GET_CODE (pat) == SET
9461 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9462 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9463 emit_insn_after (gen_unop (), insn);
9468 /* Machine dependent reorg pass. */
9470 static void
9471 alpha_reorg (void)
9473 /* Workaround for a linker error that triggers when an exception
9474 handler immediately follows a sibcall or a noreturn function.
9476 In the sibcall case:
9478 The instruction stream from an object file:
9480 1d8: 00 00 fb 6b jmp (t12)
9481 1dc: 00 00 ba 27 ldah gp,0(ra)
9482 1e0: 00 00 bd 23 lda gp,0(gp)
9483 1e4: 00 00 7d a7 ldq t12,0(gp)
9484 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9486 was converted in the final link pass to:
9488 12003aa88: 67 fa ff c3 br 120039428 <...>
9489 12003aa8c: 00 00 fe 2f unop
9490 12003aa90: 00 00 fe 2f unop
9491 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9492 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9494 And in the noreturn case:
9496 The instruction stream from an object file:
9498 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9499 58: 00 00 ba 27 ldah gp,0(ra)
9500 5c: 00 00 bd 23 lda gp,0(gp)
9501 60: 00 00 7d a7 ldq t12,0(gp)
9502 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9504 was converted in the final link pass to:
9506 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9507 fdb28: 00 00 fe 2f unop
9508 fdb2c: 00 00 fe 2f unop
9509 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9510 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9512 GP load instructions were wrongly cleared by the linker relaxation
9513 pass. This workaround prevents removal of GP loads by inserting
9514 an unop instruction between a sibcall or noreturn function call and
9515 exception handler prologue. */
9517 if (current_function_has_exception_handlers ())
9518 alpha_pad_function_end ();
9521 static void
9522 alpha_file_start (void)
9524 default_file_start ();
9526 fputs ("\t.set noreorder\n", asm_out_file);
9527 fputs ("\t.set volatile\n", asm_out_file);
9528 if (TARGET_ABI_OSF)
9529 fputs ("\t.set noat\n", asm_out_file);
9530 if (TARGET_EXPLICIT_RELOCS)
9531 fputs ("\t.set nomacro\n", asm_out_file);
9532 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9534 const char *arch;
9536 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9537 arch = "ev6";
9538 else if (TARGET_MAX)
9539 arch = "pca56";
9540 else if (TARGET_BWX)
9541 arch = "ev56";
9542 else if (alpha_cpu == PROCESSOR_EV5)
9543 arch = "ev5";
9544 else
9545 arch = "ev4";
9547 fprintf (asm_out_file, "\t.arch %s\n", arch);
9551 /* Since we don't have a .dynbss section, we should not allow global
9552 relocations in the .rodata section. */
9554 static int
9555 alpha_elf_reloc_rw_mask (void)
9557 return flag_pic ? 3 : 2;
9560 /* Return a section for X. The only special thing we do here is to
9561 honor small data. */
9563 static section *
9564 alpha_elf_select_rtx_section (machine_mode mode, rtx x,
9565 unsigned HOST_WIDE_INT align)
9567 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9568 /* ??? Consider using mergeable sdata sections. */
9569 return sdata_section;
9570 else
9571 return default_elf_select_rtx_section (mode, x, align);
9574 static unsigned int
9575 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9577 unsigned int flags = 0;
9579 if (strcmp (name, ".sdata") == 0
9580 || strncmp (name, ".sdata.", 7) == 0
9581 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9582 || strcmp (name, ".sbss") == 0
9583 || strncmp (name, ".sbss.", 6) == 0
9584 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9585 flags = SECTION_SMALL;
9587 flags |= default_section_type_flags (decl, name, reloc);
9588 return flags;
9591 /* Structure to collect function names for final output in link section. */
9592 /* Note that items marked with GTY can't be ifdef'ed out. */
9594 enum reloc_kind
9596 KIND_LINKAGE,
9597 KIND_CODEADDR
9600 struct GTY(()) alpha_links
9602 rtx func;
9603 rtx linkage;
9604 enum reloc_kind rkind;
9607 #if TARGET_ABI_OPEN_VMS
9609 /* Return the VMS argument type corresponding to MODE. */
9611 enum avms_arg_type
9612 alpha_arg_type (machine_mode mode)
9614 switch (mode)
9616 case SFmode:
9617 return TARGET_FLOAT_VAX ? FF : FS;
9618 case DFmode:
9619 return TARGET_FLOAT_VAX ? FD : FT;
9620 default:
9621 return I64;
9625 /* Return an rtx for an integer representing the VMS Argument Information
9626 register value. */
9628 rtx
9629 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9631 unsigned HOST_WIDE_INT regval = cum.num_args;
9632 int i;
9634 for (i = 0; i < 6; i++)
9635 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9637 return GEN_INT (regval);
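/* Layout of the value built above, per the shifts used: the low 8 bits
   carry the argument count and each of the first six arguments gets a
   3-bit avms_arg_type code starting at bit 8.  */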
9641 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9642 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9643 this is the reference to the linkage pointer value, 0 if this is the
9644 reference to the function entry value. RFLAG is 1 if this a reduced
9645 reference (code address only), 0 if this is a full reference. */
9647 rtx
9648 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9650 struct alpha_links *al = NULL;
9651 const char *name = XSTR (func, 0);
9653 if (cfun->machine->links)
9655 /* Is this name already defined? */
9656 alpha_links *slot = cfun->machine->links->get (name);
9657 if (slot)
9658 al = *slot;
9660 else
9661 cfun->machine->links
9662 = hash_map<const char *, alpha_links *, string_traits>::create_ggc (64);
9664 if (al == NULL)
9666 size_t buf_len;
9667 char *linksym;
9668 tree id;
9670 if (name[0] == '*')
9671 name++;
9673 /* Follow transparent alias, as this is used for CRTL translations. */
9674 id = maybe_get_identifier (name);
9675 if (id)
9677 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9678 id = TREE_CHAIN (id);
9679 name = IDENTIFIER_POINTER (id);
9682 buf_len = strlen (name) + 8 + 9;
9683 linksym = (char *) alloca (buf_len);
9684 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9686 al = ggc_alloc<alpha_links> ();
9687 al->func = func;
9688 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9690 cfun->machine->links->put (ggc_strdup (name), al);
9693 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9695 if (lflag)
9696 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9697 else
9698 return al->linkage;
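/* For example (illustrative name only), the linkage created in function
   number 2 for a routine FOO is labeled "$2..FOO..lk".  With LFLAG set
   the caller gets a memory reference 8 bytes into that entry (the
   linkage-pointer slot written by alpha_write_one_linkage below);
   otherwise it gets the bare SYMBOL_REF, through which the function
   entry value at offset 0 is reached.  */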
9701 static int
9702 alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
9704 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9705 if (link->rkind == KIND_CODEADDR)
9707 /* External and used, request code address. */
9708 fprintf (stream, "\t.code_address ");
9710 else
9712 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9713 && SYMBOL_REF_LOCAL_P (link->func))
9715 /* Locally defined, build linkage pair. */
9716 fprintf (stream, "\t.quad %s..en\n", name);
9717 fprintf (stream, "\t.quad ");
9719 else
9721 /* External, request linkage pair. */
9722 fprintf (stream, "\t.linkage ");
9725 assemble_name (stream, name);
9726 fputs ("\n", stream);
9728 return 0;
9731 static void
9732 alpha_write_linkage (FILE *stream, const char *funname)
9734 fprintf (stream, "\t.link\n");
9735 fprintf (stream, "\t.align 3\n");
9736 in_section = NULL;
9738 #ifdef TARGET_VMS_CRASH_DEBUG
9739 fputs ("\t.name ", stream);
9740 assemble_name (stream, funname);
9741 fputs ("..na\n", stream);
9742 #endif
9744 ASM_OUTPUT_LABEL (stream, funname);
9745 fprintf (stream, "\t.pdesc ");
9746 assemble_name (stream, funname);
9747 fprintf (stream, "..en,%s\n",
9748 alpha_procedure_type == PT_STACK ? "stack"
9749 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9751 if (cfun->machine->links)
9753 hash_map<const char *, alpha_links *, string_traits>::iterator iter
9754 = cfun->machine->links->begin ();
9755 for (; iter != cfun->machine->links->end (); ++iter)
9756 alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
9760 /* Switch to an arbitrary section NAME with attributes as specified
9761 by FLAGS. ALIGN specifies any known alignment requirements for
9762 the section; 0 if the default should be used. */
9764 static void
9765 vms_asm_named_section (const char *name, unsigned int flags,
9766 tree decl ATTRIBUTE_UNUSED)
9768 fputc ('\n', asm_out_file);
9769 fprintf (asm_out_file, ".section\t%s", name);
9771 if (flags & SECTION_DEBUG)
9772 fprintf (asm_out_file, ",NOWRT");
9774 fputc ('\n', asm_out_file);
9777 /* Record an element in the table of global constructors. SYMBOL is
9778 a SYMBOL_REF of the function to be called; PRIORITY is a number
9779 between 0 and MAX_INIT_PRIORITY.
9781 Differs from default_ctors_section_asm_out_constructor in that the
9782 width of the .ctors entry is always 64 bits, rather than the 32 bits
9783 used by a normal pointer. */
9785 static void
9786 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9788 switch_to_section (ctors_section);
9789 assemble_align (BITS_PER_WORD);
9790 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9793 static void
9794 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9796 switch_to_section (dtors_section);
9797 assemble_align (BITS_PER_WORD);
9798 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9800 #else
9801 rtx
9802 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9803 bool lflag ATTRIBUTE_UNUSED,
9804 bool rflag ATTRIBUTE_UNUSED)
9806 return NULL_RTX;
9809 #endif /* TARGET_ABI_OPEN_VMS */
9811 static void
9812 alpha_init_libfuncs (void)
9814 if (TARGET_ABI_OPEN_VMS)
9816 /* Use the VMS runtime library functions for division and
9817 remainder. */
9818 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9819 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9820 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9821 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9822 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9823 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9824 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9825 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9826 abort_libfunc = init_one_libfunc ("decc$abort");
9827 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9828 #ifdef MEM_LIBFUNCS_INIT
9829 MEM_LIBFUNCS_INIT;
9830 #endif
9834 /* On the Alpha, we use this to disable the floating-point registers
9835 when they don't exist. */
9837 static void
9838 alpha_conditional_register_usage (void)
9840 int i;
9841 if (! TARGET_FPREGS)
9842 for (i = 32; i < 63; i++)
9843 fixed_regs[i] = call_used_regs[i] = 1;
9846 /* Canonicalize a comparison from one we don't have to one we do have. */
9848 static void
9849 alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9850 bool op0_preserve_value)
9852 if (!op0_preserve_value
9853 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9854 && (REG_P (*op1) || *op1 == const0_rtx))
9856 rtx tem = *op0;
9857 *op0 = *op1;
9858 *op1 = tem;
9859 *code = (int)swap_condition ((enum rtx_code)*code);
9862 if ((*code == LT || *code == LTU)
9863 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9865 *code = *code == LT ? LE : LEU;
9866 *op1 = GEN_INT (255);
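  /* The 256 -> 255 rewrite above matters because 255 fits in the 8-bit
     zero-extended literal field of the Alpha compare instructions,
     while 256 does not.  */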
9870 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
9872 static void
9873 alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
9875 const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);
9877 tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
9878 tree new_fenv_var, reload_fenv, restore_fnenv;
9879 tree update_call, atomic_feraiseexcept, hold_fnclex;
9881 /* Assume OSF/1 compatible interfaces. */
9882 if (!TARGET_ABI_OSF)
9883 return;
9885 /* Generate the equivalent of :
9886 unsigned long fenv_var;
9887 fenv_var = __ieee_get_fp_control ();
9889 unsigned long masked_fenv;
9890 masked_fenv = fenv_var & mask;
9892 __ieee_set_fp_control (masked_fenv); */
9894 fenv_var = create_tmp_var (long_unsigned_type_node);
9895 get_fpscr
9896 = build_fn_decl ("__ieee_get_fp_control",
9897 build_function_type_list (long_unsigned_type_node, NULL));
9898 set_fpscr
9899 = build_fn_decl ("__ieee_set_fp_control",
9900 build_function_type_list (void_type_node, NULL));
9901 mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
9902 ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
9903 fenv_var, build_call_expr (get_fpscr, 0));
9904 masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
9905 hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
9906 *hold = build2 (COMPOUND_EXPR, void_type_node,
9907 build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
9908 hold_fnclex);
9910 /* Store the value of masked_fenv to clear the exceptions:
9911 __ieee_set_fp_control (masked_fenv); */
9913 *clear = build_call_expr (set_fpscr, 1, masked_fenv);
9915 /* Generate the equivalent of :
9916 unsigned long new_fenv_var;
9917 new_fenv_var = __ieee_get_fp_control ();
9919 __ieee_set_fp_control (fenv_var);
9921 __atomic_feraiseexcept (new_fenv_var); */
9923 new_fenv_var = create_tmp_var (long_unsigned_type_node);
9924 reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
9925 build_call_expr (get_fpscr, 0));
9926 restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
9927 atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
9928 update_call
9929 = build_call_expr (atomic_feraiseexcept, 1,
9930 fold_convert (integer_type_node, new_fenv_var));
9931 *update = build2 (COMPOUND_EXPR, void_type_node,
9932 build2 (COMPOUND_EXPR, void_type_node,
9933 reload_fenv, restore_fnenv), update_call);
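/* Broadly, *HOLD, *CLEAR and *UPDATE play the roles of feholdexcept,
   feclearexcept and feupdateenv in the compare-and-exchange loop the
   middle end builds for a floating-point atomic compound assignment
   (see the TARGET_ATOMIC_ASSIGN_EXPAND_FENV documentation).  */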
9936 /* Initialize the GCC target structure. */
9937 #if TARGET_ABI_OPEN_VMS
9938 # undef TARGET_ATTRIBUTE_TABLE
9939 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9940 # undef TARGET_CAN_ELIMINATE
9941 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9942 #endif
9944 #undef TARGET_IN_SMALL_DATA_P
9945 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9947 #undef TARGET_ASM_ALIGNED_HI_OP
9948 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9949 #undef TARGET_ASM_ALIGNED_DI_OP
9950 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9952 /* Default unaligned ops are provided for ELF systems. To get unaligned
9953 data for non-ELF systems, we have to turn off auto alignment. */
9954 #if TARGET_ABI_OPEN_VMS
9955 #undef TARGET_ASM_UNALIGNED_HI_OP
9956 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9957 #undef TARGET_ASM_UNALIGNED_SI_OP
9958 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9959 #undef TARGET_ASM_UNALIGNED_DI_OP
9960 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9961 #endif
9963 #undef TARGET_ASM_RELOC_RW_MASK
9964 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9965 #undef TARGET_ASM_SELECT_RTX_SECTION
9966 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9967 #undef TARGET_SECTION_TYPE_FLAGS
9968 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9970 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9971 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9973 #undef TARGET_INIT_LIBFUNCS
9974 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9976 #undef TARGET_LEGITIMIZE_ADDRESS
9977 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9978 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
9979 #define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
9981 #undef TARGET_ASM_FILE_START
9982 #define TARGET_ASM_FILE_START alpha_file_start
9984 #undef TARGET_SCHED_ADJUST_COST
9985 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9986 #undef TARGET_SCHED_ISSUE_RATE
9987 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9988 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9989 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9990 alpha_multipass_dfa_lookahead
9992 #undef TARGET_HAVE_TLS
9993 #define TARGET_HAVE_TLS HAVE_AS_TLS
9995 #undef TARGET_BUILTIN_DECL
9996 #define TARGET_BUILTIN_DECL alpha_builtin_decl
9997 #undef TARGET_INIT_BUILTINS
9998 #define TARGET_INIT_BUILTINS alpha_init_builtins
9999 #undef TARGET_EXPAND_BUILTIN
10000 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10001 #undef TARGET_FOLD_BUILTIN
10002 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10003 #undef TARGET_GIMPLE_FOLD_BUILTIN
10004 #define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin
10006 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10007 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10008 #undef TARGET_CANNOT_COPY_INSN_P
10009 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10010 #undef TARGET_LEGITIMATE_CONSTANT_P
10011 #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
10012 #undef TARGET_CANNOT_FORCE_CONST_MEM
10013 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10015 #if TARGET_ABI_OSF
10016 #undef TARGET_ASM_OUTPUT_MI_THUNK
10017 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10018 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10019 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10020 #undef TARGET_STDARG_OPTIMIZE_HOOK
10021 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10022 #endif
10024 /* Use 16-bit anchors. */
10025 #undef TARGET_MIN_ANCHOR_OFFSET
10026 #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
10027 #undef TARGET_MAX_ANCHOR_OFFSET
10028 #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
10029 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
10030 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
10032 #undef TARGET_RTX_COSTS
10033 #define TARGET_RTX_COSTS alpha_rtx_costs
10034 #undef TARGET_ADDRESS_COST
10035 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
10037 #undef TARGET_MACHINE_DEPENDENT_REORG
10038 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10040 #undef TARGET_PROMOTE_FUNCTION_MODE
10041 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
10042 #undef TARGET_PROMOTE_PROTOTYPES
10043 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10044 #undef TARGET_RETURN_IN_MEMORY
10045 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10046 #undef TARGET_PASS_BY_REFERENCE
10047 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10048 #undef TARGET_SETUP_INCOMING_VARARGS
10049 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10050 #undef TARGET_STRICT_ARGUMENT_NAMING
10051 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10052 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10053 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10054 #undef TARGET_SPLIT_COMPLEX_ARG
10055 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10056 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10057 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10058 #undef TARGET_ARG_PARTIAL_BYTES
10059 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10060 #undef TARGET_FUNCTION_ARG
10061 #define TARGET_FUNCTION_ARG alpha_function_arg
10062 #undef TARGET_FUNCTION_ARG_ADVANCE
10063 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
10064 #undef TARGET_TRAMPOLINE_INIT
10065 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
10067 #undef TARGET_INSTANTIATE_DECLS
10068 #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
10070 #undef TARGET_SECONDARY_RELOAD
10071 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10073 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10074 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10075 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10076 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10078 #undef TARGET_BUILD_BUILTIN_VA_LIST
10079 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10081 #undef TARGET_EXPAND_BUILTIN_VA_START
10082 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10084 /* The Alpha architecture does not require sequential consistency. See
10085 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10086 for an example of how it can be violated in practice. */
10087 #undef TARGET_RELAXED_ORDERING
10088 #define TARGET_RELAXED_ORDERING true
10090 #undef TARGET_OPTION_OVERRIDE
10091 #define TARGET_OPTION_OVERRIDE alpha_option_override
10093 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10094 #undef TARGET_MANGLE_TYPE
10095 #define TARGET_MANGLE_TYPE alpha_mangle_type
10096 #endif
10098 #undef TARGET_LEGITIMATE_ADDRESS_P
10099 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10101 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10102 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
10104 #undef TARGET_CANONICALIZE_COMPARISON
10105 #define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
10107 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
10108 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv
10110 struct gcc_target targetm = TARGET_INITIALIZER;
10113 #include "gt-alpha.h"