/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
/* Specify which cpu to schedule for.  */

enum processor_type alpha_cpu;
static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Specify bit size of immediate TLS offsets.  */

int alpha_tls_size = 32;

/* Strings decoded into the above options.  */

const char *alpha_cpu_string;	/* -mcpu= */
const char *alpha_tune_string;	/* -mtune= */
const char *alpha_tp_string;	/* -mtrap-precision=[p|f|i] */
const char *alpha_fprm_string;	/* -mfp-rounding-mode=[n|m|c|d] */
const char *alpha_fptm_string;	/* -mfp-trap-mode=[n|u|su|sui] */
const char *alpha_mlat_string;	/* -mmemory-latency= */
const char *alpha_tls_size_string; /* -mtls-size=[16|32|64] */

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */

struct alpha_compare alpha_compare;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
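
/* Illustrative note on the +N bias above (assuming COSTS_N_INSNS (N)
   expands to N * 4, as in rtl.h): int_mult_di in the size table is
   COSTS_N_INSNS (1) + 2 == 6, which sits between one insn (4) and two
   insns (8).  So a single cheaper insn can still replace a multiply,
   but a two-insn replacement sequence no longer looks profitable.  */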
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif

#define REG_PV 27
#define REG_RA 26

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Parse target option strings.  */

void
override_options (void)
{
  int i;
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
#define EV5_MASK (MASK_CPU_EV5)
#define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, EV5_MASK },
    { "21164",	PROCESSOR_EV5, EV5_MASK },
    { "ev56",	PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "21164a",	PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "pca56",	PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, EV6_MASK },
    { "21264",	PROCESSOR_EV6, EV6_MASK },
    { "ev67",	PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { 0, 0, 0 }
  };

  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning ("-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning ("-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning ("-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value `%s' for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_tls_size_string)
    {
      if (strcmp (alpha_tls_size_string, "16") == 0)
	alpha_tls_size = 16;
      else if (strcmp (alpha_tls_size_string, "32") == 0)
	alpha_tls_size = 32;
      else if (strcmp (alpha_tls_size_string, "64") == 0)
	alpha_tls_size = 64;
      else
	error ("bad value `%s' for -mtls-size switch", alpha_tls_size_string);
    }
  alpha_cpu
    = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
      : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);

  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
			       | MASK_CPU_EV5 | MASK_CPU_EV6);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_cpu = cpu_table [i].processor;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value `%s' for -mtune switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning ("trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
    {
      warning ("fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (TARGET_CPU_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning ("rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning ("trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning ("128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
	  {
	    warning ("L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_cpu]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_cpu][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }

  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
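
/* For instance, 0xffffffff00000000 and 0x00ff00000000ffff are zap
   masks (every byte is 0x00 or 0xff), while 0x1234 is not, since its
   low bytes mix zero and nonzero bits.  */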
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  if (SYMBOL_REF_LOCAL_P (op))
    {
      if (alpha_tls_size > size)
	return 0;
    }
  else
    {
      if (size != 64)
	return 0;
    }

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL;
    default:
      abort ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* Implements CONST_OK_FOR_LETTER_P.  Return true if the value matches
   the range defined for C in [I-P].  */

bool
alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
{
  switch (c)
    {
    case 'I':
      /* An unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) value < 0x100;
    case 'J':
      /* The constant zero.  */
      return value == 0;
    case 'K':
      /* A signed 16 bit constant.  */
      return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
    case 'L':
      /* A shifted signed 16 bit constant appropriate for LDAH.  */
      return ((value & 0xffff) == 0
	      && ((value) >> 31 == -1 || value >> 31 == 0));
    case 'M':
      /* A constant that can be AND'ed with using a ZAP insn.  */
      return zap_mask (value);
    case 'N':
      /* A complemented unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
    case 'O':
      /* A negated unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (- value) < 0x100;
    case 'P':
      /* The constant 1, 2 or 3.  */
      return value == 1 || value == 2 || value == 3;

    default:
      return false;
    }
}
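
/* Examples: 0xff matches 'I'; 0x7fff0000 matches 'L' (one ldah);
   0xffffffffffffff00 matches 'M' (a zap mask); -255 matches 'O'
   (its negation fits in 8 bits); and 1, 2, 3 match 'P', the shift
   counts used by the scaled add patterns (s4addq/s8addq).  */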
/* Implements CONST_DOUBLE_OK_FOR_LETTER_P.  Return true if VALUE
   matches for C in [GH].  */

bool
alpha_const_double_ok_for_letter_p (rtx value, int c)
{
  switch (c)
    {
    case 'G':
      /* The floating point zero constant.  */
      return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
	      && value == CONST0_RTX (GET_MODE (value)));

    case 'H':
      /* A valid operand of a ZAP insn.  */
      return (GET_MODE (value) == VOIDmode
	      && zap_mask (CONST_DOUBLE_LOW (value))
	      && zap_mask (CONST_DOUBLE_HIGH (value)));

    default:
      return false;
    }
}
/* Implements EXTRA_CONSTRAINT.  Return true if VALUE
   matches for C.  */

bool
alpha_extra_constraint (rtx value, int c)
{
  switch (c)
    {
    case 'Q':
      return normal_memory_operand (value, VOIDmode);
    case 'R':
      return direct_call_operand (value, Pmode);
    case 'S':
      return (GET_CODE (value) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
    case 'T':
      return GET_CODE (value) == HIGH;
    case 'U':
      return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
    case 'W':
      return (GET_CODE (value) == CONST_VECTOR
	      && value == CONST0_RTX (GET_MODE (value)));
    default:
      return false;
    }
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}

/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}

/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
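
/* Example: with the default -G 8 set in override_options above, a
   4-byte scalar such as "int x;" lands in .sdata, which makes it
   addressable off the GP with a single 16-bit displacement.  An
   incomplete type reports size <= 0 and stays out.  */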
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS	\
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clear the
   low-order three bits; this is an "unaligned" access.  */

bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
	  ? STRICT_REG_OK_FOR_BASE_P (x)
	  : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
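
/* Some addresses accepted above, for illustration:
     (reg $16)
     (plus (reg $16) (const_int 32))
     (and (plus (reg $16) (const_int 5)) (const_int -8))  [DImode ldq_u]
     (lo_sum (reg $29) local_symbol)                      [explicit relocs]
   Reg+reg indexed forms are rejected; the Alpha ISA only offers
   base-plus-16-bit-displacement memory addressing.  */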
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (!no_new_pseudos
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (!no_new_pseudos
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;
	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);
	}
      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (!no_new_pseudos)
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
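
/* split_addend, worked through: for (plus (reg) (const_int 0x18678)),
   low = ((0x8678 ^ 0x8000) - 0x8000) = -0x7988, and the remaining
   high part is 0x20000, so we emit one ldah (adding 2 << 16) and
   return (plus temp -0x7988), whose offset now fits the signed
   16-bit displacement field.  */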
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}

/* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
   small symbolic operand until after reload.  At which point we need
   to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
   so that sched2 has the proper dependency information.  */

/*
  {"some_small_symbolic_operand", {SET, PARALLEL, PREFETCH, UNSPEC,	\
    UNSPEC_VOLATILE}},
*/
static int
some_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

int
some_small_symbolic_operand (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return for_each_rtx (&x, some_small_symbolic_operand_1, NULL);
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  return NULL_RTX;
}
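
/* Worked example: for (plus (reg $30) (const_int 0x9010)) we get
   low = -0x6ff0 and high = 0x10000, producing
   (plus (plus $30 0x10000) -0x6ff0); reload loads the inner plus
   into a base register with a single ldah and the low part stays
   in the mem insn.  */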
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_cpu];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;
    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
	abort ();
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  *paligned_mem
    = widen_memory_access (ref, SImode, (offset & ~3) - offset);

  if (WORDS_BIG_ENDIAN)
    *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
			      + (offset & 3) * 8));
  else
    *pbitnum = GEN_INT ((offset & 3) * 8);
}
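
/* Example: for (mem:HI (plus $30 (const_int 6))), offset is 6, so
   *paligned_mem becomes the SImode word at offset 4 (offset & ~3)
   and, little-endian, *pbitnum is (6 & 3) * 8 = 16: the halfword
   lives 16 bits into the aligned longword.  */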
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref, int extra_offset)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
	abort ();
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

enum reg_class
secondary_reload_class (enum reg_class class, enum machine_mode mode,
			rtx x, int in)
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	  || (GET_CODE (x) == SUBREG
	      && (GET_CODE (SUBREG_REG (x)) == MEM
		  || (GET_CODE (SUBREG_REG (x)) == REG
		      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
	{
	  if (!in || !aligned_memory_operand(x, mode))
	    return GENERAL_REGS;
	}
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
	  && ! (memory_operand (x, mode) || x == const0_rtx))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
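
/* For example, without -mbwx a QImode store needs a GENERAL_REGS
   scratch to build the ldq_u/insbl/mskbl/stq_u sequence, and an
   integral-mode value that is not a naturally aligned mem (or zero)
   must pass through a general register on its way into FLOAT_REGS.  */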
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, readonly, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
/* Internal routine for alpha_emit_set_const to check for N or below insns.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_new_pseudos)
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0
	  && (temp = alpha_emit_set_const (subtarget, mode, c - new, i)) != 0)
	return expand_binop (mode, add_optab, temp, GEN_INT (new),
			     target, 0, OPTAB_WIDEN);

      /* Next try complementing.  */
      if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
	return expand_unop (mode, one_cmpl_optab, temp, target, 0);

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      if ((bits = exact_log2 (c & - c)) > 0)
	for (; bits > 0; bits--)
	  if ((temp = (alpha_emit_set_const
		       (subtarget, mode, c >> bits, i))) != 0
	      || ((temp = (alpha_emit_set_const
			   (subtarget, mode,
			    ((unsigned HOST_WIDE_INT) c) >> bits, i)))
		  != 0))
	    return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				 target, 0, OPTAB_WIDEN);

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
		   - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
	for (; bits > 0; bits--)
	  if ((temp = alpha_emit_set_const (subtarget, mode,
					    c << bits, i)) != 0
	      || ((temp = (alpha_emit_set_const
			   (subtarget, mode,
			    ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
			    i)))
		  != 0))
	    return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				 target, 1, OPTAB_WIDEN);

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
		   - floor_log2 (~ c) - 2)) > 0)
	for (; bits > 0; bits--)
	  if ((temp = alpha_emit_set_const (subtarget, mode,
					    c << bits, i)) != 0
	      || ((temp = (alpha_emit_set_const
			   (subtarget, mode,
			    ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
			    i)))
		  != 0))
	    return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				 target, 0, OPTAB_WIDEN);
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c && new != -1
      && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
    return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			 target, 0, OPTAB_WIDEN);
#endif

  return 0;
}
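
/* Worked example: c = 0x12345678 falls into the 32-bit path above
   with low = 0x5678 and high = 0x1234, so for n >= 2 we emit
   "ldah target,0x1234($31)" followed by "lda target,0x5678(target)".  */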
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n)
{
  rtx result = 0;
  rtx orig_target = target;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and we
     can't load this constant in one insn, do this in DImode.  */
  if (no_new_pseudos && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER
      && (result = alpha_emit_set_const_1 (target, mode, c, 1)) == 0)
    {
      target = gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i);
      if (result)
	{
	  rtx insn = get_last_insn ();
	  rtx set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result == target)
    result = orig_target;

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straight forward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  if (c2 != -(c1 < 0))
    abort ();
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c1 != d4)
    abort ();
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c1 != d2)
    abort ();
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c2 != d4)
    abort ();
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
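
/* Decomposition example: c1 = 0x1234567890abcdef yields d1 = -0x3211,
   d2 = -0x6f540000, d3 = 0x5679, d4 = 0x12340000, and the value is
   rebuilt as ((d4 + d3) << 32) + d2 + d1 by the five moves above.  */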
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      rtx tmp;

      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      HOST_WIDE_INT i0, i1;
      rtx temp = NULL_RTX;

      if (GET_CODE (operands[1]) == CONST_INT)
	{
	  i0 = INTVAL (operands[1]);
	  i1 = -(i0 < 0);
	}
      else if (HOST_BITS_PER_WIDE_INT >= 64)
	{
	  i0 = CONST_DOUBLE_LOW (operands[1]);
	  i1 = -(i0 < 0);
	}
      else
	{
	  i0 = CONST_DOUBLE_LOW (operands[1]);
	  i1 = CONST_DOUBLE_HIGH (operands[1]);
	}

      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	temp = alpha_emit_set_const (operands[0], mode, i0, 3);

      if (!temp && TARGET_BUILD_CONSTANTS)
	temp = alpha_emit_set_long_const (operands[0], i0, i1);

      if (temp)
	{
	  if (rtx_equal_p (operands[0], temp))
	    return true;
	  operands[1] = temp;
	  return false;
	}
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = copy_rtx (operands[1]);
      XEXP (operands[1], 0) = operands[0];
    }
  else
    operands[1] = validize_mem (operands[1]);
  return false;
}
2024 /* Expand a non-bwx QImode or HImode move instruction;
2025 return true if all work is done. */
2027 bool
2028 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2030 /* If the output is not a register, the input must be. */
2031 if (GET_CODE (operands[0]) == MEM)
2032 operands[1] = force_reg (mode, operands[1]);
2034 /* Handle four memory cases, unaligned and aligned for either the input
2035 or the output. The only case where we can be called during reload is
2036 for aligned loads; all other cases require temporaries. */
2038 if (GET_CODE (operands[1]) == MEM
2039 || (GET_CODE (operands[1]) == SUBREG
2040 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2041 || (reload_in_progress && GET_CODE (operands[1]) == REG
2042 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2043 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2044 && GET_CODE (SUBREG_REG (operands[1])) == REG
2045 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2047 if (aligned_memory_operand (operands[1], mode))
2049 if (reload_in_progress)
2051 emit_insn ((mode == QImode
2052 ? gen_reload_inqi_help
2053 : gen_reload_inhi_help)
2054 (operands[0], operands[1],
2055 gen_rtx_REG (SImode, REGNO (operands[0]))));
2057 else
2059 rtx aligned_mem, bitnum;
2060 rtx scratch = gen_reg_rtx (SImode);
2061 rtx subtarget;
2062 bool copyout;
2064 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2066 subtarget = operands[0];
2067 if (GET_CODE (subtarget) == REG)
2068 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2069 else
2070 subtarget = gen_reg_rtx (DImode), copyout = true;
2072 emit_insn ((mode == QImode
2073 ? gen_aligned_loadqi
2074 : gen_aligned_loadhi)
2075 (subtarget, aligned_mem, bitnum, scratch));
2077 if (copyout)
2078 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2081 else
2083 /* Don't pass these as parameters since that makes the generated
2084 code depend on parameter evaluation order, which will cause
2085 bootstrap failures. */
2087 rtx temp1, temp2, seq, subtarget;
2088 bool copyout;
2090 temp1 = gen_reg_rtx (DImode);
2091 temp2 = gen_reg_rtx (DImode);
2093 subtarget = operands[0];
2094 if (GET_CODE (subtarget) == REG)
2095 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2096 else
2097 subtarget = gen_reg_rtx (DImode), copyout = true;
2099 seq = ((mode == QImode
2100 ? gen_unaligned_loadqi
2101 : gen_unaligned_loadhi)
2102 (subtarget, get_unaligned_address (operands[1], 0),
2103 temp1, temp2));
2104 alpha_set_memflags (seq, operands[1]);
2105 emit_insn (seq);
2107 if (copyout)
2108 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2110 return true;
2113 if (GET_CODE (operands[0]) == MEM
2114 || (GET_CODE (operands[0]) == SUBREG
2115 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2116 || (reload_in_progress && GET_CODE (operands[0]) == REG
2117 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2118 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2119 && GET_CODE (SUBREG_REG (operands[0])) == REG
2120 && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
2122 if (aligned_memory_operand (operands[0], mode))
2124 rtx aligned_mem, bitnum;
2125 rtx temp1 = gen_reg_rtx (SImode);
2126 rtx temp2 = gen_reg_rtx (SImode);
2128 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2130 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2131 temp1, temp2));
2133 else
2135 rtx temp1 = gen_reg_rtx (DImode);
2136 rtx temp2 = gen_reg_rtx (DImode);
2137 rtx temp3 = gen_reg_rtx (DImode);
2138 rtx seq = ((mode == QImode
2139 ? gen_unaligned_storeqi
2140 : gen_unaligned_storehi)
2141 (get_unaligned_address (operands[0], 0),
2142 operands[1], temp1, temp2, temp3));
2144 alpha_set_memflags (seq, operands[0]);
2145 emit_insn (seq);
2147 return true;
2150 return false;
2153 /* Generate an unsigned DImode to FP conversion. This is the same code
2154 optabs would emit if we didn't have TFmode patterns.
2156 For SFmode, this is the only construction I've found that can pass
2157 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2158 intermediates will work, because you'll get intermediate rounding
2159 that ruins the end result. Some of this could be fixed by turning
2160 on round-to-positive-infinity, but that requires diddling the fpsr,
2161 which kills performance. I tried turning this around and converting
2162 to a negative number, so that I could turn on /m, but either I did
2163 it wrong or there's something else, because I wound up with the exact
2164 same single-bit error. There is a branchless form of this same code:
2166 srl $16,1,$1
2167 and $16,1,$2
2168 cmplt $16,0,$3
2169 or $1,$2,$2
2170 cmovge $16,$16,$2
2171 itoft $3,$f10
2172 itoft $2,$f11
2173 cvtqs $f11,$f11
2174 adds $f11,$f11,$f0
2175 fcmoveq $f10,$f11,$f0
2177 I'm not using it because it's the same number of instructions as
2178 this branch-full form, and it has more serialized long latency
2179 instructions on the critical path.
2181 For DFmode, we can avoid rounding errors by breaking up the word
2182 into two pieces, converting them separately, and adding them back:
2184 LC0: .long 0,0x5f800000
2186 itoft $16,$f11
2187 lda $2,LC0
2188 cmplt $16,0,$1
2189 cpyse $f11,$f31,$f10
2190 cpyse $f31,$f11,$f11
2191 s4addq $1,$2,$1
2192 lds $f12,0($1)
2193 cvtqt $f10,$f10
2194 cvtqt $f11,$f11
2195 addt $f12,$f10,$f0
2196 addt $f0,$f11,$f0
2198 This doesn't seem to be a clear-cut win over the optabs form.
2199 It probably all depends on the distribution of numbers being
2200 converted -- in the optabs form, all but high-bit-set has a
2201 much lower minimum execution time. */
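/* In C terms, the branch-full sequence emitted below computes, for an
   unsigned 64-bit X (a sketch of the logic, not literal output):

	X < 2^63 ? (FP) (signed) X
		 : 2.0 * (FP) (signed) ((X >> 1) | (X & 1))

   OR-ing the discarded low bit back in acts as a sticky bit, so the
   final doubling still rounds to the correct result. */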
2203 void
2204 alpha_emit_floatuns (rtx operands[2])
2206 rtx neglab, donelab, i0, i1, f0, in, out;
2207 enum machine_mode mode;
2209 out = operands[0];
2210 in = force_reg (DImode, operands[1]);
2211 mode = GET_MODE (out);
2212 neglab = gen_label_rtx ();
2213 donelab = gen_label_rtx ();
2214 i0 = gen_reg_rtx (DImode);
2215 i1 = gen_reg_rtx (DImode);
2216 f0 = gen_reg_rtx (mode);
2218 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2220 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2221 emit_jump_insn (gen_jump (donelab));
2222 emit_barrier ();
2224 emit_label (neglab);
2226 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2227 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2228 emit_insn (gen_iordi3 (i0, i0, i1));
2229 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2230 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2232 emit_label (donelab);
2235 /* Generate the comparison for a conditional branch. */
2238 alpha_emit_conditional_branch (enum rtx_code code)
2240 enum rtx_code cmp_code, branch_code;
2241 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2242 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2243 rtx tem;
2245 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2247 if (! TARGET_HAS_XFLOATING_LIBS)
2248 abort ();
2250 /* X_floating library comparison functions return
2251 -1 unordered
2252 0 false
2253 1 true
2254 Convert the compare against the raw return value. */
2256 switch (code)
2258 case UNORDERED:
2259 cmp_code = EQ;
2260 code = LT;
2261 break;
2262 case ORDERED:
2263 cmp_code = EQ;
2264 code = GE;
2265 break;
2266 case NE:
2267 cmp_code = NE;
2268 code = NE;
2269 break;
2270 default:
2271 cmp_code = code;
2272 code = GT;
2273 break;
2276 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
2277 op1 = const0_rtx;
2278 alpha_compare.fp_p = 0;
2281 /* The general case: fold the comparison code to the types of compares
2282 that we have, choosing the branch as necessary. */
2283 switch (code)
2285 case EQ: case LE: case LT: case LEU: case LTU:
2286 case UNORDERED:
2287 /* We have these compares: */
2288 cmp_code = code, branch_code = NE;
2289 break;
2291 case NE:
2292 case ORDERED:
2293 /* These must be reversed. */
2294 cmp_code = reverse_condition (code), branch_code = EQ;
2295 break;
2297 case GE: case GT: case GEU: case GTU:
2298 /* For FP, we swap them, for INT, we reverse them. */
2299 if (alpha_compare.fp_p)
2301 cmp_code = swap_condition (code);
2302 branch_code = NE;
2303 tem = op0, op0 = op1, op1 = tem;
2305 else
2307 cmp_code = reverse_condition (code);
2308 branch_code = EQ;
2310 break;
2312 default:
2313 abort ();
2316 if (alpha_compare.fp_p)
2318 cmp_mode = DFmode;
2319 if (flag_unsafe_math_optimizations)
2321 /* When we are not as concerned about non-finite values, and we
2322 are comparing against zero, we can branch directly. */
2323 if (op1 == CONST0_RTX (DFmode))
2324 cmp_code = NIL, branch_code = code;
2325 else if (op0 == CONST0_RTX (DFmode))
2327 /* Undo the swap we probably did just above. */
2328 tem = op0, op0 = op1, op1 = tem;
2329 branch_code = swap_condition (cmp_code);
2330 cmp_code = NIL;
2333 else
2335 /* ??? We mark the branch mode to be CCmode to prevent the
2336 compare and branch from being combined, since the compare
2337 insn follows IEEE rules that the branch does not. */
2338 branch_mode = CCmode;
2341 else
2343 cmp_mode = DImode;
2345 /* The following optimizations are only for signed compares. */
2346 if (code != LEU && code != LTU && code != GEU && code != GTU)
2348 /* Whee. Compare and branch against 0 directly. */
2349 if (op1 == const0_rtx)
2350 cmp_code = NIL, branch_code = code;
2352 /* If the constant doesn't fit into an immediate, but can
2353 be generated by lda/ldah, we adjust the argument and
2354 compare against zero, so we can use beq/bne directly. */
2355 /* ??? Don't do this when comparing against symbols, otherwise
2356 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2357 be declared false out of hand (at least for non-weak). */
2358 else if (GET_CODE (op1) == CONST_INT
2359 && (code == EQ || code == NE)
2360 && !(symbolic_operand (op0, VOIDmode)
2361 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2363 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2365 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2366 && (CONST_OK_FOR_LETTER_P (n, 'K')
2367 || CONST_OK_FOR_LETTER_P (n, 'L')))
2369 cmp_code = PLUS, branch_code = code;
2370 op1 = GEN_INT (n);
2375 if (!reg_or_0_operand (op0, DImode))
2376 op0 = force_reg (DImode, op0);
2377 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2378 op1 = force_reg (DImode, op1);
2381 /* Emit an initial compare instruction, if necessary. */
2382 tem = op0;
2383 if (cmp_code != NIL)
2385 tem = gen_reg_rtx (cmp_mode);
2386 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2389 /* Zero the operands. */
2390 memset (&alpha_compare, 0, sizeof (alpha_compare));
2392 /* Return the branch comparison. */
2393 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2396 /* Certain simplifications can be done to make invalid setcc operations
2397 valid. Return the final comparison, or NULL if we can't work. */
2400 alpha_emit_setcc (enum rtx_code code)
2402 enum rtx_code cmp_code;
2403 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2404 int fp_p = alpha_compare.fp_p;
2405 rtx tmp;
2407 /* Zero the operands. */
2408 memset (&alpha_compare, 0, sizeof (alpha_compare));
2410 if (fp_p && GET_MODE (op0) == TFmode)
2412 if (! TARGET_HAS_XFLOATING_LIBS)
2413 abort ();
2415 /* X_floating library comparison functions return
2416 -1 unordered
2417 0 false
2418 1 true
2419 Convert the compare against the raw return value. */
2421 if (code == UNORDERED || code == ORDERED)
2422 cmp_code = EQ;
2423 else
2424 cmp_code = code;
2426 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
2427 op1 = const0_rtx;
2428 fp_p = 0;
2430 if (code == UNORDERED)
2431 code = LT;
2432 else if (code == ORDERED)
2433 code = GE;
2434 else
2435 code = GT;
2438 if (fp_p && !TARGET_FIX)
2439 return NULL_RTX;
2441 /* The general case: fold the comparison code to the types of compares
2442 that we have, choosing the branch as necessary. */
2444 cmp_code = NIL;
2445 switch (code)
2447 case EQ: case LE: case LT: case LEU: case LTU:
2448 case UNORDERED:
2449 /* We have these compares. */
2450 if (fp_p)
2451 cmp_code = code, code = NE;
2452 break;
2454 case NE:
2455 if (!fp_p && op1 == const0_rtx)
2456 break;
2457 /* FALLTHRU */
2459 case ORDERED:
2460 cmp_code = reverse_condition (code);
2461 code = EQ;
2462 break;
2464 case GE: case GT: case GEU: case GTU:
2465 /* These normally need swapping, but for integer zero we have
2466 special patterns that recognize swapped operands. */
2467 if (!fp_p && op1 == const0_rtx)
2468 break;
2469 code = swap_condition (code);
2470 if (fp_p)
2471 cmp_code = code, code = NE;
2472 tmp = op0, op0 = op1, op1 = tmp;
2473 break;
2475 default:
2476 abort ();
2479 if (!fp_p)
2481 if (!register_operand (op0, DImode))
2482 op0 = force_reg (DImode, op0);
2483 if (!reg_or_8bit_operand (op1, DImode))
2484 op1 = force_reg (DImode, op1);
2487 /* Emit an initial compare instruction, if necessary. */
2488 if (cmp_code != NIL)
2490 enum machine_mode mode = fp_p ? DFmode : DImode;
2492 tmp = gen_reg_rtx (mode);
2493 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2494 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2496 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2497 op1 = const0_rtx;
2500 /* Return the setcc comparison. */
2501 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2505 /* Rewrite a comparison against zero CMP of the form
2506 (CODE (cc0) (const_int 0)) so it can be written validly in
2507 a conditional move (if_then_else CMP ...).
2508 If both of the operands that set cc0 are nonzero we must emit
2509 an insn to perform the compare (it can't be done within
2510 the conditional move). */
2513 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2515 enum rtx_code code = GET_CODE (cmp);
2516 enum rtx_code cmov_code = NE;
2517 rtx op0 = alpha_compare.op0;
2518 rtx op1 = alpha_compare.op1;
2519 int fp_p = alpha_compare.fp_p;
2520 enum machine_mode cmp_mode
2521 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2522 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2523 enum machine_mode cmov_mode = VOIDmode;
2524 int local_fast_math = flag_unsafe_math_optimizations;
2525 rtx tem;
2527 /* Zero the operands. */
2528 memset (&alpha_compare, 0, sizeof (alpha_compare));
2530 if (fp_p != FLOAT_MODE_P (mode))
2532 enum rtx_code cmp_code;
2534 if (! TARGET_FIX)
2535 return 0;
2537 /* If we have fp<->int register move instructions, do a cmov by
2538 performing the comparison in fp registers, and move the
2539 zero/nonzero value to integer registers, where we can then
2540 use a normal cmov, or vice-versa. */
2542 switch (code)
2544 case EQ: case LE: case LT: case LEU: case LTU:
2545 /* We have these compares. */
2546 cmp_code = code, code = NE;
2547 break;
2549 case NE:
2550 /* This must be reversed. */
2551 cmp_code = EQ, code = EQ;
2552 break;
2554 case GE: case GT: case GEU: case GTU:
2555 /* These normally need swapping, but for integer zero we have
2556 special patterns that recognize swapped operands. */
2557 if (!fp_p && op1 == const0_rtx)
2558 cmp_code = code, code = NE;
2559 else
2561 cmp_code = swap_condition (code);
2562 code = NE;
2563 tem = op0, op0 = op1, op1 = tem;
2565 break;
2567 default:
2568 abort ();
2571 tem = gen_reg_rtx (cmp_op_mode);
2572 emit_insn (gen_rtx_SET (VOIDmode, tem,
2573 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2574 op0, op1)));
2576 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2577 op0 = gen_lowpart (cmp_op_mode, tem);
2578 op1 = CONST0_RTX (cmp_op_mode);
2579 fp_p = !fp_p;
2580 local_fast_math = 1;
2583 /* We may be able to use a conditional move directly.
2584 This avoids emitting spurious compares. */
2585 if (signed_comparison_operator (cmp, VOIDmode)
2586 && (!fp_p || local_fast_math)
2587 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2588 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2590 /* We can't put the comparison inside the conditional move;
2591 emit a compare instruction and put that inside the
2592 conditional move. Make sure we emit only comparisons we have;
2593 swap or reverse as necessary. */
2595 if (no_new_pseudos)
2596 return NULL_RTX;
2598 switch (code)
2600 case EQ: case LE: case LT: case LEU: case LTU:
2601 /* We have these compares: */
2602 break;
2604 case NE:
2605 /* This must be reversed. */
2606 code = reverse_condition (code);
2607 cmov_code = EQ;
2608 break;
2610 case GE: case GT: case GEU: case GTU:
2611 /* These must be swapped. */
2612 if (op1 != CONST0_RTX (cmp_mode))
2614 code = swap_condition (code);
2615 tem = op0, op0 = op1, op1 = tem;
2617 break;
2619 default:
2620 abort ();
2623 if (!fp_p)
2625 if (!reg_or_0_operand (op0, DImode))
2626 op0 = force_reg (DImode, op0);
2627 if (!reg_or_8bit_operand (op1, DImode))
2628 op1 = force_reg (DImode, op1);
2631 /* ??? We mark the branch mode to be CCmode to prevent the compare
2632 and cmov from being combined, since the compare insn follows IEEE
2633 rules that the cmov does not. */
2634 if (fp_p && !local_fast_math)
2635 cmov_mode = CCmode;
2637 tem = gen_reg_rtx (cmp_op_mode);
2638 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2639 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2642 /* Simplify a conditional move of two constants into a setcc with
2643 arithmetic. This is done with a splitter since combine would
2644 just undo the work if done during code generation. It also catches
2645 cases we wouldn't have before cse. */
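/* A sketch of the transformations below (register numbers and the
   comparison are illustrative): for (cond ? 8 : 0) we emit a setcc
   and a shift,

	cmpXX ..,$1		# $1 = cond ? 1 : 0
	sll $1,3,$0		# $0 = cond ? 8 : 0

   for (cond ? -1 : 0) a setcc and a negq, and when t - f is 1, 4 or 8
   an addq, s4addq or s8addq against F. */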
2648 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2649 rtx t_rtx, rtx f_rtx)
2651 HOST_WIDE_INT t, f, diff;
2652 enum machine_mode mode;
2653 rtx target, subtarget, tmp;
2655 mode = GET_MODE (dest);
2656 t = INTVAL (t_rtx);
2657 f = INTVAL (f_rtx);
2658 diff = t - f;
2660 if (((code == NE || code == EQ) && diff < 0)
2661 || (code == GE || code == GT))
2663 code = reverse_condition (code);
2664 diff = t, t = f, f = diff;
2665 diff = t - f;
2668 subtarget = target = dest;
2669 if (mode != DImode)
2671 target = gen_lowpart (DImode, dest);
2672 if (! no_new_pseudos)
2673 subtarget = gen_reg_rtx (DImode);
2674 else
2675 subtarget = target;
2677 /* Below, we must be careful to use copy_rtx on target and subtarget
2678 in intermediate insns, as they may be a subreg rtx, which may not
2679 be shared. */
2681 if (f == 0 && exact_log2 (diff) > 0
2682 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2683 viable over a longer latency cmove. On EV5, the E0 slot is a
2684 scarce resource, and on EV4 shift has the same latency as a cmove. */
2685 && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))
2687 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2688 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2690 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2691 GEN_INT (exact_log2 (t)));
2692 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2694 else if (f == 0 && t == -1)
2696 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2697 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2699 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2701 else if (diff == 1 || diff == 4 || diff == 8)
2703 rtx add_op;
2705 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2706 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2708 if (diff == 1)
2709 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2710 else
2712 add_op = GEN_INT (f);
2713 if (sext_add_operand (add_op, mode))
2715 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2716 GEN_INT (diff));
2717 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2718 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2720 else
2721 return 0;
2724 else
2725 return 0;
2727 return 1;
2730 /* Look up the function X_floating library function name for the
2731 given operation. */
2733 struct xfloating_op GTY(())
2735 const enum rtx_code code;
2736 const char *const GTY((skip)) osf_func;
2737 const char *const GTY((skip)) vms_func;
2738 rtx libcall;
2741 static GTY(()) struct xfloating_op xfloating_ops[] =
2743 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2744 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2745 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2746 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2747 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2748 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2749 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2750 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2751 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2752 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2753 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2754 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2755 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2756 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2757 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2760 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2762 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2763 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2766 static rtx
2767 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2769 struct xfloating_op *ops = xfloating_ops;
2770 long n = ARRAY_SIZE (xfloating_ops);
2771 long i;
2773 /* How irritating. Nothing to key off for the main table. */
2774 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2776 ops = vax_cvt_ops;
2777 n = ARRAY_SIZE (vax_cvt_ops);
2780 for (i = 0; i < n; ++i, ++ops)
2781 if (ops->code == code)
2783 rtx func = ops->libcall;
2784 if (!func)
2786 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2787 ? ops->vms_func : ops->osf_func);
2788 ops->libcall = func;
2790 return func;
2793 abort ();
2796 /* Most X_floating operations take the rounding mode as an argument.
2797 Compute that here. */
2799 static int
2800 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2801 enum alpha_fp_rounding_mode round)
2803 int mode;
2805 switch (round)
2807 case ALPHA_FPRM_NORM:
2808 mode = 2;
2809 break;
2810 case ALPHA_FPRM_MINF:
2811 mode = 1;
2812 break;
2813 case ALPHA_FPRM_CHOP:
2814 mode = 0;
2815 break;
2816 case ALPHA_FPRM_DYN:
2817 mode = 4;
2818 break;
2819 default:
2820 abort ();
2822 /* XXX For reference, round to +inf is mode = 3. */
2825 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2826 mode |= 0x10000;
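/* For example (illustrative), a FLOAT_TRUNCATE under
   -mfp-rounding-mode=d and -mfp-trap-mode=n yields 4 | 0x10000
   == 0x10004 here. */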
2828 return mode;
2831 /* Emit an X_floating library function call.
2833 Note that these functions do not follow normal calling conventions:
2834 TFmode arguments are passed in two integer registers (as opposed to
2835 indirect); TFmode return values appear in R16+R17.
2837 FUNC is the function to call.
2838 TARGET is where the output belongs.
2839 OPERANDS are the inputs.
2840 NOPERANDS is the count of inputs.
2841 EQUIV is the expression equivalent for the function.
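/* As a concrete (illustrative) example: for a TFmode add, X arrives in
   $16-$17, Y in $18-$19, and the rounding-mode literal in $20, per the
   register walk in the loop below. */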
2844 static void
2845 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2846 int noperands, rtx equiv)
2848 rtx usage = NULL_RTX, tmp, reg;
2849 int regno = 16, i;
2851 start_sequence ();
2853 for (i = 0; i < noperands; ++i)
2855 switch (GET_MODE (operands[i]))
2857 case TFmode:
2858 reg = gen_rtx_REG (TFmode, regno);
2859 regno += 2;
2860 break;
2862 case DFmode:
2863 reg = gen_rtx_REG (DFmode, regno + 32);
2864 regno += 1;
2865 break;
2867 case VOIDmode:
2868 if (GET_CODE (operands[i]) != CONST_INT)
2869 abort ();
2870 /* FALLTHRU */
2871 case DImode:
2872 reg = gen_rtx_REG (DImode, regno);
2873 regno += 1;
2874 break;
2876 default:
2877 abort ();
2880 emit_move_insn (reg, operands[i]);
2881 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2884 switch (GET_MODE (target))
2886 case TFmode:
2887 reg = gen_rtx_REG (TFmode, 16);
2888 break;
2889 case DFmode:
2890 reg = gen_rtx_REG (DFmode, 32);
2891 break;
2892 case DImode:
2893 reg = gen_rtx_REG (DImode, 0);
2894 break;
2895 default:
2896 abort ();
2899 tmp = gen_rtx_MEM (QImode, func);
2900 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2901 const0_rtx, const0_rtx));
2902 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2903 CONST_OR_PURE_CALL_P (tmp) = 1;
2905 tmp = get_insns ();
2906 end_sequence ();
2908 emit_libcall_block (tmp, target, reg, equiv);
2911 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
2913 void
2914 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
2916 rtx func;
2917 int mode;
2918 rtx out_operands[3];
2920 func = alpha_lookup_xfloating_lib_func (code);
2921 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
2923 out_operands[0] = operands[1];
2924 out_operands[1] = operands[2];
2925 out_operands[2] = GEN_INT (mode);
2926 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
2927 gen_rtx_fmt_ee (code, TFmode, operands[1],
2928 operands[2]));
2931 /* Emit an X_floating library function call for a comparison. */
2933 static rtx
2934 alpha_emit_xfloating_compare (enum rtx_code code, rtx op0, rtx op1)
2936 rtx func;
2937 rtx out, operands[2];
2939 func = alpha_lookup_xfloating_lib_func (code);
2941 operands[0] = op0;
2942 operands[1] = op1;
2943 out = gen_reg_rtx (DImode);
2945 /* ??? Strange mode for equiv because what's actually returned
2946 is -1,0,1, not a proper boolean value. */
2947 alpha_emit_xfloating_libcall (func, out, operands, 2,
2948 gen_rtx_fmt_ee (code, CCmode, op0, op1));
2950 return out;
2953 /* Emit an X_floating library function call for a conversion. */
2955 void
2956 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
2958 int noperands = 1, mode;
2959 rtx out_operands[2];
2960 rtx func;
2961 enum rtx_code code = orig_code;
2963 if (code == UNSIGNED_FIX)
2964 code = FIX;
2966 func = alpha_lookup_xfloating_lib_func (code);
2968 out_operands[0] = operands[1];
2970 switch (code)
2972 case FIX:
2973 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
2974 out_operands[1] = GEN_INT (mode);
2975 noperands = 2;
2976 break;
2977 case FLOAT_TRUNCATE:
2978 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
2979 out_operands[1] = GEN_INT (mode);
2980 noperands = 2;
2981 break;
2982 default:
2983 break;
2986 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
2987 gen_rtx_fmt_e (orig_code,
2988 GET_MODE (operands[0]),
2989 operands[1]));
2992 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
2993 OP[0] into OP[0,1]. Naturally, output operand ordering is
2994 little-endian. */
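/* E.g. a TFmode value in registers $1/$2 splits into the DImode pair
   $1 (low) and $2 (high), and (mem:TF A) into (mem:DI A) and
   (mem:DI A+8). */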
2996 void
2997 alpha_split_tfmode_pair (rtx operands[4])
2999 if (GET_CODE (operands[1]) == REG)
3001 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3002 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3004 else if (GET_CODE (operands[1]) == MEM)
3006 operands[3] = adjust_address (operands[1], DImode, 8);
3007 operands[2] = adjust_address (operands[1], DImode, 0);
3009 else if (operands[1] == CONST0_RTX (TFmode))
3010 operands[2] = operands[3] = const0_rtx;
3011 else
3012 abort ();
3014 if (GET_CODE (operands[0]) == REG)
3016 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3017 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3019 else if (GET_CODE (operands[0]) == MEM)
3021 operands[1] = adjust_address (operands[0], DImode, 8);
3022 operands[0] = adjust_address (operands[0], DImode, 0);
3024 else
3025 abort ();
3028 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3029 op2 is a register containing the sign bit, operation is the
3030 logical operation to be performed. */
3032 void
3033 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3035 rtx high_bit = operands[2];
3036 rtx scratch;
3037 int move;
3039 alpha_split_tfmode_pair (operands);
3041 /* Detect three flavors of operand overlap. */
3042 move = 1;
3043 if (rtx_equal_p (operands[0], operands[2]))
3044 move = 0;
3045 else if (rtx_equal_p (operands[1], operands[2]))
3047 if (rtx_equal_p (operands[0], high_bit))
3048 move = 2;
3049 else
3050 move = -1;
3053 if (move < 0)
3054 emit_move_insn (operands[0], operands[2]);
3056 /* ??? If the destination overlaps both source tf and high_bit, then
3057 assume source tf is dead in its entirety and use the other half
3058 for a scratch register. Otherwise "scratch" is just the proper
3059 destination register. */
3060 scratch = operands[move < 2 ? 1 : 3];
3062 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3064 if (move > 0)
3066 emit_move_insn (operands[0], operands[2]);
3067 if (move > 1)
3068 emit_move_insn (operands[1], scratch);
3072 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3073 unaligned data:
3075 unsigned: signed:
3076 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3077 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3078 lda r3,X(r11) lda r3,X+2(r11)
3079 extwl r1,r3,r1 extql r1,r3,r1
3080 extwh r2,r3,r2 extqh r2,r3,r2
3081 or r1,r2,r1 or r1,r2,r1
3082 sra r1,48,r1
3084 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3085 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3086 lda r3,X(r11) lda r3,X(r11)
3087 extll r1,r3,r1 extll r1,r3,r1
3088 extlh r2,r3,r2 extlh r2,r3,r2
3089 or r1,r2,r1 addl r1,r2,r1
3091 quad: ldq_u r1,X(r11)
3092 ldq_u r2,X+7(r11)
3093 lda r3,X(r11)
3094 extql r1,r3,r1
3095 extqh r2,r3,r2
3096 or r1,r2,r1
3099 void
3100 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3101 HOST_WIDE_INT ofs, int sign)
3103 rtx meml, memh, addr, extl, exth, tmp, mema;
3104 enum machine_mode mode;
3106 meml = gen_reg_rtx (DImode);
3107 memh = gen_reg_rtx (DImode);
3108 addr = gen_reg_rtx (DImode);
3109 extl = gen_reg_rtx (DImode);
3110 exth = gen_reg_rtx (DImode);
3112 mema = XEXP (mem, 0);
3113 if (GET_CODE (mema) == LO_SUM)
3114 mema = force_reg (Pmode, mema);
3116 /* AND addresses cannot be in any alias set, since they may implicitly
3117 alias surrounding code. Ideally we'd have some alias set that
3118 covered all types except those with alignment 8 or higher. */
3120 tmp = change_address (mem, DImode,
3121 gen_rtx_AND (DImode,
3122 plus_constant (mema, ofs),
3123 GEN_INT (-8)));
3124 set_mem_alias_set (tmp, 0);
3125 emit_move_insn (meml, tmp);
3127 tmp = change_address (mem, DImode,
3128 gen_rtx_AND (DImode,
3129 plus_constant (mema, ofs + size - 1),
3130 GEN_INT (-8)));
3131 set_mem_alias_set (tmp, 0);
3132 emit_move_insn (memh, tmp);
3134 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3136 emit_move_insn (addr, plus_constant (mema, -1));
3138 emit_insn (gen_extqh_be (extl, meml, addr));
3139 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3141 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3142 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3143 addr, 1, OPTAB_WIDEN);
3145 else if (sign && size == 2)
3147 emit_move_insn (addr, plus_constant (mema, ofs+2));
3149 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3150 emit_insn (gen_extqh_le (exth, memh, addr));
3152 /* We must use tgt here for the target. The alpha-vms port fails if we use
3153 addr for the target, because addr is marked as a pointer and combine
3154 knows that pointers are always sign-extended 32-bit values. */
3155 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3156 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3157 addr, 1, OPTAB_WIDEN);
3159 else
3161 if (WORDS_BIG_ENDIAN)
3163 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3164 switch ((int) size)
3166 case 2:
3167 emit_insn (gen_extwh_be (extl, meml, addr));
3168 mode = HImode;
3169 break;
3171 case 4:
3172 emit_insn (gen_extlh_be (extl, meml, addr));
3173 mode = SImode;
3174 break;
3176 case 8:
3177 emit_insn (gen_extqh_be (extl, meml, addr));
3178 mode = DImode;
3179 break;
3181 default:
3182 abort ();
3184 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3186 else
3188 emit_move_insn (addr, plus_constant (mema, ofs));
3189 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3190 switch ((int) size)
3192 case 2:
3193 emit_insn (gen_extwh_le (exth, memh, addr));
3194 mode = HImode;
3195 break;
3197 case 4:
3198 emit_insn (gen_extlh_le (exth, memh, addr));
3199 mode = SImode;
3200 break;
3202 case 8:
3203 emit_insn (gen_extqh_le (exth, memh, addr));
3204 mode = DImode;
3205 break;
3207 default:
3208 abort ();
3212 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3213 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3214 sign, OPTAB_WIDEN);
3217 if (addr != tgt)
3218 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3221 /* Similarly, use ins and msk instructions to perform unaligned stores. */
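/* For a quadword store the Architecture Handbook sequence is roughly
   (register numbers illustrative):

	ldq_u r1,X(r11)		# load both quadwords covering X
	ldq_u r2,X+7(r11)
	lda r3,X(r11)
	insql r4,r3,r5		# shift the new data into position
	insqh r4,r3,r6
	mskql r1,r3,r1		# clear the bytes about to be replaced
	mskqh r2,r3,r2
	or r1,r5,r1
	or r2,r6,r2
	stq_u r2,X+7(r11)	# store high first for the aligned case
	stq_u r1,X(r11) */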
3223 void
3224 alpha_expand_unaligned_store (rtx dst, rtx src,
3225 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3227 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3229 dstl = gen_reg_rtx (DImode);
3230 dsth = gen_reg_rtx (DImode);
3231 insl = gen_reg_rtx (DImode);
3232 insh = gen_reg_rtx (DImode);
3234 dsta = XEXP (dst, 0);
3235 if (GET_CODE (dsta) == LO_SUM)
3236 dsta = force_reg (Pmode, dsta);
3238 /* AND addresses cannot be in any alias set, since they may implicitly
3239 alias surrounding code. Ideally we'd have some alias set that
3240 covered all types except those with alignment 8 or higher. */
3242 meml = change_address (dst, DImode,
3243 gen_rtx_AND (DImode,
3244 plus_constant (dsta, ofs),
3245 GEN_INT (-8)));
3246 set_mem_alias_set (meml, 0);
3248 memh = change_address (dst, DImode,
3249 gen_rtx_AND (DImode,
3250 plus_constant (dsta, ofs + size - 1),
3251 GEN_INT (-8)));
3252 set_mem_alias_set (memh, 0);
3254 emit_move_insn (dsth, memh);
3255 emit_move_insn (dstl, meml);
3256 if (WORDS_BIG_ENDIAN)
3258 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3260 if (src != const0_rtx)
3262 switch ((int) size)
3264 case 2:
3265 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3266 break;
3267 case 4:
3268 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3269 break;
3270 case 8:
3271 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3272 break;
3274 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3275 GEN_INT (size*8), addr));
3278 switch ((int) size)
3280 case 2:
3281 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3282 break;
3283 case 4:
3285 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3286 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3287 break;
3289 case 8:
3290 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3291 break;
3294 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3296 else
3298 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3300 if (src != const0_rtx)
3302 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3303 GEN_INT (size*8), addr));
3305 switch ((int) size)
3307 case 2:
3308 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3309 break;
3310 case 4:
3311 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3312 break;
3313 case 8:
3314 emit_insn (gen_insql_le (insl, src, addr));
3315 break;
3319 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3321 switch ((int) size)
3323 case 2:
3324 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3325 break;
3326 case 4:
3328 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3329 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3330 break;
3332 case 8:
3333 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3334 break;
3338 if (src != const0_rtx)
3340 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3341 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3344 if (WORDS_BIG_ENDIAN)
3346 emit_move_insn (meml, dstl);
3347 emit_move_insn (memh, dsth);
3349 else
3351 /* Must store high before low for degenerate case of aligned. */
3352 emit_move_insn (memh, dsth);
3353 emit_move_insn (meml, dstl);
3357 /* The block move code tries to maximize speed by separating loads and
3358 stores at the expense of register pressure: we load all of the data
3359 before we store it back out. There are two secondary effects worth
3360 mentioning, that this speeds copying to/from aligned and unaligned
3361 buffers, and that it makes the code significantly easier to write. */
3363 #define MAX_MOVE_WORDS 8
3365 /* Load an integral number of consecutive unaligned quadwords. */
3367 static void
3368 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3369 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3371 rtx const im8 = GEN_INT (-8);
3372 rtx const i64 = GEN_INT (64);
3373 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3374 rtx sreg, areg, tmp, smema;
3375 HOST_WIDE_INT i;
3377 smema = XEXP (smem, 0);
3378 if (GET_CODE (smema) == LO_SUM)
3379 smema = force_reg (Pmode, smema);
3381 /* Generate all the tmp registers we need. */
3382 for (i = 0; i < words; ++i)
3384 data_regs[i] = out_regs[i];
3385 ext_tmps[i] = gen_reg_rtx (DImode);
3387 data_regs[words] = gen_reg_rtx (DImode);
3389 if (ofs != 0)
3390 smem = adjust_address (smem, GET_MODE (smem), ofs);
3392 /* Load up all of the source data. */
3393 for (i = 0; i < words; ++i)
3395 tmp = change_address (smem, DImode,
3396 gen_rtx_AND (DImode,
3397 plus_constant (smema, 8*i),
3398 im8));
3399 set_mem_alias_set (tmp, 0);
3400 emit_move_insn (data_regs[i], tmp);
3403 tmp = change_address (smem, DImode,
3404 gen_rtx_AND (DImode,
3405 plus_constant (smema, 8*words - 1),
3406 im8));
3407 set_mem_alias_set (tmp, 0);
3408 emit_move_insn (data_regs[words], tmp);
3410 /* Extract the half-word fragments. Unfortunately DEC decided to make
3411 extxh with offset zero a noop instead of zeroing the register, so
3412 we must take care of that edge condition ourselves with cmov. */
3414 sreg = copy_addr_to_reg (smema);
3415 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3416 1, OPTAB_WIDEN);
3417 if (WORDS_BIG_ENDIAN)
3418 emit_move_insn (sreg, plus_constant (sreg, 7));
3419 for (i = 0; i < words; ++i)
3421 if (WORDS_BIG_ENDIAN)
3423 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3424 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3426 else
3428 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3429 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3431 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3432 gen_rtx_IF_THEN_ELSE (DImode,
3433 gen_rtx_EQ (DImode, areg,
3434 const0_rtx),
3435 const0_rtx, ext_tmps[i])));
3438 /* Merge the half-words into whole words. */
3439 for (i = 0; i < words; ++i)
3441 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3442 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3446 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3447 may be NULL to store zeros. */
3449 static void
3450 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3451 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3453 rtx const im8 = GEN_INT (-8);
3454 rtx const i64 = GEN_INT (64);
3455 rtx ins_tmps[MAX_MOVE_WORDS];
3456 rtx st_tmp_1, st_tmp_2, dreg;
3457 rtx st_addr_1, st_addr_2, dmema;
3458 HOST_WIDE_INT i;
3460 dmema = XEXP (dmem, 0);
3461 if (GET_CODE (dmema) == LO_SUM)
3462 dmema = force_reg (Pmode, dmema);
3464 /* Generate all the tmp registers we need. */
3465 if (data_regs != NULL)
3466 for (i = 0; i < words; ++i)
3467 ins_tmps[i] = gen_reg_rtx (DImode);
3468 st_tmp_1 = gen_reg_rtx (DImode);
3469 st_tmp_2 = gen_reg_rtx (DImode);
3471 if (ofs != 0)
3472 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3474 st_addr_2 = change_address (dmem, DImode,
3475 gen_rtx_AND (DImode,
3476 plus_constant (dmema, words*8 - 1),
3477 im8));
3478 set_mem_alias_set (st_addr_2, 0);
3480 st_addr_1 = change_address (dmem, DImode,
3481 gen_rtx_AND (DImode, dmema, im8));
3482 set_mem_alias_set (st_addr_1, 0);
3484 /* Load up the destination end bits. */
3485 emit_move_insn (st_tmp_2, st_addr_2);
3486 emit_move_insn (st_tmp_1, st_addr_1);
3488 /* Shift the input data into place. */
3489 dreg = copy_addr_to_reg (dmema);
3490 if (WORDS_BIG_ENDIAN)
3491 emit_move_insn (dreg, plus_constant (dreg, 7));
3492 if (data_regs != NULL)
3494 for (i = words-1; i >= 0; --i)
3496 if (WORDS_BIG_ENDIAN)
3498 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3499 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3501 else
3503 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3504 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3507 for (i = words-1; i > 0; --i)
3509 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3510 ins_tmps[i-1], ins_tmps[i-1], 1,
3511 OPTAB_WIDEN);
3515 /* Split and merge the ends with the destination data. */
3516 if (WORDS_BIG_ENDIAN)
3518 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3519 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3521 else
3523 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3524 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3527 if (data_regs != NULL)
3529 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3530 st_tmp_2, 1, OPTAB_WIDEN);
3531 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3532 st_tmp_1, 1, OPTAB_WIDEN);
3535 /* Store it all. */
3536 if (WORDS_BIG_ENDIAN)
3537 emit_move_insn (st_addr_1, st_tmp_1);
3538 else
3539 emit_move_insn (st_addr_2, st_tmp_2);
3540 for (i = words-1; i > 0; --i)
3542 rtx tmp = change_address (dmem, DImode,
3543 gen_rtx_AND (DImode,
3544 plus_constant (dmema,
3545 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3546 im8));
3547 set_mem_alias_set (tmp, 0);
3548 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3550 if (WORDS_BIG_ENDIAN)
3551 emit_move_insn (st_addr_2, st_tmp_2);
3552 else
3553 emit_move_insn (st_addr_1, st_tmp_1);
3557 /* Expand string/block move operations.
3559 operands[0] is the pointer to the destination.
3560 operands[1] is the pointer to the source.
3561 operands[2] is the number of bytes to move.
3562 operands[3] is the alignment. */
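/* Returns 1 when the block was expanded inline, 0 when the caller
   should fall back on a library call (see the size check below). */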
3565 alpha_expand_block_move (rtx operands[])
3567 rtx bytes_rtx = operands[2];
3568 rtx align_rtx = operands[3];
3569 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3570 HOST_WIDE_INT bytes = orig_bytes;
3571 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3572 HOST_WIDE_INT dst_align = src_align;
3573 rtx orig_src = operands[1];
3574 rtx orig_dst = operands[0];
3575 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3576 rtx tmp;
3577 unsigned int i, words, ofs, nregs = 0;
3579 if (orig_bytes <= 0)
3580 return 1;
3581 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3582 return 0;
3584 /* Look for additional alignment information from recorded register info. */
3586 tmp = XEXP (orig_src, 0);
3587 if (GET_CODE (tmp) == REG)
3588 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3589 else if (GET_CODE (tmp) == PLUS
3590 && GET_CODE (XEXP (tmp, 0)) == REG
3591 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3593 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3594 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3596 if (a > src_align)
3598 if (a >= 64 && c % 8 == 0)
3599 src_align = 64;
3600 else if (a >= 32 && c % 4 == 0)
3601 src_align = 32;
3602 else if (a >= 16 && c % 2 == 0)
3603 src_align = 16;
3607 tmp = XEXP (orig_dst, 0);
3608 if (GET_CODE (tmp) == REG)
3609 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3610 else if (GET_CODE (tmp) == PLUS
3611 && GET_CODE (XEXP (tmp, 0)) == REG
3612 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3614 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3615 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3617 if (a > dst_align)
3619 if (a >= 64 && c % 8 == 0)
3620 dst_align = 64;
3621 else if (a >= 32 && c % 4 == 0)
3622 dst_align = 32;
3623 else if (a >= 16 && c % 2 == 0)
3624 dst_align = 16;
3628 ofs = 0;
3629 if (src_align >= 64 && bytes >= 8)
3631 words = bytes / 8;
3633 for (i = 0; i < words; ++i)
3634 data_regs[nregs + i] = gen_reg_rtx (DImode);
3636 for (i = 0; i < words; ++i)
3637 emit_move_insn (data_regs[nregs + i],
3638 adjust_address (orig_src, DImode, ofs + i * 8));
3640 nregs += words;
3641 bytes -= words * 8;
3642 ofs += words * 8;
3645 if (src_align >= 32 && bytes >= 4)
3647 words = bytes / 4;
3649 for (i = 0; i < words; ++i)
3650 data_regs[nregs + i] = gen_reg_rtx (SImode);
3652 for (i = 0; i < words; ++i)
3653 emit_move_insn (data_regs[nregs + i],
3654 adjust_address (orig_src, SImode, ofs + i * 4));
3656 nregs += words;
3657 bytes -= words * 4;
3658 ofs += words * 4;
3661 if (bytes >= 8)
3663 words = bytes / 8;
3665 for (i = 0; i < words+1; ++i)
3666 data_regs[nregs + i] = gen_reg_rtx (DImode);
3668 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3669 words, ofs);
3671 nregs += words;
3672 bytes -= words * 8;
3673 ofs += words * 8;
3676 if (! TARGET_BWX && bytes >= 4)
3678 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3679 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3680 bytes -= 4;
3681 ofs += 4;
3684 if (bytes >= 2)
3686 if (src_align >= 16)
3688 do {
3689 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3690 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3691 bytes -= 2;
3692 ofs += 2;
3693 } while (bytes >= 2);
3695 else if (! TARGET_BWX)
3697 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3698 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3699 bytes -= 2;
3700 ofs += 2;
3704 while (bytes > 0)
3706 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3707 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3708 bytes -= 1;
3709 ofs += 1;
3712 if (nregs > ARRAY_SIZE (data_regs))
3713 abort ();
3715 /* Now save it back out again. */
3717 i = 0, ofs = 0;
3719 /* Write out the data in whatever chunks reading the source allowed. */
3720 if (dst_align >= 64)
3722 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3724 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3725 data_regs[i]);
3726 ofs += 8;
3727 i++;
3731 if (dst_align >= 32)
3733 /* If the source has remaining DImode regs, write them out in
3734 two pieces. */
3735 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3737 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3738 NULL_RTX, 1, OPTAB_WIDEN);
3740 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3741 gen_lowpart (SImode, data_regs[i]));
3742 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3743 gen_lowpart (SImode, tmp));
3744 ofs += 8;
3745 i++;
3748 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3750 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3751 data_regs[i]);
3752 ofs += 4;
3753 i++;
3757 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3759 /* Write out a remaining block of words using unaligned methods. */
3761 for (words = 1; i + words < nregs; words++)
3762 if (GET_MODE (data_regs[i + words]) != DImode)
3763 break;
3765 if (words == 1)
3766 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3767 else
3768 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3769 words, ofs);
3771 i += words;
3772 ofs += words * 8;
3775 /* Due to the above, this won't be aligned. */
3776 /* ??? If we have more than one of these, consider constructing full
3777 words in registers and using alpha_expand_unaligned_store_words. */
3778 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3780 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3781 ofs += 4;
3782 i++;
3785 if (dst_align >= 16)
3786 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3788 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3789 i++;
3790 ofs += 2;
3792 else
3793 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3795 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3796 i++;
3797 ofs += 2;
3800 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
3802 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3803 i++;
3804 ofs += 1;
3807 if (i != nregs)
3808 abort ();
3810 return 1;
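/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[2] is the alignment.

   Returns 1/0 as for alpha_expand_block_move above. */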
3814 alpha_expand_block_clear (rtx operands[])
3816 rtx bytes_rtx = operands[1];
3817 rtx align_rtx = operands[2];
3818 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3819 HOST_WIDE_INT bytes = orig_bytes;
3820 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3821 HOST_WIDE_INT alignofs = 0;
3822 rtx orig_dst = operands[0];
3823 rtx tmp;
3824 int i, words, ofs = 0;
3826 if (orig_bytes <= 0)
3827 return 1;
3828 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3829 return 0;
3831 /* Look for stricter alignment. */
3832 tmp = XEXP (orig_dst, 0);
3833 if (GET_CODE (tmp) == REG)
3834 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3835 else if (GET_CODE (tmp) == PLUS
3836 && GET_CODE (XEXP (tmp, 0)) == REG
3837 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3839 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3840 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3842 if (a > align)
3844 if (a >= 64)
3845 align = a, alignofs = 8 - c % 8;
3846 else if (a >= 32)
3847 align = a, alignofs = 4 - c % 4;
3848 else if (a >= 16)
3849 align = a, alignofs = 2 - c % 2;
3853 /* Handle an unaligned prefix first. */
3855 if (alignofs > 0)
3857 #if HOST_BITS_PER_WIDE_INT >= 64
3858 /* Given that alignofs is bounded by align, the only time BWX could
3859 generate three stores is for a 7 byte fill. Prefer two individual
3860 stores over a load/mask/store sequence. */
3861 if ((!TARGET_BWX || alignofs == 7)
3862 && align >= 32
3863 && !(alignofs == 4 && bytes >= 4))
3865 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3866 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3867 rtx mem, tmp;
3868 HOST_WIDE_INT mask;
3870 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3871 set_mem_alias_set (mem, 0);
3873 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3874 if (bytes < alignofs)
3876 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3877 ofs += bytes;
3878 bytes = 0;
3880 else
3882 bytes -= alignofs;
3883 ofs += alignofs;
3885 alignofs = 0;
3887 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3888 NULL_RTX, 1, OPTAB_WIDEN);
3890 emit_move_insn (mem, tmp);
3892 #endif
3894 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3896 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3897 bytes -= 1;
3898 ofs += 1;
3899 alignofs -= 1;
3901 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3903 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3904 bytes -= 2;
3905 ofs += 2;
3906 alignofs -= 2;
3908 if (alignofs == 4 && bytes >= 4)
3910 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3911 bytes -= 4;
3912 ofs += 4;
3913 alignofs = 0;
3916 /* If we've not used the extra lead alignment information by now,
3917 we won't be able to. Downgrade align to match what's left over. */
3918 if (alignofs > 0)
3920 alignofs = alignofs & -alignofs; /* Isolate the lowest set bit. */
3921 align = MIN (align, alignofs * BITS_PER_UNIT);
3925 /* Handle a block of contiguous long-words. */
3927 if (align >= 64 && bytes >= 8)
3929 words = bytes / 8;
3931 for (i = 0; i < words; ++i)
3932 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
3933 const0_rtx);
3935 bytes -= words * 8;
3936 ofs += words * 8;
3939 /* If the block is large and appropriately aligned, emit a single
3940 store followed by a sequence of stq_u insns. */
3942 if (align >= 32 && bytes > 16)
3944 rtx orig_dsta;
3946 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3947 bytes -= 4;
3948 ofs += 4;
3950 orig_dsta = XEXP (orig_dst, 0);
3951 if (GET_CODE (orig_dsta) == LO_SUM)
3952 orig_dsta = force_reg (Pmode, orig_dsta);
3954 words = bytes / 8;
3955 for (i = 0; i < words; ++i)
3957 rtx mem
3958 = change_address (orig_dst, DImode,
3959 gen_rtx_AND (DImode,
3960 plus_constant (orig_dsta, ofs + i*8),
3961 GEN_INT (-8)));
3962 set_mem_alias_set (mem, 0);
3963 emit_move_insn (mem, const0_rtx);
3966 /* Depending on the alignment, the first stq_u may have overlapped
3967 with the initial stl, which means that the last stq_u didn't
3968 write as much as it would appear. Leave those questionable bytes
3969 unaccounted for. */
3970 bytes -= words * 8 - 4;
3971 ofs += words * 8 - 4;
3974 /* Handle a smaller block of aligned words. */
3976 if ((align >= 64 && bytes == 4)
3977 || (align == 32 && bytes >= 4))
3979 words = bytes / 4;
3981 for (i = 0; i < words; ++i)
3982 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
3983 const0_rtx);
3985 bytes -= words * 4;
3986 ofs += words * 4;
3989 /* An unaligned block uses stq_u stores for as many as possible. */
3991 if (bytes >= 8)
3993 words = bytes / 8;
3995 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
3997 bytes -= words * 8;
3998 ofs += words * 8;
4001 /* Next clean up any trailing pieces. */
4003 #if HOST_BITS_PER_WIDE_INT >= 64
4004 /* Count the number of bits in BYTES for which aligned stores could
4005 be emitted. */
4006 words = 0;
4007 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
4008 if (bytes & i)
4009 words += 1;
4011 /* If we have appropriate alignment (and it wouldn't take too many
4012 instructions otherwise), mask out the bytes we need. */
4013 if (TARGET_BWX ? words > 2 : bytes > 0)
4015 if (align >= 64)
4017 rtx mem, tmp;
4018 HOST_WIDE_INT mask;
4020 mem = adjust_address (orig_dst, DImode, ofs);
4021 set_mem_alias_set (mem, 0);
4023 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4025 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4026 NULL_RTX, 1, OPTAB_WIDEN);
4028 emit_move_insn (mem, tmp);
4029 return 1;
4031 else if (align >= 32 && bytes < 4)
4033 rtx mem, tmp;
4034 HOST_WIDE_INT mask;
4036 mem = adjust_address (orig_dst, SImode, ofs);
4037 set_mem_alias_set (mem, 0);
4039 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4041 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4042 NULL_RTX, 1, OPTAB_WIDEN);
4044 emit_move_insn (mem, tmp);
4045 return 1;
4048 #endif
4050 if (!TARGET_BWX && bytes >= 4)
4052 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4053 bytes -= 4;
4054 ofs += 4;
4057 if (bytes >= 2)
4059 if (align >= 16)
4061 do {
4062 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4063 const0_rtx);
4064 bytes -= 2;
4065 ofs += 2;
4066 } while (bytes >= 2);
4068 else if (! TARGET_BWX)
4070 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4071 bytes -= 2;
4072 ofs += 2;
4076 while (bytes > 0)
4078 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4079 bytes -= 1;
4080 ofs += 1;
4083 return 1;
4086 /* Returns a mask so that zap(x, value) == x & mask. */
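/* E.g. VALUE == 0x0f marks bytes 0-3 for zapping, so the mask is
   0xffffffff00000000 and zap(x, 0x0f) == x & 0xffffffff00000000. */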
4089 alpha_expand_zap_mask (HOST_WIDE_INT value)
4091 rtx result;
4092 int i;
4094 if (HOST_BITS_PER_WIDE_INT >= 64)
4096 HOST_WIDE_INT mask = 0;
4098 for (i = 7; i >= 0; --i)
4100 mask <<= 8;
4101 if (!((value >> i) & 1))
4102 mask |= 0xff;
4105 result = gen_int_mode (mask, DImode);
4107 else if (HOST_BITS_PER_WIDE_INT == 32)
4109 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4111 for (i = 7; i >= 4; --i)
4113 mask_hi <<= 8;
4114 if (!((value >> i) & 1))
4115 mask_hi |= 0xff;
4118 for (i = 3; i >= 0; --i)
4120 mask_lo <<= 8;
4121 if (!((value >> i) & 1))
4122 mask_lo |= 0xff;
4125 result = immed_double_const (mask_lo, mask_hi, DImode);
4127 else
4128 abort ();
4130 return result;
4133 void
4134 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4135 enum machine_mode mode,
4136 rtx op0, rtx op1, rtx op2)
4138 op0 = gen_lowpart (mode, op0);
4140 if (op1 == const0_rtx)
4141 op1 = CONST0_RTX (mode);
4142 else
4143 op1 = gen_lowpart (mode, op1);
4145 if (op2 == const0_rtx)
4146 op2 = CONST0_RTX (mode);
4147 else
4148 op2 = gen_lowpart (mode, op2);
4150 emit_insn ((*gen) (op0, op1, op2));
4153 /* Adjust the cost of a scheduling dependency. Return the new cost of
4154 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4156 static int
4157 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4159 enum attr_type insn_type, dep_insn_type;
4161 /* If the dependence is an anti-dependence, there is no cost. For an
4162 output dependence, there is sometimes a cost, but it doesn't seem
4163 worth handling those few cases. */
4164 if (REG_NOTE_KIND (link) != 0)
4165 return cost;
4167 /* If we can't recognize the insns, we can't really do anything. */
4168 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4169 return cost;
4171 insn_type = get_attr_type (insn);
4172 dep_insn_type = get_attr_type (dep_insn);
4174 /* Bring in the user-defined memory latency. */
4175 if (dep_insn_type == TYPE_ILD
4176 || dep_insn_type == TYPE_FLD
4177 || dep_insn_type == TYPE_LDSYM)
4178 cost += alpha_memory_latency-1;
4180 /* Everything else handled in DFA bypasses now. */
4182 return cost;
4185 /* The number of instructions that can be issued per cycle. */
4187 static int
4188 alpha_issue_rate (void)
4190 return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
4193 /* How many alternative schedules to try. This should be as wide as the
4194 scheduling freedom in the DFA, but no wider. Making this value too
4195 large results in extra work for the scheduler.
4197 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4198 alternative schedules. For EV5, we can choose between E0/E1 and
4199 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4201 static int
4202 alpha_multipass_dfa_lookahead (void)
4204 return (alpha_cpu == PROCESSOR_EV6 ? 4 : 2);
4207 /* Machine-specific function data. */
4209 struct machine_function GTY(())
4211 /* For unicosmk. */
4212 /* List of call information words for calls from this function. */
4213 struct rtx_def *first_ciw;
4214 struct rtx_def *last_ciw;
4215 int ciw_count;
4217 /* List of deferred case vectors. */
4218 struct rtx_def *addr_list;
4220 /* For OSF. */
4221 const char *some_ld_name;
4223 /* For TARGET_LD_BUGGY_LDGP. */
4224 struct rtx_def *gp_save_rtx;
4227 /* How to allocate a 'struct machine_function'. */
4229 static struct machine_function *
4230 alpha_init_machine_status (void)
4232 return ((struct machine_function *)
4233 ggc_alloc_cleared (sizeof (struct machine_function)));
4236 /* Functions to save and restore alpha_return_addr_rtx. */
4238 /* Start the ball rolling with RETURN_ADDR_RTX. */
4241 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4243 if (count != 0)
4244 return const0_rtx;
4246 return get_hard_reg_initial_val (Pmode, REG_RA);
4249 /* Return or create a memory slot containing the gp value for the current
4250 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4253 alpha_gp_save_rtx (void)
4255 rtx seq, m = cfun->machine->gp_save_rtx;
4257 if (m == NULL)
4259 start_sequence ();
4261 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4262 m = validize_mem (m);
4263 emit_move_insn (m, pic_offset_table_rtx);
4265 seq = get_insns ();
4266 end_sequence ();
4267 emit_insn_after (seq, entry_of_function ());
4269 cfun->machine->gp_save_rtx = m;
4272 return m;
4275 static int
4276 alpha_ra_ever_killed (void)
4278 rtx top;
4280 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4281 return regs_ever_live[REG_RA];
4283 push_topmost_sequence ();
4284 top = get_insns ();
4285 pop_topmost_sequence ();
4287 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4291 /* Return the trap mode suffix applicable to the current
4292 instruction, or NULL. */
4294 static const char *
4295 get_trap_mode_suffix (void)
4297 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4299 switch (s)
4301 case TRAP_SUFFIX_NONE:
4302 return NULL;
4304 case TRAP_SUFFIX_SU:
4305 if (alpha_fptm >= ALPHA_FPTM_SU)
4306 return "su";
4307 return NULL;
4309 case TRAP_SUFFIX_SUI:
4310 if (alpha_fptm >= ALPHA_FPTM_SUI)
4311 return "sui";
4312 return NULL;
4314 case TRAP_SUFFIX_V_SV:
4315 switch (alpha_fptm)
4317 case ALPHA_FPTM_N:
4318 return NULL;
4319 case ALPHA_FPTM_U:
4320 return "v";
4321 case ALPHA_FPTM_SU:
4322 case ALPHA_FPTM_SUI:
4323 return "sv";
4325 break;
4327 case TRAP_SUFFIX_V_SV_SVI:
4328 switch (alpha_fptm)
4330 case ALPHA_FPTM_N:
4331 return NULL;
4332 case ALPHA_FPTM_U:
4333 return "v";
4334 case ALPHA_FPTM_SU:
4335 return "sv";
4336 case ALPHA_FPTM_SUI:
4337 return "svi";
4339 break;
4341 case TRAP_SUFFIX_U_SU_SUI:
4342 switch (alpha_fptm)
4344 case ALPHA_FPTM_N:
4345 return NULL;
4346 case ALPHA_FPTM_U:
4347 return "u";
4348 case ALPHA_FPTM_SU:
4349 return "su";
4350 case ALPHA_FPTM_SUI:
4351 return "sui";
4353 break;
4355 abort ();
4358 /* Return the rounding mode suffix applicable to the current
4359 instruction, or NULL. */
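/* For instance, under -mfp-rounding-mode=d an insn with
   ROUND_SUFFIX_NORMAL such as "addt" is printed as "addt/d", while
   the default ALPHA_FPRM_NORM produces no suffix at all.  */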
4361 static const char *
4362 get_round_mode_suffix (void)
4364 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4366 switch (s)
4368 case ROUND_SUFFIX_NONE:
4369 return NULL;
4370 case ROUND_SUFFIX_NORMAL:
4371 switch (alpha_fprm)
4373 case ALPHA_FPRM_NORM:
4374 return NULL;
4375 case ALPHA_FPRM_MINF:
4376 return "m";
4377 case ALPHA_FPRM_CHOP:
4378 return "c";
4379 case ALPHA_FPRM_DYN:
4380 return "d";
4382 break;
4384 case ROUND_SUFFIX_C:
4385 return "c";
4387 abort ();
4390 /* Locate some local-dynamic symbol still in use by this function
4391 so that we can print its name in some movdi_er_tlsldm pattern. */
4393 static int
4394 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4396 rtx x = *px;
4398 if (GET_CODE (x) == SYMBOL_REF
4399 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4401 cfun->machine->some_ld_name = XSTR (x, 0);
4402 return 1;
4405 return 0;
4408 static const char *
4409 get_some_local_dynamic_name (void)
4411 rtx insn;
4413 if (cfun->machine->some_ld_name)
4414 return cfun->machine->some_ld_name;
4416 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4417 if (INSN_P (insn)
4418 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4419 return cfun->machine->some_ld_name;
4421 abort ();
4424 /* Print an operand. Recognize special options, documented below. */
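/* An illustrative (hypothetical) template: in an output string such as

     "cmpbge %r1,%2,%0"

   %r1 prints "$31" when operand 1 is the constant zero, %0 and %2
   print their operands normally, and a code like %/ appends the
   trap/rounding suffixes computed above to an FP mnemonic.  */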
4426 void
4427 print_operand (FILE *file, rtx x, int code)
4429 int i;
4431 switch (code)
4433 case '~':
4434 /* Print the assembler name of the current function. */
4435 assemble_name (file, alpha_fnname);
4436 break;
4438 case '&':
4439 assemble_name (file, get_some_local_dynamic_name ());
4440 break;
4442 case '/':
4444 const char *trap = get_trap_mode_suffix ();
4445 const char *round = get_round_mode_suffix ();
4447 if (trap || round)
4448 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4449 (trap ? trap : ""), (round ? round : ""));
4450 break;
4453 case ',':
4454 /* Generates single precision instruction suffix. */
4455 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4456 break;
4458 case '-':
4459 /* Generates double precision instruction suffix. */
4460 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4461 break;
4463 case '+':
4464 /* Generates a nop after a noreturn call at the very end of the
4465 function. */
4466 if (next_real_insn (current_output_insn) == 0)
4467 fprintf (file, "\n\tnop");
4468 break;
4470 case '#':
4471 if (alpha_this_literal_sequence_number == 0)
4472 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4473 fprintf (file, "%d", alpha_this_literal_sequence_number);
4474 break;
4476 case '*':
4477 if (alpha_this_gpdisp_sequence_number == 0)
4478 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
4479 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
4480 break;
4482 case 'H':
4483 if (GET_CODE (x) == HIGH)
4484 output_addr_const (file, XEXP (x, 0));
4485 else
4486 output_operand_lossage ("invalid %%H value");
4487 break;
4489 case 'J':
4491 const char *lituse;
4493 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
4495 x = XVECEXP (x, 0, 0);
4496 lituse = "lituse_tlsgd";
4498 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
4500 x = XVECEXP (x, 0, 0);
4501 lituse = "lituse_tlsldm";
4503 else if (GET_CODE (x) == CONST_INT)
4504 lituse = "lituse_jsr";
4505 else
4507 output_operand_lossage ("invalid %%J value");
4508 break;
4511 if (x != const0_rtx)
4512 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4514 break;
4516 case 'r':
4517 /* If this operand is the constant zero, write it as "$31". */
4518 if (GET_CODE (x) == REG)
4519 fprintf (file, "%s", reg_names[REGNO (x)]);
4520 else if (x == CONST0_RTX (GET_MODE (x)))
4521 fprintf (file, "$31");
4522 else
4523 output_operand_lossage ("invalid %%r value");
4524 break;
4526 case 'R':
4527 /* Similar, but for floating-point. */
4528 if (GET_CODE (x) == REG)
4529 fprintf (file, "%s", reg_names[REGNO (x)]);
4530 else if (x == CONST0_RTX (GET_MODE (x)))
4531 fprintf (file, "$f31");
4532 else
4533 output_operand_lossage ("invalid %%R value");
4534 break;
4536 case 'N':
4537 /* Write the 1's complement of a constant. */
4538 if (GET_CODE (x) != CONST_INT)
4539 output_operand_lossage ("invalid %%N value");
4541 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
4542 break;
4544 case 'P':
4545 /* Write 1 << C, for a constant C. */
4546 if (GET_CODE (x) != CONST_INT)
4547 output_operand_lossage ("invalid %%P value");
4549 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
4550 break;
4552 case 'h':
4553 /* Write the high-order 16 bits of a constant, sign-extended. */
4554 if (GET_CODE (x) != CONST_INT)
4555 output_operand_lossage ("invalid %%h value");
4557 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
4558 break;
4560 case 'L':
4561 /* Write the low-order 16 bits of a constant, sign-extended. */
4562 if (GET_CODE (x) != CONST_INT)
4563 output_operand_lossage ("invalid %%L value");
4565 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4566 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
4567 break;
4569 case 'm':
4570 /* Write mask for ZAP insn. */
4571 if (GET_CODE (x) == CONST_DOUBLE)
4573 HOST_WIDE_INT mask = 0;
4574 HOST_WIDE_INT value;
4576 value = CONST_DOUBLE_LOW (x);
4577 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
4578 i++, value >>= 8)
4579 if (value & 0xff)
4580 mask |= (1 << i);
4582 value = CONST_DOUBLE_HIGH (x);
4583 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
4584 i++, value >>= 8)
4585 if (value & 0xff)
4586 mask |= (1 << (i + sizeof (int)));
4588 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
4591 else if (GET_CODE (x) == CONST_INT)
4593 HOST_WIDE_INT mask = 0, value = INTVAL (x);
4595 for (i = 0; i < 8; i++, value >>= 8)
4596 if (value & 0xff)
4597 mask |= (1 << i);
4599 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
4601 else
4602 output_operand_lossage ("invalid %%m value");
4603 break;
4605 case 'M':
4606 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
4607 if (GET_CODE (x) != CONST_INT
4608 || (INTVAL (x) != 8 && INTVAL (x) != 16
4609 && INTVAL (x) != 32 && INTVAL (x) != 64))
4610 output_operand_lossage ("invalid %%M value");
4612 fprintf (file, "%s",
4613 (INTVAL (x) == 8 ? "b"
4614 : INTVAL (x) == 16 ? "w"
4615 : INTVAL (x) == 32 ? "l"
4616 : "q"));
4617 break;
4619 case 'U':
4620 /* Similar, except do it from the mask. */
4621 if (GET_CODE (x) == CONST_INT)
4623 HOST_WIDE_INT value = INTVAL (x);
4625 if (value == 0xff)
4627 fputc ('b', file);
4628 break;
4630 if (value == 0xffff)
4632 fputc ('w', file);
4633 break;
4635 if (value == 0xffffffff)
4637 fputc ('l', file);
4638 break;
4640 if (value == -1)
4642 fputc ('q', file);
4643 break;
4646 else if (HOST_BITS_PER_WIDE_INT == 32
4647 && GET_CODE (x) == CONST_DOUBLE
4648 && CONST_DOUBLE_LOW (x) == 0xffffffff
4649 && CONST_DOUBLE_HIGH (x) == 0)
4651 fputc ('l', file);
4652 break;
4654 output_operand_lossage ("invalid %%U value");
4655 break;
4657 case 's':
4658 /* Write the constant value divided by 8 for little-endian mode or
4659 (56 - value) / 8 for big-endian mode. */
4661 if (GET_CODE (x) != CONST_INT
4662 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
4663 ? 56
4664 : 64)
4665 || (INTVAL (x) & 7) != 0)
4666 output_operand_lossage ("invalid %%s value");
4668 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4669 WORDS_BIG_ENDIAN
4670 ? (56 - INTVAL (x)) / 8
4671 : INTVAL (x) / 8);
4672 break;
4674 case 'S':
4675 /* Same, except compute (64 - c) / 8 */
4677 if (GET_CODE (x) != CONST_INT
4678 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
4679 || (INTVAL (x) & 7) != 0)
4680 output_operand_lossage ("invalid %%S value");
4682 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
4683 break;
4685 case 't':
4687 /* On Unicos/Mk systems: use a DEX expression if the symbol
4688 clashes with a register name. */
4689 int dex = unicosmk_need_dex (x);
4690 if (dex)
4691 fprintf (file, "DEX(%d)", dex);
4692 else
4693 output_addr_const (file, x);
4695 break;
4697 case 'C': case 'D': case 'c': case 'd':
4698 /* Write out comparison name. */
4700 enum rtx_code c = GET_CODE (x);
4702 if (!COMPARISON_P (x))
4703 output_operand_lossage ("invalid %%C value");
4705 else if (code == 'D')
4706 c = reverse_condition (c);
4707 else if (code == 'c')
4708 c = swap_condition (c);
4709 else if (code == 'd')
4710 c = swap_condition (reverse_condition (c));
4712 if (c == LEU)
4713 fprintf (file, "ule");
4714 else if (c == LTU)
4715 fprintf (file, "ult");
4716 else if (c == UNORDERED)
4717 fprintf (file, "un");
4718 else
4719 fprintf (file, "%s", GET_RTX_NAME (c));
4721 break;
4723 case 'E':
4724 /* Write the divide or modulus operator. */
4725 switch (GET_CODE (x))
4727 case DIV:
4728 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
4729 break;
4730 case UDIV:
4731 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
4732 break;
4733 case MOD:
4734 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
4735 break;
4736 case UMOD:
4737 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
4738 break;
4739 default:
4740 output_operand_lossage ("invalid %%E value");
4741 break;
4743 break;
4745 case 'A':
4746 /* Write "_u" for unaligned access. */
4747 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
4748 fprintf (file, "_u");
4749 break;
4751 case 0:
4752 if (GET_CODE (x) == REG)
4753 fprintf (file, "%s", reg_names[REGNO (x)]);
4754 else if (GET_CODE (x) == MEM)
4755 output_address (XEXP (x, 0));
4756 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
4758 switch (XINT (XEXP (x, 0), 1))
4760 case UNSPEC_DTPREL:
4761 case UNSPEC_TPREL:
4762 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
4763 break;
4764 default:
4765 output_operand_lossage ("unknown relocation unspec");
4766 break;
4769 else
4770 output_addr_const (file, x);
4771 break;
4773 default:
4774 output_operand_lossage ("invalid %%xn code");
4778 void
4779 print_operand_address (FILE *file, rtx addr)
4781 int basereg = 31;
4782 HOST_WIDE_INT offset = 0;
4784 if (GET_CODE (addr) == AND)
4785 addr = XEXP (addr, 0);
4787 if (GET_CODE (addr) == PLUS
4788 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
4790 offset = INTVAL (XEXP (addr, 1));
4791 addr = XEXP (addr, 0);
4794 if (GET_CODE (addr) == LO_SUM)
4796 const char *reloc16, *reloclo;
4797 rtx op1 = XEXP (addr, 1);
4799 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
4801 op1 = XEXP (op1, 0);
4802 switch (XINT (op1, 1))
4804 case UNSPEC_DTPREL:
4805 reloc16 = NULL;
4806 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
4807 break;
4808 case UNSPEC_TPREL:
4809 reloc16 = NULL;
4810 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
4811 break;
4812 default:
4813 output_operand_lossage ("unknown relocation unspec");
4814 return;
4817 output_addr_const (file, XVECEXP (op1, 0, 0));
4819 else
4821 reloc16 = "gprel";
4822 reloclo = "gprellow";
4823 output_addr_const (file, op1);
4826 if (offset)
4827 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
4829 addr = XEXP (addr, 0);
4830 if (GET_CODE (addr) == REG)
4831 basereg = REGNO (addr);
4832 else if (GET_CODE (addr) == SUBREG
4833 && GET_CODE (SUBREG_REG (addr)) == REG)
4834 basereg = subreg_regno (addr);
4835 else
4836 abort ();
4838 fprintf (file, "($%d)\t\t!%s", basereg,
4839 (basereg == 29 ? reloc16 : reloclo));
4840 return;
4843 if (GET_CODE (addr) == REG)
4844 basereg = REGNO (addr);
4845 else if (GET_CODE (addr) == SUBREG
4846 && GET_CODE (SUBREG_REG (addr)) == REG)
4847 basereg = subreg_regno (addr);
4848 else if (GET_CODE (addr) == CONST_INT)
4849 offset = INTVAL (addr);
4851 #if TARGET_ABI_OPEN_VMS
4852 else if (GET_CODE (addr) == SYMBOL_REF)
4854 fprintf (file, "%s", XSTR (addr, 0));
4855 return;
4857 else if (GET_CODE (addr) == CONST
4858 && GET_CODE (XEXP (addr, 0)) == PLUS
4859 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
4861 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
4862 XSTR (XEXP (XEXP (addr, 0), 0), 0),
4863 INTVAL (XEXP (XEXP (addr, 0), 1)));
4864 return;
4866 #endif
4868 else
4869 abort ();
4871 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
4874 /* Emit RTL insns to initialize the variable parts of a trampoline at
4875 TRAMP. FNADDR is an RTX for the address of the function's pure
4876 code. CXT is an RTX for the static chain value for the function.
4878 The three offset parameters are for the individual template's
4879 layout. A JMPOFS < 0 indicates that the trampoline does not
4880 contain instructions at all.
4882 We assume here that a function will be called many more times than
4883 its address is taken (e.g., it might be passed to qsort), so we
4884 take the trouble to initialize the "hint" field in the JMP insn.
4885 Note that the hint field is PC (new) + 4 * bits 13:0. */
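/* Concretely, the (disabled) computation below forms

     hint = ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff

   i.e. the word displacement from the instruction after the JMP,
   truncated to 14 bits, and merges it into the low bits of the JMP
   encoding.  */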
4887 void
4888 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
4889 int fnofs, int cxtofs, int jmpofs)
4891 rtx temp, temp1, addr;
4892 /* VMS really uses DImode pointers in memory at this point. */
4893 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
4895 #ifdef POINTERS_EXTEND_UNSIGNED
4896 fnaddr = convert_memory_address (mode, fnaddr);
4897 cxt = convert_memory_address (mode, cxt);
4898 #endif
4900 /* Store function address and CXT. */
4901 addr = memory_address (mode, plus_constant (tramp, fnofs));
4902 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
4903 addr = memory_address (mode, plus_constant (tramp, cxtofs));
4904 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
4906 /* This has been disabled since the hint only has a 32k range, and in
4907 no existing OS is the stack within 32k of the text segment. */
4908 if (0 && jmpofs >= 0)
4910 /* Compute hint value. */
4911 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
4912 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
4913 OPTAB_WIDEN);
4914 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
4915 build_int_cst (NULL_TREE, 2, 0), NULL_RTX, 1);
4916 temp = expand_and (SImode, gen_lowpart (SImode, temp),
4917 GEN_INT (0x3fff), 0);
4919 /* Merge in the hint. */
4920 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
4921 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
4922 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
4923 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
4924 OPTAB_WIDEN);
4925 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
4928 #ifdef ENABLE_EXECUTE_STACK
4929 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
4930 0, VOIDmode, 1, tramp, Pmode);
4931 #endif
4933 if (jmpofs >= 0)
4934 emit_insn (gen_imb ());
4937 /* Determine where to put an argument to a function.
4938 Value is zero to push the argument on the stack,
4939 or a hard register in which to store the argument.
4941 MODE is the argument's machine mode.
4942 TYPE is the data type of the argument (as a tree).
4943 This is null for libcalls where that information may
4944 not be available.
4945 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4946 the preceding args and about the function being called.
4947 NAMED is nonzero if this argument is a named parameter
4948 (otherwise it is an extra parameter matching an ellipsis).
4950 On Alpha the first 6 words of args are normally in registers
4951 and the rest are pushed. */
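/* For example (OSF): a call f (1, 2.0, 3) passes argument 1 in $16,
   the double in $f17 and argument 3 in $18 -- integer args use
   $16-$21 (basereg 16), FP args $f16-$f21 (basereg 48), and both
   sequences advance in lockstep, one word per argument.  */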
4954 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
4955 int named ATTRIBUTE_UNUSED)
4957 int basereg;
4958 int num_args;
4960 /* Don't get confused and pass small structures in FP registers. */
4961 if (type && AGGREGATE_TYPE_P (type))
4962 basereg = 16;
4963 else
4965 #ifdef ENABLE_CHECKING
4966 /* With alpha_split_complex_arg, we shouldn't see any raw complex
4967 values here. */
4968 if (COMPLEX_MODE_P (mode))
4969 abort ();
4970 #endif
4972 /* Set up defaults for FP operands passed in FP registers, and
4973 integral operands passed in integer registers. */
4974 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
4975 basereg = 32 + 16;
4976 else
4977 basereg = 16;
4980 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
4981 the three platforms, so we can't avoid conditional compilation. */
4982 #if TARGET_ABI_OPEN_VMS
4984 if (mode == VOIDmode)
4985 return alpha_arg_info_reg_val (cum);
4987 num_args = cum.num_args;
4988 if (num_args >= 6
4989 || targetm.calls.must_pass_in_stack (mode, type))
4990 return NULL_RTX;
4992 #elif TARGET_ABI_UNICOSMK
4994 int size;
4996 /* If this is the last argument, generate the call info word (CIW). */
4997 /* ??? We don't include the caller's line number in the CIW because
4998 I don't know how to determine it if debug info is turned off. */
4999 if (mode == VOIDmode)
5001 int i;
5002 HOST_WIDE_INT lo;
5003 HOST_WIDE_INT hi;
5004 rtx ciw;
5006 lo = 0;
5008 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5009 if (cum.reg_args_type[i])
5010 lo |= (1 << (7 - i));
5012 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5013 lo |= 7;
5014 else
5015 lo |= cum.num_reg_words;
5017 #if HOST_BITS_PER_WIDE_INT == 32
5018 hi = (cum.num_args << 20) | cum.num_arg_words;
5019 #else
5020 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5021 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5022 hi = 0;
5023 #endif
5024 ciw = immed_double_const (lo, hi, DImode);
5026 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5027 UNSPEC_UMK_LOAD_CIW);
5030 size = ALPHA_ARG_SIZE (mode, type, named);
5031 num_args = cum.num_reg_words;
5032 if (cum.force_stack
5033 || cum.num_reg_words + size > 6
5034 || targetm.calls.must_pass_in_stack (mode, type))
5035 return NULL_RTX;
5036 else if (type && TYPE_MODE (type) == BLKmode)
5038 rtx reg1, reg2;
5040 reg1 = gen_rtx_REG (DImode, num_args + 16);
5041 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5043 /* The argument fits in two registers. Note that we still need to
5044 reserve a register for empty structures. */
5045 if (size == 0)
5046 return NULL_RTX;
5047 else if (size == 1)
5048 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5049 else
5051 reg2 = gen_rtx_REG (DImode, num_args + 17);
5052 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5053 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5057 #elif TARGET_ABI_OSF
5059 if (cum >= 6)
5060 return NULL_RTX;
5061 num_args = cum;
5063 /* VOID is passed as a special flag for "last argument". */
5064 if (type == void_type_node)
5065 basereg = 16;
5066 else if (targetm.calls.must_pass_in_stack (mode, type))
5067 return NULL_RTX;
5069 #else
5070 #error Unhandled ABI
5071 #endif
5073 return gen_rtx_REG (mode, num_args + basereg);
5076 /* Return true if TYPE must be returned in memory, instead of in registers. */
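/* For instance, struct { int i; } is returned in memory (all
   aggregates are), while a 16-byte double _Complex is judged by its
   8-byte element size and so is returned in registers.  */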
5078 static bool
5079 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5081 enum machine_mode mode = VOIDmode;
5082 int size;
5084 if (type)
5086 mode = TYPE_MODE (type);
5088 /* All aggregates are returned in memory. */
5089 if (AGGREGATE_TYPE_P (type))
5090 return true;
5093 size = GET_MODE_SIZE (mode);
5094 switch (GET_MODE_CLASS (mode))
5096 case MODE_VECTOR_FLOAT:
5097 /* Pass all float vectors in memory, like an aggregate. */
5098 return true;
5100 case MODE_COMPLEX_FLOAT:
5101 /* We judge complex floats on the size of their element,
5102 not the size of the whole type. */
5103 size = GET_MODE_UNIT_SIZE (mode);
5104 break;
5106 case MODE_INT:
5107 case MODE_FLOAT:
5108 case MODE_COMPLEX_INT:
5109 case MODE_VECTOR_INT:
5110 break;
5112 default:
5113 /* ??? We get called on all sorts of random stuff from
5114 aggregate_value_p. We can't abort, but it's not clear
5115 what's safe to return. Pretend it's a struct I guess. */
5116 return true;
5119 /* Otherwise types must fit in one register. */
5120 return size > UNITS_PER_WORD;
5123 /* Return true if TYPE should be passed by invisible reference. */
5125 static bool
5126 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5127 enum machine_mode mode,
5128 tree type ATTRIBUTE_UNUSED,
5129 bool named ATTRIBUTE_UNUSED)
5131 return mode == TFmode || mode == TCmode;
5134 /* Define how to find the value returned by a function. VALTYPE is the
5135 data type of the value (as a tree). If the precise function being
5136 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5137 MODE is set instead of VALTYPE for libcalls.
5139 On Alpha the value is found in $0 for integer functions and
5140 $f0 for floating-point functions. */
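/* E.g. a function returning double _Complex yields the PARALLEL built
   below: the real part in register 32 ($f0) at offset 0 and the
   imaginary part in register 33 ($f1) at offset GET_MODE_SIZE
   (DFmode) == 8.  */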
5143 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5144 enum machine_mode mode)
5146 unsigned int regnum, dummy;
5147 enum mode_class class;
5149 #ifdef ENABLE_CHECKING
5150 if (valtype && alpha_return_in_memory (valtype, func))
5151 abort ();
5152 #endif
5154 if (valtype)
5155 mode = TYPE_MODE (valtype);
5157 class = GET_MODE_CLASS (mode);
5158 switch (class)
5160 case MODE_INT:
5161 PROMOTE_MODE (mode, dummy, valtype);
5162 /* FALLTHRU */
5164 case MODE_COMPLEX_INT:
5165 case MODE_VECTOR_INT:
5166 regnum = 0;
5167 break;
5169 case MODE_FLOAT:
5170 regnum = 32;
5171 break;
5173 case MODE_COMPLEX_FLOAT:
5175 enum machine_mode cmode = GET_MODE_INNER (mode);
5177 return gen_rtx_PARALLEL
5178 (VOIDmode,
5179 gen_rtvec (2,
5180 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5181 const0_rtx),
5182 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5183 GEN_INT (GET_MODE_SIZE (cmode)))));
5186 default:
5187 abort ();
5190 return gen_rtx_REG (mode, regnum);
5193 /* TCmode complex values are passed by invisible reference. We
5194 should not split these values. */
5196 static bool
5197 alpha_split_complex_arg (tree type)
5199 return TYPE_MODE (type) != TCmode;
5202 static tree
5203 alpha_build_builtin_va_list (void)
5205 tree base, ofs, space, record, type_decl;
5207 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5208 return ptr_type_node;
5210 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5211 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5212 TREE_CHAIN (record) = type_decl;
5213 TYPE_NAME (record) = type_decl;
5215 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5217 /* Dummy field to prevent alignment warnings. */
5218 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5219 DECL_FIELD_CONTEXT (space) = record;
5220 DECL_ARTIFICIAL (space) = 1;
5221 DECL_IGNORED_P (space) = 1;
5223 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5224 integer_type_node);
5225 DECL_FIELD_CONTEXT (ofs) = record;
5226 TREE_CHAIN (ofs) = space;
5228 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5229 ptr_type_node);
5230 DECL_FIELD_CONTEXT (base) = record;
5231 TREE_CHAIN (base) = ofs;
5233 TYPE_FIELDS (record) = base;
5234 layout_type (record);
5236 return record;
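/* Roughly, the record built above corresponds to this C sketch (field
   layout is ultimately decided by layout_type):

     struct __va_list_tag {
       char *__base;   -- start of the argument save area
       int __offset;   -- bytes of arguments consumed so far
       int <unnamed>;  -- dummy field, silences alignment warnings
     };  */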
5239 /* Perform any actions needed for a function that is receiving a
5240 variable number of arguments. */
5242 static void
5243 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum,
5244 enum machine_mode mode ATTRIBUTE_UNUSED,
5245 tree type ATTRIBUTE_UNUSED,
5246 int *pretend_size, int no_rtl)
5248 #if TARGET_ABI_UNICOSMK
5249 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5250 arguments on the stack. Unfortunately, it doesn't always store the first
5251 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5252 with stdargs as we always have at least one named argument there. */
5253 int num_reg_words = pcum->num_reg_words;
5254 if (num_reg_words < 6)
5256 if (!no_rtl)
5258 emit_insn (gen_umk_mismatch_args (GEN_INT (num_reg_words + 1)));
5259 emit_insn (gen_arg_home_umk ());
5261 *pretend_size = 0;
5263 #elif TARGET_ABI_OPEN_VMS
5264 /* For VMS, we allocate space for all 6 arg registers plus a count.
5266 However, if NO registers need to be saved, don't allocate any space.
5267 This is not only because we won't need the space, but because AP
5268 includes the current_pretend_args_size and we don't want to mess up
5269 any ap-relative addresses already made. */
5270 if (pcum->num_args < 6)
5272 if (!no_rtl)
5274 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
5275 emit_insn (gen_arg_home ());
5277 *pretend_size = 7 * UNITS_PER_WORD;
5279 #else
5280 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
5281 only push those that are remaining. However, if NO registers need to
5282 be saved, don't allocate any space. This is not only because we won't
5283 need the space, but because AP includes the current_pretend_args_size
5284 and we don't want to mess up any ap-relative addresses already made.
5286 If we are not to use the floating-point registers, save the integer
5287 registers where we would put the floating-point registers. This is
5288 not the most efficient way to implement varargs with just one register
5289 class, but it isn't worth doing anything more efficient in this rare
5290 case. */
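/* For instance, with two named args (cum == 2) the code below copies
   $18-$21 to AP + (2+6)*8 and $f18-$f21 (or $18-$21 again when FP
   registers are not used) to AP + 2*8, then reports 12*8 == 96 bytes
   of pretend args.  */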
5291 CUMULATIVE_ARGS cum = *pcum;
5293 if (cum >= 6)
5294 return;
5296 if (!no_rtl)
5298 int set = get_varargs_alias_set ();
5299 rtx tmp;
5301 tmp = gen_rtx_MEM (BLKmode,
5302 plus_constant (virtual_incoming_args_rtx,
5303 (cum + 6) * UNITS_PER_WORD));
5304 set_mem_alias_set (tmp, set);
5305 move_block_from_reg (16 + cum, tmp, 6 - cum);
5307 tmp = gen_rtx_MEM (BLKmode,
5308 plus_constant (virtual_incoming_args_rtx,
5309 cum * UNITS_PER_WORD));
5310 set_mem_alias_set (tmp, set);
5311 move_block_from_reg (16 + (TARGET_FPREGS ? 32 : 0) + cum, tmp,
5312 6 - cum);
5314 *pretend_size = 12 * UNITS_PER_WORD;
5315 #endif
5318 void
5319 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
5321 HOST_WIDE_INT offset;
5322 tree t, offset_field, base_field;
5324 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
5325 return;
5327 if (TARGET_ABI_UNICOSMK)
5328 std_expand_builtin_va_start (valist, nextarg);
5330 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
5331 up by 48, storing fp arg registers in the first 48 bytes, and the
5332 integer arg registers in the next 48 bytes. This is only done,
5333 however, if any integer registers need to be stored.
5335 If no integer registers need be stored, then we must subtract 48
5336 in order to account for the integer arg registers which are counted
5337 in argsize above, but which are not actually stored on the stack.
5338 Must further be careful here about structures straddling the last
5339 integer argument register; that futzes with pretend_args_size,
5340 which changes the meaning of AP. */
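/* A worked instance of the OSF arithmetic below: for a function with
   two named arguments and a trailing ellipsis (NUM_ARGS == 2 <= 6),
   offset = 6*8 = 48, so __base = AP + 48 skips the FP save area, and
   __offset is set to 2*8 = 16 for the named words already consumed.  */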
5342 if (NUM_ARGS <= 6)
5343 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
5344 else
5345 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
5347 if (TARGET_ABI_OPEN_VMS)
5349 nextarg = plus_constant (nextarg, offset);
5350 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
5351 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
5352 make_tree (ptr_type_node, nextarg));
5353 TREE_SIDE_EFFECTS (t) = 1;
5355 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5357 else
5359 base_field = TYPE_FIELDS (TREE_TYPE (valist));
5360 offset_field = TREE_CHAIN (base_field);
5362 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
5363 valist, base_field, NULL_TREE);
5364 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
5365 valist, offset_field, NULL_TREE);
5367 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
5368 t = build (PLUS_EXPR, ptr_type_node, t,
5369 build_int_cst (NULL_TREE, offset, 0));
5370 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
5371 TREE_SIDE_EFFECTS (t) = 1;
5372 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5374 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD, 0);
5375 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
5376 TREE_SIDE_EFFECTS (t) = 1;
5377 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5381 static tree
5382 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
5384 tree type_size, ptr_type, addend, t, addr, internal_post;
5386 /* If the type could not be passed in registers, skip the block
5387 reserved for the registers. */
5388 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
5390 t = build_int_cst (TREE_TYPE (offset), 6*8, 0);
5391 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
5392 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
5393 gimplify_and_add (t, pre_p);
5396 addend = offset;
5397 ptr_type = build_pointer_type (type);
5399 if (TREE_CODE (type) == COMPLEX_TYPE)
5401 tree real_part, imag_part, real_temp;
5403 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
5404 offset, pre_p);
5406 /* Copy the value into a new temporary, lest the formal temporary
5407 be reused out from under us. */
5408 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
5410 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
5411 offset, pre_p);
5413 return build (COMPLEX_EXPR, type, real_temp, imag_part);
5415 else if (TREE_CODE (type) == REAL_TYPE)
5417 tree fpaddend, cond, fourtyeight;
5419 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8, 0);
5420 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
5421 addend, fourtyeight));
5422 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
5423 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
5424 fpaddend, addend));
5427 /* Build the final address and force that value into a temporary. */
5428 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
5429 fold_convert (ptr_type, addend));
5430 internal_post = NULL;
5431 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
5432 append_to_statement_list (internal_post, pre_p);
5434 /* Update the offset field. */
5435 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
5436 if (type_size == NULL || TREE_OVERFLOW (type_size))
5437 t = size_zero_node;
5438 else
5440 t = size_binop (PLUS_EXPR, type_size, size_int (7));
5441 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
5442 t = size_binop (MULT_EXPR, t, size_int (8));
5444 t = fold_convert (TREE_TYPE (offset), t);
5445 t = build (MODIFY_EXPR, void_type_node, offset,
5446 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
5447 gimplify_and_add (t, pre_p);
5449 return build_fold_indirect_ref (addr);
5452 static tree
5453 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5455 tree offset_field, base_field, offset, base, t, r;
5456 bool indirect;
5458 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5459 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5461 base_field = TYPE_FIELDS (va_list_type_node);
5462 offset_field = TREE_CHAIN (base_field);
5463 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
5464 valist, base_field, NULL_TREE);
5465 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
5466 valist, offset_field, NULL_TREE);
5468 /* Pull the fields of the structure out into temporaries. Since we never
5469 modify the base field, we can use a formal temporary. Sign-extend the
5470 offset field so that it's the proper width for pointer arithmetic. */
5471 base = get_formal_tmp_var (base_field, pre_p);
5473 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
5474 offset = get_initialized_tmp_var (t, pre_p, NULL);
5476 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5477 if (indirect)
5478 type = build_pointer_type (type);
5480 /* Find the value. Note that this will be a stable indirection, or
5481 a composite of stable indirections in the case of complex. */
5482 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
5484 /* Stuff the offset temporary back into its field. */
5485 t = build (MODIFY_EXPR, void_type_node, offset_field,
5486 fold_convert (TREE_TYPE (offset_field), offset));
5487 gimplify_and_add (t, pre_p);
5489 if (indirect)
5490 r = build_fold_indirect_ref (r);
5492 return r;
5495 /* Builtins. */
5497 enum alpha_builtin
5499 ALPHA_BUILTIN_CMPBGE,
5500 ALPHA_BUILTIN_EXTBL,
5501 ALPHA_BUILTIN_EXTWL,
5502 ALPHA_BUILTIN_EXTLL,
5503 ALPHA_BUILTIN_EXTQL,
5504 ALPHA_BUILTIN_EXTWH,
5505 ALPHA_BUILTIN_EXTLH,
5506 ALPHA_BUILTIN_EXTQH,
5507 ALPHA_BUILTIN_INSBL,
5508 ALPHA_BUILTIN_INSWL,
5509 ALPHA_BUILTIN_INSLL,
5510 ALPHA_BUILTIN_INSQL,
5511 ALPHA_BUILTIN_INSWH,
5512 ALPHA_BUILTIN_INSLH,
5513 ALPHA_BUILTIN_INSQH,
5514 ALPHA_BUILTIN_MSKBL,
5515 ALPHA_BUILTIN_MSKWL,
5516 ALPHA_BUILTIN_MSKLL,
5517 ALPHA_BUILTIN_MSKQL,
5518 ALPHA_BUILTIN_MSKWH,
5519 ALPHA_BUILTIN_MSKLH,
5520 ALPHA_BUILTIN_MSKQH,
5521 ALPHA_BUILTIN_UMULH,
5522 ALPHA_BUILTIN_ZAP,
5523 ALPHA_BUILTIN_ZAPNOT,
5524 ALPHA_BUILTIN_AMASK,
5525 ALPHA_BUILTIN_IMPLVER,
5526 ALPHA_BUILTIN_RPCC,
5527 ALPHA_BUILTIN_THREAD_POINTER,
5528 ALPHA_BUILTIN_SET_THREAD_POINTER,
5530 /* TARGET_MAX */
5531 ALPHA_BUILTIN_MINUB8,
5532 ALPHA_BUILTIN_MINSB8,
5533 ALPHA_BUILTIN_MINUW4,
5534 ALPHA_BUILTIN_MINSW4,
5535 ALPHA_BUILTIN_MAXUB8,
5536 ALPHA_BUILTIN_MAXSB8,
5537 ALPHA_BUILTIN_MAXUW4,
5538 ALPHA_BUILTIN_MAXSW4,
5539 ALPHA_BUILTIN_PERR,
5540 ALPHA_BUILTIN_PKLB,
5541 ALPHA_BUILTIN_PKWB,
5542 ALPHA_BUILTIN_UNPKBL,
5543 ALPHA_BUILTIN_UNPKBW,
5545 /* TARGET_CIX */
5546 ALPHA_BUILTIN_CTTZ,
5547 ALPHA_BUILTIN_CTLZ,
5548 ALPHA_BUILTIN_CTPOP,
5550 ALPHA_BUILTIN_max
5553 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
5554 CODE_FOR_builtin_cmpbge,
5555 CODE_FOR_builtin_extbl,
5556 CODE_FOR_builtin_extwl,
5557 CODE_FOR_builtin_extll,
5558 CODE_FOR_builtin_extql,
5559 CODE_FOR_builtin_extwh,
5560 CODE_FOR_builtin_extlh,
5561 CODE_FOR_builtin_extqh,
5562 CODE_FOR_builtin_insbl,
5563 CODE_FOR_builtin_inswl,
5564 CODE_FOR_builtin_insll,
5565 CODE_FOR_builtin_insql,
5566 CODE_FOR_builtin_inswh,
5567 CODE_FOR_builtin_inslh,
5568 CODE_FOR_builtin_insqh,
5569 CODE_FOR_builtin_mskbl,
5570 CODE_FOR_builtin_mskwl,
5571 CODE_FOR_builtin_mskll,
5572 CODE_FOR_builtin_mskql,
5573 CODE_FOR_builtin_mskwh,
5574 CODE_FOR_builtin_msklh,
5575 CODE_FOR_builtin_mskqh,
5576 CODE_FOR_umuldi3_highpart,
5577 CODE_FOR_builtin_zap,
5578 CODE_FOR_builtin_zapnot,
5579 CODE_FOR_builtin_amask,
5580 CODE_FOR_builtin_implver,
5581 CODE_FOR_builtin_rpcc,
5582 CODE_FOR_load_tp,
5583 CODE_FOR_set_tp,
5585 /* TARGET_MAX */
5586 CODE_FOR_builtin_minub8,
5587 CODE_FOR_builtin_minsb8,
5588 CODE_FOR_builtin_minuw4,
5589 CODE_FOR_builtin_minsw4,
5590 CODE_FOR_builtin_maxub8,
5591 CODE_FOR_builtin_maxsb8,
5592 CODE_FOR_builtin_maxuw4,
5593 CODE_FOR_builtin_maxsw4,
5594 CODE_FOR_builtin_perr,
5595 CODE_FOR_builtin_pklb,
5596 CODE_FOR_builtin_pkwb,
5597 CODE_FOR_builtin_unpkbl,
5598 CODE_FOR_builtin_unpkbw,
5600 /* TARGET_CIX */
5601 CODE_FOR_builtin_cttz,
5602 CODE_FOR_builtin_ctlz,
5603 CODE_FOR_builtin_ctpop
5606 struct alpha_builtin_def
5608 const char *name;
5609 enum alpha_builtin code;
5610 unsigned int target_mask;
5613 static struct alpha_builtin_def const zero_arg_builtins[] = {
5614 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0 },
5615 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0 }
5618 static struct alpha_builtin_def const one_arg_builtins[] = {
5619 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0 },
5620 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX },
5621 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX },
5622 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX },
5623 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX },
5624 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX },
5625 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX },
5626 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX }
5629 static struct alpha_builtin_def const two_arg_builtins[] = {
5630 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0 },
5631 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0 },
5632 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0 },
5633 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0 },
5634 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0 },
5635 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0 },
5636 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0 },
5637 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0 },
5638 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0 },
5639 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0 },
5640 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0 },
5641 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0 },
5642 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0 },
5643 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0 },
5644 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0 },
5645 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0 },
5646 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0 },
5647 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0 },
5648 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0 },
5649 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0 },
5650 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0 },
5651 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0 },
5652 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0 },
5653 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0 },
5654 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0 },
5655 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX },
5656 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX },
5657 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX },
5658 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX },
5659 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX },
5660 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX },
5661 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX },
5662 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX },
5663 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX }
5666 static void
5667 alpha_init_builtins (void)
5669 const struct alpha_builtin_def *p;
5670 tree ftype;
5671 size_t i;
5673 ftype = build_function_type (long_integer_type_node, void_list_node);
5675 p = zero_arg_builtins;
5676 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
5677 if ((target_flags & p->target_mask) == p->target_mask)
5678 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
5679 NULL, NULL_TREE);
5681 ftype = build_function_type_list (long_integer_type_node,
5682 long_integer_type_node, NULL_TREE);
5684 p = one_arg_builtins;
5685 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
5686 if ((target_flags & p->target_mask) == p->target_mask)
5687 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
5688 NULL, NULL_TREE);
5690 ftype = build_function_type_list (long_integer_type_node,
5691 long_integer_type_node,
5692 long_integer_type_node, NULL_TREE);
5694 p = two_arg_builtins;
5695 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
5696 if ((target_flags & p->target_mask) == p->target_mask)
5697 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
5698 NULL, NULL_TREE);
5700 ftype = build_function_type (ptr_type_node, void_list_node);
5701 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
5702 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
5703 NULL, NULL_TREE);
5705 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
5706 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
5707 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
5708 NULL, NULL_TREE);
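/* A usage sketch for the intrinsics registered above:

     long hi = __builtin_alpha_umulh (a, b);      -- high 64 bits of a*b
     long lo = __builtin_alpha_zapnot (x, 0x0f);  -- keep low 4 bytes

   each call is expanded by alpha_expand_builtin below into the single
   insn named in code_for_builtin.  */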
5711 /* Expand an expression EXP that calls a built-in function,
5712 with result going to TARGET if that's convenient
5713 (and in mode MODE if that's convenient).
5714 SUBTARGET may be used as the target for computing one of EXP's operands.
5715 IGNORE is nonzero if the value is to be ignored. */
5717 static rtx
5718 alpha_expand_builtin (tree exp, rtx target,
5719 rtx subtarget ATTRIBUTE_UNUSED,
5720 enum machine_mode mode ATTRIBUTE_UNUSED,
5721 int ignore ATTRIBUTE_UNUSED)
5723 #define MAX_ARGS 2
5725 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
5726 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
5727 tree arglist = TREE_OPERAND (exp, 1);
5728 enum insn_code icode;
5729 rtx op[MAX_ARGS], pat;
5730 int arity;
5731 bool nonvoid;
5733 if (fcode >= ALPHA_BUILTIN_max)
5734 internal_error ("bad builtin fcode");
5735 icode = code_for_builtin[fcode];
5736 if (icode == 0)
5737 internal_error ("bad builtin fcode");
5739 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
5741 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
5742 arglist;
5743 arglist = TREE_CHAIN (arglist), arity++)
5745 const struct insn_operand_data *insn_op;
5747 tree arg = TREE_VALUE (arglist);
5748 if (arg == error_mark_node)
5749 return NULL_RTX;
5750 if (arity >= MAX_ARGS)
5751 return NULL_RTX;
5753 insn_op = &insn_data[icode].operand[arity + nonvoid];
5755 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
5757 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
5758 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
5761 if (nonvoid)
5763 enum machine_mode tmode = insn_data[icode].operand[0].mode;
5764 if (!target
5765 || GET_MODE (target) != tmode
5766 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
5767 target = gen_reg_rtx (tmode);
5770 switch (arity)
5772 case 0:
5773 pat = GEN_FCN (icode) (target);
5774 break;
5775 case 1:
5776 if (nonvoid)
5777 pat = GEN_FCN (icode) (target, op[0]);
5778 else
5779 pat = GEN_FCN (icode) (op[0]);
5780 break;
5781 case 2:
5782 pat = GEN_FCN (icode) (target, op[0], op[1]);
5783 break;
5784 default:
5785 abort ();
5787 if (!pat)
5788 return NULL_RTX;
5789 emit_insn (pat);
5791 if (nonvoid)
5792 return target;
5793 else
5794 return const0_rtx;
5797 /* This page contains routines that are used to determine what the function
5798 prologue and epilogue code will do and write them out. */
5800 /* Compute the size of the save area in the stack. */
5802 /* These variables are used for communication between the following functions.
5803 They indicate various things about the current function being compiled
5804 that are used to tell what kind of prologue, epilogue and procedure
5805 descriptor to generate. */
5807 /* What kind of procedure frame the current function requires. */
5808 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
5809 static enum alpha_procedure_types alpha_procedure_type;
5811 /* Register number (either FP or SP) that is used to unwind the frame. */
5812 static int vms_unwind_regno;
5814 /* Register number used to save FP. We need not have one for RA since
5815 we don't modify it for register procedures. This is only defined
5816 for register frame procedures. */
5817 static int vms_save_fp_regno;
5819 /* Register number used to reference objects off our PV. */
5820 static int vms_base_regno;
5822 /* Compute register masks for saved registers. */
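/* In the masks computed below, bit I of IMASK means integer register
   $I must be saved, and bit I of FMASK likewise for $fI; e.g. a
   function that clobbers $9 and spills anything ends up with
   imask == (1UL << 9) | (1UL << REG_RA).  */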
5824 static void
5825 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
5827 unsigned long imask = 0;
5828 unsigned long fmask = 0;
5829 unsigned int i;
5831 /* When outputting a thunk, we don't have valid register life info,
5832 but assemble_start_function wants to output .frame and .mask
5833 directives. */
5834 if (current_function_is_thunk)
5836 *imaskP = 0;
5837 *fmaskP = 0;
5838 return;
5841 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
5842 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
5844 /* One for every register we have to save. */
5845 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5846 if (! fixed_regs[i] && ! call_used_regs[i]
5847 && regs_ever_live[i] && i != REG_RA
5848 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
5850 if (i < 32)
5851 imask |= (1UL << i);
5852 else
5853 fmask |= (1UL << (i - 32));
5856 /* We need to restore these for the handler. */
5857 if (current_function_calls_eh_return)
5859 for (i = 0; ; ++i)
5861 unsigned regno = EH_RETURN_DATA_REGNO (i);
5862 if (regno == INVALID_REGNUM)
5863 break;
5864 imask |= 1UL << regno;
5867 /* Glibc likes to use $31 as an unwind stopper for crt0. To
5868 avoid hackery in unwind-dw2.c, we need to actively store a
5869 zero in the prologue of _Unwind_RaiseException et al. */
5870 imask |= 1UL << 31;
5873 /* If any register spilled, then spill the return address also. */
5874 /* ??? This is required by the Digital stack unwind specification
5875 and isn't needed if we're doing Dwarf2 unwinding. */
5876 if (imask || fmask || alpha_ra_ever_killed ())
5877 imask |= (1UL << REG_RA);
5879 *imaskP = imask;
5880 *fmaskP = fmask;
5884 alpha_sa_size (void)
5886 unsigned long mask[2];
5887 int sa_size = 0;
5888 int i, j;
5890 alpha_sa_mask (&mask[0], &mask[1]);
5892 if (TARGET_ABI_UNICOSMK)
5894 if (mask[0] || mask[1])
5895 sa_size = 14;
5897 else
5899 for (j = 0; j < 2; ++j)
5900 for (i = 0; i < 32; ++i)
5901 if ((mask[j] >> i) & 1)
5902 sa_size++;
5905 if (TARGET_ABI_UNICOSMK)
5907 /* We might not need to generate a frame if we don't make any calls
5908 (including calls to __T3E_MISMATCH if this is a vararg function),
5909 don't have any local variables which require stack slots, don't
5910 use alloca and have not determined that we need a frame for other
5911 reasons. */
5913 alpha_procedure_type
5914 = (sa_size || get_frame_size() != 0
5915 || current_function_outgoing_args_size
5916 || current_function_stdarg || current_function_calls_alloca
5917 || frame_pointer_needed)
5918 ? PT_STACK : PT_REGISTER;
5920 /* Always reserve space for saving callee-saved registers if we
5921 need a frame as required by the calling convention. */
5922 if (alpha_procedure_type == PT_STACK)
5923 sa_size = 14;
5925 else if (TARGET_ABI_OPEN_VMS)
5927 /* Start by assuming we can use a register procedure if we don't
5928 make any calls (REG_RA not used) or need to save any
5929 registers and a stack procedure if we do. */
5930 if ((mask[0] >> REG_RA) & 1)
5931 alpha_procedure_type = PT_STACK;
5932 else if (get_frame_size() != 0)
5933 alpha_procedure_type = PT_REGISTER;
5934 else
5935 alpha_procedure_type = PT_NULL;
5937 /* Don't reserve space for saving FP & RA yet. Do that later after we've
5938 made the final decision on stack procedure vs register procedure. */
5939 if (alpha_procedure_type == PT_STACK)
5940 sa_size -= 2;
5942 /* Decide whether to refer to objects off our PV via FP or PV.
5943 If we need FP for something else or if we receive a nonlocal
5944 goto (which expects PV to contain the value), we must use PV.
5945 Otherwise, start by assuming we can use FP. */
5947 vms_base_regno
5948 = (frame_pointer_needed
5949 || current_function_has_nonlocal_label
5950 || alpha_procedure_type == PT_STACK
5951 || current_function_outgoing_args_size)
5952 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
5954 /* If we want to copy PV into FP, we need to find some register
5955 in which to save FP. */
5957 vms_save_fp_regno = -1;
5958 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
5959 for (i = 0; i < 32; i++)
5960 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
5961 vms_save_fp_regno = i;
5963 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
5964 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
5965 else if (alpha_procedure_type == PT_NULL)
5966 vms_base_regno = REG_PV;
5968 /* Stack unwinding should be done via FP unless we use it for PV. */
5969 vms_unwind_regno = (vms_base_regno == REG_PV
5970 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
5972 /* If this is a stack procedure, allow space for saving FP and RA. */
5973 if (alpha_procedure_type == PT_STACK)
5974 sa_size += 2;
5976 else
5978 /* Our size must be even (multiple of 16 bytes). */
5979 if (sa_size & 1)
5980 sa_size++;
5983 return sa_size * 8;
5986 /* Define the offset between two registers, one to be eliminated,
5987 and the other its replacement, at the start of a routine. */
5989 HOST_WIDE_INT
5990 alpha_initial_elimination_offset (unsigned int from,
5991 unsigned int to ATTRIBUTE_UNUSED)
5993 HOST_WIDE_INT ret;
5995 ret = alpha_sa_size ();
5996 ret += ALPHA_ROUND (current_function_outgoing_args_size);
5998 if (from == FRAME_POINTER_REGNUM)
6000 else if (from == ARG_POINTER_REGNUM)
6001 ret += (ALPHA_ROUND (get_frame_size ()
6002 + current_function_pretend_args_size)
6003 - current_function_pretend_args_size);
6004 else
6005 abort ();
6007 return ret;
6011 alpha_pv_save_size (void)
6013 alpha_sa_size ();
6014 return alpha_procedure_type == PT_STACK ? 8 : 0;
6018 alpha_using_fp (void)
6020 alpha_sa_size ();
6021 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
6024 #if TARGET_ABI_OPEN_VMS
6026 const struct attribute_spec vms_attribute_table[] =
6028 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
6029 { "overlaid", 0, 0, true, false, false, NULL },
6030 { "global", 0, 0, true, false, false, NULL },
6031 { "initialize", 0, 0, true, false, false, NULL },
6032 { NULL, 0, 0, false, false, false, NULL }
6035 #endif
6037 static int
6038 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
6040 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
6044 alpha_find_lo_sum_using_gp (rtx insn)
6046 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
6049 static int
6050 alpha_does_function_need_gp (void)
6052 rtx insn;
6054 /* The GP being variable is an OSF abi thing. */
6055 if (! TARGET_ABI_OSF)
6056 return 0;
6058 /* We need the gp to load the address of __mcount. */
6059 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6060 return 1;
6062 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
6063 if (current_function_is_thunk)
6064 return 1;
6066 /* The nonlocal receiver pattern assumes that the gp is valid for
6067 the nested function. Reasonable because it's almost always set
6068 correctly already. For the cases where that's wrong, make sure
6069 the nested function loads its gp on entry. */
6070 if (current_function_has_nonlocal_goto)
6071 return 1;
6073 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
6074 Even if we are a static function, we still need to do this in case
6075 our address is taken and passed to something like qsort. */
6077 push_topmost_sequence ();
6078 insn = get_insns ();
6079 pop_topmost_sequence ();
6081 for (; insn; insn = NEXT_INSN (insn))
6082 if (INSN_P (insn)
6083 && GET_CODE (PATTERN (insn)) != USE
6084 && GET_CODE (PATTERN (insn)) != CLOBBER
6085 && get_attr_usegp (insn))
6086 return 1;
6088 return 0;
6092 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
6093 sequences. */
6095 static rtx
6096 set_frame_related_p (void)
6098 rtx seq = get_insns ();
6099 rtx insn;
6101 end_sequence ();
6103 if (!seq)
6104 return NULL_RTX;
6106 if (INSN_P (seq))
6108 insn = seq;
6109 while (insn != NULL_RTX)
6111 RTX_FRAME_RELATED_P (insn) = 1;
6112 insn = NEXT_INSN (insn);
6114 seq = emit_insn (seq);
6116 else
6118 seq = emit_insn (seq);
6119 RTX_FRAME_RELATED_P (seq) = 1;
6121 return seq;
6124 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
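/* Typical use, as in the prologue below:

     FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-frame_size))));

   which emits the insn(s) inside a sequence and marks each one
   RTX_FRAME_RELATED_P so that dwarf2 CFI can be generated for it.  */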
6126 /* Write function prologue. */
6128 /* On vms we have two kinds of functions:
6130 - stack frame (PROC_STACK)
6131 these are 'normal' functions with local variables, which call
6132 other functions
6133 - register frame (PROC_REGISTER)
6134 keeps all data in registers, needs no stack
6136 We must pass this to the assembler so it can generate the
6137 proper pdsc (procedure descriptor).
6138 This is done with the '.pdesc' command.
6140 On non-VMS targets, we don't really differentiate between the two, as we can
6141 simply allocate stack without saving registers. */
6143 void
6144 alpha_expand_prologue (void)
6146 /* Registers to save. */
6147 unsigned long imask = 0;
6148 unsigned long fmask = 0;
6149 /* Stack space needed for pushing registers clobbered by us. */
6150 HOST_WIDE_INT sa_size;
6151 /* Complete stack size needed. */
6152 HOST_WIDE_INT frame_size;
6153 /* Offset from base reg to register save area. */
6154 HOST_WIDE_INT reg_offset;
6155 rtx sa_reg, mem;
6156 int i;
6158 sa_size = alpha_sa_size ();
6160 frame_size = get_frame_size ();
6161 if (TARGET_ABI_OPEN_VMS)
6162 frame_size = ALPHA_ROUND (sa_size
6163 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6164 + frame_size
6165 + current_function_pretend_args_size);
6166 else if (TARGET_ABI_UNICOSMK)
6167 /* We have to allocate space for the DSIB if we generate a frame. */
6168 frame_size = ALPHA_ROUND (sa_size
6169 + (alpha_procedure_type == PT_STACK ? 48 : 0))
6170 + ALPHA_ROUND (frame_size
6171 + current_function_outgoing_args_size);
6172 else
6173 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
6174 + sa_size
6175 + ALPHA_ROUND (frame_size
6176 + current_function_pretend_args_size));
6178 if (TARGET_ABI_OPEN_VMS)
6179 reg_offset = 8;
6180 else
6181 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
6183 alpha_sa_mask (&imask, &fmask);
6185 /* Emit an insn to reload GP, if needed. */
6186 if (TARGET_ABI_OSF)
6188 alpha_function_needs_gp = alpha_does_function_need_gp ();
6189 if (alpha_function_needs_gp)
6190 emit_insn (gen_prologue_ldgp ());
6193 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
6194 the call to mcount ourselves, rather than having the linker do it
6195 magically in response to -pg. Since _mcount has special linkage,
6196 don't represent the call as a call. */
6197 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6198 emit_insn (gen_prologue_mcount ());
6200 if (TARGET_ABI_UNICOSMK)
6201 unicosmk_gen_dsib (&imask);
6203 /* Adjust the stack by the frame size. If the frame size is > 4096
6204 bytes, we need to be sure we probe somewhere in the first and last
6205 4096 bytes (we can probably get away without the latter test) and
6206 every 8192 bytes in between. If the frame size is > 32768, we
6207 do this in a loop. Otherwise, we generate the explicit probe
6208 instructions.
6210 Note that we are only allowed to adjust sp once in the prologue. */
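/* [Editorial worked example -- not in the original source.]  For
   frame_size = 20000 with sa_size = 0: the do-while below probes at
   sp-4096 and sp-12288 (probed then reaches 20480 and the loop stops),
   the extra probe is skipped because probed + 4096 = 24576 is not below
   frame_size, and a single sp adjustment of -20000 follows.  */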
6212 if (frame_size <= 32768)
6214 if (frame_size > 4096)
6216 int probed = 4096;
6219 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
6220 ? -probed + 64
6221 : -probed)));
6222 while ((probed += 8192) < frame_size);
6224 /* We only have to do this probe if we aren't saving registers. */
6225 if (sa_size == 0 && probed + 4096 < frame_size)
6226 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
6229 if (frame_size != 0)
6230 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
6231 GEN_INT (TARGET_ABI_UNICOSMK
6232 ? -frame_size + 64
6233 : -frame_size))));
6235 else
6237 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
6238 number of 8192 byte blocks to probe. We then probe each block
6239 in the loop and then set SP to the proper location. If the
6240 amount remaining is > 4096, we have to do one more probe if we
6241 are not saving any registers. */
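/* [Editorial worked example -- not in the original source.]  For
   frame_size = 100000: blocks = (100000 + 4096) / 8192 = 12 and
   leftover = 104096 - 12*8192 = 5792; since leftover > 4096, one more
   probe past the loop is required when no registers are being saved.  */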
6243 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
6244 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
6245 rtx ptr = gen_rtx_REG (DImode, 22);
6246 rtx count = gen_rtx_REG (DImode, 23);
6247 rtx seq;
6249 emit_move_insn (count, GEN_INT (blocks));
6250 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
6251 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
6253 /* Because of the difficulty in emitting a new basic block this
6254 late in the compilation, generate the loop as a single insn. */
6255 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
6257 if (leftover > 4096 && sa_size == 0)
6259 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
6260 MEM_VOLATILE_P (last) = 1;
6261 emit_move_insn (last, const0_rtx);
6264 if (TARGET_ABI_WINDOWS_NT)
6266 /* For NT stack unwind (done by 'reverse execution'), it's
6267 not OK to take the result of a loop, even though the value
6268 is already in ptr, so we reload it via a single operation
6269 and subtract it from sp.
6271 Yes, that's correct -- we have to reload the whole constant
6272 into a temporary via ldah+lda then subtract from sp. */
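/* [Editorial worked example -- not in the original source.]  For
   frame_size = 0x1234abcd: lo = ((0xabcd ^ 0x8000) - 0x8000) = -0x5433
   and hi = 0x1234abcd - lo = 0x12350000, so the ldah supplies hi and
   the lda folds the negative lo back in.  */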
6274 HOST_WIDE_INT lo, hi;
6275 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
6276 hi = frame_size - lo;
6278 emit_move_insn (ptr, GEN_INT (hi));
6279 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
6280 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
6281 ptr));
6283 else
6285 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
6286 GEN_INT (-leftover)));
6289 /* This alternative is special, because the DWARF code cannot
6290 possibly intuit through the loop above. So we invent this
6291 note for it to look at instead. */
6292 RTX_FRAME_RELATED_P (seq) = 1;
6293 REG_NOTES (seq)
6294 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6295 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6296 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
6297 GEN_INT (TARGET_ABI_UNICOSMK
6298 ? -frame_size + 64
6299 : -frame_size))),
6300 REG_NOTES (seq));
6303 if (!TARGET_ABI_UNICOSMK)
6305 /* Cope with very large offsets to the register save area. */
6306 sa_reg = stack_pointer_rtx;
6307 if (reg_offset + sa_size > 0x8000)
6309 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
6310 HOST_WIDE_INT bias;
6312 if (low + sa_size <= 0x8000)
6313 bias = reg_offset - low, reg_offset = low;
6314 else
6315 bias = reg_offset, reg_offset = 0;
6317 sa_reg = gen_rtx_REG (DImode, 24);
6318 FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
6319 GEN_INT (bias))));
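/* [Editorial worked example -- not in the original source.]  With
   reg_offset = 40000 (0x9c40) and sa_size = 64: low = -25536, and since
   low + sa_size fits in 16 bits, bias = 40000 - (-25536) = 65536; $24
   then holds sp + 65536 and every save below uses a small 16-bit
   displacement.  */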
6322 /* Save regs in stack order. Beginning with VMS PV. */
6323 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6325 mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
6326 set_mem_alias_set (mem, alpha_sr_alias_set);
6327 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
6330 /* Save register RA next. */
6331 if (imask & (1UL << REG_RA))
6333 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
6334 set_mem_alias_set (mem, alpha_sr_alias_set);
6335 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
6336 imask &= ~(1UL << REG_RA);
6337 reg_offset += 8;
6340 /* Now save any other registers required to be saved. */
6341 for (i = 0; i < 31; i++)
6342 if (imask & (1UL << i))
6344 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
6345 set_mem_alias_set (mem, alpha_sr_alias_set);
6346 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
6347 reg_offset += 8;
6350 /* Store a zero if requested for unwinding. */
6351 if (imask & (1UL << 31))
6353 rtx insn, t;
6355 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
6356 set_mem_alias_set (mem, alpha_sr_alias_set);
6357 insn = emit_move_insn (mem, const0_rtx);
6359 RTX_FRAME_RELATED_P (insn) = 1;
6360 t = gen_rtx_REG (Pmode, 31);
6361 t = gen_rtx_SET (VOIDmode, mem, t);
6362 t = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, t, REG_NOTES (insn));
6363 REG_NOTES (insn) = t;
6365 reg_offset += 8;
6368 for (i = 0; i < 31; i++)
6369 if (fmask & (1UL << i))
6371 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
6372 set_mem_alias_set (mem, alpha_sr_alias_set);
6373 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
6374 reg_offset += 8;
6377 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
6379 /* The standard frame on the T3E includes space for saving registers.
6380 We just have to use it. We don't have to save the return address and
6381 the old frame pointer here - they are saved in the DSIB. */
6383 reg_offset = -56;
6384 for (i = 9; i < 15; i++)
6385 if (imask & (1UL << i))
6387 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
6388 reg_offset));
6389 set_mem_alias_set (mem, alpha_sr_alias_set);
6390 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
6391 reg_offset -= 8;
6393 for (i = 2; i < 10; i++)
6394 if (fmask & (1UL << i))
6396 mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
6397 reg_offset));
6398 set_mem_alias_set (mem, alpha_sr_alias_set);
6399 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
6400 reg_offset -= 8;
6404 if (TARGET_ABI_OPEN_VMS)
6406 if (alpha_procedure_type == PT_REGISTER)
6407 /* Register frame procedures save the fp.
6408 ?? Ought to have a dwarf2 save for this. */
6409 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
6410 hard_frame_pointer_rtx);
6412 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
6413 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
6414 gen_rtx_REG (DImode, REG_PV)));
6416 if (alpha_procedure_type != PT_NULL
6417 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
6418 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
6420 /* If we have to allocate space for outgoing args, do it now. */
6421 if (current_function_outgoing_args_size != 0)
6423 rtx seq
6424 = emit_move_insn (stack_pointer_rtx,
6425 plus_constant
6426 (hard_frame_pointer_rtx,
6427 - (ALPHA_ROUND
6428 (current_function_outgoing_args_size))));
6430 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
6431 if ! frame_pointer_needed. Setting the bit will change the CFA
6432 computation rule to use sp again, which would be wrong if we had
6433 frame_pointer_needed, as this means sp might move unpredictably
6434 later on.
6436 Also, note that
6437 frame_pointer_needed
6438 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
6440 current_function_outgoing_args_size != 0
6441 => alpha_procedure_type != PT_NULL,
6443 so when we are not setting the bit here, we are guaranteed to
6444 have emitted an FRP frame pointer update just before. */
6445 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
6448 else if (!TARGET_ABI_UNICOSMK)
6450 /* If we need a frame pointer, set it from the stack pointer. */
6451 if (frame_pointer_needed)
6453 if (TARGET_CAN_FAULT_IN_PROLOGUE)
6454 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
6455 else
6456 /* This must always be the last instruction in the
6457 prologue, thus we emit a special move + clobber. */
6458 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
6459 stack_pointer_rtx, sa_reg)));
6463 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
6464 the prologue, for exception handling reasons, we cannot do this for
6465 any insn that might fault. We could prevent this for mems with a
6466 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
6467 have to prevent all such scheduling with a blockage.
6469 Linux, on the other hand, never bothered to implement OSF/1's
6470 exception handling, and so doesn't care about such things. Anyone
6471 planning to use dwarf2 frame-unwind info can also omit the blockage. */
6473 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
6474 emit_insn (gen_blockage ());
6477 /* Output the textual info surrounding the prologue. */
6479 void
6480 alpha_start_function (FILE *file, const char *fnname,
6481 tree decl ATTRIBUTE_UNUSED)
6483 unsigned long imask = 0;
6484 unsigned long fmask = 0;
6485 /* Stack space needed for pushing registers clobbered by us. */
6486 HOST_WIDE_INT sa_size;
6487 /* Complete stack size needed. */
6488 unsigned HOST_WIDE_INT frame_size;
6489 /* Offset from base reg to register save area. */
6490 HOST_WIDE_INT reg_offset;
6491 char *entry_label = (char *) alloca (strlen (fnname) + 6);
6492 int i;
6494 /* Don't emit an extern directive for functions defined in the same file. */
6495 if (TARGET_ABI_UNICOSMK)
6497 tree name_tree;
6498 name_tree = get_identifier (fnname);
6499 TREE_ASM_WRITTEN (name_tree) = 1;
6502 alpha_fnname = fnname;
6503 sa_size = alpha_sa_size ();
6505 frame_size = get_frame_size ();
6506 if (TARGET_ABI_OPEN_VMS)
6507 frame_size = ALPHA_ROUND (sa_size
6508 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6509 + frame_size
6510 + current_function_pretend_args_size);
6511 else if (TARGET_ABI_UNICOSMK)
6512 frame_size = ALPHA_ROUND (sa_size
6513 + (alpha_procedure_type == PT_STACK ? 48 : 0))
6514 + ALPHA_ROUND (frame_size
6515 + current_function_outgoing_args_size);
6516 else
6517 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
6518 + sa_size
6519 + ALPHA_ROUND (frame_size
6520 + current_function_pretend_args_size));
6522 if (TARGET_ABI_OPEN_VMS)
6523 reg_offset = 8;
6524 else
6525 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
6527 alpha_sa_mask (&imask, &fmask);
6529 /* Ecoff can handle multiple .file directives, so put out file and lineno.
6530 We have to do that before the .ent directive as we cannot switch
6531 files within procedures with native ecoff because line numbers are
6532 linked to procedure descriptors.
6533 Outputting the lineno helps debugging of one-line functions as they
6534 would otherwise get no line number at all. Please note that we would
6535 like to put out last_linenum from final.c, but it is not accessible. */
6537 if (write_symbols == SDB_DEBUG)
6539 #ifdef ASM_OUTPUT_SOURCE_FILENAME
6540 ASM_OUTPUT_SOURCE_FILENAME (file,
6541 DECL_SOURCE_FILE (current_function_decl));
6542 #endif
6543 #ifdef ASM_OUTPUT_SOURCE_LINE
6544 if (debug_info_level != DINFO_LEVEL_TERSE)
6545 ASM_OUTPUT_SOURCE_LINE (file,
6546 DECL_SOURCE_LINE (current_function_decl), 0);
6547 #endif
6550 /* Issue function start and label. */
6551 if (TARGET_ABI_OPEN_VMS
6552 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
6554 fputs ("\t.ent ", file);
6555 assemble_name (file, fnname);
6556 putc ('\n', file);
6558 /* If the function needs GP, we'll write the "..ng" label there.
6559 Otherwise, do it here. */
6560 if (TARGET_ABI_OSF
6561 && ! alpha_function_needs_gp
6562 && ! current_function_is_thunk)
6564 putc ('$', file);
6565 assemble_name (file, fnname);
6566 fputs ("..ng:\n", file);
6570 strcpy (entry_label, fnname);
6571 if (TARGET_ABI_OPEN_VMS)
6572 strcat (entry_label, "..en");
6574 /* For public functions, the label must be globalized by appending an
6575 additional colon. */
6576 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
6577 strcat (entry_label, ":");
6579 ASM_OUTPUT_LABEL (file, entry_label);
6580 inside_function = TRUE;
6582 if (TARGET_ABI_OPEN_VMS)
6583 fprintf (file, "\t.base $%d\n", vms_base_regno);
6585 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
6586 && !flag_inhibit_size_directive)
6588 /* Set flags in procedure descriptor to request IEEE-conformant
6589 math-library routines. The value we set it to is PDSC_EXC_IEEE
6590 (/usr/include/pdsc.h). */
6591 fputs ("\t.eflag 48\n", file);
6594 /* Set up offsets to alpha virtual arg/local debugging pointer. */
6595 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
6596 alpha_arg_offset = -frame_size + 48;
6598 /* Describe our frame. If the frame size is larger than an integer,
6599 print it as zero to avoid an assembler error. We won't be
6600 properly describing such a frame, but that's the best we can do. */
6601 if (TARGET_ABI_UNICOSMK)
6603 else if (TARGET_ABI_OPEN_VMS)
6604 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
6605 HOST_WIDE_INT_PRINT_DEC "\n",
6606 vms_unwind_regno,
6607 frame_size >= (1UL << 31) ? 0 : frame_size,
6608 reg_offset);
6609 else if (!flag_inhibit_size_directive)
6610 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
6611 (frame_pointer_needed
6612 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
6613 frame_size >= (1UL << 31) ? 0 : frame_size,
6614 current_function_pretend_args_size);
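/* [Editorial example -- not in the original source.]  A 96-byte OSF
   frame with no frame pointer and no pretend args would be described
   as ".frame $30,96,$26,0" -- $30 is sp and $26 holds the return
   address.  */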
6616 /* Describe which registers were spilled. */
6617 if (TARGET_ABI_UNICOSMK)
6619 else if (TARGET_ABI_OPEN_VMS)
6621 if (imask)
6622 /* ??? Does VMS care if mask contains ra? The old code didn't
6623 set it, so I don't here. */
6624 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
6625 if (fmask)
6626 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
6627 if (alpha_procedure_type == PT_REGISTER)
6628 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
6630 else if (!flag_inhibit_size_directive)
6632 if (imask)
6634 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
6635 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
6637 for (i = 0; i < 32; ++i)
6638 if (imask & (1UL << i))
6639 reg_offset += 8;
6642 if (fmask)
6643 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
6644 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
6647 #if TARGET_ABI_OPEN_VMS
6649 /* Ifdef'ed because link_section is only available then. */
6649 readonly_data_section ();
6650 fprintf (file, "\t.align 3\n");
6651 assemble_name (file, fnname); fputs ("..na:\n", file);
6652 fputs ("\t.ascii \"", file);
6653 assemble_name (file, fnname);
6654 fputs ("\\0\"\n", file);
6655 alpha_need_linkage (fnname, 1);
6656 text_section ();
6657 #endif
6660 /* Emit the .prologue note at the scheduled end of the prologue. */
6662 static void
6663 alpha_output_function_end_prologue (FILE *file)
6665 if (TARGET_ABI_UNICOSMK)
6667 else if (TARGET_ABI_OPEN_VMS)
6668 fputs ("\t.prologue\n", file);
6669 else if (TARGET_ABI_WINDOWS_NT)
6670 fputs ("\t.prologue 0\n", file);
6671 else if (!flag_inhibit_size_directive)
6672 fprintf (file, "\t.prologue %d\n",
6673 alpha_function_needs_gp || current_function_is_thunk);
6676 /* Write function epilogue. */
6678 /* ??? At some point we will want to support full unwind, and so will
6679 need to mark the epilogue as well. At the moment, we just confuse
6680 dwarf2out. */
6681 #undef FRP
6682 #define FRP(exp) exp
6684 void
6685 alpha_expand_epilogue (void)
6687 /* Registers to save. */
6688 unsigned long imask = 0;
6689 unsigned long fmask = 0;
6690 /* Stack space needed for pushing registers clobbered by us. */
6691 HOST_WIDE_INT sa_size;
6692 /* Complete stack size needed. */
6693 HOST_WIDE_INT frame_size;
6694 /* Offset from base reg to register save area. */
6695 HOST_WIDE_INT reg_offset;
6696 int fp_is_frame_pointer, fp_offset;
6697 rtx sa_reg, sa_reg_exp = NULL;
6698 rtx sp_adj1, sp_adj2, mem;
6699 rtx eh_ofs;
6700 int i;
6702 sa_size = alpha_sa_size ();
6704 frame_size = get_frame_size ();
6705 if (TARGET_ABI_OPEN_VMS)
6706 frame_size = ALPHA_ROUND (sa_size
6707 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6708 + frame_size
6709 + current_function_pretend_args_size);
6710 else if (TARGET_ABI_UNICOSMK)
6711 frame_size = ALPHA_ROUND (sa_size
6712 + (alpha_procedure_type == PT_STACK ? 48 : 0))
6713 + ALPHA_ROUND (frame_size
6714 + current_function_outgoing_args_size);
6715 else
6716 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
6717 + sa_size
6718 + ALPHA_ROUND (frame_size
6719 + current_function_pretend_args_size));
6721 if (TARGET_ABI_OPEN_VMS)
6723 if (alpha_procedure_type == PT_STACK)
6724 reg_offset = 8;
6725 else
6726 reg_offset = 0;
6728 else
6729 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
6731 alpha_sa_mask (&imask, &fmask);
6733 fp_is_frame_pointer
6734 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6735 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
6736 fp_offset = 0;
6737 sa_reg = stack_pointer_rtx;
6739 if (current_function_calls_eh_return)
6740 eh_ofs = EH_RETURN_STACKADJ_RTX;
6741 else
6742 eh_ofs = NULL_RTX;
6744 if (!TARGET_ABI_UNICOSMK && sa_size)
6746 /* If we have a frame pointer, restore SP from it. */
6747 if ((TARGET_ABI_OPEN_VMS
6748 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
6749 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
6750 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
6752 /* Cope with very large offsets to the register save area. */
6753 if (reg_offset + sa_size > 0x8000)
6755 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
6756 HOST_WIDE_INT bias;
6758 if (low + sa_size <= 0x8000)
6759 bias = reg_offset - low, reg_offset = low;
6760 else
6761 bias = reg_offset, reg_offset = 0;
6763 sa_reg = gen_rtx_REG (DImode, 22);
6764 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
6766 FRP (emit_move_insn (sa_reg, sa_reg_exp));
6769 /* Restore registers in order, excepting a true frame pointer. */
6771 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
6772 if (! eh_ofs)
6773 set_mem_alias_set (mem, alpha_sr_alias_set);
6774 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
6776 reg_offset += 8;
6777 imask &= ~(1UL << REG_RA);
6779 for (i = 0; i < 31; ++i)
6780 if (imask & (1UL << i))
6782 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
6783 fp_offset = reg_offset;
6784 else
6786 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
6787 set_mem_alias_set (mem, alpha_sr_alias_set);
6788 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
6790 reg_offset += 8;
6793 if (imask & (1UL << 31))
6794 reg_offset += 8;
6796 for (i = 0; i < 31; ++i)
6797 if (fmask & (1UL << i))
6799 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
6800 set_mem_alias_set (mem, alpha_sr_alias_set);
6801 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
6802 reg_offset += 8;
6805 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
6807 /* Restore callee-saved general-purpose registers. */
6809 reg_offset = -56;
6811 for (i = 9; i < 15; i++)
6812 if (imask & (1UL << i))
6814 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
6815 reg_offset));
6816 set_mem_alias_set (mem, alpha_sr_alias_set);
6817 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
6818 reg_offset -= 8;
6821 for (i = 2; i < 10; i++)
6822 if (fmask & (1UL << i))
6824 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
6825 reg_offset));
6826 set_mem_alias_set (mem, alpha_sr_alias_set);
6827 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
6828 reg_offset -= 8;
6831 /* Restore the return address from the DSIB. */
6833 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
6834 set_mem_alias_set (mem, alpha_sr_alias_set);
6835 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
6838 if (frame_size || eh_ofs)
6840 sp_adj1 = stack_pointer_rtx;
6842 if (eh_ofs)
6844 sp_adj1 = gen_rtx_REG (DImode, 23);
6845 emit_move_insn (sp_adj1,
6846 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
6849 /* If the stack size is large, begin computation into a temporary
6850 register so as not to interfere with a potential fp restore,
6851 which must be consecutive with an SP restore. */
6852 if (frame_size < 32768
6853 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
6854 sp_adj2 = GEN_INT (frame_size);
6855 else if (TARGET_ABI_UNICOSMK)
6857 sp_adj1 = gen_rtx_REG (DImode, 23);
6858 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
6859 sp_adj2 = const0_rtx;
6861 else if (frame_size < 0x40007fffL)
6863 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
6865 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
6866 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
6867 sp_adj1 = sa_reg;
6868 else
6870 sp_adj1 = gen_rtx_REG (DImode, 23);
6871 FRP (emit_move_insn (sp_adj1, sp_adj2));
6873 sp_adj2 = GEN_INT (low);
6875 else
6877 rtx tmp = gen_rtx_REG (DImode, 23);
6878 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
6879 if (!sp_adj2)
6881 /* We can't drop new things to memory this late, as far as we know,
6882 so build it up by pieces. */
6883 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
6884 -(frame_size < 0)));
6885 if (!sp_adj2)
6886 abort ();
6890 /* From now on, things must be in order. So emit blockages. */
6892 /* Restore the frame pointer. */
6893 if (TARGET_ABI_UNICOSMK)
6895 emit_insn (gen_blockage ());
6896 mem = gen_rtx_MEM (DImode,
6897 plus_constant (hard_frame_pointer_rtx, -16));
6898 set_mem_alias_set (mem, alpha_sr_alias_set);
6899 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
6901 else if (fp_is_frame_pointer)
6903 emit_insn (gen_blockage ());
6904 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
6905 set_mem_alias_set (mem, alpha_sr_alias_set);
6906 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
6908 else if (TARGET_ABI_OPEN_VMS)
6910 emit_insn (gen_blockage ());
6911 FRP (emit_move_insn (hard_frame_pointer_rtx,
6912 gen_rtx_REG (DImode, vms_save_fp_regno)));
6915 /* Restore the stack pointer. */
6916 emit_insn (gen_blockage ());
6917 if (sp_adj2 == const0_rtx)
6918 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
6919 else
6920 FRP (emit_move_insn (stack_pointer_rtx,
6921 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
6923 else
6925 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
6927 emit_insn (gen_blockage ());
6928 FRP (emit_move_insn (hard_frame_pointer_rtx,
6929 gen_rtx_REG (DImode, vms_save_fp_regno)));
6931 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
6933 /* Decrement the frame pointer if the function does not have a
6934 frame. */
6936 emit_insn (gen_blockage ());
6937 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
6938 hard_frame_pointer_rtx, constm1_rtx)));
6943 /* Output the rest of the textual info surrounding the epilogue. */
6945 void
6946 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
6948 #if TARGET_ABI_OPEN_VMS
6949 alpha_write_linkage (file, fnname, decl);
6950 #endif
6952 /* End the function. */
6953 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
6955 fputs ("\t.end ", file);
6956 assemble_name (file, fnname);
6957 putc ('\n', file);
6959 inside_function = FALSE;
6961 /* Output jump tables and the static subroutine information block. */
6962 if (TARGET_ABI_UNICOSMK)
6964 unicosmk_output_ssib (file, fnname);
6965 unicosmk_output_deferred_case_vectors (file);
6969 #if TARGET_ABI_OSF
6970 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
6972 In order to avoid the hordes of differences between generated code
6973 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
6974 lots of code loading up large constants, generate rtl and emit it
6975 instead of going straight to text.
6977 Not sure why this idea hasn't been explored before... */
6979 static void
6980 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6981 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6982 tree function)
6984 HOST_WIDE_INT hi, lo;
6985 rtx this, insn, funexp;
6987 reset_block_changes ();
6989 /* We always require a valid GP. */
6990 emit_insn (gen_prologue_ldgp ());
6991 emit_note (NOTE_INSN_PROLOGUE_END);
6993 /* Find the "this" pointer. If the function returns a structure,
6994 the structure return pointer is in $16. */
6995 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6996 this = gen_rtx_REG (Pmode, 17);
6997 else
6998 this = gen_rtx_REG (Pmode, 16);
7000 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
7001 entire constant for the add. */
7002 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
7003 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
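/* [Editorial worked example -- not in the original source.]  For
   delta = 0x12345678: lo = 0x5678, hi = 0x12340000, and hi + lo ==
   delta, so two adds suffice; a delta wider than 32 bits fails the
   test below and falls back to alpha_emit_set_long_const.  */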
7004 if (hi + lo == delta)
7006 if (hi)
7007 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
7008 if (lo)
7009 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
7011 else
7013 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
7014 delta, -(delta < 0));
7015 emit_insn (gen_adddi3 (this, this, tmp));
7018 /* Add a delta stored in the vtable at VCALL_OFFSET. */
7019 if (vcall_offset)
7021 rtx tmp, tmp2;
7023 tmp = gen_rtx_REG (Pmode, 0);
7024 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
7026 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
7027 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7028 if (hi + lo == vcall_offset)
7030 if (hi)
7031 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
7033 else
7035 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
7036 vcall_offset, -(vcall_offset < 0));
7037 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
7038 lo = 0;
7040 if (lo)
7041 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
7042 else
7043 tmp2 = tmp;
7044 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
7046 emit_insn (gen_adddi3 (this, this, tmp));
7049 /* Generate a tail call to the target function. */
7050 if (! TREE_USED (function))
7052 assemble_external (function);
7053 TREE_USED (function) = 1;
7055 funexp = XEXP (DECL_RTL (function), 0);
7056 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
7057 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
7058 SIBLING_CALL_P (insn) = 1;
7060 /* Run just enough of rest_of_compilation to get the insns emitted.
7061 There's not really enough bulk here to make other passes such as
7062 instruction scheduling worthwhile. Note that use_thunk calls
7063 assemble_start_function and assemble_end_function. */
7064 insn = get_insns ();
7065 insn_locators_initialize ();
7066 shorten_branches (insn);
7067 final_start_function (insn, file, 1);
7068 final (insn, file, 1, 0);
7069 final_end_function ();
7071 #endif /* TARGET_ABI_OSF */
7073 /* Debugging support. */
7075 #include "gstab.h"
7077 /* Count the number of sdb-related labels generated (to find block
7078 start and end boundaries). */
7080 int sdb_label_count = 0;
7082 /* Next label # for each statement. */
7084 static int sym_lineno = 0;
7086 /* Count the number of .file directives, so that .loc is up to date. */
7088 static int num_source_filenames = 0;
7090 /* Name of the file containing the current function. */
7092 static const char *current_function_file = "";
7094 /* Offsets to alpha virtual arg/local debugging pointers. */
7096 long alpha_arg_offset;
7097 long alpha_auto_offset;
7099 /* Emit a new filename to a stream. */
7101 void
7102 alpha_output_filename (FILE *stream, const char *name)
7104 static int first_time = TRUE;
7105 char ltext_label_name[100];
7107 if (first_time)
7109 first_time = FALSE;
7110 ++num_source_filenames;
7111 current_function_file = name;
7112 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7113 output_quoted_string (stream, name);
7114 fprintf (stream, "\n");
7115 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
7116 fprintf (stream, "\t#@stabs\n");
7119 else if (write_symbols == DBX_DEBUG)
7121 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
7122 fprintf (stream, "%s", ASM_STABS_OP);
7123 output_quoted_string (stream, name);
7124 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
7127 else if (name != current_function_file
7128 && strcmp (name, current_function_file) != 0)
7130 if (inside_function && ! TARGET_GAS)
7131 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
7132 else
7134 ++num_source_filenames;
7135 current_function_file = name;
7136 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7139 output_quoted_string (stream, name);
7140 fprintf (stream, "\n");
7144 /* Emit a line number to a stream. */
7146 void
7147 alpha_output_lineno (FILE *stream, int line)
7149 if (write_symbols == DBX_DEBUG)
7151 /* mips-tfile doesn't understand .stabd directives. */
7152 ++sym_lineno;
7153 fprintf (stream, "$LM%d:\n%s%d,0,%d,$LM%d\n",
7154 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
7156 else
7157 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
7160 /* Structure to show the current status of registers and memory. */
7162 struct shadow_summary
7164 struct {
7165 unsigned int i : 31; /* Mask of int regs */
7166 unsigned int fp : 31; /* Mask of fp regs */
7167 unsigned int mem : 1; /* mem == imem | fpmem */
7168 } used, defd;
7171 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
7172 to the summary structure. SET is nonzero if the insn is setting the
7173 object, otherwise zero. */
7175 static void
7176 summarize_insn (rtx x, struct shadow_summary *sum, int set)
7178 const char *format_ptr;
7179 int i, j;
7181 if (x == 0)
7182 return;
7184 switch (GET_CODE (x))
7186 /* ??? Note that this case would be incorrect if the Alpha had a
7187 ZERO_EXTRACT in SET_DEST. */
7188 case SET:
7189 summarize_insn (SET_SRC (x), sum, 0);
7190 summarize_insn (SET_DEST (x), sum, 1);
7191 break;
7193 case CLOBBER:
7194 summarize_insn (XEXP (x, 0), sum, 1);
7195 break;
7197 case USE:
7198 summarize_insn (XEXP (x, 0), sum, 0);
7199 break;
7201 case ASM_OPERANDS:
7202 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
7203 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
7204 break;
7206 case PARALLEL:
7207 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7208 summarize_insn (XVECEXP (x, 0, i), sum, 0);
7209 break;
7211 case SUBREG:
7212 summarize_insn (SUBREG_REG (x), sum, 0);
7213 break;
7215 case REG:
7217 int regno = REGNO (x);
7218 unsigned long mask = ((unsigned long) 1) << (regno % 32);
7220 if (regno == 31 || regno == 63)
7221 break;
7223 if (set)
7225 if (regno < 32)
7226 sum->defd.i |= mask;
7227 else
7228 sum->defd.fp |= mask;
7230 else
7232 if (regno < 32)
7233 sum->used.i |= mask;
7234 else
7235 sum->used.fp |= mask;
7238 break;
7240 case MEM:
7241 if (set)
7242 sum->defd.mem = 1;
7243 else
7244 sum->used.mem = 1;
7246 /* Find the regs used in memory address computation: */
7247 summarize_insn (XEXP (x, 0), sum, 0);
7248 break;
7250 case CONST_INT: case CONST_DOUBLE:
7251 case SYMBOL_REF: case LABEL_REF: case CONST:
7252 case SCRATCH: case ASM_INPUT:
7253 break;
7255 /* Handle common unary and binary ops for efficiency. */
7256 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
7257 case MOD: case UDIV: case UMOD: case AND: case IOR:
7258 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
7259 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
7260 case NE: case EQ: case GE: case GT: case LE:
7261 case LT: case GEU: case GTU: case LEU: case LTU:
7262 summarize_insn (XEXP (x, 0), sum, 0);
7263 summarize_insn (XEXP (x, 1), sum, 0);
7264 break;
7266 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
7267 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
7268 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
7269 case SQRT: case FFS:
7270 summarize_insn (XEXP (x, 0), sum, 0);
7271 break;
7273 default:
7274 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
7275 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7276 switch (format_ptr[i])
7278 case 'e':
7279 summarize_insn (XEXP (x, i), sum, 0);
7280 break;
7282 case 'E':
7283 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7284 summarize_insn (XVECEXP (x, i, j), sum, 0);
7285 break;
7287 case 'i':
7288 break;
7290 default:
7291 abort ();
7296 /* Ensure a sufficient number of `trapb' insns are in the code when
7297 the user requests code with a trap precision of functions or
7298 instructions.
7300 In naive mode, when the user requests a trap-precision of
7301 "instruction", a trapb is needed after every instruction that may
7302 generate a trap. This ensures that the code is resumption safe but
7303 it is also slow.
7305 When optimizations are turned on, we delay issuing a trapb as long
7306 as possible. In this context, a trap shadow is the sequence of
7307 instructions that starts with a (potentially) trap generating
7308 instruction and extends to the next trapb or call_pal instruction
7309 (but GCC never generates call_pal by itself). We can delay (and
7310 therefore sometimes omit) a trapb subject to the following
7311 conditions:
7313 (a) On entry to the trap shadow, if any Alpha register or memory
7314 location contains a value that is used as an operand value by some
7315 instruction in the trap shadow (live on entry), then no instruction
7316 in the trap shadow may modify the register or memory location.
7318 (b) Within the trap shadow, the computation of the base register
7319 for a memory load or store instruction may not involve using the
7320 result of an instruction that might generate an UNPREDICTABLE
7321 result.
7323 (c) Within the trap shadow, no register may be used more than once
7324 as a destination register. (This is to make life easier for the
7325 trap-handler.)
7327 (d) The trap shadow may not include any branch instructions. */
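/* [Editorial illustration -- not in the original source.]  E.g. if a
   shadow opens with a trapping "addt $f1,$f2,$f3", a later insn in the
   shadow that overwrites $f1 or $f2 violates (a), and a second insn
   targeting $f3 violates (c); either forces the trapb to be emitted at
   that point.  */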
7329 static void
7330 alpha_handle_trap_shadows (void)
7332 struct shadow_summary shadow;
7333 int trap_pending, exception_nesting;
7334 rtx i, n;
7336 trap_pending = 0;
7337 exception_nesting = 0;
7338 shadow.used.i = 0;
7339 shadow.used.fp = 0;
7340 shadow.used.mem = 0;
7341 shadow.defd = shadow.used;
7343 for (i = get_insns (); i ; i = NEXT_INSN (i))
7345 if (GET_CODE (i) == NOTE)
7347 switch (NOTE_LINE_NUMBER (i))
7349 case NOTE_INSN_EH_REGION_BEG:
7350 exception_nesting++;
7351 if (trap_pending)
7352 goto close_shadow;
7353 break;
7355 case NOTE_INSN_EH_REGION_END:
7356 exception_nesting--;
7357 if (trap_pending)
7358 goto close_shadow;
7359 break;
7361 case NOTE_INSN_EPILOGUE_BEG:
7362 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
7363 goto close_shadow;
7364 break;
7367 else if (trap_pending)
7369 if (alpha_tp == ALPHA_TP_FUNC)
7371 if (GET_CODE (i) == JUMP_INSN
7372 && GET_CODE (PATTERN (i)) == RETURN)
7373 goto close_shadow;
7375 else if (alpha_tp == ALPHA_TP_INSN)
7377 if (optimize > 0)
7379 struct shadow_summary sum;
7381 sum.used.i = 0;
7382 sum.used.fp = 0;
7383 sum.used.mem = 0;
7384 sum.defd = sum.used;
7386 switch (GET_CODE (i))
7388 case INSN:
7389 /* Annoyingly, get_attr_trap will abort on these. */
7390 if (GET_CODE (PATTERN (i)) == USE
7391 || GET_CODE (PATTERN (i)) == CLOBBER)
7392 break;
7394 summarize_insn (PATTERN (i), &sum, 0);
7396 if ((sum.defd.i & shadow.defd.i)
7397 || (sum.defd.fp & shadow.defd.fp))
7399 /* (c) would be violated */
7400 goto close_shadow;
7403 /* Combine shadow with summary of current insn: */
7404 shadow.used.i |= sum.used.i;
7405 shadow.used.fp |= sum.used.fp;
7406 shadow.used.mem |= sum.used.mem;
7407 shadow.defd.i |= sum.defd.i;
7408 shadow.defd.fp |= sum.defd.fp;
7409 shadow.defd.mem |= sum.defd.mem;
7411 if ((sum.defd.i & shadow.used.i)
7412 || (sum.defd.fp & shadow.used.fp)
7413 || (sum.defd.mem & shadow.used.mem))
7415 /* (a) would be violated (also takes care of (b)) */
7416 if (get_attr_trap (i) == TRAP_YES
7417 && ((sum.defd.i & sum.used.i)
7418 || (sum.defd.fp & sum.used.fp)))
7419 abort ();
7421 goto close_shadow;
7423 break;
7425 case JUMP_INSN:
7426 case CALL_INSN:
7427 case CODE_LABEL:
7428 goto close_shadow;
7430 default:
7431 abort ();
7434 else
7436 close_shadow:
7437 n = emit_insn_before (gen_trapb (), i);
7438 PUT_MODE (n, TImode);
7439 PUT_MODE (i, TImode);
7440 trap_pending = 0;
7441 shadow.used.i = 0;
7442 shadow.used.fp = 0;
7443 shadow.used.mem = 0;
7444 shadow.defd = shadow.used;
7449 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
7450 && GET_CODE (i) == INSN
7451 && GET_CODE (PATTERN (i)) != USE
7452 && GET_CODE (PATTERN (i)) != CLOBBER
7453 && get_attr_trap (i) == TRAP_YES)
7455 if (optimize && !trap_pending)
7456 summarize_insn (PATTERN (i), &shadow, 0);
7457 trap_pending = 1;
7462 /* Alpha can only issue instruction groups simultaneously if they are
7463 suitably aligned. This is very processor-specific. */
7465 enum alphaev4_pipe {
7466 EV4_STOP = 0,
7467 EV4_IB0 = 1,
7468 EV4_IB1 = 2,
7469 EV4_IBX = 4
7472 enum alphaev5_pipe {
7473 EV5_STOP = 0,
7474 EV5_NONE = 1,
7475 EV5_E01 = 2,
7476 EV5_E0 = 4,
7477 EV5_E1 = 8,
7478 EV5_FAM = 16,
7479 EV5_FA = 32,
7480 EV5_FM = 64
7483 static enum alphaev4_pipe
7484 alphaev4_insn_pipe (rtx insn)
7486 if (recog_memoized (insn) < 0)
7487 return EV4_STOP;
7488 if (get_attr_length (insn) != 4)
7489 return EV4_STOP;
7491 switch (get_attr_type (insn))
7493 case TYPE_ILD:
7494 case TYPE_FLD:
7495 return EV4_IBX;
7497 case TYPE_LDSYM:
7498 case TYPE_IADD:
7499 case TYPE_ILOG:
7500 case TYPE_ICMOV:
7501 case TYPE_ICMP:
7502 case TYPE_IST:
7503 case TYPE_FST:
7504 case TYPE_SHIFT:
7505 case TYPE_IMUL:
7506 case TYPE_FBR:
7507 return EV4_IB0;
7509 case TYPE_MISC:
7510 case TYPE_IBR:
7511 case TYPE_JSR:
7512 case TYPE_CALLPAL:
7513 case TYPE_FCPYS:
7514 case TYPE_FCMOV:
7515 case TYPE_FADD:
7516 case TYPE_FDIV:
7517 case TYPE_FMUL:
7518 return EV4_IB1;
7520 default:
7521 abort ();
7525 static enum alphaev5_pipe
7526 alphaev5_insn_pipe (rtx insn)
7528 if (recog_memoized (insn) < 0)
7529 return EV5_STOP;
7530 if (get_attr_length (insn) != 4)
7531 return EV5_STOP;
7533 switch (get_attr_type (insn))
7535 case TYPE_ILD:
7536 case TYPE_FLD:
7537 case TYPE_LDSYM:
7538 case TYPE_IADD:
7539 case TYPE_ILOG:
7540 case TYPE_ICMOV:
7541 case TYPE_ICMP:
7542 return EV5_E01;
7544 case TYPE_IST:
7545 case TYPE_FST:
7546 case TYPE_SHIFT:
7547 case TYPE_IMUL:
7548 case TYPE_MISC:
7549 case TYPE_MVI:
7550 return EV5_E0;
7552 case TYPE_IBR:
7553 case TYPE_JSR:
7554 case TYPE_CALLPAL:
7555 return EV5_E1;
7557 case TYPE_FCPYS:
7558 return EV5_FAM;
7560 case TYPE_FBR:
7561 case TYPE_FCMOV:
7562 case TYPE_FADD:
7563 case TYPE_FDIV:
7564 return EV5_FA;
7566 case TYPE_FMUL:
7567 return EV5_FM;
7569 default:
7570 abort();
7574 /* IN_USE is a mask of the slots currently filled within the insn group.
7575 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
7576 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
7578 LEN is, of course, the length of the group in bytes. */
7580 static rtx
7581 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
7583 int len, in_use;
7585 len = in_use = 0;
7587 if (! INSN_P (insn)
7588 || GET_CODE (PATTERN (insn)) == CLOBBER
7589 || GET_CODE (PATTERN (insn)) == USE)
7590 goto next_and_done;
7592 while (1)
7594 enum alphaev4_pipe pipe;
7596 pipe = alphaev4_insn_pipe (insn);
7597 switch (pipe)
7599 case EV4_STOP:
7600 /* Force complex instructions to start new groups. */
7601 if (in_use)
7602 goto done;
7604 /* If this is a completely unrecognized insn, it's an asm.
7605 We don't know how long it is, so record length as -1 to
7606 signal a needed realignment. */
7607 if (recog_memoized (insn) < 0)
7608 len = -1;
7609 else
7610 len = get_attr_length (insn);
7611 goto next_and_done;
7613 case EV4_IBX:
7614 if (in_use & EV4_IB0)
7616 if (in_use & EV4_IB1)
7617 goto done;
7618 in_use |= EV4_IB1;
7620 else
7621 in_use |= EV4_IB0 | EV4_IBX;
7622 break;
7624 case EV4_IB0:
7625 if (in_use & EV4_IB0)
7627 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
7628 goto done;
7629 in_use |= EV4_IB1;
7631 in_use |= EV4_IB0;
7632 break;
7634 case EV4_IB1:
7635 if (in_use & EV4_IB1)
7636 goto done;
7637 in_use |= EV4_IB1;
7638 break;
7640 default:
7641 abort();
7643 len += 4;
7645 /* Haifa doesn't do well scheduling branches. */
7646 if (GET_CODE (insn) == JUMP_INSN)
7647 goto next_and_done;
7649 next:
7650 insn = next_nonnote_insn (insn);
7652 if (!insn || ! INSN_P (insn))
7653 goto done;
7655 /* Let Haifa tell us where it thinks insn group boundaries are. */
7656 if (GET_MODE (insn) == TImode)
7657 goto done;
7659 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
7660 goto next;
7663 next_and_done:
7664 insn = next_nonnote_insn (insn);
7666 done:
7667 *plen = len;
7668 *pin_use = in_use;
7669 return insn;
7672 /* IN_USE is a mask of the slots currently filled within the insn group.
7673 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
7674 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
7676 LEN is, of course, the length of the group in bytes. */
7678 static rtx
7679 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
7681 int len, in_use;
7683 len = in_use = 0;
7685 if (! INSN_P (insn)
7686 || GET_CODE (PATTERN (insn)) == CLOBBER
7687 || GET_CODE (PATTERN (insn)) == USE)
7688 goto next_and_done;
7690 while (1)
7692 enum alphaev5_pipe pipe;
7694 pipe = alphaev5_insn_pipe (insn);
7695 switch (pipe)
7697 case EV5_STOP:
7698 /* Force complex instructions to start new groups. */
7699 if (in_use)
7700 goto done;
7702 /* If this is a completely unrecognized insn, it's an asm.
7703 We don't know how long it is, so record length as -1 to
7704 signal a needed realignment. */
7705 if (recog_memoized (insn) < 0)
7706 len = -1;
7707 else
7708 len = get_attr_length (insn);
7709 goto next_and_done;
7711 /* ??? Most of the places below, we would like to abort, as
7712 it would indicate an error either in Haifa or in the
7713 scheduling description. Unfortunately, Haifa never
7714 schedules the last instruction of the BB, so we don't
7715 have an accurate TI bit to go off. */
7716 case EV5_E01:
7717 if (in_use & EV5_E0)
7719 if (in_use & EV5_E1)
7720 goto done;
7721 in_use |= EV5_E1;
7723 else
7724 in_use |= EV5_E0 | EV5_E01;
7725 break;
7727 case EV5_E0:
7728 if (in_use & EV5_E0)
7730 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
7731 goto done;
7732 in_use |= EV5_E1;
7734 in_use |= EV5_E0;
7735 break;
7737 case EV5_E1:
7738 if (in_use & EV5_E1)
7739 goto done;
7740 in_use |= EV5_E1;
7741 break;
7743 case EV5_FAM:
7744 if (in_use & EV5_FA)
7746 if (in_use & EV5_FM)
7747 goto done;
7748 in_use |= EV5_FM;
7750 else
7751 in_use |= EV5_FA | EV5_FAM;
7752 break;
7754 case EV5_FA:
7755 if (in_use & EV5_FA)
7756 goto done;
7757 in_use |= EV5_FA;
7758 break;
7760 case EV5_FM:
7761 if (in_use & EV5_FM)
7762 goto done;
7763 in_use |= EV5_FM;
7764 break;
7766 case EV5_NONE:
7767 break;
7769 default:
7770 abort();
7772 len += 4;
7774 /* Haifa doesn't do well scheduling branches. */
7775 /* ??? If this is predicted not-taken, slotting continues, except
7776 that no more IBR, FBR, or JSR insns may be slotted. */
7777 if (GET_CODE (insn) == JUMP_INSN)
7778 goto next_and_done;
7780 next:
7781 insn = next_nonnote_insn (insn);
7783 if (!insn || ! INSN_P (insn))
7784 goto done;
7786 /* Let Haifa tell us where it thinks insn group boundaries are. */
7787 if (GET_MODE (insn) == TImode)
7788 goto done;
7790 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
7791 goto next;
7794 next_and_done:
7795 insn = next_nonnote_insn (insn);
7797 done:
7798 *plen = len;
7799 *pin_use = in_use;
7800 return insn;
7803 static rtx
7804 alphaev4_next_nop (int *pin_use)
7806 int in_use = *pin_use;
7807 rtx nop;
7809 if (!(in_use & EV4_IB0))
7811 in_use |= EV4_IB0;
7812 nop = gen_nop ();
7814 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
7816 in_use |= EV4_IB1;
7817 nop = gen_nop ();
7819 else if (TARGET_FP && !(in_use & EV4_IB1))
7821 in_use |= EV4_IB1;
7822 nop = gen_fnop ();
7824 else
7825 nop = gen_unop ();
7827 *pin_use = in_use;
7828 return nop;
7831 static rtx
7832 alphaev5_next_nop (int *pin_use)
7834 int in_use = *pin_use;
7835 rtx nop;
7837 if (!(in_use & EV5_E1))
7839 in_use |= EV5_E1;
7840 nop = gen_nop ();
7842 else if (TARGET_FP && !(in_use & EV5_FA))
7844 in_use |= EV5_FA;
7845 nop = gen_fnop ();
7847 else if (TARGET_FP && !(in_use & EV5_FM))
7849 in_use |= EV5_FM;
7850 nop = gen_fnop ();
7852 else
7853 nop = gen_unop ();
7855 *pin_use = in_use;
7856 return nop;
7859 /* The instruction group alignment main loop. */
7861 static void
7862 alpha_align_insns (unsigned int max_align,
7863 rtx (*next_group) (rtx, int *, int *),
7864 rtx (*next_nop) (int *))
7866 /* ALIGN is the known alignment for the insn group. */
7867 unsigned int align;
7868 /* OFS is the offset of the current insn in the insn group. */
7869 int ofs;
7870 int prev_in_use, in_use, len;
7871 rtx i, next;
7873 /* Let shorten_branches take care of assigning alignments to code labels. */
7874 shorten_branches (get_insns ());
7876 if (align_functions < 4)
7877 align = 4;
7878 else if ((unsigned int) align_functions < max_align)
7879 align = align_functions;
7880 else
7881 align = max_align;
7883 ofs = prev_in_use = 0;
7884 i = get_insns ();
7885 if (GET_CODE (i) == NOTE)
7886 i = next_nonnote_insn (i);
7888 while (i)
7890 next = (*next_group) (i, &in_use, &len);
7892 /* When we see a label, resync alignment etc. */
7893 if (GET_CODE (i) == CODE_LABEL)
7895 unsigned int new_align = 1 << label_to_alignment (i);
7897 if (new_align >= align)
7899 align = new_align < max_align ? new_align : max_align;
7900 ofs = 0;
7903 else if (ofs & (new_align-1))
7904 ofs = (ofs | (new_align-1)) + 1;
7905 if (len != 0)
7906 abort();
7909 /* Handle complex instructions specially. */
7910 else if (in_use == 0)
7912 /* Asms will have length < 0. This is a signal that we have
7913 lost alignment knowledge. Assume, however, that the asm
7914 will not mis-align instructions. */
7915 if (len < 0)
7917 ofs = 0;
7918 align = 4;
7919 len = 0;
7923 /* If the known alignment is smaller than the recognized insn group,
7924 realign the output. */
7925 else if ((int) align < len)
7927 unsigned int new_log_align = len > 8 ? 4 : 3;
7928 rtx prev, where;
7930 where = prev = prev_nonnote_insn (i);
7931 if (!where || GET_CODE (where) != CODE_LABEL)
7932 where = i;
7934 /* Can't realign between a call and its gp reload. */
7935 if (! (TARGET_EXPLICIT_RELOCS
7936 && prev && GET_CODE (prev) == CALL_INSN))
7938 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
7939 align = 1 << new_log_align;
7940 ofs = 0;
7944 /* If the group won't fit in the same INT16 as the previous,
7945 we need to add padding to keep the group together. Rather
7946 than simply leaving the insn filling to the assembler, we
7947 can make use of the knowledge of what sorts of instructions
7948 were issued in the previous group to make sure that all of
7949 the added nops are really free. */
7950 else if (ofs + len > (int) align)
7952 int nop_count = (align - ofs) / 4;
7953 rtx where;
7955 /* Insert nops before labels, branches, and calls to truly merge
7956 the execution of the nops with the previous instruction group. */
7957 where = prev_nonnote_insn (i);
7958 if (where)
7960 if (GET_CODE (where) == CODE_LABEL)
7962 rtx where2 = prev_nonnote_insn (where);
7963 if (where2 && GET_CODE (where2) == JUMP_INSN)
7964 where = where2;
7966 else if (GET_CODE (where) == INSN)
7967 where = i;
7969 else
7970 where = i;
7973 emit_insn_before ((*next_nop)(&prev_in_use), where);
7974 while (--nop_count);
7975 ofs = 0;
7978 ofs = (ofs + len) & (align - 1);
7979 prev_in_use = in_use;
7980 i = next;
7984 /* Machine dependent reorg pass. */
7986 static void
7987 alpha_reorg (void)
7989 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
7990 alpha_handle_trap_shadows ();
7992 /* Due to the number of extra trapb insns, don't bother fixing up
7993 alignment when trap precision is instruction. Moreover, we can
7994 only do our job when sched2 is run. */
7995 if (optimize && !optimize_size
7996 && alpha_tp != ALPHA_TP_INSN
7997 && flag_schedule_insns_after_reload)
7999 if (alpha_cpu == PROCESSOR_EV4)
8000 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
8001 else if (alpha_cpu == PROCESSOR_EV5)
8002 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
8006 #if !TARGET_ABI_UNICOSMK
8008 #ifdef HAVE_STAMP_H
8009 #include <stamp.h>
8010 #endif
8012 static void
8013 alpha_file_start (void)
8015 #ifdef OBJECT_FORMAT_ELF
8016 /* If emitting dwarf2 debug information, we cannot generate a .file
8017 directive to start the file, as it will conflict with dwarf2out
8018 file numbers. So it's only useful when emitting mdebug output. */
8019 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
8020 #endif
8022 default_file_start ();
8023 #ifdef MS_STAMP
8024 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
8025 #endif
8027 fputs ("\t.set noreorder\n", asm_out_file);
8028 fputs ("\t.set volatile\n", asm_out_file);
8029 if (!TARGET_ABI_OPEN_VMS)
8030 fputs ("\t.set noat\n", asm_out_file);
8031 if (TARGET_EXPLICIT_RELOCS)
8032 fputs ("\t.set nomacro\n", asm_out_file);
8033 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
8034 fprintf (asm_out_file,
8035 "\t.arch %s\n",
8036 TARGET_CPU_EV6 ? "ev6"
8037 : (TARGET_CPU_EV5
8038 ? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5")
8039 : "ev4"));
8041 #endif
8043 #ifdef OBJECT_FORMAT_ELF
8045 /* Switch to the section to which we should output X. The only thing
8046 special we do here is to honor small data. */
8048 static void
8049 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
8050 unsigned HOST_WIDE_INT align)
8052 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
8053 /* ??? Consider using mergeable sdata sections. */
8054 sdata_section ();
8055 else
8056 default_elf_select_rtx_section (mode, x, align);
8059 #endif /* OBJECT_FORMAT_ELF */
8061 /* Structure to collect function names for final output in link section. */
8062 /* Note that items marked with GTY can't be ifdef'ed out. */
8064 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
8065 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
8067 struct alpha_links GTY(())
8069 int num;
8070 rtx linkage;
8071 enum links_kind lkind;
8072 enum reloc_kind rkind;
8075 struct alpha_funcs GTY(())
8077 int num;
8078 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8079 links;
8082 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8083 splay_tree alpha_links_tree;
8084 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
8085 splay_tree alpha_funcs_tree;
8087 static GTY(()) int alpha_funcs_num;
8089 #if TARGET_ABI_OPEN_VMS
8091 /* Return the VMS argument type corresponding to MODE. */
8093 enum avms_arg_type
8094 alpha_arg_type (enum machine_mode mode)
8096 switch (mode)
8098 case SFmode:
8099 return TARGET_FLOAT_VAX ? FF : FS;
8100 case DFmode:
8101 return TARGET_FLOAT_VAX ? FD : FT;
8102 default:
8103 return I64;
8107 /* Return an rtx for an integer representing the VMS Argument Information
8108 register value. */
8110 rtx
8111 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
8113 unsigned HOST_WIDE_INT regval = cum.num_args;
8114 int i;
8116 for (i = 0; i < 6; i++)
8117 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
8119 return GEN_INT (regval);
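/* [Editorial worked example -- not in the original source.]  Two
   arguments of types FF and I64 yield regval = 2 | (FF << 8) |
   (I64 << 11): the argument count sits in the low bits, with one 3-bit
   type code per register slot starting at bit 8.  */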
8122 /* Make (or fake) .linkage entry for function call.
8124 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
8126 Return a SYMBOL_REF rtx for the linkage. */
8128 rtx
8129 alpha_need_linkage (const char *name, int is_local)
8131 splay_tree_node node;
8132 struct alpha_links *al;
8134 if (name[0] == '*')
8135 name++;
8137 if (is_local)
8139 struct alpha_funcs *cfaf;
8141 if (!alpha_funcs_tree)
8142 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
8143 splay_tree_compare_pointers);
8145 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
8147 cfaf->links = 0;
8148 cfaf->num = ++alpha_funcs_num;
8150 splay_tree_insert (alpha_funcs_tree,
8151 (splay_tree_key) current_function_decl,
8152 (splay_tree_value) cfaf);
8155 if (alpha_links_tree)
8157 /* Is this name already defined? */
8159 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
8160 if (node)
8162 al = (struct alpha_links *) node->value;
8163 if (is_local)
8165 /* Defined here but previously assumed external. */
8166 if (al->lkind == KIND_EXTERN)
8167 al->lkind = KIND_LOCAL;
8169 else
8171 /* Used here but previously assumed unused. */
8172 if (al->lkind == KIND_UNUSED)
8173 al->lkind = KIND_LOCAL;
8175 return al->linkage;
8178 else
8179 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8181 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
8182 name = ggc_strdup (name);
8184 /* Assume external if no definition. */
8185 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
8187 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
8188 get_identifier (name);
8190 /* Construct a SYMBOL_REF for us to call. */
8192 size_t name_len = strlen (name);
8193 char *linksym = alloca (name_len + 6);
8194 linksym[0] = '$';
8195 memcpy (linksym + 1, name, name_len);
8196 memcpy (linksym + 1 + name_len, "..lk", 5);
8197 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
8198 ggc_alloc_string (linksym, name_len + 5));
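/* [Editorial example -- not in the original source.]  For name "foo"
   the buffer holds "$foo..lk" plus the terminating NUL -- hence the
   name_len + 6 allocation -- and the SYMBOL_REF string is the
   8-character "$foo..lk".  */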
8201 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
8202 (splay_tree_value) al);
8204 return al->linkage;
8207 rtx
8208 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
8210 splay_tree_node cfunnode;
8211 struct alpha_funcs *cfaf;
8212 struct alpha_links *al;
8213 const char *name = XSTR (linkage, 0);
8215 cfaf = (struct alpha_funcs *) 0;
8216 al = (struct alpha_links *) 0;
8218 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
8219 cfaf = (struct alpha_funcs *) cfunnode->value;
8221 if (cfaf->links)
8223 splay_tree_node lnode;
8225 /* Is this name already defined? */
8227 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
8228 if (lnode)
8229 al = (struct alpha_links *) lnode->value;
8231 else
8232 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8234 if (!al)
8236 size_t name_len;
8237 size_t buflen;
8238 char buf [512];
8239 char *linksym;
8240 splay_tree_node node = 0;
8241 struct alpha_links *anl;
8243 if (name[0] == '*')
8244 name++;
8246 name_len = strlen (name);
8248 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
8249 al->num = cfaf->num;
8251 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
8252 if (node)
8254 anl = (struct alpha_links *) node->value;
8255 al->lkind = anl->lkind;
8258 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
8259 buflen = strlen (buf);
8260 linksym = alloca (buflen + 1);
8261 memcpy (linksym, buf, buflen + 1);
8263 al->linkage = gen_rtx_SYMBOL_REF
8264 (Pmode, ggc_alloc_string (linksym, buflen + 1));
8266 splay_tree_insert (cfaf->links, (splay_tree_key) name,
8267 (splay_tree_value) al);
8270 if (rflag)
8271 al->rkind = KIND_CODEADDR;
8272 else
8273 al->rkind = KIND_LINKAGE;
8275 if (lflag)
8276 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
8277 else
8278 return al->linkage;
8281 static int
8282 alpha_write_one_linkage (splay_tree_node node, void *data)
8284 const char *const name = (const char *) node->key;
8285 struct alpha_links *link = (struct alpha_links *) node->value;
8286 FILE *stream = (FILE *) data;
8288 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
8289 if (link->rkind == KIND_CODEADDR)
8291 if (link->lkind == KIND_LOCAL)
8293 /* Local and used */
8294 fprintf (stream, "\t.quad %s..en\n", name);
8296 else
8298 /* External and used, request code address. */
8299 fprintf (stream, "\t.code_address %s\n", name);
8302 else
8304 if (link->lkind == KIND_LOCAL)
8306 /* Local and used, build linkage pair. */
8307 fprintf (stream, "\t.quad %s..en\n", name);
8308 fprintf (stream, "\t.quad %s\n", name);
8310 else
8312 /* External and used, request linkage pair. */
8313 fprintf (stream, "\t.linkage %s\n", name);
8317 return 0;
8320 static void
8321 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
8323 splay_tree_node node;
8324 struct alpha_funcs *func;
8326 link_section ();
8327 fprintf (stream, "\t.align 3\n");
8328 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
8329 func = (struct alpha_funcs *) node->value;
8331 fputs ("\t.name ", stream);
8332 assemble_name (stream, funname);
8333 fputs ("..na\n", stream);
8334 ASM_OUTPUT_LABEL (stream, funname);
8335 fprintf (stream, "\t.pdesc ");
8336 assemble_name (stream, funname);
8337 fprintf (stream, "..en,%s\n",
8338 alpha_procedure_type == PT_STACK ? "stack"
8339 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
8341 if (func->links)
8343 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
8344 /* splay_tree_delete (func->links); */
8348 /* Given a decl, a section name, and whether the decl initializer
8349 has relocs, choose attributes for the section. */
8351 #define SECTION_VMS_OVERLAY SECTION_FORGET
8352 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
8353 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
8355 static unsigned int
8356 vms_section_type_flags (tree decl, const char *name, int reloc)
8358 unsigned int flags = default_section_type_flags (decl, name, reloc);
8360 if (decl && DECL_ATTRIBUTES (decl)
8361 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
8362 flags |= SECTION_VMS_OVERLAY;
8363 if (decl && DECL_ATTRIBUTES (decl)
8364 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
8365 flags |= SECTION_VMS_GLOBAL;
8366 if (decl && DECL_ATTRIBUTES (decl)
8367 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
8368 flags |= SECTION_VMS_INITIALIZE;
8370 return flags;
8373 /* Switch to an arbitrary section NAME with attributes as specified
8374 by FLAGS. */
8377 static void
8378 vms_asm_named_section (const char *name, unsigned int flags)
8380 fputc ('\n', asm_out_file);
8381 fprintf (asm_out_file, ".section\t%s", name);
8383 if (flags & SECTION_VMS_OVERLAY)
8384 fprintf (asm_out_file, ",OVR");
8385 if (flags & SECTION_VMS_GLOBAL)
8386 fprintf (asm_out_file, ",GBL");
8387 if (flags & SECTION_VMS_INITIALIZE)
8388 fprintf (asm_out_file, ",NOMOD");
8389 if (flags & SECTION_DEBUG)
8390 fprintf (asm_out_file, ",NOWRT");
8392 fputc ('\n', asm_out_file);
8395 /* Record an element in the table of global constructors. SYMBOL is
8396 a SYMBOL_REF of the function to be called; PRIORITY is a number
8397 between 0 and MAX_INIT_PRIORITY.
8399 Differs from default_ctors_section_asm_out_constructor in that the
8400 width of the .ctors entry is always 64 bits, rather than the 32 bits
8401 used by a normal pointer. */
8403 static void
8404 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
8406 ctors_section ();
8407 assemble_align (BITS_PER_WORD);
8408 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
8411 static void
8412 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
8414 dtors_section ();
8415 assemble_align (BITS_PER_WORD);
8416 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
8418 #else
8420 rtx
8421 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
8422 int is_local ATTRIBUTE_UNUSED)
8424 return NULL_RTX;
8427 rtx
8428 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
8429 tree cfundecl ATTRIBUTE_UNUSED,
8430 int lflag ATTRIBUTE_UNUSED,
8431 int rflag ATTRIBUTE_UNUSED)
8433 return NULL_RTX;
8436 #endif /* TARGET_ABI_OPEN_VMS */
8438 #if TARGET_ABI_UNICOSMK
8440 /* This evaluates to true if we do not know how to pass TYPE solely in
8441 registers. This is the case for all arguments that do not fit in two
8442 registers. */
8444 static bool
8445 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
8447 if (type == NULL)
8448 return false;
8450 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8451 return true;
8452 if (TREE_ADDRESSABLE (type))
8453 return true;
8455 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
8458 /* Define the offset between two registers, one to be eliminated, and the
8459 other its replacement, at the start of a routine. */
8461 int
8462 unicosmk_initial_elimination_offset (int from, int to)
8464 int fixed_size;
8466 fixed_size = alpha_sa_size();
8467 if (fixed_size != 0)
8468 fixed_size += 48;
8470 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8471 return -fixed_size;
8472 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8473 return 0;
8474 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8475 return (ALPHA_ROUND (current_function_outgoing_args_size)
8476 + ALPHA_ROUND (get_frame_size()));
8477 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8478 return (ALPHA_ROUND (fixed_size)
8479 + ALPHA_ROUND (get_frame_size()
8480 + current_function_outgoing_args_size));
8481 else
8482 abort ();
8485 /* Output the module name for .ident and .end directives. We have to strip
8486 directories and make sure that the module name starts with a letter
8487 or '$'. */
8489 static void
8490 unicosmk_output_module_name (FILE *file)
8492 const char *name = lbasename (main_input_filename);
8493 unsigned len = strlen (name);
8494 char *clean_name = alloca (len + 2);
8495 char *ptr = clean_name;
8497 /* CAM only accepts module names that start with a letter or '$'. We
8498 prefix the module name with a '$' if necessary. */
8500 if (!ISALPHA (*name))
8501 *ptr++ = '$';
8502 memcpy (ptr, name, len + 1);
8503 clean_symbol_name (clean_name);
8504 fputs (clean_name, file);
8507 /* Output the definition of a common variable. */
8509 void
8510 unicosmk_output_common (FILE *file, const char *name, int size, int align)
8512 tree name_tree;
8513 printf ("T3E__: common %s\n", name);
8515 common_section ();
8516 fputs ("\t.endp\n\n\t.psect ", file);
8517 assemble_name (file, name);
8518 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
8519 fprintf (file, "\t.byte\t0:%d\n", size);
8521 /* Mark the symbol as defined in this module. */
8522 name_tree = get_identifier (name);
8523 TREE_ASM_WRITTEN (name_tree) = 1;
8526 #define SECTION_PUBLIC SECTION_MACH_DEP
8527 #define SECTION_MAIN (SECTION_PUBLIC << 1)
8528 static int current_section_align;
8530 static unsigned int
8531 unicosmk_section_type_flags (tree decl, const char *name,
8532 int reloc)
8534 unsigned int flags = default_section_type_flags (decl, name, reloc);
8536 if (!decl)
8537 return flags;
8539 if (TREE_CODE (decl) == FUNCTION_DECL)
8541 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8542 if (align_functions_log > current_section_align)
8543 current_section_align = align_functions_log;
8545 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
8546 flags |= SECTION_MAIN;
8548 else
8549 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
8551 if (TREE_PUBLIC (decl))
8552 flags |= SECTION_PUBLIC;
8554 return flags;
8557 /* Generate a section name for DECL and associate it with the
8558 declaration. */
8560 static void
8561 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
8563 const char *name;
8564 int len;
8566 if (!decl)
8567 abort ();
8569 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
8570 name = default_strip_name_encoding (name);
8571 len = strlen (name);
8573 if (TREE_CODE (decl) == FUNCTION_DECL)
8575 char *string;
8577 /* It is essential that we prefix the section name here because
8578 otherwise the section names generated for constructors and
8579 destructors confuse collect2. */
8581 string = alloca (len + 6);
8582 sprintf (string, "code@%s", name);
8583 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
8585 else if (TREE_PUBLIC (decl))
8586 DECL_SECTION_NAME (decl) = build_string (len, name);
8587 else
8589 char *string;
8591 string = alloca (len + 6);
8592 sprintf (string, "data@%s", name);
8593 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
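/* Thus a function "foo" ends up in section "code@foo", a public
   variable "bar" in section "bar", and a file-local variable "baz" in
   section "data@baz".  */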
8597 /* Switch to an arbitrary section NAME with attributes as specified
8598 by FLAGS. */
8601 static void
8602 unicosmk_asm_named_section (const char *name, unsigned int flags)
8604 const char *kind;
8606 /* Close the previous section. */
8608 fputs ("\t.endp\n\n", asm_out_file);
8610 /* Find out what kind of section we are opening. */
8612 if (flags & SECTION_MAIN)
8613 fputs ("\t.start\tmain\n", asm_out_file);
8615 if (flags & SECTION_CODE)
8616 kind = "code";
8617 else if (flags & SECTION_PUBLIC)
8618 kind = "common";
8619 else
8620 kind = "data";
8622 if (current_section_align != 0)
8623 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
8624 current_section_align, kind);
8625 else
8626 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
8629 static void
8630 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
8632 if (DECL_P (decl)
8633 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
8634 unicosmk_unique_section (decl, 0);
8637 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
8638 in code sections because .align fills unused space with zeroes. */
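/* For example, unicosmk_output_align (file, 3) emits
   "\tgcc@code@align\t3" within a function and "\t.align\t3"
   elsewhere.  */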
8640 void
8641 unicosmk_output_align (FILE *file, int align)
8643 if (inside_function)
8644 fprintf (file, "\tgcc@code@align\t%d\n", align);
8645 else
8646 fprintf (file, "\t.align\t%d\n", align);
8649 /* Add a case vector to the current function's list of deferred case
8650 vectors. Case vectors have to be put into a separate section because CAM
8651 does not allow data definitions in code sections. */
8653 void
8654 unicosmk_defer_case_vector (rtx lab, rtx vec)
8656 struct machine_function *machine = cfun->machine;
8658 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8659 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
8660 machine->addr_list);
8663 /* Output a case vector. */
8665 static void
8666 unicosmk_output_addr_vec (FILE *file, rtx vec)
8668 rtx lab = XEXP (vec, 0);
8669 rtx body = XEXP (vec, 1);
8670 int vlen = XVECLEN (body, 0);
8671 int idx;
8673 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
8675 for (idx = 0; idx < vlen; idx++)
8677 ASM_OUTPUT_ADDR_VEC_ELT
8678 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8682 /* Output current function's deferred case vectors. */
8684 static void
8685 unicosmk_output_deferred_case_vectors (FILE *file)
8687 struct machine_function *machine = cfun->machine;
8688 rtx t;
8690 if (machine->addr_list == NULL_RTX)
8691 return;
8693 data_section ();
8694 for (t = machine->addr_list; t; t = XEXP (t, 1))
8695 unicosmk_output_addr_vec (file, XEXP (t, 0));
8698 /* Generate the name of the SSIB section for the current function. */
8700 #define SSIB_PREFIX "__SSIB_"
8701 #define SSIB_PREFIX_LEN 7
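/* For example, the SSIB section for a function "foo" is named
   "__SSIB_foo"; the function name is truncated if the result would
   exceed 255 characters.  */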
8703 static const char *
8704 unicosmk_ssib_name (void)
8706 /* A static buffer is OK since CAM cannot deal with names longer than
8707 that anyway. */
8709 static char name[256];
8711 rtx x;
8712 const char *fnname;
8713 int len;
8715 x = DECL_RTL (cfun->decl);
8716 if (GET_CODE (x) != MEM)
8717 abort ();
8718 x = XEXP (x, 0);
8719 if (GET_CODE (x) != SYMBOL_REF)
8720 abort ();
8721 fnname = XSTR (x, 0);
8723 len = strlen (fnname);
8724 if (len + SSIB_PREFIX_LEN > 255)
8725 len = 255 - SSIB_PREFIX_LEN;
8727 strcpy (name, SSIB_PREFIX);
8728 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
8729 name[len + SSIB_PREFIX_LEN] = 0;
8731 return name;
8734 /* Set up the dynamic subprogram information block (DSIB) and update the
8735 frame pointer register ($15) for subroutines which have a frame. If the
8736 subroutine doesn't have a frame, simply increment $15. */
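/* Layout of the 64-byte DSIB as established below, as offsets from
   the decremented stack pointer: 56 return address, 48 old frame
   pointer, 32 SSIB pointer, 24 CIW index.  The remaining slots are
   not written here.  */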
8738 static void
8739 unicosmk_gen_dsib (unsigned long *imaskP)
8741 if (alpha_procedure_type == PT_STACK)
8743 const char *ssib_name;
8744 rtx mem;
8746 /* Allocate 64 bytes for the DSIB. */
8748 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
8749 GEN_INT (-64))));
8750 emit_insn (gen_blockage ());
8752 /* Save the return address. */
8754 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
8755 set_mem_alias_set (mem, alpha_sr_alias_set);
8756 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
8757 (*imaskP) &= ~(1UL << REG_RA);
8759 /* Save the old frame pointer. */
8761 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
8762 set_mem_alias_set (mem, alpha_sr_alias_set);
8763 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
8764 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
8766 emit_insn (gen_blockage ());
8768 /* Store the SSIB pointer. */
8770 ssib_name = ggc_strdup (unicosmk_ssib_name ());
8771 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
8772 set_mem_alias_set (mem, alpha_sr_alias_set);
8774 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
8775 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
8776 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
8778 /* Save the CIW index. */
8780 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
8781 set_mem_alias_set (mem, alpha_sr_alias_set);
8782 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
8784 emit_insn (gen_blockage ());
8786 /* Set the new frame pointer. */
8788 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8789 stack_pointer_rtx, GEN_INT (64))));
8792 else
8794 /* Increment the frame pointer register to indicate that we do not
8795 have a frame. */
8797 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8798 hard_frame_pointer_rtx, const1_rtx)));
8802 /* Output the static subroutine information block for the current
8803 function. */
8805 static void
8806 unicosmk_output_ssib (FILE *file, const char *fnname)
8808 int len;
8809 int i;
8810 rtx x;
8811 rtx ciw;
8812 struct machine_function *machine = cfun->machine;
8814 ssib_section ();
8815 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
8816 unicosmk_ssib_name ());
8818 /* A header quad containing required constant fields and the function name length. */
8820 len = strlen (fnname);
8821 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
8823 /* Saved registers
8824 ??? We don't do that yet. */
8826 fputs ("\t.quad\t0\n", file);
8828 /* Function address. */
8830 fputs ("\t.quad\t", file);
8831 assemble_name (file, fnname);
8832 putc ('\n', file);
8834 fputs ("\t.quad\t0\n", file);
8835 fputs ("\t.quad\t0\n", file);
8837 /* Function name.
8838 ??? We do it the same way Cray CC does it but this could be
8839 simplified. */
8841 for (i = 0; i < len; i++)
8842 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
8843 if ((len % 8) == 0)
8844 fputs ("\t.quad\t0\n", file);
8845 else
8846 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
8848 /* All call information words used in the function. */
8850 for (x = machine->first_ciw; x; x = XEXP (x, 1))
8852 ciw = XEXP (x, 0);
8853 #if HOST_BITS_PER_WIDE_INT == 32
8854 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
8855 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
8856 #else
8857 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
8858 #endif
8862 /* Add a call information word (CIW) to the list of the current function's
8863 CIWs and return its index.
8865 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
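/* The returned index counts 64-bit words within the SSIB: the five
   header quads plus the len/8 + 1 quads holding the function name
   precede the first CIW (cf. unicosmk_output_ssib above).  */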
8867 rtx
8868 unicosmk_add_call_info_word (rtx x)
8870 rtx node;
8871 struct machine_function *machine = cfun->machine;
8873 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
8874 if (machine->first_ciw == NULL_RTX)
8875 machine->first_ciw = node;
8876 else
8877 XEXP (machine->last_ciw, 1) = node;
8879 machine->last_ciw = node;
8880 ++machine->ciw_count;
8882 return GEN_INT (machine->ciw_count
8883 + strlen (current_function_name ())/8 + 5);
8886 static char unicosmk_section_buf[100];
8888 char *
8889 unicosmk_text_section (void)
8891 static int count = 0;
8892 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
8893 count++);
8894 return unicosmk_section_buf;
8897 char *
8898 unicosmk_data_section (void)
8900 static int count = 1;
8901 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
8902 count++);
8903 return unicosmk_section_buf;
8906 /* The Cray assembler doesn't accept extern declarations for symbols which
8907 are defined in the same file. We have to keep track of all global
8908 symbols which are referenced and/or defined in a source file and, at
8909 the end of the file, output extern declarations for those which are
8910 referenced but not defined. */
8912 /* List of identifiers for which an extern declaration might have to be
8913 emitted. */
8914 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
8916 struct unicosmk_extern_list
8918 struct unicosmk_extern_list *next;
8919 const char *name;
8922 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
8924 /* Output extern declarations which are required for every asm file. */
8926 static void
8927 unicosmk_output_default_externs (FILE *file)
8929 static const char *const externs[] =
8930 { "__T3E_MISMATCH" };
8932 int i;
8933 int n;
8935 n = ARRAY_SIZE (externs);
8937 for (i = 0; i < n; i++)
8938 fprintf (file, "\t.extern\t%s\n", externs[i]);
8941 /* Output extern declarations for global symbols which have been
8942 referenced but not defined. */
8944 static void
8945 unicosmk_output_externs (FILE *file)
8947 struct unicosmk_extern_list *p;
8948 const char *real_name;
8949 int len;
8950 tree name_tree;
8952 len = strlen (user_label_prefix);
8953 for (p = unicosmk_extern_head; p != 0; p = p->next)
8955 /* We have to strip the encoding and possibly remove user_label_prefix
8956 from the identifier in order to handle -fleading-underscore and
8957 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
8958 real_name = default_strip_name_encoding (p->name);
8959 if (len && p->name[0] == '*'
8960 && !memcmp (real_name, user_label_prefix, len))
8961 real_name += len;
8963 name_tree = get_identifier (real_name);
8964 if (! TREE_ASM_WRITTEN (name_tree))
8966 TREE_ASM_WRITTEN (name_tree) = 1;
8967 fputs ("\t.extern\t", file);
8968 assemble_name (file, p->name);
8969 putc ('\n', file);
8974 /* Record an extern. */
8976 void
8977 unicosmk_add_extern (const char *name)
8979 struct unicosmk_extern_list *p;
8981 p = (struct unicosmk_extern_list *)
8982 xmalloc (sizeof (struct unicosmk_extern_list));
8983 p->next = unicosmk_extern_head;
8984 p->name = name;
8985 unicosmk_extern_head = p;
8988 /* The Cray assembler generates incorrect code if identifiers which
8989 conflict with register names are used as instruction operands. We have
8990 to replace such identifiers with DEX expressions. */
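/* For example, a global function named "f4" would otherwise be parsed
   as a reference to floating-point register 4 when used as an operand,
   so it is emitted as a DEX expression instead.  */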
8992 /* Structure to collect identifiers which have been replaced by DEX
8993 expressions. */
8994 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
8996 struct unicosmk_dex {
8997 struct unicosmk_dex *next;
8998 const char *name;
9001 /* List of identifiers which have been replaced by DEX expressions. The DEX
9002 number is determined by the position in the list. */
9004 static struct unicosmk_dex *unicosmk_dex_list = NULL;
9006 /* The number of elements in the DEX list. */
9008 static int unicosmk_dex_count = 0;
9010 /* Check if NAME must be replaced by a DEX expression. */
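/* Roughly: NAME is special if, after optional '*' and '$' prefixes,
   it spells an integer or floating-point register name.  Examples:
   "r0", "r31", "$f10" and "*F29" are special; "r32", "f310" and "reg"
   are not.  */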
9012 static int
9013 unicosmk_special_name (const char *name)
9015 if (name[0] == '*')
9016 ++name;
9018 if (name[0] == '$')
9019 ++name;
9021 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
9022 return 0;
9024 switch (name[1])
9026 case '1': case '2':
9027 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
9029 case '3':
9030 return (name[2] == '\0'
9031 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
9033 default:
9034 return (ISDIGIT (name[1]) && name[2] == '\0');
9038 /* Return the DEX number if X must be replaced by a DEX expression and 0
9039 otherwise. */
9041 static int
9042 unicosmk_need_dex (rtx x)
9044 struct unicosmk_dex *dex;
9045 const char *name;
9046 int i;
9048 if (GET_CODE (x) != SYMBOL_REF)
9049 return 0;
9051 name = XSTR (x, 0);
9052 if (! unicosmk_special_name (name))
9053 return 0;
9055 i = unicosmk_dex_count;
9056 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9058 if (! strcmp (name, dex->name))
9059 return i;
9060 --i;
9063 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9064 dex->name = name;
9065 dex->next = unicosmk_dex_list;
9066 unicosmk_dex_list = dex;
9068 ++unicosmk_dex_count;
9069 return unicosmk_dex_count;
9072 /* Output the DEX definitions for this file. */
9074 static void
9075 unicosmk_output_dex (FILE *file)
9077 struct unicosmk_dex *dex;
9078 int i;
9080 if (unicosmk_dex_list == NULL)
9081 return;
9083 fprintf (file, "\t.dexstart\n");
9085 i = unicosmk_dex_count;
9086 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9088 fprintf (file, "\tDEX (%d) = ", i);
9089 assemble_name (file, dex->name);
9090 putc ('\n', file);
9091 --i;
9094 fprintf (file, "\t.dexend\n");
9097 /* Output text to appear at the beginning of an assembler file. */
9099 static void
9100 unicosmk_file_start (void)
9102 int i;
9104 fputs ("\t.ident\t", asm_out_file);
9105 unicosmk_output_module_name (asm_out_file);
9106 fputs ("\n\n", asm_out_file);
9108 /* CAM uses different register names: rN for the integer register N and
9109 fN for the floating-point register N. Instead of trying to use these
9110 in alpha.md, we emit micro definitions of the symbols $N and $fN that
9111 refer to the appropriate register. */
9116 for (i = 0; i < 32; ++i)
9117 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
9119 for (i = 0; i < 32; ++i)
9120 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
9122 putc ('\n', asm_out_file);
9124 /* The .align directive fills unused space with zeroes, which does not work
9125 in code sections. We define the macro 'gcc@code@align' which uses nops
9126 instead. Note that it assumes that code sections always have the
9127 biggest possible alignment since . refers to the current offset from
9128 the beginning of the section. */
9130 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
9131 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
9132 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
9133 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
9134 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
9135 fputs ("\tbis r31,r31,r31\n", asm_out_file);
9136 fputs ("\t.endr\n", asm_out_file);
9137 fputs ("\t.endif\n", asm_out_file);
9138 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
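/* For example, "gcc@code@align 4" pads to a 16-byte boundary with
   "bis r31,r31,r31" nops, 4 bytes each.  */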
9140 /* Output extern declarations which should always be visible. */
9141 unicosmk_output_default_externs (asm_out_file);
9143 /* Open a dummy section. We always need to be inside a section for the
9144 section-switching code to work correctly.
9145 ??? This should be a module id or something like that. I still have to
9146 figure out what the rules for those are. */
9147 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
9150 /* Output text to appear at the end of an assembler file. This includes all
9151 pending extern declarations and DEX expressions. */
9153 static void
9154 unicosmk_file_end (void)
9156 fputs ("\t.endp\n\n", asm_out_file);
9158 /* Output all pending externs. */
9160 unicosmk_output_externs (asm_out_file);
9162 /* Output dex definitions used for functions whose names conflict with
9163 register names. */
9165 unicosmk_output_dex (asm_out_file);
9167 fputs ("\t.end\t", asm_out_file);
9168 unicosmk_output_module_name (asm_out_file);
9169 putc ('\n', asm_out_file);
9172 #else
9174 static void
9175 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
9178 static void
9179 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
9182 static void
9183 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
9184 const char * fnname ATTRIBUTE_UNUSED)
9187 rtx
9188 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
9190 return NULL_RTX;
9193 static int
9194 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
9196 return 0;
9199 #endif /* TARGET_ABI_UNICOSMK */
9201 static void
9202 alpha_init_libfuncs (void)
9204 if (TARGET_ABI_UNICOSMK)
9206 /* Prevent gcc from generating calls to __divsi3 and __udivsi3. */
9207 set_optab_libfunc (sdiv_optab, SImode, 0);
9208 set_optab_libfunc (udiv_optab, SImode, 0);
9210 /* Use the functions provided by the system library
9211 for DImode integer division. */
9212 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
9213 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
9215 else if (TARGET_ABI_OPEN_VMS)
9217 /* Use the VMS runtime library functions for division and
9218 remainder. */
9219 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9220 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9221 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9222 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9223 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9224 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9225 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9226 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9231 /* Initialize the GCC target structure. */
9232 #if TARGET_ABI_OPEN_VMS
9233 # undef TARGET_ATTRIBUTE_TABLE
9234 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9235 # undef TARGET_SECTION_TYPE_FLAGS
9236 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
9237 #endif
9239 #undef TARGET_IN_SMALL_DATA_P
9240 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9242 #if TARGET_ABI_UNICOSMK
9243 # undef TARGET_INSERT_ATTRIBUTES
9244 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
9245 # undef TARGET_SECTION_TYPE_FLAGS
9246 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
9247 # undef TARGET_ASM_UNIQUE_SECTION
9248 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
9249 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
9250 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
9251 # undef TARGET_ASM_GLOBALIZE_LABEL
9252 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
9253 # undef TARGET_MUST_PASS_IN_STACK
9254 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
9255 #endif
9257 #undef TARGET_ASM_ALIGNED_HI_OP
9258 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9259 #undef TARGET_ASM_ALIGNED_DI_OP
9260 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9262 /* Default unaligned ops are provided for ELF systems. To get unaligned
9263 data for non-ELF systems, we have to turn off auto alignment. */
9264 #ifndef OBJECT_FORMAT_ELF
9265 #undef TARGET_ASM_UNALIGNED_HI_OP
9266 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9267 #undef TARGET_ASM_UNALIGNED_SI_OP
9268 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9269 #undef TARGET_ASM_UNALIGNED_DI_OP
9270 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9271 #endif
9273 #ifdef OBJECT_FORMAT_ELF
9274 #undef TARGET_ASM_SELECT_RTX_SECTION
9275 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9276 #endif
9278 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9279 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9281 #undef TARGET_INIT_LIBFUNCS
9282 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9284 #if TARGET_ABI_UNICOSMK
9285 #undef TARGET_ASM_FILE_START
9286 #define TARGET_ASM_FILE_START unicosmk_file_start
9287 #undef TARGET_ASM_FILE_END
9288 #define TARGET_ASM_FILE_END unicosmk_file_end
9289 #else
9290 #undef TARGET_ASM_FILE_START
9291 #define TARGET_ASM_FILE_START alpha_file_start
9292 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
9293 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
9294 #endif
9296 #undef TARGET_SCHED_ADJUST_COST
9297 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9298 #undef TARGET_SCHED_ISSUE_RATE
9299 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9300 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9301 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9302 alpha_multipass_dfa_lookahead
9304 #undef TARGET_HAVE_TLS
9305 #define TARGET_HAVE_TLS HAVE_AS_TLS
9307 #undef TARGET_INIT_BUILTINS
9308 #define TARGET_INIT_BUILTINS alpha_init_builtins
9309 #undef TARGET_EXPAND_BUILTIN
9310 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
9312 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9313 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9314 #undef TARGET_CANNOT_COPY_INSN_P
9315 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
9317 #if TARGET_ABI_OSF
9318 #undef TARGET_ASM_OUTPUT_MI_THUNK
9319 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9320 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9321 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
9322 #endif
9324 #undef TARGET_RTX_COSTS
9325 #define TARGET_RTX_COSTS alpha_rtx_costs
9326 #undef TARGET_ADDRESS_COST
9327 #define TARGET_ADDRESS_COST hook_int_rtx_0
9329 #undef TARGET_MACHINE_DEPENDENT_REORG
9330 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9332 #undef TARGET_PROMOTE_FUNCTION_ARGS
9333 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
9334 #undef TARGET_PROMOTE_FUNCTION_RETURN
9335 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
9336 #undef TARGET_PROMOTE_PROTOTYPES
9337 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
9338 #undef TARGET_RETURN_IN_MEMORY
9339 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
9340 #undef TARGET_PASS_BY_REFERENCE
9341 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
9342 #undef TARGET_SETUP_INCOMING_VARARGS
9343 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9344 #undef TARGET_STRICT_ARGUMENT_NAMING
9345 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9346 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9347 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
9348 #undef TARGET_SPLIT_COMPLEX_ARG
9349 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
9350 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9351 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
9353 #undef TARGET_BUILD_BUILTIN_VA_LIST
9354 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
9356 struct gcc_target targetm = TARGET_INITIALIZER;
9359 #include "gt-alpha.h"