/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2013 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
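/* Note: as classic function-like macros these evaluate each argument
   twice, so for example min (x++, y) would increment X twice.  They
   should only be used with side-effect-free operands.  */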
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
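/* For instance, IS_STD_REG_TYPE (VSX_REG_TYPE) and
   IS_FP_VECT_REG_TYPE (ALTIVEC_REG_TYPE) both hold, because those
   enumerators fall inside the GPR_REG_TYPE..FPR_REG_TYPE and
   VSX_REG_TYPE..FPR_REG_TYPE ranges declared above, while SPR_REG_TYPE
   and the later enumerators fail both range checks.  */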
/* Register type masks based on the type, of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
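/* COSTS_N_INSNS (N) scales N to the cost of N integer adds (see the
   "COSTS_N_INSNS (1) ~ one add" note at the Cell table below), so e.g.
   rs64a_cost.divsi == COSTS_N_INSNS (65) says that one SImode divide is
   worth roughly sixty-five adds on RS64A.  */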
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
     "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "vrsave", "vscr",
     /* SPE registers.  */
     "spe_acc", "spefscr",
     /* Soft frame pointer.  */
     "sfp",
     /* HTM SPR registers.  */
     "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
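/* E.g. ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0) and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */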
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
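/* Example: rs6000_cpu_name_lookup ("power7") returns the index of the
   "power7" row generated from rs6000-cpus.def, while a misspelled name
   or a NULL pointer returns -1.  */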
1567 /* Return number of consecutive hard regs needed starting at reg REGNO
1568 to hold something of mode MODE.
1569 This is ordinarily the length in words of a value of mode MODE
1570 but can be less for certain modes in special long registers.
1572 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1573 scalar instructions. The upper 32 bits are only available to the
1574 SIMD instructions.
1576 POWER and PowerPC GPRs hold 32 bits worth;
1577 PowerPC64 GPRs and FPRs point register holds 64 bits worth. */
1579 static int
1580 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1582 unsigned HOST_WIDE_INT reg_size;
1584 /* TF/TD modes are special in that they always take 2 registers. */
1585 if (FP_REGNO_P (regno))
1586 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1587 ? UNITS_PER_VSX_WORD
1588 : UNITS_PER_FP_WORD);
1590 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1591 reg_size = UNITS_PER_SPE_WORD;
1593 else if (ALTIVEC_REGNO_P (regno))
1594 reg_size = UNITS_PER_ALTIVEC_WORD;
1596 /* The value returned for SCmode in the E500 double case is 2 for
1597 ABI compatibility; storing an SCmode value in a single register
1598 would require function_arg and rs6000_spe_function_arg to handle
1599 SCmode so as to pass the value correctly in a pair of
1600 registers. */
1601 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1602 && !DECIMAL_FLOAT_MODE_P (mode))
1603 reg_size = UNITS_PER_FP_WORD;
1605 else
1606 reg_size = UNITS_PER_WORD;
1608 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
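/* Worked example (a sketch, not part of GCC): the ceiling division above
   rounds a mode's byte size up to whole registers.  With 4-byte GPRs
   (UNITS_PER_WORD == 4), an 8-byte DFmode value needs (8 + 4 - 1) / 4 == 2
   registers, while a 16-byte vector in a 16-byte AltiVec register needs
   (16 + 16 - 1) / 16 == 1.  */
#if 0
static int
example_nregs (int mode_size, int reg_size)
{
  /* Round up: a value occupying part of a register still uses all of it.  */
  return (mode_size + reg_size - 1) / reg_size;
}
#endif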
1611 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1612 MODE. */
1613 static int
1614 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1616 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1618 /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
1619 register combinations, and we use PTImode wherever we need to deal with
1620 quad word memory operations.  Don't allow quad words in the argument or
1621 frame pointer registers, just registers 0..31.  */
1622 if (mode == PTImode)
1623 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1624 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1625 && ((regno & 1) == 0));
1627 /* VSX registers that overlap the FPR registers are larger than the FPRs on
1628 non-VSX implementations.  Don't allow an item to be split between an FP
1629 register and an Altivec register.  Allow TImode in all VSX registers if the
1630 user asked for it.  */
1631 if (TARGET_VSX && VSX_REGNO_P (regno)
1632 && (VECTOR_MEM_VSX_P (mode)
1633 || (TARGET_VSX_SCALAR_FLOAT && mode == SFmode)
1634 || (TARGET_VSX_SCALAR_DOUBLE && (mode == DFmode || mode == DImode))
1635 || (TARGET_VSX_TIMODE && mode == TImode)))
1637 if (FP_REGNO_P (regno))
1638 return FP_REGNO_P (last_regno);
1640 if (ALTIVEC_REGNO_P (regno))
1642 if (mode == SFmode && !TARGET_UPPER_REGS_SF)
1643 return 0;
1645 if ((mode == DFmode || mode == DImode) && !TARGET_UPPER_REGS_DF)
1646 return 0;
1648 return ALTIVEC_REGNO_P (last_regno);
1652 /* The GPRs can hold any mode, but values bigger than one register
1653 cannot go past R31. */
1654 if (INT_REGNO_P (regno))
1655 return INT_REGNO_P (last_regno);
1657 /* The float registers (except for VSX vector modes) can only hold floating
1658 modes and DImode. */
1659 if (FP_REGNO_P (regno))
1661 if (SCALAR_FLOAT_MODE_P (mode)
1662 && (mode != TDmode || (regno % 2) == 0)
1663 && FP_REGNO_P (last_regno))
1664 return 1;
1666 if (GET_MODE_CLASS (mode) == MODE_INT
1667 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1668 return 1;
1670 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1671 && PAIRED_VECTOR_MODE (mode))
1672 return 1;
1674 return 0;
1677 /* The CR register can only hold CC modes. */
1678 if (CR_REGNO_P (regno))
1679 return GET_MODE_CLASS (mode) == MODE_CC;
1681 if (CA_REGNO_P (regno))
1682 return mode == BImode;
1684 /* AltiVec only in AltiVec registers. */
1685 if (ALTIVEC_REGNO_P (regno))
1686 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1688 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1689 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1690 return 1;
1692 /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
1693 and the value must fit within the register set. */
1695 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
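/* Sketch (not part of GCC) of the PTImode rule from the top of this
   function: quad word memory operations need an even/odd GPR pair, so the
   mode is accepted only when the first register is even and both registers
   are ordinary GPRs (r0..r31).  */
#if 0
static int
example_ptimode_regno_ok (int regno)
{
  int last_regno = regno + 1;            /* PTImode occupies two GPRs.     */
  return (regno >= 0 && last_regno <= 31 /* both within r0..r31            */
          && (regno & 1) == 0);          /* pair starts on an even regno   */
}
#endif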
1698 /* Print interesting facts about registers. */
1699 static void
1700 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1702 int r, m;
1704 for (r = first_regno; r <= last_regno; ++r)
1706 const char *comma = "";
1707 int len;
1709 if (first_regno == last_regno)
1710 fprintf (stderr, "%s:\t", reg_name);
1711 else
1712 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1714 len = 8;
1715 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1716 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1718 if (len > 70)
1720 fprintf (stderr, ",\n\t");
1721 len = 8;
1722 comma = "";
1725 if (rs6000_hard_regno_nregs[m][r] > 1)
1726 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1727 rs6000_hard_regno_nregs[m][r]);
1728 else
1729 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1731 comma = ", ";
1734 if (call_used_regs[r])
1736 if (len > 70)
1738 fprintf (stderr, ",\n\t");
1739 len = 8;
1740 comma = "";
1743 len += fprintf (stderr, "%s%s", comma, "call-used");
1744 comma = ", ";
1747 if (fixed_regs[r])
1749 if (len > 70)
1751 fprintf (stderr, ",\n\t");
1752 len = 8;
1753 comma = "";
1756 len += fprintf (stderr, "%s%s", comma, "fixed");
1757 comma = ", ";
1760 if (len > 70)
1762 fprintf (stderr, ",\n\t");
1763 comma = "";
1766 len += fprintf (stderr, "%sreg-class = %s", comma,
1767 reg_class_names[(int)rs6000_regno_regclass[r]]);
1768 comma = ", ";
1770 if (len > 70)
1772 fprintf (stderr, ",\n\t");
1773 comma = "";
1776 fprintf (stderr, "%sregno = %d\n", comma, r);
1780 #define DEBUG_FMT_ID "%-32s= "
1781 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1782 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1783 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
1785 /* Print various interesting information with -mdebug=reg. */
1786 static void
1787 rs6000_debug_reg_global (void)
1789 static const char *const tf[2] = { "false", "true" };
1790 const char *nl = (const char *)0;
1791 int m;
1792 size_t m1, m2, v;
1793 char costly_num[20];
1794 char nop_num[20];
1795 char flags_buffer[40];
1796 const char *costly_str;
1797 const char *nop_str;
1798 const char *trace_str;
1799 const char *abi_str;
1800 const char *cmodel_str;
1801 struct cl_target_option cl_opts;
1803 /* Map enum rs6000_vector to string. */
1804 static const char *rs6000_debug_vector_unit[] = {
1805 "none",
1806 "altivec",
1807 "vsx",
1808 "p8_vector",
1809 "paired",
1810 "spe",
1811 "other"
1814 /* Modes we want tieable information on. */
1815 static const enum machine_mode print_tieable_modes[] = {
1816 QImode,
1817 HImode,
1818 SImode,
1819 DImode,
1820 TImode,
1821 PTImode,
1822 SFmode,
1823 DFmode,
1824 TFmode,
1825 SDmode,
1826 DDmode,
1827 TDmode,
1828 V8QImode,
1829 V4HImode,
1830 V2SImode,
1831 V16QImode,
1832 V8HImode,
1833 V4SImode,
1834 V2DImode,
1835 V32QImode,
1836 V16HImode,
1837 V8SImode,
1838 V4DImode,
1839 V2SFmode,
1840 V4SFmode,
1841 V2DFmode,
1842 V8SFmode,
1843 V4DFmode,
1844 CCmode,
1845 CCUNSmode,
1846 CCEQmode,
1849 /* Virtual regs we are interested in. */
1850 static const struct {
1851 int regno; /* register number. */
1852 const char *name; /* register name. */
1853 } virtual_regs[] = {
1854 { STACK_POINTER_REGNUM, "stack pointer:" },
1855 { TOC_REGNUM, "toc: " },
1856 { STATIC_CHAIN_REGNUM, "static chain: " },
1857 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
1858 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
1859 { ARG_POINTER_REGNUM, "arg pointer: " },
1860 { FRAME_POINTER_REGNUM, "frame pointer:" },
1861 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
1862 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
1863 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
1864 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
1865 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
1866 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
1867 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
1868 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
1869 { LAST_VIRTUAL_REGISTER, "last virtual: " },
1872 fputs ("\nHard register information:\n", stderr);
1873 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
1874 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
1875 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1876 LAST_ALTIVEC_REGNO,
1877 "vs");
1878 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1879 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1880 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1881 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1882 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1883 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1884 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1885 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1887 fputs ("\nVirtual/stack/frame registers:\n", stderr);
1888 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
1889 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
1891 fprintf (stderr,
1892 "\n"
1893 "d reg_class = %s\n"
1894 "f reg_class = %s\n"
1895 "v reg_class = %s\n"
1896 "wa reg_class = %s\n"
1897 "wd reg_class = %s\n"
1898 "wf reg_class = %s\n"
1899 "wg reg_class = %s\n"
1900 "wl reg_class = %s\n"
1901 "wm reg_class = %s\n"
1902 "wr reg_class = %s\n"
1903 "ws reg_class = %s\n"
1904 "wt reg_class = %s\n"
1905 "wu reg_class = %s\n"
1906 "wv reg_class = %s\n"
1907 "ww reg_class = %s\n"
1908 "wx reg_class = %s\n"
1909 "wy reg_class = %s\n"
1910 "wz reg_class = %s\n"
1911 "\n",
1912 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1913 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1914 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1915 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1916 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1917 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1918 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
1919 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
1920 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
1921 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
1922 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
1923 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
1924 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
1925 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
1926 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
1927 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
1928 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
1929 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
1931 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1932 if (rs6000_vector_unit[m] || rs6000_vector_mem[m]
1933 || (reg_addr[m].reload_load != CODE_FOR_nothing)
1934 || (reg_addr[m].reload_store != CODE_FOR_nothing))
1936 nl = "\n";
1937 fprintf (stderr,
1938 "Vector mode: %-5s arithmetic: %-10s move: %-10s "
1939 "reload-out: %c reload-in: %c\n",
1940 GET_MODE_NAME (m),
1941 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1942 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ],
1943 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 'y' : 'n',
1944 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'y' : 'n');
1947 if (nl)
1948 fputs (nl, stderr);
1950 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
1952 enum machine_mode mode1 = print_tieable_modes[m1];
1953 bool first_time = true;
1955 nl = (const char *)0;
1956 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
1958 enum machine_mode mode2 = print_tieable_modes[m2];
1959 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
1961 if (first_time)
1963 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
1964 nl = "\n";
1965 first_time = false;
1968 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
1972 if (!first_time)
1973 fputs ("\n", stderr);
1976 if (nl)
1977 fputs (nl, stderr);
1979 if (rs6000_recip_control)
1981 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1983 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1984 if (rs6000_recip_bits[m])
1986 fprintf (stderr,
1987 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1988 GET_MODE_NAME (m),
1989 (RS6000_RECIP_AUTO_RE_P (m)
1990 ? "auto"
1991 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1992 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1993 ? "auto"
1994 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1997 fputs ("\n", stderr);
2000 if (rs6000_cpu_index >= 0)
2002 const char *name = processor_target_table[rs6000_cpu_index].name;
2003 HOST_WIDE_INT flags
2004 = processor_target_table[rs6000_cpu_index].target_enable;
2006 sprintf (flags_buffer, "-mcpu=%s flags", name);
2007 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2009 else
2010 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2012 if (rs6000_tune_index >= 0)
2014 const char *name = processor_target_table[rs6000_tune_index].name;
2015 HOST_WIDE_INT flags
2016 = processor_target_table[rs6000_tune_index].target_enable;
2018 sprintf (flags_buffer, "-mtune=%s flags", name);
2019 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2021 else
2022 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2024 cl_target_option_save (&cl_opts, &global_options);
2025 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2026 rs6000_isa_flags);
2028 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2029 rs6000_isa_flags_explicit);
2031 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2032 rs6000_builtin_mask);
2034 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2036 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2037 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2039 switch (rs6000_sched_costly_dep)
2041 case max_dep_latency:
2042 costly_str = "max_dep_latency";
2043 break;
2045 case no_dep_costly:
2046 costly_str = "no_dep_costly";
2047 break;
2049 case all_deps_costly:
2050 costly_str = "all_deps_costly";
2051 break;
2053 case true_store_to_load_dep_costly:
2054 costly_str = "true_store_to_load_dep_costly";
2055 break;
2057 case store_to_load_dep_costly:
2058 costly_str = "store_to_load_dep_costly";
2059 break;
2061 default:
2062 costly_str = costly_num;
2063 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2064 break;
2067 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2069 switch (rs6000_sched_insert_nops)
2071 case sched_finish_regroup_exact:
2072 nop_str = "sched_finish_regroup_exact";
2073 break;
2075 case sched_finish_pad_groups:
2076 nop_str = "sched_finish_pad_groups";
2077 break;
2079 case sched_finish_none:
2080 nop_str = "sched_finish_none";
2081 break;
2083 default:
2084 nop_str = nop_num;
2085 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2086 break;
2089 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2091 switch (rs6000_sdata)
2093 default:
2094 case SDATA_NONE:
2095 break;
2097 case SDATA_DATA:
2098 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2099 break;
2101 case SDATA_SYSV:
2102 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2103 break;
2105 case SDATA_EABI:
2106 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2107 break;
2111 switch (rs6000_traceback)
2113 case traceback_default: trace_str = "default"; break;
2114 case traceback_none: trace_str = "none"; break;
2115 case traceback_part: trace_str = "part"; break;
2116 case traceback_full: trace_str = "full"; break;
2117 default: trace_str = "unknown"; break;
2120 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2122 switch (rs6000_current_cmodel)
2124 case CMODEL_SMALL: cmodel_str = "small"; break;
2125 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2126 case CMODEL_LARGE: cmodel_str = "large"; break;
2127 default: cmodel_str = "unknown"; break;
2130 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2132 switch (rs6000_current_abi)
2134 case ABI_NONE: abi_str = "none"; break;
2135 case ABI_AIX: abi_str = "aix"; break;
2136 case ABI_V4: abi_str = "V4"; break;
2137 case ABI_DARWIN: abi_str = "darwin"; break;
2138 default: abi_str = "unknown"; break;
2141 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2143 if (rs6000_altivec_abi)
2144 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2146 if (rs6000_spe_abi)
2147 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2149 if (rs6000_darwin64_abi)
2150 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2152 if (rs6000_float_gprs)
2153 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2155 if (TARGET_LINK_STACK)
2156 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2158 if (targetm.lra_p ())
2159 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2161 if (TARGET_P8_FUSION)
2162 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2163 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2165 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2166 TARGET_SECURE_PLT ? "secure" : "bss");
2167 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2168 aix_struct_return ? "aix" : "sysv");
2169 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2170 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2171 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2172 tf[!!rs6000_align_branch_targets]);
2173 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2174 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2175 rs6000_long_double_type_size);
2176 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2177 (int)rs6000_sched_restricted_insns_priority);
2178 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2179 (int)END_BUILTINS);
2180 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2181 (int)RS6000_BUILTIN_COUNT);
2184 /* Initialize the various global tables that are based on register size. */
2185 static void
2186 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2188 ssize_t r, m, c;
2189 int align64;
2190 int align32;
2192 /* Precalculate REGNO_REG_CLASS. */
2193 rs6000_regno_regclass[0] = GENERAL_REGS;
2194 for (r = 1; r < 32; ++r)
2195 rs6000_regno_regclass[r] = BASE_REGS;
2197 for (r = 32; r < 64; ++r)
2198 rs6000_regno_regclass[r] = FLOAT_REGS;
2200 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2201 rs6000_regno_regclass[r] = NO_REGS;
2203 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2204 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2206 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2207 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2208 rs6000_regno_regclass[r] = CR_REGS;
2210 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2211 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2212 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2213 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2214 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2215 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2216 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2217 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2218 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2219 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2220 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2221 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2223 /* Precalculate the mapping from register class to a simpler reload register
2224 class.  We don't need all of the register classes that are combinations of
2225 different classes, just the simple ones that have constraint letters. */
2226 for (c = 0; c < N_REG_CLASSES; c++)
2227 reg_class_to_reg_type[c] = NO_REG_TYPE;
2229 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2230 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2231 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2232 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2233 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2234 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2235 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2236 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2237 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2238 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2239 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2240 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2242 if (TARGET_VSX)
2244 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2245 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2247 else
2249 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2250 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2253 /* Precalculate the valid memory formats as well as the vector information;
2254 this must be set up before the rs6000_hard_regno_nregs_internal calls
2255 below.  */
2256 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2258 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
2259 reg_addr[m].reload_load = CODE_FOR_nothing;
2260 reg_addr[m].reload_store = CODE_FOR_nothing;
2261 reg_addr[m].reload_fpr_gpr = CODE_FOR_nothing;
2262 reg_addr[m].reload_gpr_vsx = CODE_FOR_nothing;
2263 reg_addr[m].reload_vsx_gpr = CODE_FOR_nothing;
2266 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2267 rs6000_constraints[c] = NO_REGS;
2269 /* The VSX hardware allows native alignment for vectors; this controls whether
2270 the compiler assumes native alignment or must still use 128-bit alignment. */
2271 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2273 align64 = 64;
2274 align32 = 32;
2276 else
2278 align64 = 128;
2279 align32 = 128;
2282 /* V2DF mode, VSX only. */
2283 if (TARGET_VSX)
2285 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2286 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2287 rs6000_vector_align[V2DFmode] = align64;
2290 /* V4SF mode, either VSX or Altivec. */
2291 if (TARGET_VSX)
2293 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2294 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2295 rs6000_vector_align[V4SFmode] = align32;
2297 else if (TARGET_ALTIVEC)
2299 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2300 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2301 rs6000_vector_align[V4SFmode] = align32;
2304 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly use VSX loads
2305 and stores. */
2306 if (TARGET_ALTIVEC)
2308 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2309 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2310 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2311 rs6000_vector_align[V4SImode] = align32;
2312 rs6000_vector_align[V8HImode] = align32;
2313 rs6000_vector_align[V16QImode] = align32;
2315 if (TARGET_VSX)
2317 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2318 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2319 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2321 else
2323 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2324 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2325 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2329 /* V2DImode: full support depends on the ISA 2.07 vector mode.  Allow it under
2330 VSX to do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
2331 if (TARGET_VSX)
2333 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2334 rs6000_vector_unit[V2DImode]
2335 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2336 rs6000_vector_align[V2DImode] = align64;
2339 /* DFmode, see if we want to use the VSX unit. */
2340 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2342 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2343 rs6000_vector_mem[DFmode]
2344 = (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
2345 rs6000_vector_align[DFmode] = align64;
2348 /* Allow TImode in VSX registers and set the VSX memory macros. */
2349 if (TARGET_VSX && TARGET_VSX_TIMODE)
2351 rs6000_vector_mem[TImode] = VECTOR_VSX;
2352 rs6000_vector_align[TImode] = align64;
2355 /* TODO add SPE and paired floating point vector support. */
2357 /* Register class constraints for the constraints that depend on compile
2358 switches.  When the VSX code was added, different constraints were added
2359 based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
2360 of the VSX registers are used.  The register classes for scalar floating
2361 point types are set based on whether we allow that type into the upper
2362 (Altivec) registers.  GCC has register classes to target the Altivec
2363 registers for load/store operations, so that a VSX memory operation can be
2364 selected instead of the traditional floating point operation.  The
2365 constraints are:
2367 d - Register class to use with traditional DFmode instructions.
2368 f - Register class to use with traditional SFmode instructions.
2369 v - Altivec register.
2370 wa - Any VSX register.
2371 wd - Preferred register class for V2DFmode.
2372 wf - Preferred register class for V4SFmode.
2373 wg - Float register for power6x move insns.
2374 wl - Float register if we can do 32-bit signed int loads.
2375 wm - VSX register for ISA 2.07 direct move operations.
2376 wr - GPR if 64-bit mode is permitted.
2377 ws - Register class to do ISA 2.06 DF operations.
2378 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2379 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2380 wt - VSX register for TImode in VSX registers.
2381 ww - Register class to do SF conversions in with VSX operations.
2382 wx - Float register if we can do 32-bit int stores.
2383 wy - Register class to do ISA 2.07 SF operations.
2384 wz - Float register if we can do 32-bit unsigned int loads. */
2386 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2387 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2389 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2390 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2392 if (TARGET_VSX)
2394 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2395 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2396 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2398 if (TARGET_VSX_TIMODE)
2399 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;
2401 if (TARGET_UPPER_REGS_DF)
2403 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2404 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2406 else
2407 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2410 /* Add conditional constraints based on various options, to allow us to
2411 collapse multiple insn patterns. */
2412 if (TARGET_ALTIVEC)
2413 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2415 if (TARGET_MFPGPR)
2416 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2418 if (TARGET_LFIWAX)
2419 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;
2421 if (TARGET_DIRECT_MOVE)
2422 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2424 if (TARGET_POWERPC64)
2425 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2427 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF)
2429 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2430 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2431 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2433 else if (TARGET_P8_VECTOR)
2435 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2436 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2438 else if (TARGET_VSX)
2439 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2441 if (TARGET_STFIWX)
2442 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;
2444 if (TARGET_LFIWZX)
2445 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
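/* Usage illustration (a sketch, not part of GCC itself): the constraint
   letters initialized above are what rs6000.md patterns and user inline
   asm refer to.  For example, "wa" requests any VSX register, and the
   "%x" output modifier prints the full 0..63 VSX register number.  */
#if 0
typedef double example_v2df __attribute__ ((vector_size (16)));

static inline example_v2df
example_vsx_add (example_v2df a, example_v2df b)
{
  example_v2df r;
  /* xvadddp is the VSX vector double-precision add.  */
  __asm__ ("xvadddp %x0,%x1,%x2" : "=wa" (r) : "wa" (a), "wa" (b));
  return r;
}
#endif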
2447 /* Set up the reload helper and direct move functions. */
2448 if (TARGET_VSX || TARGET_ALTIVEC)
2450 if (TARGET_64BIT)
2452 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2453 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2454 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2455 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2456 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2457 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2458 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2459 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2460 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2461 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2462 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2463 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2464 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2466 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2467 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2468 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2469 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2471 if (TARGET_P8_VECTOR)
2473 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2474 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2475 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2476 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2478 if (TARGET_VSX_TIMODE)
2480 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2481 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2483 if (TARGET_DIRECT_MOVE)
2485 if (TARGET_POWERPC64)
2487 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2488 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2489 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2490 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2491 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2492 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2493 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2494 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2496 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2497 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2498 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2499 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2500 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2501 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2502 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2503 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2505 else
2507 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2508 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2509 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2513 else
2515 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2516 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2517 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2518 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2519 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2520 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2521 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2522 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2523 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2524 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2525 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2526 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2527 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2529 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2530 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2531 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2532 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2534 if (TARGET_P8_VECTOR)
2536 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2537 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2538 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2539 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2541 if (TARGET_VSX_TIMODE)
2543 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2544 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2549 /* Precalculate HARD_REGNO_NREGS. */
2550 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2551 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2552 rs6000_hard_regno_nregs[m][r]
2553 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2555 /* Precalculate HARD_REGNO_MODE_OK. */
2556 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2557 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2558 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2559 rs6000_hard_regno_mode_ok_p[m][r] = true;
2561 /* Precalculate CLASS_MAX_NREGS sizes. */
2562 for (c = 0; c < LIM_REG_CLASSES; ++c)
2564 int reg_size;
2566 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2567 reg_size = UNITS_PER_VSX_WORD;
2569 else if (c == ALTIVEC_REGS)
2570 reg_size = UNITS_PER_ALTIVEC_WORD;
2572 else if (c == FLOAT_REGS)
2573 reg_size = UNITS_PER_FP_WORD;
2575 else
2576 reg_size = UNITS_PER_WORD;
2578 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2580 int reg_size2 = reg_size;
2582 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2583 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2584 && (m == TDmode || m == TFmode))
2585 reg_size2 = UNITS_PER_FP_WORD;
2587 rs6000_class_max_nregs[m][c]
2588 = (GET_MODE_SIZE (m) + reg_size2 - 1) / reg_size2;
2592 if (TARGET_E500_DOUBLE)
2593 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
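/* Worked example (a sketch, not part of GCC) of the TF/TD special case
   above: TFmode is 16 bytes, so in a VSX class with 16-byte registers the
   generic formula would give one register, but forcing reg_size2 back to
   UNITS_PER_FP_WORD preserves the historical answer of two FPRs.  */
#if 0
int generic_nregs = (16 + 16 - 1) / 16;  /* == 1 with a 16-byte VSX reg   */
int actual_nregs  = (16 +  8 - 1) /  8;  /* == 2 with UNITS_PER_FP_WORD   */
#endif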
2595 /* Calculate the modes for which we should automatically generate code to
2596 use the reciprocal divide and square root estimate instructions.  In the
2597 future, possibly generate these instructions even if the user did not
2598 specify -mrecip; the double precision reciprocal sqrt estimate on older
2599 machines is not accurate enough.  */
2600 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2601 if (TARGET_FRES)
2602 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2603 if (TARGET_FRE)
2604 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2605 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2606 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2607 if (VECTOR_UNIT_VSX_P (V2DFmode))
2608 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2610 if (TARGET_FRSQRTES)
2611 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2612 if (TARGET_FRSQRTE)
2613 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2614 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2615 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2616 if (VECTOR_UNIT_VSX_P (V2DFmode))
2617 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2619 if (rs6000_recip_control)
2621 if (!flag_finite_math_only)
2622 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2623 if (flag_trapping_math)
2624 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2625 if (!flag_reciprocal_math)
2626 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2627 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2629 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2630 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2631 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2633 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2634 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2635 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2637 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2638 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2639 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2641 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2642 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2643 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2645 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2646 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2647 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2649 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2650 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2651 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2653 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2654 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2655 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2657 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2658 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2659 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
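/* What -mrecip ultimately produces (a sketch under the flags discussed
   above, not actual GCC output): the fre/frsqrte family yields a low
   precision estimate that is refined by Newton-Raphson steps, each of
   which roughly doubles the number of correct bits.  That is why the
   warnings above insist on -ffast-math style semantics: the result is
   not correctly rounded and never traps the way a real divide can.  */
#if 0
static double
example_recip_divide (double n, double d, double estimate)
{
  double x = estimate;            /* e.g. the result of the fre insn   */
  x = x * (2.0 - d * x);          /* first Newton-Raphson refinement   */
  x = x * (2.0 - d * x);          /* second step for double precision  */
  return n * x;                   /* approximate n / d as n * (1/d)    */
}
#endif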
2663 if (global_init_p || TARGET_DEBUG_TARGET)
2665 if (TARGET_DEBUG_REG)
2666 rs6000_debug_reg_global ();
2668 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2669 fprintf (stderr,
2670 "SImode variable mult cost = %d\n"
2671 "SImode constant mult cost = %d\n"
2672 "SImode short constant mult cost = %d\n"
2673 "DImode multipliciation cost = %d\n"
2674 "SImode division cost = %d\n"
2675 "DImode division cost = %d\n"
2676 "Simple fp operation cost = %d\n"
2677 "DFmode multiplication cost = %d\n"
2678 "SFmode division cost = %d\n"
2679 "DFmode division cost = %d\n"
2680 "cache line size = %d\n"
2681 "l1 cache size = %d\n"
2682 "l2 cache size = %d\n"
2683 "simultaneous prefetches = %d\n"
2684 "\n",
2685 rs6000_cost->mulsi,
2686 rs6000_cost->mulsi_const,
2687 rs6000_cost->mulsi_const9,
2688 rs6000_cost->muldi,
2689 rs6000_cost->divsi,
2690 rs6000_cost->divdi,
2691 rs6000_cost->fp,
2692 rs6000_cost->dmul,
2693 rs6000_cost->sdiv,
2694 rs6000_cost->ddiv,
2695 rs6000_cost->cache_line_size,
2696 rs6000_cost->l1_cache_size,
2697 rs6000_cost->l2_cache_size,
2698 rs6000_cost->simultaneous_prefetches);
2702 #if TARGET_MACHO
2703 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2705 static void
2706 darwin_rs6000_override_options (void)
2708 /* The Darwin ABI always includes AltiVec, so it can't be (validly) turned
2709 off. */
2710 rs6000_altivec_abi = 1;
2711 TARGET_ALTIVEC_VRSAVE = 1;
2712 rs6000_current_abi = ABI_DARWIN;
2714 if (DEFAULT_ABI == ABI_DARWIN
2715 && TARGET_64BIT)
2716 darwin_one_byte_bool = 1;
2718 if (TARGET_64BIT && ! TARGET_POWERPC64)
2720 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2721 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2723 if (flag_mkernel)
2725 rs6000_default_long_calls = 1;
2726 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
2729 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2730 Altivec. */
2731 if (!flag_mkernel && !flag_apple_kext
2732 && TARGET_64BIT
2733 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
2734 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2736 /* Unless the user (not the configurer) has explicitly overridden
2737 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2738 G4 unless targeting the kernel. */
2739 if (!flag_mkernel
2740 && !flag_apple_kext
2741 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2742 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
2743 && ! global_options_set.x_rs6000_cpu_index)
2745 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2748 #endif
2750 /* If not otherwise specified by a target, make 'long double' equivalent to
2751 'double'. */
2753 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2754 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2755 #endif
2757 /* Return the builtin mask of the various options that could affect which
2758 builtins are enabled.  In the past we used target_flags, but we've run out
2759 of bits, and some options like SPE and PAIRED are no longer in
2760 target_flags.  */
2762 HOST_WIDE_INT
2763 rs6000_builtin_mask_calculate (void)
2765 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2766 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2767 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2768 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2769 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2770 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2771 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2772 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2773 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2774 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
2775 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
2776 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
2777 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0));
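/* Sketch (not part of GCC) of how a mask like this is consumed: each
   builtin records the RS6000_BTM_* bits it requires, and it is only made
   available when every one of those bits is present in the mask computed
   above.  */
#if 0
static int
example_builtin_available_p (HOST_WIDE_INT builtin_needs,
                             HOST_WIDE_INT builtin_mask)
{
  /* e.g. builtin_needs == (RS6000_BTM_ALTIVEC | RS6000_BTM_VSX).  */
  return (builtin_needs & ~builtin_mask) == 0;
}
#endif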
2780 /* Override command line options. Mostly we process the processor type and
2781 sometimes adjust other TARGET_ options. */
2783 static bool
2784 rs6000_option_override_internal (bool global_init_p)
2786 bool ret = true;
2787 bool have_cpu = false;
2789 /* The default cpu requested at configure time, if any. */
2790 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2792 HOST_WIDE_INT set_masks;
2793 int cpu_index;
2794 int tune_index;
2795 struct cl_target_option *main_target_opt
2796 = ((global_init_p || target_option_default_node == NULL)
2797 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2799 /* Remember the explicit arguments. */
2800 if (global_init_p)
2801 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
2803 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2804 library functions, so warn about it. The flag may be useful for
2805 performance studies from time to time though, so don't disable it
2806 entirely. */
2807 if (global_options_set.x_rs6000_alignment_flags
2808 && rs6000_alignment_flags == MASK_ALIGN_POWER
2809 && DEFAULT_ABI == ABI_DARWIN
2810 && TARGET_64BIT)
2811 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2812 " it is incompatible with the installed C and C++ libraries");
2814 /* Numerous experiments show that IRA-based loop pressure
2815 calculation works better for RTL loop invariant motion on targets
2816 with enough (>= 32) registers.  It is an expensive optimization,
2817 so it is only enabled for peak performance. */
2818 if (optimize >= 3 && global_init_p)
2819 flag_ira_loop_pressure = 1;
2821 /* Set the pointer size. */
2822 if (TARGET_64BIT)
2824 rs6000_pmode = (int)DImode;
2825 rs6000_pointer_size = 64;
2827 else
2829 rs6000_pmode = (int)SImode;
2830 rs6000_pointer_size = 32;
2833 /* Some OSs don't support saving the high part of 64-bit registers on context
2834 switch. Other OSs don't support saving Altivec registers. On those OSs,
2835 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
2836 if the user wants either, the user must explicitly specify them and we
2837 won't interfere with the user's specification. */
2839 set_masks = POWERPC_MASKS;
2840 #ifdef OS_MISSING_POWERPC64
2841 if (OS_MISSING_POWERPC64)
2842 set_masks &= ~OPTION_MASK_POWERPC64;
2843 #endif
2844 #ifdef OS_MISSING_ALTIVEC
2845 if (OS_MISSING_ALTIVEC)
2846 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
2847 #endif
2849 /* Don't let the processor default override flags that were given explicitly. */
2850 set_masks &= ~rs6000_isa_flags_explicit;
2852 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments.  If the user changed
2853 the cpu in a target attribute or pragma, but did not specify a tuning
2854 option, use the cpu for the tuning option rather than the option specified
2855 with -mtune on the command line. Process a '--with-cpu' configuration
2856 request as an implicit -mcpu. */
2857 if (rs6000_cpu_index >= 0)
2859 cpu_index = rs6000_cpu_index;
2860 have_cpu = true;
2862 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2864 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2865 have_cpu = true;
2867 else if (implicit_cpu)
2869 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
2870 have_cpu = true;
2872 else
2874 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
2875 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2876 have_cpu = false;
2879 gcc_assert (cpu_index >= 0);
2881 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
2882 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
2883 with those from the cpu, except for options that were explicitly set. If
2884 we don't have a cpu, do not override the target bits set in
2885 TARGET_DEFAULT. */
2886 if (have_cpu)
2888 rs6000_isa_flags &= ~set_masks;
2889 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2890 & set_masks);
2892 else
2893 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2894 & ~rs6000_isa_flags_explicit);
2896 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
2897 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
2898 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
2899 to using rs6000_isa_flags, we need to do the initialization here. */
2900 if (!have_cpu)
2901 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
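/* The masking idiom used here and below, shown in isolation (a sketch,
   not GCC code): default bits are merged through the complement of the
   explicitly-set mask, so anything the user named on the command line
   always survives -mcpu and configure-time defaults.  */
#if 0
static void
example_merge_defaults (HOST_WIDE_INT *flags, HOST_WIDE_INT defaults,
                        HOST_WIDE_INT explicit_mask)
{
  *flags |= (defaults & ~explicit_mask);  /* never touch explicit bits */
}
#endif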
2903 if (rs6000_tune_index >= 0)
2904 tune_index = rs6000_tune_index;
2905 else if (have_cpu)
2906 rs6000_tune_index = tune_index = cpu_index;
2907 else
2909 size_t i;
2910 enum processor_type tune_proc
2911 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2913 tune_index = -1;
2914 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2915 if (processor_target_table[i].processor == tune_proc)
2917 rs6000_tune_index = tune_index = i;
2918 break;
2922 gcc_assert (tune_index >= 0);
2923 rs6000_cpu = processor_target_table[tune_index].processor;
2925 /* Pick defaults for SPE-related control flags.  Do this early to make sure
2926 that the TARGET_ macros are representative ASAP. */
2928 int spe_capable_cpu =
2929 (rs6000_cpu == PROCESSOR_PPC8540
2930 || rs6000_cpu == PROCESSOR_PPC8548);
2932 if (!global_options_set.x_rs6000_spe_abi)
2933 rs6000_spe_abi = spe_capable_cpu;
2935 if (!global_options_set.x_rs6000_spe)
2936 rs6000_spe = spe_capable_cpu;
2938 if (!global_options_set.x_rs6000_float_gprs)
2939 rs6000_float_gprs =
2940 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2941 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2942 : 0);
2945 if (global_options_set.x_rs6000_spe_abi
2946 && rs6000_spe_abi
2947 && !TARGET_SPE_ABI)
2948 error ("not configured for SPE ABI");
2950 if (global_options_set.x_rs6000_spe
2951 && rs6000_spe
2952 && !TARGET_SPE)
2953 error ("not configured for SPE instruction set");
2955 if (main_target_opt != NULL
2956 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2957 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2958 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2959 error ("target attribute or pragma changes SPE ABI");
2961 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2962 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2963 || rs6000_cpu == PROCESSOR_PPCE5500)
2965 if (TARGET_ALTIVEC)
2966 error ("AltiVec not supported in this target");
2967 if (TARGET_SPE)
2968 error ("SPE not supported in this target");
2970 if (rs6000_cpu == PROCESSOR_PPCE6500)
2972 if (TARGET_SPE)
2973 error ("SPE not supported in this target");
2976 /* Disable Cell microcode if we are optimizing for the Cell
2977 and not optimizing for size. */
2978 if (rs6000_gen_cell_microcode == -1)
2979 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2980 && !optimize_size);
2982 /* If we are optimizing big endian systems for space and it's OK to
2983 use instructions that would be microcoded on the Cell, use the
2984 load/store multiple and string instructions. */
2985 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2986 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
2987 | OPTION_MASK_STRING);
2989 /* Don't allow -mmultiple or -mstring on little endian systems
2990 unless the cpu is a 750, because the hardware doesn't support the
2991 instructions used in little endian mode and they cause an alignment
2992 trap.  The 750 does not cause an alignment trap (except when the
2993 target address is unaligned). */
2995 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2997 if (TARGET_MULTIPLE)
2999 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3000 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3001 warning (0, "-mmultiple is not supported on little endian systems");
3004 if (TARGET_STRING)
3006 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3007 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3008 warning (0, "-mstring is not supported on little endian systems");
3012 /* Add some warnings for VSX. */
3013 if (TARGET_VSX)
3015 const char *msg = NULL;
3016 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3017 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3019 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3020 msg = N_("-mvsx requires hardware floating point");
3021 else
3023 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3024 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3027 else if (TARGET_PAIRED_FLOAT)
3028 msg = N_("-mvsx and -mpaired are incompatible");
3029 /* The hardware allows VSX with little endian, but until we make sure
3030 things like vector select, etc. work correctly, don't allow VSX on little
3031 endian systems at this point. */
3032 else if (!BYTES_BIG_ENDIAN)
3033 msg = N_("-mvsx used with little endian code");
3034 else if (TARGET_AVOID_XFORM > 0)
3035 msg = N_("-mvsx needs indexed addressing");
3036 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3037 & OPTION_MASK_ALTIVEC))
3039 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3040 msg = N_("-mvsx and -mno-altivec are incompatible");
3041 else
3042 msg = N_("-mno-altivec disables vsx");
3045 if (msg)
3047 warning (0, msg);
3048 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3049 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3053 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3054 the -mcpu setting to enable options that conflict. */
3055 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3056 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3057 | OPTION_MASK_ALTIVEC
3058 | OPTION_MASK_VSX)) != 0)
3059 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3060 | OPTION_MASK_DIRECT_MOVE)
3061 & ~rs6000_isa_flags_explicit);
3063 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3064 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3066 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3067 unless the user explicitly used -mno-<option> to disable the code. */
3068 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3069 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3070 else if (TARGET_VSX)
3071 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3072 else if (TARGET_POPCNTD)
3073 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3074 else if (TARGET_DFP)
3075 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3076 else if (TARGET_CMPB)
3077 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3078 else if (TARGET_FPRND)
3079 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3080 else if (TARGET_POPCNTB)
3081 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3082 else if (TARGET_ALTIVEC)
3083 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
3085 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3087 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3088 error ("-mcrypto requires -maltivec");
3089 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3092 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3094 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3095 error ("-mdirect-move requires -mvsx");
3096 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3099 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3101 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3102 error ("-mpower8-vector requires -maltivec");
3103 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3106 if (TARGET_P8_VECTOR && !TARGET_VSX)
3108 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3109 error ("-mpower8-vector requires -mvsx");
3110 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3113 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3115 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3116 error ("-mvsx-timode requires -mvsx");
3117 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3120 /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
3121 silently turn off quad memory mode. */
3122 if (TARGET_QUAD_MEMORY && !TARGET_POWERPC64)
3124 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3125 warning (0, N_("-mquad-memory requires 64-bit mode"));
3127 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3130 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3131 generating power8 instructions. */
3132 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3133 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3134 & OPTION_MASK_P8_FUSION);
3136 /* Power8 does not fuse sign extended loads with the addis. If we are
3137 optimizing at high levels for speed, convert a sign extended load into a
3138 zero extending load, and an explicit sign extension. */
3139 if (TARGET_P8_FUSION
3140 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3141 && optimize_function_for_speed_p (cfun)
3142 && optimize >= 3)
3143 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
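/* Illustration of the rewrite described above (a hand-written sketch, not
   compiler output; the real transformation happens in the load patterns).
   A sign extending load such as

       addis  r9,r2,sym@toc@ha
       lwa    r3,sym@toc@l(r9)

   is not a fusion candidate on power8, so at -O3 it becomes a fusable zero
   extending load followed by an explicit sign extension:

       addis  r9,r2,sym@toc@ha
       lwz    r3,sym@toc@l(r9)
       extsw  r3,r3                                                       */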
3145 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3146 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3148 /* E500mc does "better" if we inline more aggressively. Respect the
3149 user's opinion, though. */
3150 if (rs6000_block_move_inline_limit == 0
3151 && (rs6000_cpu == PROCESSOR_PPCE500MC
3152 || rs6000_cpu == PROCESSOR_PPCE500MC64
3153 || rs6000_cpu == PROCESSOR_PPCE5500
3154 || rs6000_cpu == PROCESSOR_PPCE6500))
3155 rs6000_block_move_inline_limit = 128;
3157 /* store_one_arg depends on expand_block_move to handle at least the
3158 size of reg_parm_stack_space. */
3159 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3160 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3162 if (global_init_p)
3164 /* If the appropriate debug option is enabled, replace the target hooks
3165 with debug versions that call the real version and then print
3166 debugging information. */
3167 if (TARGET_DEBUG_COST)
3169 targetm.rtx_costs = rs6000_debug_rtx_costs;
3170 targetm.address_cost = rs6000_debug_address_cost;
3171 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3174 if (TARGET_DEBUG_ADDR)
3176 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3177 targetm.legitimize_address = rs6000_debug_legitimize_address;
3178 rs6000_secondary_reload_class_ptr
3179 = rs6000_debug_secondary_reload_class;
3180 rs6000_secondary_memory_needed_ptr
3181 = rs6000_debug_secondary_memory_needed;
3182 rs6000_cannot_change_mode_class_ptr
3183 = rs6000_debug_cannot_change_mode_class;
3184 rs6000_preferred_reload_class_ptr
3185 = rs6000_debug_preferred_reload_class;
3186 rs6000_legitimize_reload_address_ptr
3187 = rs6000_debug_legitimize_reload_address;
3188 rs6000_mode_dependent_address_ptr
3189 = rs6000_debug_mode_dependent_address;
3192 if (rs6000_veclibabi_name)
3194 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3195 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3196 else
3198 error ("unknown vectorization library ABI type (%s) for "
3199 "-mveclibabi= switch", rs6000_veclibabi_name);
3200 ret = false;
3205 if (!global_options_set.x_rs6000_long_double_type_size)
3207 if (main_target_opt != NULL
3208 && (main_target_opt->x_rs6000_long_double_type_size
3209 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3210 error ("target attribute or pragma changes long double size");
3211 else
3212 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3215 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3216 if (!global_options_set.x_rs6000_ieeequad)
3217 rs6000_ieeequad = 1;
3218 #endif
3220 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
3221 target attribute or pragma which automatically enables both options,
3222 unless the AltiVec ABI was set (it is set by default for 64-bit, but
3223 not for 32-bit). */
3224 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3225 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3226 & ~rs6000_isa_flags_explicit);
3228 /* Enable Altivec ABI for AIX -maltivec. */
3229 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3231 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3232 error ("target attribute or pragma changes AltiVec ABI");
3233 else
3234 rs6000_altivec_abi = 1;
3237 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3238 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3239 be explicitly overridden in either case. */
3240 if (TARGET_ELF)
3242 if (!global_options_set.x_rs6000_altivec_abi
3243 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3245 if (main_target_opt != NULL &&
3246 !main_target_opt->x_rs6000_altivec_abi)
3247 error ("target attribute or pragma changes AltiVec ABI");
3248 else
3249 rs6000_altivec_abi = 1;
3253 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3254 So far, the only darwin64 targets are also MACH-O. */
3255 if (TARGET_MACHO
3256 && DEFAULT_ABI == ABI_DARWIN
3257 && TARGET_64BIT)
3259 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3260 error ("target attribute or pragma changes darwin64 ABI");
3261 else
3263 rs6000_darwin64_abi = 1;
3264 /* Default to natural alignment, for better performance. */
3265 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3269 /* Place FP constants in the constant pool instead of the TOC
3270 if section anchors are enabled. */
3271 if (flag_section_anchors
3272 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3273 TARGET_NO_FP_IN_TOC = 1;
3275 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3276 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3278 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3279 SUBTARGET_OVERRIDE_OPTIONS;
3280 #endif
3281 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3282 SUBSUBTARGET_OVERRIDE_OPTIONS;
3283 #endif
3284 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3285 SUB3TARGET_OVERRIDE_OPTIONS;
3286 #endif
3288 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3289 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3291 /* For the E500 family of cores, reset the single/double FP flags to let us
3292 check that they remain constant across attributes or pragmas. Also,
3293 clear a possible request for string instructions, which are not supported
3294 and which we might have silently requested above for -Os.
3296 For other families, clear ISEL in case it was set implicitly.
3299 switch (rs6000_cpu)
3301 case PROCESSOR_PPC8540:
3302 case PROCESSOR_PPC8548:
3303 case PROCESSOR_PPCE500MC:
3304 case PROCESSOR_PPCE500MC64:
3305 case PROCESSOR_PPCE5500:
3306 case PROCESSOR_PPCE6500:
3308 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3309 rs6000_double_float = TARGET_E500_DOUBLE;
3311 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3313 break;
3315 default:
3317 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3318 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3320 break;
3323 if (main_target_opt)
3325 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3326 error ("target attribute or pragma changes single precision floating "
3327 "point");
3328 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3329 error ("target attribute or pragma changes double precision floating "
3330 "point");
3333 /* Detect invalid option combinations with E500. */
3334 CHECK_E500_OPTIONS;
3336 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3337 && rs6000_cpu != PROCESSOR_POWER5
3338 && rs6000_cpu != PROCESSOR_POWER6
3339 && rs6000_cpu != PROCESSOR_POWER7
3340 && rs6000_cpu != PROCESSOR_POWER8
3341 && rs6000_cpu != PROCESSOR_PPCA2
3342 && rs6000_cpu != PROCESSOR_CELL
3343 && rs6000_cpu != PROCESSOR_PPC476);
3344 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3345 || rs6000_cpu == PROCESSOR_POWER5
3346 || rs6000_cpu == PROCESSOR_POWER7
3347 || rs6000_cpu == PROCESSOR_POWER8);
3348 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3349 || rs6000_cpu == PROCESSOR_POWER5
3350 || rs6000_cpu == PROCESSOR_POWER6
3351 || rs6000_cpu == PROCESSOR_POWER7
3352 || rs6000_cpu == PROCESSOR_POWER8
3353 || rs6000_cpu == PROCESSOR_PPCE500MC
3354 || rs6000_cpu == PROCESSOR_PPCE500MC64
3355 || rs6000_cpu == PROCESSOR_PPCE5500
3356 || rs6000_cpu == PROCESSOR_PPCE6500);
3358 /* Allow debug switches to override the above settings. These are set to -1
3359 in rs6000.opt to indicate the user hasn't directly set the switch. */
3360 if (TARGET_ALWAYS_HINT >= 0)
3361 rs6000_always_hint = TARGET_ALWAYS_HINT;
3363 if (TARGET_SCHED_GROUPS >= 0)
3364 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3366 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3367 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3369 rs6000_sched_restricted_insns_priority
3370 = (rs6000_sched_groups ? 1 : 0);
3372 /* Handle -msched-costly-dep option. */
3373 rs6000_sched_costly_dep
3374 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3376 if (rs6000_sched_costly_dep_str)
3378 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3379 rs6000_sched_costly_dep = no_dep_costly;
3380 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3381 rs6000_sched_costly_dep = all_deps_costly;
3382 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3383 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3384 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3385 rs6000_sched_costly_dep = store_to_load_dep_costly;
3386 else
3387 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3388 atoi (rs6000_sched_costly_dep_str));
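/* For example, -msched-costly-dep=store_to_load makes every dependence from
   a store to a load costly, while a bare integer N treats a dependence as
   costly once its latency reaches N (see rs6000_is_costly_dependence). */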
3391 /* Handle -minsert-sched-nops option. */
3392 rs6000_sched_insert_nops
3393 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3395 if (rs6000_sched_insert_nops_str)
3397 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3398 rs6000_sched_insert_nops = sched_finish_none;
3399 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3400 rs6000_sched_insert_nops = sched_finish_pad_groups;
3401 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3402 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3403 else
3404 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3405 atoi (rs6000_sched_insert_nops_str));
3408 if (global_init_p)
3410 #ifdef TARGET_REGNAMES
3411 /* If the user desires alternate register names, copy in the
3412 alternate names now. */
3413 if (TARGET_REGNAMES)
3414 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3415 #endif
3417 /* Set aix_struct_return last, after the ABI is determined.
3418 If -maix-struct-return or -msvr4-struct-return was explicitly
3419 used, don't override with the ABI default. */
3420 if (!global_options_set.x_aix_struct_return)
3421 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3423 #if 0
3424 /* IBM XL compiler defaults to unsigned bitfields. */
3425 if (TARGET_XL_COMPAT)
3426 flag_signed_bitfields = 0;
3427 #endif
3429 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3430 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3432 if (TARGET_TOC)
3433 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3435 /* We can only guarantee the availability of DI pseudo-ops when
3436 assembling for 64-bit targets. */
3437 if (!TARGET_64BIT)
3439 targetm.asm_out.aligned_op.di = NULL;
3440 targetm.asm_out.unaligned_op.di = NULL;
3444 /* Set branch target alignment, if not optimizing for size. */
3445 if (!optimize_size)
3447 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
3448 8-byte aligned to avoid misprediction by the branch predictor. */
3449 if (rs6000_cpu == PROCESSOR_TITAN
3450 || rs6000_cpu == PROCESSOR_CELL)
3452 if (align_functions <= 0)
3453 align_functions = 8;
3454 if (align_jumps <= 0)
3455 align_jumps = 8;
3456 if (align_loops <= 0)
3457 align_loops = 8;
3459 if (rs6000_align_branch_targets)
3461 if (align_functions <= 0)
3462 align_functions = 16;
3463 if (align_jumps <= 0)
3464 align_jumps = 16;
3465 if (align_loops <= 0)
3467 can_override_loop_align = 1;
3468 align_loops = 16;
3471 if (align_jumps_max_skip <= 0)
3472 align_jumps_max_skip = 15;
3473 if (align_loops_max_skip <= 0)
3474 align_loops_max_skip = 15;
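/* For example, on a power7 target with no explicit -falign-* options,
   rs6000_align_branch_targets is set above, so functions, jumps and loops
   all default to 16-byte alignment with at most 15 bytes of padding. */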
3477 /* Arrange to save and restore machine status around nested functions. */
3478 init_machine_status = rs6000_init_machine_status;
3480 /* We should always be splitting complex arguments, but we can't break
3481 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3482 if (DEFAULT_ABI != ABI_AIX)
3483 targetm.calls.split_complex_arg = NULL;
3486 /* Initialize rs6000_cost with the appropriate target costs. */
3487 if (optimize_size)
3488 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3489 else
3490 switch (rs6000_cpu)
3492 case PROCESSOR_RS64A:
3493 rs6000_cost = &rs64a_cost;
3494 break;
3496 case PROCESSOR_MPCCORE:
3497 rs6000_cost = &mpccore_cost;
3498 break;
3500 case PROCESSOR_PPC403:
3501 rs6000_cost = &ppc403_cost;
3502 break;
3504 case PROCESSOR_PPC405:
3505 rs6000_cost = &ppc405_cost;
3506 break;
3508 case PROCESSOR_PPC440:
3509 rs6000_cost = &ppc440_cost;
3510 break;
3512 case PROCESSOR_PPC476:
3513 rs6000_cost = &ppc476_cost;
3514 break;
3516 case PROCESSOR_PPC601:
3517 rs6000_cost = &ppc601_cost;
3518 break;
3520 case PROCESSOR_PPC603:
3521 rs6000_cost = &ppc603_cost;
3522 break;
3524 case PROCESSOR_PPC604:
3525 rs6000_cost = &ppc604_cost;
3526 break;
3528 case PROCESSOR_PPC604e:
3529 rs6000_cost = &ppc604e_cost;
3530 break;
3532 case PROCESSOR_PPC620:
3533 rs6000_cost = &ppc620_cost;
3534 break;
3536 case PROCESSOR_PPC630:
3537 rs6000_cost = &ppc630_cost;
3538 break;
3540 case PROCESSOR_CELL:
3541 rs6000_cost = &ppccell_cost;
3542 break;
3544 case PROCESSOR_PPC750:
3545 case PROCESSOR_PPC7400:
3546 rs6000_cost = &ppc750_cost;
3547 break;
3549 case PROCESSOR_PPC7450:
3550 rs6000_cost = &ppc7450_cost;
3551 break;
3553 case PROCESSOR_PPC8540:
3554 case PROCESSOR_PPC8548:
3555 rs6000_cost = &ppc8540_cost;
3556 break;
3558 case PROCESSOR_PPCE300C2:
3559 case PROCESSOR_PPCE300C3:
3560 rs6000_cost = &ppce300c2c3_cost;
3561 break;
3563 case PROCESSOR_PPCE500MC:
3564 rs6000_cost = &ppce500mc_cost;
3565 break;
3567 case PROCESSOR_PPCE500MC64:
3568 rs6000_cost = &ppce500mc64_cost;
3569 break;
3571 case PROCESSOR_PPCE5500:
3572 rs6000_cost = &ppce5500_cost;
3573 break;
3575 case PROCESSOR_PPCE6500:
3576 rs6000_cost = &ppce6500_cost;
3577 break;
3579 case PROCESSOR_TITAN:
3580 rs6000_cost = &titan_cost;
3581 break;
3583 case PROCESSOR_POWER4:
3584 case PROCESSOR_POWER5:
3585 rs6000_cost = &power4_cost;
3586 break;
3588 case PROCESSOR_POWER6:
3589 rs6000_cost = &power6_cost;
3590 break;
3592 case PROCESSOR_POWER7:
3593 rs6000_cost = &power7_cost;
3594 break;
3596 case PROCESSOR_POWER8:
3597 rs6000_cost = &power8_cost;
3598 break;
3600 case PROCESSOR_PPCA2:
3601 rs6000_cost = &ppca2_cost;
3602 break;
3604 default:
3605 gcc_unreachable ();
3608 if (global_init_p)
3610 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3611 rs6000_cost->simultaneous_prefetches,
3612 global_options.x_param_values,
3613 global_options_set.x_param_values);
3614 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3615 global_options.x_param_values,
3616 global_options_set.x_param_values);
3617 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3618 rs6000_cost->cache_line_size,
3619 global_options.x_param_values,
3620 global_options_set.x_param_values);
3621 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3622 global_options.x_param_values,
3623 global_options_set.x_param_values);
3625 /* Increase loop peeling limits based on performance analysis. */
3626 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3627 global_options.x_param_values,
3628 global_options_set.x_param_values);
3629 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3630 global_options.x_param_values,
3631 global_options_set.x_param_values);
3633 /* If using typedef char *va_list, signal that
3634 __builtin_va_start (&ap, 0) can be optimized to
3635 ap = __builtin_next_arg (0). */
3636 if (DEFAULT_ABI != ABI_V4)
3637 targetm.expand_builtin_va_start = NULL;
3640 /* Set up single/double float flags.
3641 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3642 then set both flags. */
3643 if (TARGET_HARD_FLOAT && TARGET_FPRS
3644 && rs6000_single_float == 0 && rs6000_double_float == 0)
3645 rs6000_single_float = rs6000_double_float = 1;
3647 /* If not explicitly specified via option, decide whether to generate indexed
3648 load/store instructions. */
3649 if (TARGET_AVOID_XFORM == -1)
3650 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3651 DERAT mispredict penalty. However, the LVE and STVE AltiVec instructions
3652 need indexed accesses, and the type used is the scalar type of the element
3653 being loaded or stored. */
3654 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3655 && !TARGET_ALTIVEC);
3657 /* Set the -mrecip options. */
3658 if (rs6000_recip_name)
3660 char *p = ASTRDUP (rs6000_recip_name);
3661 char *q;
3662 unsigned int mask, i;
3663 bool invert;
3665 while ((q = strtok (p, ",")) != NULL)
3667 p = NULL;
3668 if (*q == '!')
3670 invert = true;
3671 q++;
3673 else
3674 invert = false;
3676 if (!strcmp (q, "default"))
3677 mask = ((TARGET_RECIP_PRECISION)
3678 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3679 else
3681 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3682 if (!strcmp (q, recip_options[i].string))
3684 mask = recip_options[i].mask;
3685 break;
3688 if (i == ARRAY_SIZE (recip_options))
3690 error ("unknown option for -mrecip=%s", q);
3691 invert = false;
3692 mask = 0;
3693 ret = false;
3697 if (invert)
3698 rs6000_recip_control &= ~mask;
3699 else
3700 rs6000_recip_control |= mask;
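/* For example, -mrecip=all,!rsqrtd (using the names in the recip_options
   table) enables every reciprocal estimate except the double precision
   reciprocal square root. */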
3704 /* Set the builtin mask of the various options used that could affect which
3705 builtins were used. In the past we used target_flags, but we've run out
3706 of bits, and some options like SPE and PAIRED are no longer in
3707 target_flags. */
3708 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3709 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3711 fprintf (stderr,
3712 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
3713 rs6000_builtin_mask);
3714 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
3717 /* Initialize all of the registers. */
3718 rs6000_init_hard_regno_mode_ok (global_init_p);
3720 /* Save the initial options in case the user uses function-specific options. */
3721 if (global_init_p)
3722 target_option_default_node = target_option_current_node
3723 = build_target_option_node (&global_options);
3725 /* If not explicitly specified via option, decide whether to generate the
3726 extra blr's required to preserve the link stack on some cpus (e.g., the 476). */
3727 if (TARGET_LINK_STACK == -1)
3728 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3730 return ret;
3733 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3734 define the target cpu type. */
3736 static void
3737 rs6000_option_override (void)
3739 (void) rs6000_option_override_internal (true);
3743 /* Implement targetm.vectorize.builtin_mask_for_load. */
3744 static tree
3745 rs6000_builtin_mask_for_load (void)
3747 if (TARGET_ALTIVEC || TARGET_VSX)
3748 return altivec_builtin_mask_for_load;
3749 else
3750 return 0;
3753 /* Implement LOOP_ALIGN. */
3755 rs6000_loop_align (rtx label)
3757 basic_block bb;
3758 int ninsns;
3760 /* Don't override loop alignment if -falign-loops was specified. */
3761 if (!can_override_loop_align)
3762 return align_loops_log;
3764 bb = BLOCK_FOR_INSN (label);
3765 ninsns = num_loop_insns(bb->loop_father);
3767 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3768 if (ninsns > 4 && ninsns <= 8
3769 && (rs6000_cpu == PROCESSOR_POWER4
3770 || rs6000_cpu == PROCESSOR_POWER5
3771 || rs6000_cpu == PROCESSOR_POWER6
3772 || rs6000_cpu == PROCESSOR_POWER7
3773 || rs6000_cpu == PROCESSOR_POWER8))
3774 return 5;
3775 else
3776 return align_loops_log;
3779 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3780 static int
3781 rs6000_loop_align_max_skip (rtx label)
3783 return (1 << rs6000_loop_align (label)) - 1;
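/* E.g. when rs6000_loop_align returns 5 (32-byte alignment), up to
   (1 << 5) - 1 == 31 bytes of padding may be skipped. */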
3786 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3787 after applying N iterations. This routine does not determine
3788 how many iterations are required to reach the desired alignment. */
3790 static bool
3791 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3793 if (is_packed)
3794 return false;
3796 if (TARGET_32BIT)
3798 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3799 return true;
3801 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3802 return true;
3804 return false;
3806 else
3808 if (TARGET_MACHO)
3809 return false;
3811 /* Assuming that all other types are naturally aligned. CHECKME! */
3812 return true;
3816 /* Return true if the vector misalignment factor is supported by the
3817 target. */
3818 static bool
3819 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3820 const_tree type,
3821 int misalignment,
3822 bool is_packed)
3824 if (TARGET_VSX)
3826 /* Return false if the movmisalign pattern is not supported for this mode. */
3827 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3828 return false;
3830 if (misalignment == -1)
3832 /* Misalignment factor is unknown at compile time but we know
3833 it's word aligned. */
3834 if (rs6000_vector_alignment_reachable (type, is_packed))
3836 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3838 if (element_size == 64 || element_size == 32)
3839 return true;
3842 return false;
3845 /* VSX supports word-aligned vectors. */
3846 if (misalignment % 4 == 0)
3847 return true;
3849 return false;
3852 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3853 static int
3854 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3855 tree vectype, int misalign)
3857 unsigned elements;
3858 tree elem_type;
3860 switch (type_of_cost)
3862 case scalar_stmt:
3863 case scalar_load:
3864 case scalar_store:
3865 case vector_stmt:
3866 case vector_load:
3867 case vector_store:
3868 case vec_to_scalar:
3869 case scalar_to_vec:
3870 case cond_branch_not_taken:
3871 return 1;
3873 case vec_perm:
3874 if (TARGET_VSX)
3875 return 3;
3876 else
3877 return 1;
3879 case vec_promote_demote:
3880 if (TARGET_VSX)
3881 return 4;
3882 else
3883 return 1;
3885 case cond_branch_taken:
3886 return 3;
3888 case unaligned_load:
3889 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3891 elements = TYPE_VECTOR_SUBPARTS (vectype);
3892 if (elements == 2)
3893 /* Double word aligned. */
3894 return 2;
3896 if (elements == 4)
3898 switch (misalign)
3900 case 8:
3901 /* Double word aligned. */
3902 return 2;
3904 case -1:
3905 /* Unknown misalignment. */
3906 case 4:
3907 case 12:
3908 /* Word aligned. */
3909 return 22;
3911 default:
3912 gcc_unreachable ();
3917 if (TARGET_ALTIVEC)
3918 /* Misaligned loads are not supported. */
3919 gcc_unreachable ();
3921 return 2;
3923 case unaligned_store:
3924 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3926 elements = TYPE_VECTOR_SUBPARTS (vectype);
3927 if (elements == 2)
3928 /* Double word aligned. */
3929 return 2;
3931 if (elements == 4)
3933 switch (misalign)
3935 case 8:
3936 /* Double word aligned. */
3937 return 2;
3939 case -1:
3940 /* Unknown misalignment. */
3941 case 4:
3942 case 12:
3943 /* Word aligned. */
3944 return 23;
3946 default:
3947 gcc_unreachable ();
3952 if (TARGET_ALTIVEC)
3953 /* Misaligned stores are not supported. */
3954 gcc_unreachable ();
3956 return 2;
3958 case vec_construct:
3959 elements = TYPE_VECTOR_SUBPARTS (vectype);
3960 elem_type = TREE_TYPE (vectype);
3961 /* 32-bit vectors loaded into registers are stored as double
3962 precision, so we need n/2 converts in addition to the usual
3963 n/2 merges to construct a vector of short floats from them. */
3964 if (SCALAR_FLOAT_TYPE_P (elem_type)
3965 && TYPE_PRECISION (elem_type) == 32)
3966 return elements + 1;
3967 else
3968 return elements / 2 + 1;
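      /* E.g. constructing a V4SF costs 4 + 1 == 5 (two merges plus two
	 converts plus one), while constructing a V2DF costs 2 / 2 + 1 == 2. */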
3970 default:
3971 gcc_unreachable ();
3975 /* Implement targetm.vectorize.preferred_simd_mode. */
3977 static enum machine_mode
3978 rs6000_preferred_simd_mode (enum machine_mode mode)
3980 if (TARGET_VSX)
3981 switch (mode)
3983 case DFmode:
3984 return V2DFmode;
3985 default:;
3987 if (TARGET_ALTIVEC || TARGET_VSX)
3988 switch (mode)
3990 case SFmode:
3991 return V4SFmode;
3992 case DImode:
3993 return V2DImode;
3994 case SImode:
3995 return V4SImode;
3996 case HImode:
3997 return V8HImode;
3998 case QImode:
3999 return V16QImode;
4000 default:;
4002 if (TARGET_SPE)
4003 switch (mode)
4005 case SFmode:
4006 return V2SFmode;
4007 case SImode:
4008 return V2SImode;
4009 default:;
4011 if (TARGET_PAIRED_FLOAT
4012 && mode == SFmode)
4013 return V2SFmode;
4014 return word_mode;
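/* For example, with -mvsx a DFmode loop is vectorized with V2DFmode and an
   SImode loop with V4SImode; with no vector unit enabled the vectorizer is
   left with word_mode, i.e. no vectorization. */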
4017 typedef struct _rs6000_cost_data
4019 struct loop *loop_info;
4020 unsigned cost[3];
4021 } rs6000_cost_data;
4023 /* Test for likely overcommitment of vector hardware resources. If a
4024 loop iteration is relatively large, and too large a percentage of
4025 instructions in the loop are vectorized, the cost model may not
4026 adequately reflect delays from unavailable vector resources.
4027 Penalize the loop body cost for this case. */
4029 static void
4030 rs6000_density_test (rs6000_cost_data *data)
4032 const int DENSITY_PCT_THRESHOLD = 85;
4033 const int DENSITY_SIZE_THRESHOLD = 70;
4034 const int DENSITY_PENALTY = 10;
4035 struct loop *loop = data->loop_info;
4036 basic_block *bbs = get_loop_body (loop);
4037 int nbbs = loop->num_nodes;
4038 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4039 int i, density_pct;
4041 for (i = 0; i < nbbs; i++)
4043 basic_block bb = bbs[i];
4044 gimple_stmt_iterator gsi;
4046 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4048 gimple stmt = gsi_stmt (gsi);
4049 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4051 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4052 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4053 not_vec_cost++;
4057 free (bbs);
4058 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4060 if (density_pct > DENSITY_PCT_THRESHOLD
4061 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4063 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4064 if (dump_enabled_p ())
4065 dump_printf_loc (MSG_NOTE, vect_location,
4066 "density %d%%, cost %d exceeds threshold, penalizing "
4067 "loop body cost by %d%%", density_pct,
4068 vec_cost + not_vec_cost, DENSITY_PENALTY);
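/* For example, vec_cost == 90 and not_vec_cost == 10 give a density of 90%
   on a loop body of size 100; both thresholds are exceeded, so the body
   cost becomes 90 * 110 / 100 == 99. */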
4072 /* Implement targetm.vectorize.init_cost. */
4074 static void *
4075 rs6000_init_cost (struct loop *loop_info)
4077 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4078 data->loop_info = loop_info;
4079 data->cost[vect_prologue] = 0;
4080 data->cost[vect_body] = 0;
4081 data->cost[vect_epilogue] = 0;
4082 return data;
4085 /* Implement targetm.vectorize.add_stmt_cost. */
4087 static unsigned
4088 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4089 struct _stmt_vec_info *stmt_info, int misalign,
4090 enum vect_cost_model_location where)
4092 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4093 unsigned retval = 0;
4095 if (flag_vect_cost_model)
4097 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4098 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4099 misalign);
4100 /* Statements in an inner loop relative to the loop being
4101 vectorized are weighted more heavily. The value here is
4102 arbitrary and could potentially be improved with analysis. */
4103 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4104 count *= 50; /* FIXME. */
4106 retval = (unsigned) (count * stmt_cost);
4107 cost_data->cost[where] += retval;
4110 return retval;
4113 /* Implement targetm.vectorize.finish_cost. */
4115 static void
4116 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4117 unsigned *body_cost, unsigned *epilogue_cost)
4119 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4121 if (cost_data->loop_info)
4122 rs6000_density_test (cost_data);
4124 *prologue_cost = cost_data->cost[vect_prologue];
4125 *body_cost = cost_data->cost[vect_body];
4126 *epilogue_cost = cost_data->cost[vect_epilogue];
4129 /* Implement targetm.vectorize.destroy_cost_data. */
4131 static void
4132 rs6000_destroy_cost_data (void *data)
4134 free (data);
4137 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4138 library with vectorized intrinsics. */
4140 static tree
4141 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4143 char name[32];
4144 const char *suffix = NULL;
4145 tree fntype, new_fndecl, bdecl = NULL_TREE;
4146 int n_args = 1;
4147 const char *bname;
4148 enum machine_mode el_mode, in_mode;
4149 int n, in_n;
4151 /* Libmass is suitable only for unsafe math, as it does not correctly support
4152 parts of IEEE (such as denormals) with the required precision. Only support
4153 it if we have VSX to use the simd d2 or f4 functions.
4154 XXX: Add variable length support. */
4155 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4156 return NULL_TREE;
4158 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4159 n = TYPE_VECTOR_SUBPARTS (type_out);
4160 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4161 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4162 if (el_mode != in_mode
4163 || n != in_n)
4164 return NULL_TREE;
4166 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4168 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4169 switch (fn)
4171 case BUILT_IN_ATAN2:
4172 case BUILT_IN_HYPOT:
4173 case BUILT_IN_POW:
4174 n_args = 2;
4175 /* fall through */
4177 case BUILT_IN_ACOS:
4178 case BUILT_IN_ACOSH:
4179 case BUILT_IN_ASIN:
4180 case BUILT_IN_ASINH:
4181 case BUILT_IN_ATAN:
4182 case BUILT_IN_ATANH:
4183 case BUILT_IN_CBRT:
4184 case BUILT_IN_COS:
4185 case BUILT_IN_COSH:
4186 case BUILT_IN_ERF:
4187 case BUILT_IN_ERFC:
4188 case BUILT_IN_EXP2:
4189 case BUILT_IN_EXP:
4190 case BUILT_IN_EXPM1:
4191 case BUILT_IN_LGAMMA:
4192 case BUILT_IN_LOG10:
4193 case BUILT_IN_LOG1P:
4194 case BUILT_IN_LOG2:
4195 case BUILT_IN_LOG:
4196 case BUILT_IN_SIN:
4197 case BUILT_IN_SINH:
4198 case BUILT_IN_SQRT:
4199 case BUILT_IN_TAN:
4200 case BUILT_IN_TANH:
4201 bdecl = builtin_decl_implicit (fn);
4202 suffix = "d2"; /* pow -> powd2 */
4203 if (el_mode != DFmode
4204 || n != 2
4205 || !bdecl)
4206 return NULL_TREE;
4207 break;
4209 case BUILT_IN_ATAN2F:
4210 case BUILT_IN_HYPOTF:
4211 case BUILT_IN_POWF:
4212 n_args = 2;
4213 /* fall through */
4215 case BUILT_IN_ACOSF:
4216 case BUILT_IN_ACOSHF:
4217 case BUILT_IN_ASINF:
4218 case BUILT_IN_ASINHF:
4219 case BUILT_IN_ATANF:
4220 case BUILT_IN_ATANHF:
4221 case BUILT_IN_CBRTF:
4222 case BUILT_IN_COSF:
4223 case BUILT_IN_COSHF:
4224 case BUILT_IN_ERFF:
4225 case BUILT_IN_ERFCF:
4226 case BUILT_IN_EXP2F:
4227 case BUILT_IN_EXPF:
4228 case BUILT_IN_EXPM1F:
4229 case BUILT_IN_LGAMMAF:
4230 case BUILT_IN_LOG10F:
4231 case BUILT_IN_LOG1PF:
4232 case BUILT_IN_LOG2F:
4233 case BUILT_IN_LOGF:
4234 case BUILT_IN_SINF:
4235 case BUILT_IN_SINHF:
4236 case BUILT_IN_SQRTF:
4237 case BUILT_IN_TANF:
4238 case BUILT_IN_TANHF:
4239 bdecl = builtin_decl_implicit (fn);
4240 suffix = "4"; /* powf -> powf4 */
4241 if (el_mode != SFmode
4242 || n != 4
4243 || !bdecl)
4244 return NULL_TREE;
4245 break;
4247 default:
4248 return NULL_TREE;
4251 else
4252 return NULL_TREE;
4254 gcc_assert (suffix != NULL);
4255 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4256 if (!bname)
4257 return NULL_TREE;
4259 strcpy (name, bname + sizeof ("__builtin_") - 1);
4260 strcat (name, suffix);
4262 if (n_args == 1)
4263 fntype = build_function_type_list (type_out, type_in, NULL);
4264 else if (n_args == 2)
4265 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4266 else
4267 gcc_unreachable ();
4269 /* Build a function declaration for the vectorized function. */
4270 new_fndecl = build_decl (BUILTINS_LOCATION,
4271 FUNCTION_DECL, get_identifier (name), fntype);
4272 TREE_PUBLIC (new_fndecl) = 1;
4273 DECL_EXTERNAL (new_fndecl) = 1;
4274 DECL_IS_NOVOPS (new_fndecl) = 1;
4275 TREE_READONLY (new_fndecl) = 1;
4277 return new_fndecl;
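/* E.g. vectorizing a call to sin over V2DFmode yields a call to the MASS
   routine sind2, and sinf over V4SFmode yields sinf4. */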
4280 /* Returns a function decl for a vectorized version of the builtin function
4281 with builtin function code FN and the result vector type TYPE, or NULL_TREE
4282 if it is not available. */
4284 static tree
4285 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4286 tree type_in)
4288 enum machine_mode in_mode, out_mode;
4289 int in_n, out_n;
4291 if (TARGET_DEBUG_BUILTIN)
4292 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4293 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4294 GET_MODE_NAME (TYPE_MODE (type_out)),
4295 GET_MODE_NAME (TYPE_MODE (type_in)));
4297 if (TREE_CODE (type_out) != VECTOR_TYPE
4298 || TREE_CODE (type_in) != VECTOR_TYPE
4299 || !TARGET_VECTORIZE_BUILTINS)
4300 return NULL_TREE;
4302 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4303 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4304 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4305 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4307 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4309 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4310 switch (fn)
4312 case BUILT_IN_CLZIMAX:
4313 case BUILT_IN_CLZLL:
4314 case BUILT_IN_CLZL:
4315 case BUILT_IN_CLZ:
4316 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4318 if (out_mode == QImode && out_n == 16)
4319 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4320 else if (out_mode == HImode && out_n == 8)
4321 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4322 else if (out_mode == SImode && out_n == 4)
4323 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4324 else if (out_mode == DImode && out_n == 2)
4325 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4327 break;
4328 case BUILT_IN_COPYSIGN:
4329 if (VECTOR_UNIT_VSX_P (V2DFmode)
4330 && out_mode == DFmode && out_n == 2
4331 && in_mode == DFmode && in_n == 2)
4332 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4333 break;
4334 case BUILT_IN_COPYSIGNF:
4335 if (out_mode != SFmode || out_n != 4
4336 || in_mode != SFmode || in_n != 4)
4337 break;
4338 if (VECTOR_UNIT_VSX_P (V4SFmode))
4339 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4340 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4341 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4342 break;
4343 case BUILT_IN_POPCOUNTIMAX:
4344 case BUILT_IN_POPCOUNTLL:
4345 case BUILT_IN_POPCOUNTL:
4346 case BUILT_IN_POPCOUNT:
4347 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4349 if (out_mode == QImode && out_n == 16)
4350 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4351 else if (out_mode == HImode && out_n == 8)
4352 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4353 else if (out_mode == SImode && out_n == 4)
4354 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4355 else if (out_mode == DImode && out_n == 2)
4356 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4358 break;
4359 case BUILT_IN_SQRT:
4360 if (VECTOR_UNIT_VSX_P (V2DFmode)
4361 && out_mode == DFmode && out_n == 2
4362 && in_mode == DFmode && in_n == 2)
4363 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4364 break;
4365 case BUILT_IN_SQRTF:
4366 if (VECTOR_UNIT_VSX_P (V4SFmode)
4367 && out_mode == SFmode && out_n == 4
4368 && in_mode == SFmode && in_n == 4)
4369 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4370 break;
4371 case BUILT_IN_CEIL:
4372 if (VECTOR_UNIT_VSX_P (V2DFmode)
4373 && out_mode == DFmode && out_n == 2
4374 && in_mode == DFmode && in_n == 2)
4375 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4376 break;
4377 case BUILT_IN_CEILF:
4378 if (out_mode != SFmode || out_n != 4
4379 || in_mode != SFmode || in_n != 4)
4380 break;
4381 if (VECTOR_UNIT_VSX_P (V4SFmode))
4382 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4383 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4384 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4385 break;
4386 case BUILT_IN_FLOOR:
4387 if (VECTOR_UNIT_VSX_P (V2DFmode)
4388 && out_mode == DFmode && out_n == 2
4389 && in_mode == DFmode && in_n == 2)
4390 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4391 break;
4392 case BUILT_IN_FLOORF:
4393 if (out_mode != SFmode || out_n != 4
4394 || in_mode != SFmode || in_n != 4)
4395 break;
4396 if (VECTOR_UNIT_VSX_P (V4SFmode))
4397 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4398 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4399 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4400 break;
4401 case BUILT_IN_FMA:
4402 if (VECTOR_UNIT_VSX_P (V2DFmode)
4403 && out_mode == DFmode && out_n == 2
4404 && in_mode == DFmode && in_n == 2)
4405 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4406 break;
4407 case BUILT_IN_FMAF:
4408 if (VECTOR_UNIT_VSX_P (V4SFmode)
4409 && out_mode == SFmode && out_n == 4
4410 && in_mode == SFmode && in_n == 4)
4411 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4412 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4413 && out_mode == SFmode && out_n == 4
4414 && in_mode == SFmode && in_n == 4)
4415 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4416 break;
4417 case BUILT_IN_TRUNC:
4418 if (VECTOR_UNIT_VSX_P (V2DFmode)
4419 && out_mode == DFmode && out_n == 2
4420 && in_mode == DFmode && in_n == 2)
4421 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4422 break;
4423 case BUILT_IN_TRUNCF:
4424 if (out_mode != SFmode || out_n != 4
4425 || in_mode != SFmode || in_n != 4)
4426 break;
4427 if (VECTOR_UNIT_VSX_P (V4SFmode))
4428 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4429 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4430 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4431 break;
4432 case BUILT_IN_NEARBYINT:
4433 if (VECTOR_UNIT_VSX_P (V2DFmode)
4434 && flag_unsafe_math_optimizations
4435 && out_mode == DFmode && out_n == 2
4436 && in_mode == DFmode && in_n == 2)
4437 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4438 break;
4439 case BUILT_IN_NEARBYINTF:
4440 if (VECTOR_UNIT_VSX_P (V4SFmode)
4441 && flag_unsafe_math_optimizations
4442 && out_mode == SFmode && out_n == 4
4443 && in_mode == SFmode && in_n == 4)
4444 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4445 break;
4446 case BUILT_IN_RINT:
4447 if (VECTOR_UNIT_VSX_P (V2DFmode)
4448 && !flag_trapping_math
4449 && out_mode == DFmode && out_n == 2
4450 && in_mode == DFmode && in_n == 2)
4451 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4452 break;
4453 case BUILT_IN_RINTF:
4454 if (VECTOR_UNIT_VSX_P (V4SFmode)
4455 && !flag_trapping_math
4456 && out_mode == SFmode && out_n == 4
4457 && in_mode == SFmode && in_n == 4)
4458 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4459 break;
4460 default:
4461 break;
4465 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4467 enum rs6000_builtins fn
4468 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4469 switch (fn)
4471 case RS6000_BUILTIN_RSQRTF:
4472 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4473 && out_mode == SFmode && out_n == 4
4474 && in_mode == SFmode && in_n == 4)
4475 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4476 break;
4477 case RS6000_BUILTIN_RSQRT:
4478 if (VECTOR_UNIT_VSX_P (V2DFmode)
4479 && out_mode == DFmode && out_n == 2
4480 && in_mode == DFmode && in_n == 2)
4481 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4482 break;
4483 case RS6000_BUILTIN_RECIPF:
4484 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4485 && out_mode == SFmode && out_n == 4
4486 && in_mode == SFmode && in_n == 4)
4487 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4488 break;
4489 case RS6000_BUILTIN_RECIP:
4490 if (VECTOR_UNIT_VSX_P (V2DFmode)
4491 && out_mode == DFmode && out_n == 2
4492 && in_mode == DFmode && in_n == 2)
4493 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4494 break;
4495 default:
4496 break;
4500 /* Generate calls to libmass if appropriate. */
4501 if (rs6000_veclib_handler)
4502 return rs6000_veclib_handler (fndecl, type_out, type_in);
4504 return NULL_TREE;
4507 /* Default CPU string for rs6000*_file_start functions. */
4508 static const char *rs6000_default_cpu;
4510 /* Do anything needed at the start of the asm file. */
4512 static void
4513 rs6000_file_start (void)
4515 char buffer[80];
4516 const char *start = buffer;
4517 FILE *file = asm_out_file;
4519 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4521 default_file_start ();
4523 if (flag_verbose_asm)
4525 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4527 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4529 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4530 start = "";
4533 if (global_options_set.x_rs6000_cpu_index)
4535 fprintf (file, "%s -mcpu=%s", start,
4536 processor_target_table[rs6000_cpu_index].name);
4537 start = "";
4540 if (global_options_set.x_rs6000_tune_index)
4542 fprintf (file, "%s -mtune=%s", start,
4543 processor_target_table[rs6000_tune_index].name);
4544 start = "";
4547 if (PPC405_ERRATUM77)
4549 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4550 start = "";
4553 #ifdef USING_ELFOS_H
4554 switch (rs6000_sdata)
4556 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4557 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4558 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4559 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4562 if (rs6000_sdata && g_switch_value)
4564 fprintf (file, "%s -G %d", start,
4565 g_switch_value);
4566 start = "";
4568 #endif
4570 if (*start == '\0')
4571 putc ('\n', file);
4574 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
4576 switch_to_section (toc_section);
4577 switch_to_section (text_section);
4582 /* Return nonzero if this function is known to have a null epilogue. */
4585 direct_return (void)
4587 if (reload_completed)
4589 rs6000_stack_t *info = rs6000_stack_info ();
4591 if (info->first_gp_reg_save == 32
4592 && info->first_fp_reg_save == 64
4593 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4594 && ! info->lr_save_p
4595 && ! info->cr_save_p
4596 && info->vrsave_mask == 0
4597 && ! info->push_p)
4598 return 1;
4601 return 0;
4604 /* Return the number of instructions it takes to form a constant in an
4605 integer register. */
4608 num_insns_constant_wide (HOST_WIDE_INT value)
4610 /* signed constant loadable with addi */
4611 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4612 return 1;
4614 /* constant loadable with addis */
4615 else if ((value & 0xffff) == 0
4616 && (value >> 31 == -1 || value >> 31 == 0))
4617 return 1;
4619 else if (TARGET_POWERPC64)
4621 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4622 HOST_WIDE_INT high = value >> 31;
4624 if (high == 0 || high == -1)
4625 return 2;
4627 high >>= 1;
4629 if (low == 0)
4630 return num_insns_constant_wide (high) + 1;
4631 else if (high == 0)
4632 return num_insns_constant_wide (low) + 1;
4633 else
4634 return (num_insns_constant_wide (high)
4635 + num_insns_constant_wide (low) + 1);
4638 else
4639 return 2;
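/* Examples: 0x7fff needs one addi and 0x12340000 one addis; 0x12345678
   needs two insns (addis + ori); a full 64-bit constant such as
   0x123456789abcdef0 needs five (build the high 32 bits, shift left 32,
   then OR in the low half with oris + ori). */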
4643 num_insns_constant (rtx op, enum machine_mode mode)
4645 HOST_WIDE_INT low, high;
4647 switch (GET_CODE (op))
4649 case CONST_INT:
4650 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4651 && mask64_operand (op, mode))
4652 return 2;
4653 else
4654 return num_insns_constant_wide (INTVAL (op));
4656 case CONST_DOUBLE:
4657 if (mode == SFmode || mode == SDmode)
4659 long l;
4660 REAL_VALUE_TYPE rv;
4662 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4663 if (DECIMAL_FLOAT_MODE_P (mode))
4664 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4665 else
4666 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4667 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4670 long l[2];
4671 REAL_VALUE_TYPE rv;
4673 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4674 if (DECIMAL_FLOAT_MODE_P (mode))
4675 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4676 else
4677 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4678 high = l[WORDS_BIG_ENDIAN == 0];
4679 low = l[WORDS_BIG_ENDIAN != 0];
4681 if (TARGET_32BIT)
4682 return (num_insns_constant_wide (low)
4683 + num_insns_constant_wide (high));
4684 else
4686 if ((high == 0 && low >= 0)
4687 || (high == -1 && low < 0))
4688 return num_insns_constant_wide (low);
4690 else if (mask64_operand (op, mode))
4691 return 2;
4693 else if (low == 0)
4694 return num_insns_constant_wide (high) + 1;
4696 else
4697 return (num_insns_constant_wide (high)
4698 + num_insns_constant_wide (low) + 1);
4701 default:
4702 gcc_unreachable ();
4706 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4707 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4708 corresponding element of the vector, but for V4SFmode and V2SFmode,
4709 the corresponding "float" is interpreted as an SImode integer. */
4711 HOST_WIDE_INT
4712 const_vector_elt_as_int (rtx op, unsigned int elt)
4714 rtx tmp;
4716 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4717 gcc_assert (GET_MODE (op) != V2DImode
4718 && GET_MODE (op) != V2DFmode);
4720 tmp = CONST_VECTOR_ELT (op, elt);
4721 if (GET_MODE (op) == V4SFmode
4722 || GET_MODE (op) == V2SFmode)
4723 tmp = gen_lowpart (SImode, tmp);
4724 return INTVAL (tmp);
4727 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4728 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4729 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4730 all items are set to the same value and contain COPIES replicas of the
4731 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4732 operand and the others are set to the value of the operand's msb. */
4734 static bool
4735 vspltis_constant (rtx op, unsigned step, unsigned copies)
4737 enum machine_mode mode = GET_MODE (op);
4738 enum machine_mode inner = GET_MODE_INNER (mode);
4740 unsigned i;
4741 unsigned nunits;
4742 unsigned bitsize;
4743 unsigned mask;
4745 HOST_WIDE_INT val;
4746 HOST_WIDE_INT splat_val;
4747 HOST_WIDE_INT msb_val;
4749 if (mode == V2DImode || mode == V2DFmode)
4750 return false;
4752 nunits = GET_MODE_NUNITS (mode);
4753 bitsize = GET_MODE_BITSIZE (inner);
4754 mask = GET_MODE_MASK (inner);
4756 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
4757 splat_val = val;
4758 msb_val = val > 0 ? 0 : -1;
4760 /* Construct the value to be splatted, if possible. If not, return false. */
4761 for (i = 2; i <= copies; i *= 2)
4763 HOST_WIDE_INT small_val;
4764 bitsize /= 2;
4765 small_val = splat_val >> bitsize;
4766 mask >>= bitsize;
4767 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4768 return false;
4769 splat_val = small_val;
4772 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4773 if (EASY_VECTOR_15 (splat_val))
4776 /* Also check if we can splat, and then add the result to itself. Do so if
4777 the value is positive, or if the splat instruction is using OP's mode;
4778 for splat_val < 0, the splat and the add should use the same mode. */
4779 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4780 && (splat_val >= 0 || (step == 1 && copies == 1)))
4783 /* Also check if we are loading up the most significant bit, which can be
4784 done by loading up -1 and shifting the value left by -1. */
4785 else if (EASY_VECTOR_MSB (splat_val, inner))
4788 else
4789 return false;
4791 /* Check if VAL is present in every STEP-th element, and the
4792 other elements are filled with its most significant bit. */
4793 for (i = 0; i < nunits - 1; ++i)
4795 HOST_WIDE_INT desired_val;
4796 if (((BYTES_BIG_ENDIAN ? i + 1 : i) & (step - 1)) == 0)
4797 desired_val = val;
4798 else
4799 desired_val = msb_val;
4801 if (desired_val != const_vector_elt_as_int (op, i))
4802 return false;
4805 return true;
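/* For example, the V4SImode constant with every element 0x00050005 is
   accepted with STEP 1 and COPIES 2: the splat value reduces to 5, which
   fits a vspltish. */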
4809 /* Return true if OP is of the given MODE and can be synthesized
4810 with a vspltisb, vspltish or vspltisw. */
4812 bool
4813 easy_altivec_constant (rtx op, enum machine_mode mode)
4815 unsigned step, copies;
4817 if (mode == VOIDmode)
4818 mode = GET_MODE (op);
4819 else if (mode != GET_MODE (op))
4820 return false;
4822 /* V2DI/V2DF were added with VSX. Only allow 0 and all 1's as easy
4823 constants. */
4824 if (mode == V2DFmode)
4825 return zero_constant (op, mode);
4827 if (mode == V2DImode)
4829 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4830 easy. */
4831 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4832 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4833 return false;
4835 if (zero_constant (op, mode))
4836 return true;
4838 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4839 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4840 return true;
4842 return false;
4845 /* Start with a vspltisw. */
4846 step = GET_MODE_NUNITS (mode) / 4;
4847 copies = 1;
4849 if (vspltis_constant (op, step, copies))
4850 return true;
4852 /* Then try with a vspltish. */
4853 if (step == 1)
4854 copies <<= 1;
4855 else
4856 step >>= 1;
4858 if (vspltis_constant (op, step, copies))
4859 return true;
4861 /* And finally a vspltisb. */
4862 if (step == 1)
4863 copies <<= 1;
4864 else
4865 step >>= 1;
4867 if (vspltis_constant (op, step, copies))
4868 return true;
4870 return false;
4873 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4874 result is OP. Abort if it is not possible. */
4877 gen_easy_altivec_constant (rtx op)
4879 enum machine_mode mode = GET_MODE (op);
4880 int nunits = GET_MODE_NUNITS (mode);
4881 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
4882 unsigned step = nunits / 4;
4883 unsigned copies = 1;
4885 /* Start with a vspltisw. */
4886 if (vspltis_constant (op, step, copies))
4887 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
4889 /* Then try with a vspltish. */
4890 if (step == 1)
4891 copies <<= 1;
4892 else
4893 step >>= 1;
4895 if (vspltis_constant (op, step, copies))
4896 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
4898 /* And finally a vspltisb. */
4899 if (step == 1)
4900 copies <<= 1;
4901 else
4902 step >>= 1;
4904 if (vspltis_constant (op, step, copies))
4905 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
4907 gcc_unreachable ();
4910 const char *
4911 output_vec_const_move (rtx *operands)
4913 int cst, cst2;
4914 enum machine_mode mode;
4915 rtx dest, vec;
4917 dest = operands[0];
4918 vec = operands[1];
4919 mode = GET_MODE (dest);
4921 if (TARGET_VSX)
4923 if (zero_constant (vec, mode))
4924 return "xxlxor %x0,%x0,%x0";
4926 if (mode == V2DImode
4927 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4928 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4929 return "vspltisw %0,-1";
4932 if (TARGET_ALTIVEC)
4934 rtx splat_vec;
4935 if (zero_constant (vec, mode))
4936 return "vxor %0,%0,%0";
4938 splat_vec = gen_easy_altivec_constant (vec);
4939 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4940 operands[1] = XEXP (splat_vec, 0);
4941 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4942 return "#";
4944 switch (GET_MODE (splat_vec))
4946 case V4SImode:
4947 return "vspltisw %0,%1";
4949 case V8HImode:
4950 return "vspltish %0,%1";
4952 case V16QImode:
4953 return "vspltisb %0,%1";
4955 default:
4956 gcc_unreachable ();
4960 gcc_assert (TARGET_SPE);
4962 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4963 pattern of V1DI, V4HI, and V2SF.
4965 FIXME: We should probably return # and add post reload
4966 splitters for these, but this way is so easy ;-). */
4967 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4968 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4969 operands[1] = CONST_VECTOR_ELT (vec, 0);
4970 operands[2] = CONST_VECTOR_ELT (vec, 1);
4971 if (cst == cst2)
4972 return "li %0,%1\n\tevmergelo %0,%0,%0";
4973 else
4974 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4977 /* Initialize the paired-float vector TARGET to VALS. */
4979 void
4980 paired_expand_vector_init (rtx target, rtx vals)
4982 enum machine_mode mode = GET_MODE (target);
4983 int n_elts = GET_MODE_NUNITS (mode);
4984 int n_var = 0;
4985 rtx x, new_rtx, tmp, constant_op, op1, op2;
4986 int i;
4988 for (i = 0; i < n_elts; ++i)
4990 x = XVECEXP (vals, 0, i);
4991 if (!(CONST_INT_P (x)
4992 || GET_CODE (x) == CONST_DOUBLE
4993 || GET_CODE (x) == CONST_FIXED))
4994 ++n_var;
4996 if (n_var == 0)
4998 /* Load from constant pool. */
4999 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5000 return;
5003 if (n_var == 2)
5005 /* The vector is initialized only with non-constants. */
5006 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5007 XVECEXP (vals, 0, 1));
5009 emit_move_insn (target, new_rtx);
5010 return;
5013 /* One field is non-constant and the other one is a constant. Load the
5014 constant from the constant pool and use the ps_merge instruction to
5015 construct the whole vector. */
5016 op1 = XVECEXP (vals, 0, 0);
5017 op2 = XVECEXP (vals, 0, 1);
5019 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5021 tmp = gen_reg_rtx (GET_MODE (constant_op));
5022 emit_move_insn (tmp, constant_op);
5024 if (CONSTANT_P (op1))
5025 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5026 else
5027 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5029 emit_move_insn (target, new_rtx);
5032 void
5033 paired_expand_vector_move (rtx operands[])
5035 rtx op0 = operands[0], op1 = operands[1];
5037 emit_move_insn (op0, op1);
5040 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5041 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5042 operands of the comparison with code RCODE. This is a recursive
5043 function. */
5045 static void
5046 paired_emit_vector_compare (enum rtx_code rcode,
5047 rtx dest, rtx op0, rtx op1,
5048 rtx cc_op0, rtx cc_op1)
5050 rtx tmp = gen_reg_rtx (V2SFmode);
5051 rtx tmp1, max, min;
5053 gcc_assert (TARGET_PAIRED_FLOAT);
5054 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5056 switch (rcode)
5058 case LT:
5059 case LTU:
5060 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5061 return;
5062 case GE:
5063 case GEU:
5064 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5065 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5066 return;
5067 case LE:
5068 case LEU:
5069 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5070 return;
5071 case GT:
5072 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5073 return;
5074 case EQ:
5075 tmp1 = gen_reg_rtx (V2SFmode);
5076 max = gen_reg_rtx (V2SFmode);
5077 min = gen_reg_rtx (V2SFmode);
5078 gen_reg_rtx (V2SFmode);
5080 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5081 emit_insn (gen_selv2sf4
5082 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5083 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5084 emit_insn (gen_selv2sf4
5085 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5086 emit_insn (gen_subv2sf3 (tmp1, min, max));
5087 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5088 return;
5089 case NE:
5090 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5091 return;
5092 case UNLE:
5093 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5094 return;
5095 case UNLT:
5096 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5097 return;
5098 case UNGE:
5099 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5100 return;
5101 case UNGT:
5102 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5103 return;
5104 default:
5105 gcc_unreachable ();
5108 return;
5111 /* Emit vector conditional expression.
5112 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5113 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5116 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5117 rtx cond, rtx cc_op0, rtx cc_op1)
5119 enum rtx_code rcode = GET_CODE (cond);
5121 if (!TARGET_PAIRED_FLOAT)
5122 return 0;
5124 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5126 return 1;
5129 /* Initialize vector TARGET to VALS. */
5131 void
5132 rs6000_expand_vector_init (rtx target, rtx vals)
5134 enum machine_mode mode = GET_MODE (target);
5135 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5136 int n_elts = GET_MODE_NUNITS (mode);
5137 int n_var = 0, one_var = -1;
5138 bool all_same = true, all_const_zero = true;
5139 rtx x, mem;
5140 int i;
5142 for (i = 0; i < n_elts; ++i)
5144 x = XVECEXP (vals, 0, i);
5145 if (!(CONST_INT_P (x)
5146 || GET_CODE (x) == CONST_DOUBLE
5147 || GET_CODE (x) == CONST_FIXED))
5148 ++n_var, one_var = i;
5149 else if (x != CONST0_RTX (inner_mode))
5150 all_const_zero = false;
5152 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5153 all_same = false;
5156 if (n_var == 0)
5158 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5159 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5160 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5162 /* Zero register. */
5163 emit_insn (gen_rtx_SET (VOIDmode, target,
5164 gen_rtx_XOR (mode, target, target)));
5165 return;
5167 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5169 /* Splat immediate. */
5170 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5171 return;
5173 else
5175 /* Load from constant pool. */
5176 emit_move_insn (target, const_vec);
5177 return;
5181 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5182 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5184 rtx op0 = XVECEXP (vals, 0, 0);
5185 rtx op1 = XVECEXP (vals, 0, 1);
5186 if (all_same)
5188 if (!MEM_P (op0) && !REG_P (op0))
5189 op0 = force_reg (inner_mode, op0);
5190 if (mode == V2DFmode)
5191 emit_insn (gen_vsx_splat_v2df (target, op0));
5192 else
5193 emit_insn (gen_vsx_splat_v2di (target, op0));
5195 else
5197 op0 = force_reg (inner_mode, op0);
5198 op1 = force_reg (inner_mode, op1);
5199 if (mode == V2DFmode)
5200 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5201 else
5202 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5204 return;
5207 /* With single precision floating point on VSX, note that internally single
5208 precision is actually represented as a double. Either make 2 V2DF
5209 vectors and convert those to single precision, or do one
5210 conversion and splat the result to the other elements. */
5211 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5213 if (all_same)
5215 rtx freg = gen_reg_rtx (V4SFmode);
5216 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5217 rtx cvt = ((TARGET_XSCVDPSPN)
5218 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5219 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5221 emit_insn (cvt);
5222 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
5224 else
5226 rtx dbl_even = gen_reg_rtx (V2DFmode);
5227 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5228 rtx flt_even = gen_reg_rtx (V4SFmode);
5229 rtx flt_odd = gen_reg_rtx (V4SFmode);
5230 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5231 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5232 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5233 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5235 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5236 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5237 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5238 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5239 rs6000_expand_extract_even (target, flt_even, flt_odd);
5241 return;
5244 /* Store value to stack temp. Load vector element. Splat. However, splat
5245 of 64-bit items is not supported on Altivec. */
5246 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5248 rtx field;
5249 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5250 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5251 XVECEXP (vals, 0, 0));
5252 x = gen_rtx_UNSPEC (VOIDmode,
5253 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5254 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5255 gen_rtvec (2,
5256 gen_rtx_SET (VOIDmode,
5257 target, mem),
5258 x)));
5259 field = (BYTES_BIG_ENDIAN ? const0_rtx
5260 : GEN_INT (GET_MODE_NUNITS (mode) - 1));
5261 x = gen_rtx_VEC_SELECT (inner_mode, target,
5262 gen_rtx_PARALLEL (VOIDmode,
5263 gen_rtvec (1, field)));
5264 emit_insn (gen_rtx_SET (VOIDmode, target,
5265 gen_rtx_VEC_DUPLICATE (mode, x)));
5266 return;
5269 /* One field is non-constant. Load constant then overwrite
5270 varying field. */
5271 if (n_var == 1)
5273 rtx copy = copy_rtx (vals);
5275 /* Load constant part of vector, substitute neighboring value for
5276 varying element. */
5277 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5278 rs6000_expand_vector_init (target, copy);
5280 /* Insert variable. */
5281 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5282 return;
5285 /* Construct the vector in memory one field at a time
5286 and load the whole vector. */
5287 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5288 for (i = 0; i < n_elts; i++)
5289 emit_move_insn (adjust_address_nv (mem, inner_mode,
5290 i * GET_MODE_SIZE (inner_mode)),
5291 XVECEXP (vals, 0, i));
5292 emit_move_insn (target, mem);
5295 /* Set field ELT of TARGET to VAL. */
5297 void
5298 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5300 enum machine_mode mode = GET_MODE (target);
5301 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5302 rtx reg = gen_reg_rtx (mode);
5303 rtx mask, mem, x;
5304 int width = GET_MODE_SIZE (inner_mode);
5305 int i;
5307 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5309 rtx (*set_func) (rtx, rtx, rtx, rtx)
5310 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5311 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5312 return;
5315 /* Load single variable value. */
5316 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5317 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5318 x = gen_rtx_UNSPEC (VOIDmode,
5319 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5320 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5321 gen_rtvec (2,
5322 gen_rtx_SET (VOIDmode,
5323 reg, mem),
5324 x)));
5326 /* Linear sequence. */
5327 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5328 for (i = 0; i < 16; ++i)
5329 XVECEXP (mask, 0, i) = GEN_INT (i);
5331 /* Set permute mask to insert element into target. */
5332 for (i = 0; i < width; ++i)
5333 XVECEXP (mask, 0, elt*width + i)
5334 = GEN_INT (i + 0x10);
5335 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5336 x = gen_rtx_UNSPEC (mode,
5337 gen_rtvec (3, target, reg,
5338 force_reg (V16QImode, x)),
5339 UNSPEC_VPERM);
5340 emit_insn (gen_rtx_SET (VOIDmode, target, x));
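/* E.g. for ELT == 1 in V4SImode, mask bytes 4..7 become 0x10..0x13, so the
   vperm places the first word of REG (the new value) into word 1 of the
   result and takes every other byte from TARGET. */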
5343 /* Extract field ELT from VEC into TARGET. */
5345 void
5346 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5348 enum machine_mode mode = GET_MODE (vec);
5349 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5350 rtx mem;
5352 if (VECTOR_MEM_VSX_P (mode))
5354 switch (mode)
5356 default:
5357 break;
5358 case V2DFmode:
5359 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5360 return;
5361 case V2DImode:
5362 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5363 return;
5364 case V4SFmode:
5365 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5366 return;
5370 /* Allocate mode-sized buffer. */
5371 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5373 emit_move_insn (mem, vec);
5375 /* Add offset to field within buffer matching vector element. */
5376 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5378 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5381 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5382 implement ANDing by the mask IN. */
5383 void
5384 build_mask64_2_operands (rtx in, rtx *out)
5386 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5387 int shift;
5389 gcc_assert (GET_CODE (in) == CONST_INT);
5391 c = INTVAL (in);
5392 if (c & 1)
5394 /* Assume c initially something like 0x00fff000000fffff. The idea
5395 is to rotate the word so that the middle ^^^^^^ group of zeros
5396 is at the MS end and can be cleared with an rldicl mask. We then
5397 rotate back and clear off the MS ^^ group of zeros with a
5398 second rldicl. */
5399 c = ~c; /* c == 0xff000ffffff00000 */
5400 lsb = c & -c; /* lsb == 0x0000000000100000 */
5401 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5402 c = ~c; /* c == 0x00fff000000fffff */
5403 c &= -lsb; /* c == 0x00fff00000000000 */
5404 lsb = c & -c; /* lsb == 0x0000100000000000 */
5405 c = ~c; /* c == 0xff000fffffffffff */
5406 c &= -lsb; /* c == 0xff00000000000000 */
5407 shift = 0;
5408 while ((lsb >>= 1) != 0)
5409 shift++; /* shift == 44 on exit from loop */
5410 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5411 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5412 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5414 else
5416 /* Assume c initially something like 0xff000f0000000000. The idea
5417 is to rotate the word so that the ^^^ middle group of zeros
5418 is at the LS end and can be cleared with an rldicr mask. We then
5419 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5420 a second rldicr. */
5421 lsb = c & -c; /* lsb == 0x0000010000000000 */
5422 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5423 c = ~c; /* c == 0x00fff0ffffffffff */
5424 c &= -lsb; /* c == 0x00fff00000000000 */
5425 lsb = c & -c; /* lsb == 0x0000100000000000 */
5426 c = ~c; /* c == 0xff000fffffffffff */
5427 c &= -lsb; /* c == 0xff00000000000000 */
5428 shift = 0;
5429 while ((lsb >>= 1) != 0)
5430 shift++; /* shift == 44 on exit from loop */
5431 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5432 m1 >>= shift; /* m1 == 0x0000000000000fff */
5433 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5436 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5437 masks will be all 1's. We are guaranteed more than one transition. */
5438 out[0] = GEN_INT (64 - shift);
5439 out[1] = GEN_INT (m1);
5440 out[2] = GEN_INT (shift);
5441 out[3] = GEN_INT (m2);
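/* A sketch of why the decomposition is valid, for the (c & 1) case
   above (ROTL64 here is a hypothetical 64-bit rotate-left helper,
   named only for this illustration):

     rldicl t, x, 64-shift     t = ROTL64 (x, 64 - shift) & m1
     rldicl r, t, shift        r = ROTL64 (t, shift) & m2

   Since ROTL64 (ROTL64 (x, 64 - s) & m1, s) & m2
      == x & (ROTL64 (m1, s) & m2),
   the pair implements x & c exactly when c == ROTL64 (m1, shift) & m2.
   Checking the worked example in the comments above:
     ROTL64 (0x000000ffffffffff, 44) == 0xfffff000000fffff, and
     0xfffff000000fffff & 0x00ffffffffffffff == 0x00fff000000fffff == c.  */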
5444 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5446 bool
5447 invalid_e500_subreg (rtx op, enum machine_mode mode)
5449 if (TARGET_E500_DOUBLE)
5451 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5452 subreg:TI and reg:TF. Decimal float modes are like integer
5453 modes (only low part of each register used) for this
5454 purpose. */
5455 if (GET_CODE (op) == SUBREG
5456 && (mode == SImode || mode == DImode || mode == TImode
5457 || mode == DDmode || mode == TDmode || mode == PTImode)
5458 && REG_P (SUBREG_REG (op))
5459 && (GET_MODE (SUBREG_REG (op)) == DFmode
5460 || GET_MODE (SUBREG_REG (op)) == TFmode))
5461 return true;
5463 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5464 reg:TI. */
5465 if (GET_CODE (op) == SUBREG
5466 && (mode == DFmode || mode == TFmode)
5467 && REG_P (SUBREG_REG (op))
5468 && (GET_MODE (SUBREG_REG (op)) == DImode
5469 || GET_MODE (SUBREG_REG (op)) == TImode
5470 || GET_MODE (SUBREG_REG (op)) == PTImode
5471 || GET_MODE (SUBREG_REG (op)) == DDmode
5472 || GET_MODE (SUBREG_REG (op)) == TDmode))
5473 return true;
5476 if (TARGET_SPE
5477 && GET_CODE (op) == SUBREG
5478 && mode == SImode
5479 && REG_P (SUBREG_REG (op))
5480 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5481 return true;
5483 return false;
5486 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5487 selects whether the alignment is ABI-mandated, optional, or
5488 both ABI-mandated and optional alignment. */
5490 unsigned int
5491 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5493 if (how != align_opt)
5495 if (TREE_CODE (type) == VECTOR_TYPE)
5497 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5498 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5500 if (align < 64)
5501 align = 64;
5503 else if (align < 128)
5504 align = 128;
5506 else if (TARGET_E500_DOUBLE
5507 && TREE_CODE (type) == REAL_TYPE
5508 && TYPE_MODE (type) == DFmode)
5510 if (align < 64)
5511 align = 64;
5515 if (how != align_abi)
5517 if (TREE_CODE (type) == ARRAY_TYPE
5518 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5520 if (align < BITS_PER_WORD)
5521 align = BITS_PER_WORD;
5525 return align;
5528 /* AIX increases natural record alignment to doubleword if the first
5529 field is an FP double, while the FP fields themselves remain word-aligned. */
5531 unsigned int
5532 rs6000_special_round_type_align (tree type, unsigned int computed,
5533 unsigned int specified)
5535 unsigned int align = MAX (computed, specified);
5536 tree field = TYPE_FIELDS (type);
5538 /* Skip all non-field decls. */
5539 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5540 field = DECL_CHAIN (field);
5542 if (field != NULL && field != type)
5544 type = TREE_TYPE (field);
5545 while (TREE_CODE (type) == ARRAY_TYPE)
5546 type = TREE_TYPE (type);
5548 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5549 align = MAX (align, 64);
5552 return align;
5555 /* Darwin increases record alignment to the natural alignment of
5556 the first field. */
5558 unsigned int
5559 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5560 unsigned int specified)
5562 unsigned int align = MAX (computed, specified);
5564 if (TYPE_PACKED (type))
5565 return align;
5567 /* Find the first field, looking down into aggregates. */
5568 do {
5569 tree field = TYPE_FIELDS (type);
5570 /* Skip all non-field decls. */
5571 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5572 field = DECL_CHAIN (field);
5573 if (! field)
5574 break;
5575 /* A packed field does not contribute any extra alignment. */
5576 if (DECL_PACKED (field))
5577 return align;
5578 type = TREE_TYPE (field);
5579 while (TREE_CODE (type) == ARRAY_TYPE)
5580 type = TREE_TYPE (type);
5581 } while (AGGREGATE_TYPE_P (type));
5583 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5584 align = MAX (align, TYPE_ALIGN (type));
5586 return align;
5589 /* Return 1 for an operand in small memory on V.4/eabi. */
5591 int
5592 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5593 enum machine_mode mode ATTRIBUTE_UNUSED)
5595 #if TARGET_ELF
5596 rtx sym_ref;
5598 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5599 return 0;
5601 if (DEFAULT_ABI != ABI_V4)
5602 return 0;
5604 /* Vector and float memory instructions have a limited offset on the
5605 SPE, so using a vector or float variable directly as an operand is
5606 not useful. */
5607 if (TARGET_SPE
5608 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5609 return 0;
5611 if (GET_CODE (op) == SYMBOL_REF)
5612 sym_ref = op;
5614 else if (GET_CODE (op) != CONST
5615 || GET_CODE (XEXP (op, 0)) != PLUS
5616 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5617 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5618 return 0;
5620 else
5622 rtx sum = XEXP (op, 0);
5623 HOST_WIDE_INT summand;
5625 /* We have to be careful here, because it is the referenced address
5626 that must be 32k from _SDA_BASE_, not just the symbol. */
5627 summand = INTVAL (XEXP (sum, 1));
5628 if (summand < 0 || summand > g_switch_value)
5629 return 0;
5631 sym_ref = XEXP (sum, 0);
5634 return SYMBOL_REF_SMALL_P (sym_ref);
5635 #else
5636 return 0;
5637 #endif
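/* For example, with -G 8 (g_switch_value == 8), the operand
   (const (plus (symbol_ref "s") (const_int 4))) is accepted when "s"
   is a small-data symbol, but an addend of 12 is rejected: it is the
   referenced address, not just the symbol, that must stay within
   reach of _SDA_BASE_.  */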
5640 /* Return true if either operand is a general purpose register. */
5642 bool
5643 gpr_or_gpr_p (rtx op0, rtx op1)
5645 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5646 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5649 /* Return true if this is a move direct operation between GPR registers and
5650 floating point/VSX registers. */
5652 bool
5653 direct_move_p (rtx op0, rtx op1)
5655 int regno0, regno1;
5657 if (!REG_P (op0) || !REG_P (op1))
5658 return false;
5660 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
5661 return false;
5663 regno0 = REGNO (op0);
5664 regno1 = REGNO (op1);
5665 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
5666 return false;
5668 if (INT_REGNO_P (regno0))
5669 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
5671 else if (INT_REGNO_P (regno1))
5673 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
5674 return true;
5676 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
5677 return true;
5680 return false;
5683 /* Return true if this is a load or store quad operation. */
5685 bool
5686 quad_load_store_p (rtx op0, rtx op1)
5688 bool ret;
5690 if (!TARGET_QUAD_MEMORY)
5691 ret = false;
5693 else if (REG_P (op0) && MEM_P (op1))
5694 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
5695 && quad_memory_operand (op1, GET_MODE (op1))
5696 && !reg_overlap_mentioned_p (op0, op1));
5698 else if (MEM_P (op0) && REG_P (op1))
5699 ret = (quad_memory_operand (op0, GET_MODE (op0))
5700 && quad_int_reg_operand (op1, GET_MODE (op1)));
5702 else
5703 ret = false;
5705 if (TARGET_DEBUG_ADDR)
5707 fprintf (stderr, "\n========== quad_load_store, return %s\n",
5708 ret ? "true" : "false");
5709 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
5712 return ret;
5715 /* Given an address, return a constant offset term if one exists. */
5717 static rtx
5718 address_offset (rtx op)
5720 if (GET_CODE (op) == PRE_INC
5721 || GET_CODE (op) == PRE_DEC)
5722 op = XEXP (op, 0);
5723 else if (GET_CODE (op) == PRE_MODIFY
5724 || GET_CODE (op) == LO_SUM)
5725 op = XEXP (op, 1);
5727 if (GET_CODE (op) == CONST)
5728 op = XEXP (op, 0);
5730 if (GET_CODE (op) == PLUS)
5731 op = XEXP (op, 1);
5733 if (CONST_INT_P (op))
5734 return op;
5736 return NULL_RTX;
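/* For example:
     (plus (reg) (const_int 16))                     -> (const_int 16)
     (lo_sum (reg) (const (plus (symbol_ref "x")
                                (const_int 8))))     -> (const_int 8)
     (reg)                                           -> NULL_RTX  */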
5739 /* Return true if the MEM operand is a memory operand suitable for use
5740 with a (full width, possibly multiple) gpr load/store. On
5741 powerpc64 this means the offset must be divisible by 4.
5742 Implements 'Y' constraint.
5744 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5745 a constraint function we know the operand has satisfied a suitable
5746 memory predicate. Also accept some odd rtl generated by reload
5747 (see rs6000_legitimize_reload_address for various forms). It is
5748 important that reload rtl be accepted by appropriate constraints
5749 but not by the operand predicate.
5751 Offsetting a lo_sum should not be allowed, except where we know by
5752 alignment that a 32k boundary is not crossed, but see the ???
5753 comment in rs6000_legitimize_reload_address. Note that by
5754 "offsetting" here we mean a further offset to access parts of the
5755 MEM. It's fine to have a lo_sum where the inner address is offset
5756 from a sym, since the same sym+offset will appear in the high part
5757 of the address calculation. */
5759 bool
5760 mem_operand_gpr (rtx op, enum machine_mode mode)
5762 unsigned HOST_WIDE_INT offset;
5763 int extra;
5764 rtx addr = XEXP (op, 0);
5766 op = address_offset (addr);
5767 if (op == NULL_RTX)
5768 return true;
5770 offset = INTVAL (op);
5771 if (TARGET_POWERPC64 && (offset & 3) != 0)
5772 return false;
5774 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5775 gcc_assert (extra >= 0);
5777 if (GET_CODE (addr) == LO_SUM)
5778 /* For lo_sum addresses, we must allow any offset except one that
5779 causes a wrap, so test only the low 16 bits. */
5780 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
5782 return offset + 0x8000 < 0x10000u - extra;
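/* Worked example: a TImode access on powerpc64 has
   extra == 16 - 8 == 8, so an offset D must be a multiple of 4 with
   D + 0x8000 < 0xfff8, i.e. D <= 32756.  At D == 32756 the second
   doubleword lands at 32764, still within the signed 16-bit
   displacement; D == 32760 would need 32768 and is rejected.  */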
5785 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5787 static bool
5788 reg_offset_addressing_ok_p (enum machine_mode mode)
5790 switch (mode)
5792 case V16QImode:
5793 case V8HImode:
5794 case V4SFmode:
5795 case V4SImode:
5796 case V2DFmode:
5797 case V2DImode:
5798 case TImode:
5799 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
5800 TImode is not a vector mode, if we want to use the VSX registers to
5801 move it around, we need to restrict ourselves to reg+reg
5802 addressing. */
5803 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5804 return false;
5805 break;
5807 case V4HImode:
5808 case V2SImode:
5809 case V1DImode:
5810 case V2SFmode:
5811 /* Paired vector modes. Only reg+reg addressing is valid. */
5812 if (TARGET_PAIRED_FLOAT)
5813 return false;
5814 break;
5816 case SDmode:
5817 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
5818 addressing for the LFIWZX and STFIWX instructions. */
5819 if (TARGET_NO_SDMODE_STACK)
5820 return false;
5821 break;
5823 default:
5824 break;
5827 return true;
5830 static bool
5831 virtual_stack_registers_memory_p (rtx op)
5833 int regnum;
5835 if (GET_CODE (op) == REG)
5836 regnum = REGNO (op);
5838 else if (GET_CODE (op) == PLUS
5839 && GET_CODE (XEXP (op, 0)) == REG
5840 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5841 regnum = REGNO (XEXP (op, 0));
5843 else
5844 return false;
5846 return (regnum >= FIRST_VIRTUAL_REGISTER
5847 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5850 /* Return true if a MODE sized memory accesses to OP plus OFFSET
5851 is known to not straddle a 32k boundary. */
5853 static bool
5854 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5855 enum machine_mode mode)
5857 tree decl, type;
5858 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
5860 if (GET_CODE (op) != SYMBOL_REF)
5861 return false;
5863 dsize = GET_MODE_SIZE (mode);
5864 decl = SYMBOL_REF_DECL (op);
5865 if (!decl)
5867 if (dsize == 0)
5868 return false;
5870 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5871 replacing memory addresses with an anchor plus offset. We
5872 could find the decl by rummaging around in the block->objects
5873 VEC for the given offset but that seems like too much work. */
5874 dalign = BITS_PER_UNIT;
5875 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5876 && SYMBOL_REF_ANCHOR_P (op)
5877 && SYMBOL_REF_BLOCK (op) != NULL)
5879 struct object_block *block = SYMBOL_REF_BLOCK (op);
5881 dalign = block->alignment;
5882 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5884 else if (CONSTANT_POOL_ADDRESS_P (op))
5886 /* It would be nice to have get_pool_align (). */
5887 enum machine_mode cmode = get_pool_mode (op);
5889 dalign = GET_MODE_ALIGNMENT (cmode);
5892 else if (DECL_P (decl))
5894 dalign = DECL_ALIGN (decl);
5896 if (dsize == 0)
5898 /* Allow BLKmode when the entire object is known to not
5899 cross a 32k boundary. */
5900 if (!DECL_SIZE_UNIT (decl))
5901 return false;
5903 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5904 return false;
5906 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5907 if (dsize > 32768)
5908 return false;
5910 return dalign / BITS_PER_UNIT >= dsize;
5913 else
5915 type = TREE_TYPE (decl);
5917 dalign = TYPE_ALIGN (type);
5918 if (CONSTANT_CLASS_P (decl))
5919 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5920 else
5921 dalign = DATA_ALIGNMENT (decl, dalign);
5923 if (dsize == 0)
5925 /* BLKmode, check the entire object. */
5926 if (TREE_CODE (decl) == STRING_CST)
5927 dsize = TREE_STRING_LENGTH (decl);
5928 else if (TYPE_SIZE_UNIT (type)
5929 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5930 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5931 else
5932 return false;
5933 if (dsize > 32768)
5934 return false;
5936 return dalign / BITS_PER_UNIT >= dsize;
5940 /* Find how many bits of the alignment we know for this access. */
5941 mask = dalign / BITS_PER_UNIT - 1;
5942 lsb = offset & -offset;
5943 mask &= lsb - 1;
5944 dalign = mask + 1;
5946 return dalign >= dsize;
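/* Worked example of the mask arithmetic: for a decl with
   DECL_ALIGN == 128 (dalign 16 bytes) accessed at offset 24,
   lsb == 24 & -24 == 8, mask == 15 & (8 - 1) == 7, so only 8-byte
   alignment is known at the access point: an 8-byte (DImode) access
   passes, a 16-byte one does not.  */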
5949 static bool
5950 constant_pool_expr_p (rtx op)
5952 rtx base, offset;
5954 split_const (op, &base, &offset);
5955 return (GET_CODE (base) == SYMBOL_REF
5956 && CONSTANT_POOL_ADDRESS_P (base)
5957 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5960 static const_rtx tocrel_base, tocrel_offset;
5962 /* Return true if OP is a toc pointer relative address (the output
5963 of create_TOC_reference). If STRICT, do not match high part or
5964 non-split -mcmodel=large/medium toc pointer relative addresses. */
5966 bool
5967 toc_relative_expr_p (const_rtx op, bool strict)
5969 if (!TARGET_TOC)
5970 return false;
5972 if (TARGET_CMODEL != CMODEL_SMALL)
5974 /* Only match the low part. */
5975 if (GET_CODE (op) == LO_SUM
5976 && REG_P (XEXP (op, 0))
5977 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5978 op = XEXP (op, 1);
5979 else if (strict)
5980 return false;
5983 tocrel_base = op;
5984 tocrel_offset = const0_rtx;
5985 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
5987 tocrel_base = XEXP (op, 0);
5988 tocrel_offset = XEXP (op, 1);
5991 return (GET_CODE (tocrel_base) == UNSPEC
5992 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5995 /* Return true if X is a constant pool address, and also for cmodel=medium
5996 if X is a toc-relative address known to be offsettable within MODE. */
5998 bool
5999 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
6000 bool strict)
6002 return (toc_relative_expr_p (x, strict)
6003 && (TARGET_CMODEL != CMODEL_MEDIUM
6004 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6005 || mode == QImode
6006 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6007 INTVAL (tocrel_offset), mode)));
6010 static bool
6011 legitimate_small_data_p (enum machine_mode mode, rtx x)
6013 return (DEFAULT_ABI == ABI_V4
6014 && !flag_pic && !TARGET_TOC
6015 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6016 && small_data_operand (x, mode));
6019 /* SPE offset addressing is limited to 5 bits' worth of doublewords. */
6020 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
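/* That is, the offset must be a multiple of 8 in [0, 248]: 0xf8 is
   accepted, while 0x100 (too large) and 0x4 (not doubleword aligned)
   are not.  */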
6022 bool
6023 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
6024 bool strict, bool worst_case)
6026 unsigned HOST_WIDE_INT offset;
6027 unsigned int extra;
6029 if (GET_CODE (x) != PLUS)
6030 return false;
6031 if (!REG_P (XEXP (x, 0)))
6032 return false;
6033 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6034 return false;
6035 if (!reg_offset_addressing_ok_p (mode))
6036 return virtual_stack_registers_memory_p (x);
6037 if (legitimate_constant_pool_address_p (x, mode, strict))
6038 return true;
6039 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6040 return false;
6042 offset = INTVAL (XEXP (x, 1));
6043 extra = 0;
6044 switch (mode)
6046 case V4HImode:
6047 case V2SImode:
6048 case V1DImode:
6049 case V2SFmode:
6050 /* SPE vector modes. */
6051 return SPE_CONST_OFFSET_OK (offset);
6053 case DFmode:
6054 case DDmode:
6055 case DImode:
6056 /* On e500v2, we may have:
6058 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
6060 which is addressed with evldd instructions. */
6061 if (TARGET_E500_DOUBLE)
6062 return SPE_CONST_OFFSET_OK (offset);
6064 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6065 addressing. */
6066 if (VECTOR_MEM_VSX_P (mode))
6067 return false;
6069 if (!worst_case)
6070 break;
6071 if (!TARGET_POWERPC64)
6072 extra = 4;
6073 else if (offset & 3)
6074 return false;
6075 break;
6077 case TFmode:
6078 case TDmode:
6079 case TImode:
6080 case PTImode:
6081 if (TARGET_E500_DOUBLE)
6082 return (SPE_CONST_OFFSET_OK (offset)
6083 && SPE_CONST_OFFSET_OK (offset + 8));
6085 extra = 8;
6086 if (!worst_case)
6087 break;
6088 if (!TARGET_POWERPC64)
6089 extra = 12;
6090 else if (offset & 3)
6091 return false;
6092 break;
6094 default:
6095 break;
6098 offset += 0x8000;
6099 return offset < 0x10000 - extra;
6102 bool
6103 legitimate_indexed_address_p (rtx x, int strict)
6105 rtx op0, op1;
6107 if (GET_CODE (x) != PLUS)
6108 return false;
6110 op0 = XEXP (x, 0);
6111 op1 = XEXP (x, 1);
6113 /* Recognize the rtl generated by reload which we know will later be
6114 replaced with proper base and index regs. */
6115 if (!strict
6116 && reload_in_progress
6117 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6118 && REG_P (op1))
6119 return true;
6121 return (REG_P (op0) && REG_P (op1)
6122 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6123 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6124 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6125 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6128 bool
6129 avoiding_indexed_address_p (enum machine_mode mode)
6131 /* Avoid indexed addressing for modes that have non-indexed
6132 load/store instruction forms. */
6133 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6136 bool
6137 legitimate_indirect_address_p (rtx x, int strict)
6139 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6142 bool
6143 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6145 if (!TARGET_MACHO || !flag_pic
6146 || mode != SImode || GET_CODE (x) != MEM)
6147 return false;
6148 x = XEXP (x, 0);
6150 if (GET_CODE (x) != LO_SUM)
6151 return false;
6152 if (GET_CODE (XEXP (x, 0)) != REG)
6153 return false;
6154 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6155 return false;
6156 x = XEXP (x, 1);
6158 return CONSTANT_P (x);
6161 static bool
6162 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6164 if (GET_CODE (x) != LO_SUM)
6165 return false;
6166 if (GET_CODE (XEXP (x, 0)) != REG)
6167 return false;
6168 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6169 return false;
6170 /* Restrict addressing for DI because of our SUBREG hackery. */
6171 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6172 return false;
6173 x = XEXP (x, 1);
6175 if (TARGET_ELF || TARGET_MACHO)
6177 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
6178 return false;
6179 if (TARGET_TOC)
6180 return false;
6181 if (GET_MODE_NUNITS (mode) != 1)
6182 return false;
6183 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6184 && !(/* ??? Assume floating point reg based on mode? */
6185 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6186 && (mode == DFmode || mode == DDmode)))
6187 return false;
6189 return CONSTANT_P (x);
6192 return false;
6196 /* Try machine-dependent ways of modifying an illegitimate address
6197 to be legitimate. If we find one, return the new, valid address.
6198 This is used from only one place: `memory_address' in explow.c.
6200 OLDX is the address as it was before break_out_memory_refs was
6201 called. In some cases it is useful to look at this to decide what
6202 needs to be done.
6204 It is always safe for this function to do nothing. It exists to
6205 recognize opportunities to optimize the output.
6207 On RS/6000, first check for the sum of a register with a constant
6208 integer that is out of range. If so, generate code to add the
6209 constant with the low-order 16 bits masked to the register and force
6210 this result into another register (this can be done with `cau').
6211 Then generate an address of REG+(CONST&0xffff), allowing for the
6212 possibility of bit 16 being a one.
6214 Then check for the sum of a register and something not constant, try to
6215 load the other things into a register and return the sum. */
6217 static rtx
6218 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6219 enum machine_mode mode)
6221 unsigned int extra;
6223 if (!reg_offset_addressing_ok_p (mode))
6225 if (virtual_stack_registers_memory_p (x))
6226 return x;
6228 /* In theory we should not be seeing addresses of the form reg+0,
6229 but just in case it is generated, optimize it away. */
6230 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6231 return force_reg (Pmode, XEXP (x, 0));
6233 /* For TImode with load/store quad, restrict addresses to just a single
6234 pointer, so it works with both GPRs and VSX registers. */
6235 /* Make sure both operands are registers. */
6236 else if (GET_CODE (x) == PLUS
6237 && (mode != TImode || !TARGET_QUAD_MEMORY))
6238 return gen_rtx_PLUS (Pmode,
6239 force_reg (Pmode, XEXP (x, 0)),
6240 force_reg (Pmode, XEXP (x, 1)));
6241 else
6242 return force_reg (Pmode, x);
6244 if (GET_CODE (x) == SYMBOL_REF)
6246 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6247 if (model != 0)
6248 return rs6000_legitimize_tls_address (x, model);
6251 extra = 0;
6252 switch (mode)
6254 case TFmode:
6255 case TDmode:
6256 case TImode:
6257 case PTImode:
6258 /* As in rs6000_legitimate_offset_address_p we do not assume
6259 worst-case. The mode here is just a hint as to the registers
6260 used. A TImode is usually in gprs, but may actually be in
6261 fprs. Leave worst-case scenario for reload to handle via
6262 insn constraints. PTImode is only GPRs. */
6263 extra = 8;
6264 break;
6265 default:
6266 break;
6269 if (GET_CODE (x) == PLUS
6270 && GET_CODE (XEXP (x, 0)) == REG
6271 && GET_CODE (XEXP (x, 1)) == CONST_INT
6272 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6273 >= 0x10000 - extra)
6274 && !(SPE_VECTOR_MODE (mode)
6275 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6277 HOST_WIDE_INT high_int, low_int;
6278 rtx sum;
6279 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6280 if (low_int >= 0x8000 - extra)
6281 low_int = 0;
6282 high_int = INTVAL (XEXP (x, 1)) - low_int;
6283 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6284 GEN_INT (high_int)), 0);
6285 return plus_constant (Pmode, sum, low_int);
6287 else if (GET_CODE (x) == PLUS
6288 && GET_CODE (XEXP (x, 0)) == REG
6289 && GET_CODE (XEXP (x, 1)) != CONST_INT
6290 && GET_MODE_NUNITS (mode) == 1
6291 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6292 || (/* ??? Assume floating point reg based on mode? */
6293 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6294 && (mode == DFmode || mode == DDmode)))
6295 && !avoiding_indexed_address_p (mode))
6297 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6298 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6300 else if (SPE_VECTOR_MODE (mode)
6301 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6303 if (mode == DImode)
6304 return x;
6305 /* We accept [reg + reg] and [reg + OFFSET]. */
6307 if (GET_CODE (x) == PLUS)
6309 rtx op1 = XEXP (x, 0);
6310 rtx op2 = XEXP (x, 1);
6311 rtx y;
6313 op1 = force_reg (Pmode, op1);
6315 if (GET_CODE (op2) != REG
6316 && (GET_CODE (op2) != CONST_INT
6317 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6318 || (GET_MODE_SIZE (mode) > 8
6319 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6320 op2 = force_reg (Pmode, op2);
6322 /* We can't always do [reg + reg] for these, because [reg +
6323 reg + offset] is not a legitimate addressing mode. */
6324 y = gen_rtx_PLUS (Pmode, op1, op2);
6326 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6327 return force_reg (Pmode, y);
6328 else
6329 return y;
6332 return force_reg (Pmode, x);
6334 else if ((TARGET_ELF
6335 #if TARGET_MACHO
6336 || !MACHO_DYNAMIC_NO_PIC_P
6337 #endif
6339 && TARGET_32BIT
6340 && TARGET_NO_TOC
6341 && ! flag_pic
6342 && GET_CODE (x) != CONST_INT
6343 && GET_CODE (x) != CONST_DOUBLE
6344 && CONSTANT_P (x)
6345 && GET_MODE_NUNITS (mode) == 1
6346 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6347 || (/* ??? Assume floating point reg based on mode? */
6348 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6349 && (mode == DFmode || mode == DDmode))))
6351 rtx reg = gen_reg_rtx (Pmode);
6352 if (TARGET_ELF)
6353 emit_insn (gen_elf_high (reg, x));
6354 else
6355 emit_insn (gen_macho_high (reg, x));
6356 return gen_rtx_LO_SUM (Pmode, reg, x);
6358 else if (TARGET_TOC
6359 && GET_CODE (x) == SYMBOL_REF
6360 && constant_pool_expr_p (x)
6361 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6362 return create_TOC_reference (x, NULL_RTX);
6363 else
6364 return x;
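/* Worked example of the high/low split above: legitimizing
   (plus (reg) (const_int 0x18000)) gives
     low_int  == ((0x8000 ^ 0x8000) - 0x8000) == -0x8000
     high_int == 0x18000 - (-0x8000)          == 0x20000
   so 0x20000 is added with a single addis (its low 16 bits are zero)
   and the returned address is (plus (reg') (const_int -0x8000)).  */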
6367 /* Debug version of rs6000_legitimize_address. */
6368 static rtx
6369 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6371 rtx ret;
6372 rtx insns;
6374 start_sequence ();
6375 ret = rs6000_legitimize_address (x, oldx, mode);
6376 insns = get_insns ();
6377 end_sequence ();
6379 if (ret != x)
6381 fprintf (stderr,
6382 "\nrs6000_legitimize_address: mode %s, old code %s, "
6383 "new code %s, modified\n",
6384 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6385 GET_RTX_NAME (GET_CODE (ret)));
6387 fprintf (stderr, "Original address:\n");
6388 debug_rtx (x);
6390 fprintf (stderr, "oldx:\n");
6391 debug_rtx (oldx);
6393 fprintf (stderr, "New address:\n");
6394 debug_rtx (ret);
6396 if (insns)
6398 fprintf (stderr, "Insns added:\n");
6399 debug_rtx_list (insns, 20);
6402 else
6404 fprintf (stderr,
6405 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6406 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6408 debug_rtx (x);
6411 if (insns)
6412 emit_insn (insns);
6414 return ret;
6417 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6418 We need to emit DTP-relative relocations. */
6420 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6421 static void
6422 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6424 switch (size)
6426 case 4:
6427 fputs ("\t.long\t", file);
6428 break;
6429 case 8:
6430 fputs (DOUBLE_INT_ASM_OP, file);
6431 break;
6432 default:
6433 gcc_unreachable ();
6435 output_addr_const (file, x);
6436 fputs ("@dtprel+0x8000", file);
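/* For example, a 4-byte entry for symbol "foo" is emitted as
     .long foo@dtprel+0x8000
   while 8-byte entries use DOUBLE_INT_ASM_OP in place of .long.  */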
6439 /* In the name of slightly smaller debug output, and to cater to
6440 general assembler lossage, recognize various UNSPEC sequences
6441 and turn them back into a direct symbol reference. */
6443 static rtx
6444 rs6000_delegitimize_address (rtx orig_x)
6446 rtx x, y, offset;
6448 orig_x = delegitimize_mem_from_attrs (orig_x);
6449 x = orig_x;
6450 if (MEM_P (x))
6451 x = XEXP (x, 0);
6453 y = x;
6454 if (TARGET_CMODEL != CMODEL_SMALL
6455 && GET_CODE (y) == LO_SUM)
6456 y = XEXP (y, 1);
6458 offset = NULL_RTX;
6459 if (GET_CODE (y) == PLUS
6460 && GET_MODE (y) == Pmode
6461 && CONST_INT_P (XEXP (y, 1)))
6463 offset = XEXP (y, 1);
6464 y = XEXP (y, 0);
6467 if (GET_CODE (y) == UNSPEC
6468 && XINT (y, 1) == UNSPEC_TOCREL)
6470 #ifdef ENABLE_CHECKING
6471 if (REG_P (XVECEXP (y, 0, 1))
6472 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6474 /* All good. */
6476 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6478 /* Weirdness alert. df_note_compute can replace r2 with a
6479 debug_expr when this unspec is in a debug_insn.
6480 Seen in gcc.dg/pr51957-1.c */
6482 else
6484 debug_rtx (orig_x);
6485 abort ();
6487 #endif
6488 y = XVECEXP (y, 0, 0);
6490 #ifdef HAVE_AS_TLS
6491 /* Do not associate thread-local symbols with the original
6492 constant pool symbol. */
6493 if (TARGET_XCOFF
6494 && GET_CODE (y) == SYMBOL_REF
6495 && CONSTANT_POOL_ADDRESS_P (y)
6496 && SYMBOL_REF_TLS_MODEL (get_pool_constant (y)) >= TLS_MODEL_REAL)
6497 return orig_x;
6498 #endif
6500 if (offset != NULL_RTX)
6501 y = gen_rtx_PLUS (Pmode, y, offset);
6502 if (!MEM_P (orig_x))
6503 return y;
6504 else
6505 return replace_equiv_address_nv (orig_x, y);
6508 if (TARGET_MACHO
6509 && GET_CODE (orig_x) == LO_SUM
6510 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6512 y = XEXP (XEXP (orig_x, 1), 0);
6513 if (GET_CODE (y) == UNSPEC
6514 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6515 return XVECEXP (y, 0, 0);
6518 return orig_x;
6521 /* Return true if X shouldn't be emitted into the debug info.
6522 The linker doesn't like .toc section references from
6523 .debug_* sections, so reject .toc section symbols. */
6525 static bool
6526 rs6000_const_not_ok_for_debug_p (rtx x)
6528 if (GET_CODE (x) == SYMBOL_REF
6529 && CONSTANT_POOL_ADDRESS_P (x))
6531 rtx c = get_pool_constant (x);
6532 enum machine_mode cmode = get_pool_mode (x);
6533 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6534 return true;
6537 return false;
6540 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6542 static GTY(()) rtx rs6000_tls_symbol;
6543 static rtx
6544 rs6000_tls_get_addr (void)
6546 if (!rs6000_tls_symbol)
6547 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
6549 return rs6000_tls_symbol;
6552 /* Construct the SYMBOL_REF for TLS GOT references. */
6554 static GTY(()) rtx rs6000_got_symbol;
6555 static rtx
6556 rs6000_got_sym (void)
6558 if (!rs6000_got_symbol)
6560 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
6561 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
6562 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
6565 return rs6000_got_symbol;
6568 /* AIX Thread-Local Address support. */
6570 static rtx
6571 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
6573 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
6574 const char *name;
6575 char *tlsname;
6577 name = XSTR (addr, 0);
6578 /* Append TLS CSECT qualifier, unless the symbol already is qualified
6579 or the symbol will be in TLS private data section. */
6580 if (name[strlen (name) - 1] != ']'
6581 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
6582 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
6584 tlsname = XALLOCAVEC (char, strlen (name) + 4);
6585 strcpy (tlsname, name);
6586 strcat (tlsname,
6587 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
6588 tlsaddr = copy_rtx (addr);
6589 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
6591 else
6592 tlsaddr = addr;
6594 /* Place addr into TOC constant pool. */
6595 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
6597 /* Output the TOC entry and create the MEM referencing the value. */
6598 if (constant_pool_expr_p (XEXP (sym, 0))
6599 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
6601 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
6602 mem = gen_const_mem (Pmode, tocref);
6603 set_mem_alias_set (mem, get_TOC_alias_set ());
6605 else
6606 return sym;
6608 /* Use global-dynamic for local-dynamic. */
6609 if (model == TLS_MODEL_GLOBAL_DYNAMIC
6610 || model == TLS_MODEL_LOCAL_DYNAMIC)
6612 /* Create new TOC reference for @m symbol. */
6613 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
6614 tlsname = XALLOCAVEC (char, strlen (name) + 1);
6615 strcpy (tlsname, "*LCM");
6616 strcat (tlsname, name + 3);
6617 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
6618 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
6619 tocref = create_TOC_reference (modaddr, NULL_RTX);
6620 rtx modmem = gen_const_mem (Pmode, tocref);
6621 set_mem_alias_set (modmem, get_TOC_alias_set ());
6623 rtx modreg = gen_reg_rtx (Pmode);
6624 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
6626 tmpreg = gen_reg_rtx (Pmode);
6627 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6629 dest = gen_reg_rtx (Pmode);
6630 if (TARGET_32BIT)
6631 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
6632 else
6633 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
6634 return dest;
6636 /* Obtain TLS pointer: 32-bit call or 64-bit GPR 13. */
6637 else if (TARGET_32BIT)
6639 tlsreg = gen_reg_rtx (SImode);
6640 emit_insn (gen_tls_get_tpointer (tlsreg));
6642 else
6643 tlsreg = gen_rtx_REG (DImode, 13);
6645 /* Load the TOC value into temporary register. */
6646 tmpreg = gen_reg_rtx (Pmode);
6647 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6648 set_unique_reg_note (get_last_insn (), REG_EQUAL,
6649 gen_rtx_MINUS (Pmode, addr, tlsreg));
6651 /* Add TOC symbol value to TLS pointer. */
6652 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
6654 return dest;
6657 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
6658 this (thread-local) address. */
6660 static rtx
6661 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
6663 rtx dest, insn;
6665 if (TARGET_XCOFF)
6666 return rs6000_legitimize_tls_address_aix (addr, model);
6668 dest = gen_reg_rtx (Pmode);
6669 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
6671 rtx tlsreg;
6673 if (TARGET_64BIT)
6675 tlsreg = gen_rtx_REG (Pmode, 13);
6676 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
6678 else
6680 tlsreg = gen_rtx_REG (Pmode, 2);
6681 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
6683 emit_insn (insn);
6685 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
6687 rtx tlsreg, tmp;
6689 tmp = gen_reg_rtx (Pmode);
6690 if (TARGET_64BIT)
6692 tlsreg = gen_rtx_REG (Pmode, 13);
6693 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
6695 else
6697 tlsreg = gen_rtx_REG (Pmode, 2);
6698 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
6700 emit_insn (insn);
6701 if (TARGET_64BIT)
6702 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
6703 else
6704 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
6705 emit_insn (insn);
6707 else
6709 rtx r3, got, tga, tmp1, tmp2, call_insn;
6711 /* We currently use relocations like @got@tlsgd for tls, which
6712 means the linker will handle allocation of tls entries, placing
6713 them in the .got section. So use a pointer to the .got section,
6714 not one to secondary TOC sections used by 64-bit -mminimal-toc,
6715 or to secondary GOT sections used by 32-bit -fPIC. */
6716 if (TARGET_64BIT)
6717 got = gen_rtx_REG (Pmode, 2);
6718 else
6720 if (flag_pic == 1)
6721 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
6722 else
6724 rtx gsym = rs6000_got_sym ();
6725 got = gen_reg_rtx (Pmode);
6726 if (flag_pic == 0)
6727 rs6000_emit_move (got, gsym, Pmode);
6728 else
6730 rtx mem, lab, last;
6732 tmp1 = gen_reg_rtx (Pmode);
6733 tmp2 = gen_reg_rtx (Pmode);
6734 mem = gen_const_mem (Pmode, tmp1);
6735 lab = gen_label_rtx ();
6736 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
6737 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
6738 if (TARGET_LINK_STACK)
6739 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
6740 emit_move_insn (tmp2, mem);
6741 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
6742 set_unique_reg_note (last, REG_EQUAL, gsym);
6747 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
6749 tga = rs6000_tls_get_addr ();
6750 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
6751 1, const0_rtx, Pmode);
6753 r3 = gen_rtx_REG (Pmode, 3);
6754 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6755 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
6756 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6757 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
6758 else if (DEFAULT_ABI == ABI_V4)
6759 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
6760 else
6761 gcc_unreachable ();
6762 call_insn = last_call_insn ();
6763 PATTERN (call_insn) = insn;
6764 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6765 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6766 pic_offset_table_rtx);
6768 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
6770 tga = rs6000_tls_get_addr ();
6771 tmp1 = gen_reg_rtx (Pmode);
6772 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
6773 1, const0_rtx, Pmode);
6775 r3 = gen_rtx_REG (Pmode, 3);
6776 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6777 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
6778 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6779 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
6780 else if (DEFAULT_ABI == ABI_V4)
6781 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
6782 else
6783 gcc_unreachable ();
6784 call_insn = last_call_insn ();
6785 PATTERN (call_insn) = insn;
6786 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6787 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6788 pic_offset_table_rtx);
6790 if (rs6000_tls_size == 16)
6792 if (TARGET_64BIT)
6793 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
6794 else
6795 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
6797 else if (rs6000_tls_size == 32)
6799 tmp2 = gen_reg_rtx (Pmode);
6800 if (TARGET_64BIT)
6801 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
6802 else
6803 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6804 emit_insn (insn);
6805 if (TARGET_64BIT)
6806 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6807 else
6808 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6810 else
6812 tmp2 = gen_reg_rtx (Pmode);
6813 if (TARGET_64BIT)
6814 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6815 else
6816 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6817 emit_insn (insn);
6818 insn = gen_rtx_SET (Pmode, dest,
6819 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6821 emit_insn (insn);
6823 else
6825 /* IE, or 64-bit offset LE. */
6826 tmp2 = gen_reg_rtx (Pmode);
6827 if (TARGET_64BIT)
6828 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6829 else
6830 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6831 emit_insn (insn);
6832 if (TARGET_64BIT)
6833 insn = gen_tls_tls_64 (dest, tmp2, addr);
6834 else
6835 insn = gen_tls_tls_32 (dest, tmp2, addr);
6836 emit_insn (insn);
6840 return dest;
6843 /* Return 1 if X contains a thread-local symbol. */
6845 static bool
6846 rs6000_tls_referenced_p (rtx x)
6848 if (! TARGET_HAVE_TLS)
6849 return false;
6851 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6854 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6856 static bool
6857 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6859 if (GET_CODE (x) == HIGH
6860 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6861 return true;
6863 /* A TLS symbol in the TOC cannot contain a sum. */
6864 if (GET_CODE (x) == CONST
6865 && GET_CODE (XEXP (x, 0)) == PLUS
6866 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6867 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
6868 return true;
6870 /* Do not place an ELF TLS symbol in the constant pool. */
6871 return TARGET_ELF && rs6000_tls_referenced_p (x);
6874 /* Return 1 if *X is a thread-local symbol. This is the same as
6875 rs6000_tls_symbol_ref except for the type of the unused argument. */
6877 static int
6878 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6880 return RS6000_SYMBOL_REF_TLS_P (*x);
6883 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6884 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6885 can be addressed relative to the toc pointer. */
6887 static bool
6888 use_toc_relative_ref (rtx sym)
6890 return ((constant_pool_expr_p (sym)
6891 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6892 get_pool_mode (sym)))
6893 || (TARGET_CMODEL == CMODEL_MEDIUM
6894 && SYMBOL_REF_LOCAL_P (sym)));
6897 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6898 replace the input X, or the original X if no replacement is called for.
6899 The output parameter *WIN is 1 if the calling macro should goto WIN,
6900 0 if it should not.
6902 For RS/6000, we wish to handle large displacements off a base
6903 register by splitting the addend across an addi/addis and the mem insn.
6904 This cuts the number of extra insns needed from 3 to 1.
6906 On Darwin, we use this to generate code for floating point constants.
6907 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6908 The Darwin code is inside #if TARGET_MACHO because only then are the
6909 machopic_* functions defined. */
6910 static rtx
6911 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6912 int opnum, int type,
6913 int ind_levels ATTRIBUTE_UNUSED, int *win)
6915 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6917 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6918 DFmode/DImode MEM. */
6919 if (reg_offset_p
6920 && opnum == 1
6921 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6922 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6923 reg_offset_p = false;
6925 /* We must recognize output that we have already generated ourselves. */
6926 if (GET_CODE (x) == PLUS
6927 && GET_CODE (XEXP (x, 0)) == PLUS
6928 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6929 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6930 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6932 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6933 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6934 opnum, (enum reload_type) type);
6935 *win = 1;
6936 return x;
6939 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6940 if (GET_CODE (x) == LO_SUM
6941 && GET_CODE (XEXP (x, 0)) == HIGH)
6943 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6944 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6945 opnum, (enum reload_type) type);
6946 *win = 1;
6947 return x;
6950 #if TARGET_MACHO
6951 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6952 && GET_CODE (x) == LO_SUM
6953 && GET_CODE (XEXP (x, 0)) == PLUS
6954 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6955 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6956 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6957 && machopic_operand_p (XEXP (x, 1)))
6959 /* Result of previous invocation of this function on Darwin
6960 floating point constant. */
6961 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6962 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6963 opnum, (enum reload_type) type);
6964 *win = 1;
6965 return x;
6967 #endif
6969 if (TARGET_CMODEL != CMODEL_SMALL
6970 && reg_offset_p
6971 && small_toc_ref (x, VOIDmode))
6973 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6974 x = gen_rtx_LO_SUM (Pmode, hi, x);
6975 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6976 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6977 opnum, (enum reload_type) type);
6978 *win = 1;
6979 return x;
6982 if (GET_CODE (x) == PLUS
6983 && GET_CODE (XEXP (x, 0)) == REG
6984 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6985 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6986 && GET_CODE (XEXP (x, 1)) == CONST_INT
6987 && reg_offset_p
6988 && !SPE_VECTOR_MODE (mode)
6989 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6990 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
6992 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6993 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6994 HOST_WIDE_INT high
6995 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6997 /* Check for 32-bit overflow. */
6998 if (high + low != val)
7000 *win = 0;
7001 return x;
7004 /* Reload the high part into a base reg; leave the low part
7005 in the mem directly. */
7007 x = gen_rtx_PLUS (GET_MODE (x),
7008 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7009 GEN_INT (high)),
7010 GEN_INT (low));
7012 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7013 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7014 opnum, (enum reload_type) type);
7015 *win = 1;
7016 return x;
7019 if (GET_CODE (x) == SYMBOL_REF
7020 && reg_offset_p
7021 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7022 && !SPE_VECTOR_MODE (mode)
7023 #if TARGET_MACHO
7024 && DEFAULT_ABI == ABI_DARWIN
7025 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7026 && machopic_symbol_defined_p (x)
7027 #else
7028 && DEFAULT_ABI == ABI_V4
7029 && !flag_pic
7030 #endif
7031 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7032 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7033 without fprs.
7034 ??? Assume floating point reg based on mode? This assumption is
7035 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7036 where reload ends up doing a DFmode load of a constant from
7037 mem using two gprs. Unfortunately, at this point reload
7038 hasn't yet selected regs so poking around in reload data
7039 won't help and even if we could figure out the regs reliably,
7040 we'd still want to allow this transformation when the mem is
7041 naturally aligned. Since we say the address is good here, we
7042 can't disable offsets from LO_SUMs in mem_operand_gpr.
7043 FIXME: Allow offset from lo_sum for other modes too, when
7044 mem is sufficiently aligned. */
7045 && mode != TFmode
7046 && mode != TDmode
7047 && (mode != TImode || !TARGET_VSX_TIMODE)
7048 && mode != PTImode
7049 && (mode != DImode || TARGET_POWERPC64)
7050 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7051 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7053 #if TARGET_MACHO
7054 if (flag_pic)
7056 rtx offset = machopic_gen_offset (x);
7057 x = gen_rtx_LO_SUM (GET_MODE (x),
7058 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7059 gen_rtx_HIGH (Pmode, offset)), offset);
7061 else
7062 #endif
7063 x = gen_rtx_LO_SUM (GET_MODE (x),
7064 gen_rtx_HIGH (Pmode, x), x);
7066 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7067 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7068 opnum, (enum reload_type) type);
7069 *win = 1;
7070 return x;
7073 /* Reload an offset address wrapped by an AND that represents the
7074 masking of the lower bits. Strip the outer AND and let reload
7075 convert the offset address into an indirect address. For VSX,
7076 force reload to create the address with an AND in a separate
7077 register, because we can't guarantee an altivec register will
7078 be used. */
7079 if (VECTOR_MEM_ALTIVEC_P (mode)
7080 && GET_CODE (x) == AND
7081 && GET_CODE (XEXP (x, 0)) == PLUS
7082 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7083 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7084 && GET_CODE (XEXP (x, 1)) == CONST_INT
7085 && INTVAL (XEXP (x, 1)) == -16)
7087 x = XEXP (x, 0);
7088 *win = 1;
7089 return x;
7092 if (TARGET_TOC
7093 && reg_offset_p
7094 && GET_CODE (x) == SYMBOL_REF
7095 && use_toc_relative_ref (x))
7097 x = create_TOC_reference (x, NULL_RTX);
7098 if (TARGET_CMODEL != CMODEL_SMALL)
7099 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7100 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7101 opnum, (enum reload_type) type);
7102 *win = 1;
7103 return x;
7105 *win = 0;
7106 return x;
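/* Worked example of the 32-bit overflow check above: for
   val == 0x7fff8000,
     low  == -0x8000
     high == ((0x80000000 & 0xffffffff) ^ 0x80000000) - 0x80000000
          == -0x80000000
   so high + low == -0x80008000 != val, and the address is returned
   unchanged with *win == 0.  */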
7109 /* Debug version of rs6000_legitimize_reload_address. */
7110 static rtx
7111 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7112 int opnum, int type,
7113 int ind_levels, int *win)
7115 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7116 ind_levels, win);
7117 fprintf (stderr,
7118 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7119 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7120 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7121 debug_rtx (x);
7123 if (x == ret)
7124 fprintf (stderr, "Same address returned\n");
7125 else if (!ret)
7126 fprintf (stderr, "NULL returned\n");
7127 else
7129 fprintf (stderr, "New address:\n");
7130 debug_rtx (ret);
7133 return ret;
7136 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7137 that is a valid memory address for an instruction.
7138 The MODE argument is the machine mode for the MEM expression
7139 that wants to use this address.
7141 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7142 refers to a constant pool entry of an address (or the sum of it
7143 plus a constant), a short (16-bit signed) constant plus a register,
7144 the sum of two registers, or a register indirect, possibly with an
7145 auto-increment. For DFmode, DDmode and DImode with a constant plus
7146 register, we must ensure that both words are addressable or PowerPC64
7147 with offset word aligned.
7149 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7150 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7151 because adjacent memory cells are accessed by adding word-sized offsets
7152 during assembly output. */
7153 static bool
7154 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7156 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7158 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7159 if (VECTOR_MEM_ALTIVEC_P (mode)
7160 && GET_CODE (x) == AND
7161 && GET_CODE (XEXP (x, 1)) == CONST_INT
7162 && INTVAL (XEXP (x, 1)) == -16)
7163 x = XEXP (x, 0);
7165 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7166 return 0;
7167 if (legitimate_indirect_address_p (x, reg_ok_strict))
7168 return 1;
7169 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7170 && !ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7171 && !SPE_VECTOR_MODE (mode)
7172 && mode != TFmode
7173 && mode != TDmode
7174 && mode != TImode
7175 && mode != PTImode
7176 /* Restrict addressing for DI because of our SUBREG hackery. */
7177 && !(TARGET_E500_DOUBLE
7178 && (mode == DFmode || mode == DDmode || mode == DImode))
7179 && TARGET_UPDATE
7180 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7181 return 1;
7182 if (virtual_stack_registers_memory_p (x))
7183 return 1;
7184 if (reg_offset_p && legitimate_small_data_p (mode, x))
7185 return 1;
7186 if (reg_offset_p
7187 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
7188 return 1;
7189 /* For TImode, if we have load/store quad, only allow register indirect
7190 addresses. This will allow the values to go in either GPRs or VSX
7191 registers without reloading. The vector types would tend to go into VSX
7192 registers, so we allow REG+REG, while TImode seems somewhat split, in that
7193 some uses are GPR based, and some VSX based. */
7194 if (mode == TImode && TARGET_QUAD_MEMORY)
7195 return 0;
7196 /* If not REG_OK_STRICT (before reload), let any stack offset pass. */
7197 if (! reg_ok_strict
7198 && reg_offset_p
7199 && GET_CODE (x) == PLUS
7200 && GET_CODE (XEXP (x, 0)) == REG
7201 && (XEXP (x, 0) == virtual_stack_vars_rtx
7202 || XEXP (x, 0) == arg_pointer_rtx)
7203 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7204 return 1;
7205 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7206 return 1;
7207 if (mode != TFmode
7208 && mode != TDmode
7209 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7210 || TARGET_POWERPC64
7211 || (mode != DFmode && mode != DDmode)
7212 || (TARGET_E500_DOUBLE && mode != DDmode))
7213 && (TARGET_POWERPC64 || mode != DImode)
7214 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7215 && mode != PTImode
7216 && !avoiding_indexed_address_p (mode)
7217 && legitimate_indexed_address_p (x, reg_ok_strict))
7218 return 1;
7219 if (GET_CODE (x) == PRE_MODIFY
7220 && mode != TImode
7221 && mode != PTImode
7222 && mode != TFmode
7223 && mode != TDmode
7224 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7225 || TARGET_POWERPC64
7226 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
7227 && (TARGET_POWERPC64 || mode != DImode)
7228 && !ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7229 && !SPE_VECTOR_MODE (mode)
7230 /* Restrict addressing for DI because of our SUBREG hackery. */
7231 && !(TARGET_E500_DOUBLE
7232 && (mode == DFmode || mode == DDmode || mode == DImode))
7233 && TARGET_UPDATE
7234 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7235 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7236 reg_ok_strict, false)
7237 || (!avoiding_indexed_address_p (mode)
7238 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7239 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7240 return 1;
7241 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7242 return 1;
7243 return 0;
7246 /* Debug version of rs6000_legitimate_address_p. */
7247 static bool
7248 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7249 bool reg_ok_strict)
7251 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7252 fprintf (stderr,
7253 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7254 "strict = %d, reload = %s, code = %s\n",
7255 ret ? "true" : "false",
7256 GET_MODE_NAME (mode),
7257 reg_ok_strict,
7258 (reload_completed
7259 ? "after"
7260 : (reload_in_progress ? "progress" : "before")),
7261 GET_RTX_NAME (GET_CODE (x)));
7262 debug_rtx (x);
7264 return ret;
7267 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7269 static bool
7270 rs6000_mode_dependent_address_p (const_rtx addr,
7271 addr_space_t as ATTRIBUTE_UNUSED)
7273 return rs6000_mode_dependent_address_ptr (addr);
7276 /* Go to LABEL if ADDR (a legitimate address expression)
7277 has an effect that depends on the machine mode it is used for.
7279 On the RS/6000 this is true of all integral offsets (since AltiVec
7280 and VSX modes don't allow them) or is a pre-increment or decrement.
7282 ??? Except that due to conceptual problems in offsettable_address_p
7283 we can't really report the problems of integral offsets. So leave
7284 this assuming that the adjustable offset must be valid for the
7285 sub-words of a TFmode operand, which is what we had before. */
7287 static bool
7288 rs6000_mode_dependent_address (const_rtx addr)
7290 switch (GET_CODE (addr))
7292 case PLUS:
7293 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7294 is considered a legitimate address before reload, so there
7295 are no offset restrictions in that case. Note that this
7296 condition is safe in strict mode because any address involving
7297 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7298 been rejected as illegitimate. */
7299 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7300 && XEXP (addr, 0) != arg_pointer_rtx
7301 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7303 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
7304 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7306 break;
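/* To make the check above concrete: a D-form offset VAL is
   mode-independent only if VAL plus the largest possible access span
   still fits in the signed 16-bit displacement field.  Biasing by
   0x8000 maps the valid signed range onto
   0 <= VAL + 0x8000 < 0x10000 - N, where N is 8 in 64-bit mode (the
   last doubleword of a 16-byte value) and 12 in 32-bit mode (the last
   word).  The expression above returns true for exactly the offsets
   outside that range.  */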
7308 case LO_SUM:
7309 /* Anything in the constant pool is sufficiently aligned that
7310 all bytes have the same high part address. */
7311 return !legitimate_constant_pool_address_p (addr, QImode, false);
7313 /* Auto-increment cases are now treated generically in recog.c. */
7314 case PRE_MODIFY:
7315 return TARGET_UPDATE;
7317 /* AND is only allowed in Altivec loads. */
7318 case AND:
7319 return true;
7321 default:
7322 break;
7325 return false;
7328 /* Debug version of rs6000_mode_dependent_address. */
7329 static bool
7330 rs6000_debug_mode_dependent_address (const_rtx addr)
7332 bool ret = rs6000_mode_dependent_address (addr);
7334 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7335 ret ? "true" : "false");
7336 debug_rtx (addr);
7338 return ret;
7341 /* Implement FIND_BASE_TERM. */
7344 rs6000_find_base_term (rtx op)
7346 rtx base;
7348 base = op;
7349 if (GET_CODE (base) == CONST)
7350 base = XEXP (base, 0);
7351 if (GET_CODE (base) == PLUS)
7352 base = XEXP (base, 0);
7353 if (GET_CODE (base) == UNSPEC)
7354 switch (XINT (base, 1))
7356 case UNSPEC_TOCREL:
7357 case UNSPEC_MACHOPIC_OFFSET:
7358 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7359 for aliasing purposes. */
7360 return XVECEXP (base, 0, 0);
7363 return op;
7366 /* More elaborate version of recog's offsettable_memref_p predicate
7367 that works around the ??? note of rs6000_mode_dependent_address.
7368 In particular it accepts
7370 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7372 in 32-bit mode, which the recog predicate rejects. */
7374 static bool
7375 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7377 bool worst_case;
7379 if (!MEM_P (op))
7380 return false;
7382 /* First mimic offsettable_memref_p. */
7383 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7384 return true;
7386 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7387 the latter predicate knows nothing about the mode of the memory
7388 reference and, therefore, assumes that it is the largest supported
7389 mode (TFmode). As a consequence, legitimate offsettable memory
7390 references are rejected. rs6000_legitimate_offset_address_p contains
7391 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7392 at least with a little bit of help here given that we know the
7393 actual registers used. */
7394 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7395 || GET_MODE_SIZE (reg_mode) == 4);
7396 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7397 true, worst_case);
7400 /* Change register usage conditional on target flags. */
7401 static void
7402 rs6000_conditional_register_usage (void)
7404 int i;
7406 if (TARGET_DEBUG_TARGET)
7407 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7409 /* Set MQ register fixed (already call_used) so that it will not be
7410 allocated. */
7411 fixed_regs[64] = 1;
7413 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7414 if (TARGET_64BIT)
7415 fixed_regs[13] = call_used_regs[13]
7416 = call_really_used_regs[13] = 1;
7418 /* Conditionally disable FPRs. */
7419 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7420 for (i = 32; i < 64; i++)
7421 fixed_regs[i] = call_used_regs[i]
7422 = call_really_used_regs[i] = 1;
7424 /* The TOC register is not killed across calls in a way that is
7425 visible to the compiler. */
7426 if (DEFAULT_ABI == ABI_AIX)
7427 call_really_used_regs[2] = 0;
7429 if (DEFAULT_ABI == ABI_V4
7430 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7431 && flag_pic == 2)
7432 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7434 if (DEFAULT_ABI == ABI_V4
7435 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7436 && flag_pic == 1)
7437 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7438 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7439 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7441 if (DEFAULT_ABI == ABI_DARWIN
7442 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7443 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7444 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7445 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7447 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7448 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7449 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7451 if (TARGET_SPE)
7453 global_regs[SPEFSCR_REGNO] = 1;
7454 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7455 registers in prologues and epilogues. We no longer use r14
7456 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7457 pool for link-compatibility with older versions of GCC. Once
7458 "old" code has died out, we can return r14 to the allocation
7459 pool. */
7460 fixed_regs[14]
7461 = call_used_regs[14]
7462 = call_really_used_regs[14] = 1;
7465 if (!TARGET_ALTIVEC && !TARGET_VSX)
7467 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7468 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7469 call_really_used_regs[VRSAVE_REGNO] = 1;
7472 if (TARGET_ALTIVEC || TARGET_VSX)
7473 global_regs[VSCR_REGNO] = 1;
7475 if (TARGET_ALTIVEC_ABI)
7477 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7478 call_used_regs[i] = call_really_used_regs[i] = 1;
7480 /* AIX reserves VR20:31 in non-extended ABI mode. */
7481 if (TARGET_XCOFF)
7482 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7483 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7487 /* Try to output insns to set DEST equal to the constant SOURCE if it can
7488 be done in less than N insns. Do all computations in MODE.
7489 Returns the place where the output has been placed if it can be
7490 done and the insns have been emitted. If it would take more than N
7491 insns, zero is returned and no insns are emitted. */
7494 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
7495 rtx source, int n ATTRIBUTE_UNUSED)
7497 rtx result, insn, set;
7498 HOST_WIDE_INT c0, c1;
7500 switch (mode)
7502 case QImode:
7503 case HImode:
7504 if (dest == NULL)
7505 dest = gen_reg_rtx (mode);
7506 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7507 return dest;
7509 case SImode:
7510 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7512 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
7513 GEN_INT (INTVAL (source)
7514 & (~ (HOST_WIDE_INT) 0xffff))));
7515 emit_insn (gen_rtx_SET (VOIDmode, dest,
7516 gen_rtx_IOR (SImode, copy_rtx (result),
7517 GEN_INT (INTVAL (source) & 0xffff))));
7518 result = dest;
7519 break;
7521 case DImode:
7522 switch (GET_CODE (source))
7524 case CONST_INT:
7525 c0 = INTVAL (source);
7526 c1 = -(c0 < 0);
7527 break;
7529 default:
7530 gcc_unreachable ();
7533 result = rs6000_emit_set_long_const (dest, c0, c1);
7534 break;
7536 default:
7537 gcc_unreachable ();
7540 insn = get_last_insn ();
7541 set = single_set (insn);
7542 if (! CONSTANT_P (SET_SRC (set)))
7543 set_unique_reg_note (insn, REG_EQUAL, source);
7545 return result;
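/* Illustrative sketch of the SImode path above (register names are
   placeholders): a constant such as 0x12345678 is built with two insns,
   roughly

     lis  rT, 0x1234          # rT = 0x12340000
     ori  rD, rT, 0x5678      # rD = 0x12345678

   and the REG_EQUAL note attached at the end records the full constant
   on the final insn of the sequence.  */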
7548 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
7549 fall back to a straightforward decomposition. We do this to avoid
7550 exponential run times encountered when looking for longer sequences
7551 with rs6000_emit_set_const. */
7552 static rtx
7553 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
7555 if (!TARGET_POWERPC64)
7557 rtx operand1, operand2;
7559 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
7560 DImode);
7561 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
7562 DImode);
7563 emit_move_insn (operand1, GEN_INT (c1));
7564 emit_move_insn (operand2, GEN_INT (c2));
7566 else
7568 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7570 ud1 = c1 & 0xffff;
7571 ud2 = (c1 & 0xffff0000) >> 16;
7572 c2 = c1 >> 32;
7573 ud3 = c2 & 0xffff;
7574 ud4 = (c2 & 0xffff0000) >> 16;
7576 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7577 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7578 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7580 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7581 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7583 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7584 - 0x80000000));
7585 if (ud1 != 0)
7586 emit_move_insn (copy_rtx (dest),
7587 gen_rtx_IOR (DImode, copy_rtx (dest),
7588 GEN_INT (ud1)));
7590 else if (ud3 == 0 && ud4 == 0)
7592 gcc_assert (ud2 & 0x8000);
7593 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7594 - 0x80000000));
7595 if (ud1 != 0)
7596 emit_move_insn (copy_rtx (dest),
7597 gen_rtx_IOR (DImode, copy_rtx (dest),
7598 GEN_INT (ud1)));
7599 emit_move_insn (copy_rtx (dest),
7600 gen_rtx_ZERO_EXTEND (DImode,
7601 gen_lowpart (SImode,
7602 copy_rtx (dest))));
7604 else if ((ud4 == 0xffff && (ud3 & 0x8000))
7605 || (ud4 == 0 && ! (ud3 & 0x8000)))
7607 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
7608 - 0x80000000));
7609 if (ud2 != 0)
7610 emit_move_insn (copy_rtx (dest),
7611 gen_rtx_IOR (DImode, copy_rtx (dest),
7612 GEN_INT (ud2)));
7613 emit_move_insn (copy_rtx (dest),
7614 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7615 GEN_INT (16)));
7616 if (ud1 != 0)
7617 emit_move_insn (copy_rtx (dest),
7618 gen_rtx_IOR (DImode, copy_rtx (dest),
7619 GEN_INT (ud1)));
7621 else
7623 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
7624 - 0x80000000));
7625 if (ud3 != 0)
7626 emit_move_insn (copy_rtx (dest),
7627 gen_rtx_IOR (DImode, copy_rtx (dest),
7628 GEN_INT (ud3)));
7630 emit_move_insn (copy_rtx (dest),
7631 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7632 GEN_INT (32)));
7633 if (ud2 != 0)
7634 emit_move_insn (copy_rtx (dest),
7635 gen_rtx_IOR (DImode, copy_rtx (dest),
7636 GEN_INT (ud2 << 16)));
7637 if (ud1 != 0)
7638 emit_move_insn (copy_rtx (dest),
7639 gen_rtx_IOR (DImode, copy_rtx (dest),
7640 GEN_INT (ud1)));
7643 return dest;
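/* Illustrative sketch of the worst case above (register name is a
   placeholder): a full 64-bit constant such as 0x123456789abcdef0 takes
   five insns,

     lis   rD, 0x1234         # ud4
     ori   rD, rD, 0x5678     # ud3
     sldi  rD, rD, 32
     oris  rD, rD, 0x9abc     # ud2
     ori   rD, rD, 0xdef0     # ud1

   with the oris/ori steps skipped when the corresponding 16-bit chunk
   is zero.  */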
7646 /* Helper for the following. Get rid of [r+r] memory refs
7647 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
7649 static void
7650 rs6000_eliminate_indexed_memrefs (rtx operands[2])
7652 if (reload_in_progress)
7653 return;
7655 if (GET_CODE (operands[0]) == MEM
7656 && GET_CODE (XEXP (operands[0], 0)) != REG
7657 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
7658 GET_MODE (operands[0]), false))
7659 operands[0]
7660 = replace_equiv_address (operands[0],
7661 copy_addr_to_reg (XEXP (operands[0], 0)));
7663 if (GET_CODE (operands[1]) == MEM
7664 && GET_CODE (XEXP (operands[1], 0)) != REG
7665 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
7666 GET_MODE (operands[1]), false))
7667 operands[1]
7668 = replace_equiv_address (operands[1],
7669 copy_addr_to_reg (XEXP (operands[1], 0)));
7672 /* Generate a vector of constants to permute MODE for a little-endian
7673 storage operation by swapping the two halves of a vector. */
7674 static rtvec
7675 rs6000_const_vec (enum machine_mode mode)
7677 int i, subparts;
7678 rtvec v;
7680 switch (mode)
7682 case V2DFmode:
7683 case V2DImode:
7684 subparts = 2;
7685 break;
7686 case V4SFmode:
7687 case V4SImode:
7688 subparts = 4;
7689 break;
7690 case V8HImode:
7691 subparts = 8;
7692 break;
7693 case V16QImode:
7694 subparts = 16;
7695 break;
7696 default:
7697 gcc_unreachable ();
7700 v = rtvec_alloc (subparts);
7702 for (i = 0; i < subparts / 2; ++i)
7703 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
7704 for (i = subparts / 2; i < subparts; ++i)
7705 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
7707 return v;
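/* Example: for V4SImode the loops above build the vector {2, 3, 0, 1},
   a permutation that swaps the two 8-byte halves of the register, which
   is the element reordering performed by lxvd2x/stxvd2x on a
   little-endian layout.  */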
7710 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
7711 for a VSX load or store operation. */
7713 rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
7715 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
7716 return gen_rtx_VEC_SELECT (mode, source, par);
7719 /* Emit a little-endian load from vector memory location SOURCE to VSX
7720 register DEST in mode MODE. The load is done with two permuting
7721 insns that represent an lxvd2x and an xxpermdi. */
7722 void
7723 rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
7725 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
7726 rtx permute_mem = rs6000_gen_le_vsx_permute (source, mode);
7727 rtx permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
7728 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
7729 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
7732 /* Emit a little-endian store to vector memory location DEST from VSX
7733 register SOURCE in mode MODE. The store is done with two permuting
7734 insns that represent an xxpermdi and an stxvd2x. */
7735 void
7736 rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
7738 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
7739 rtx permute_src = rs6000_gen_le_vsx_permute (source, mode);
7740 rtx permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
7741 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
7742 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
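/* Roughly, the RTL for the sequences above, using V2DFmode (where
   rs6000_const_vec yields {1, 0}) and a load:

     (set (reg:V2DF tmp)  (vec_select:V2DF (mem:V2DF addr)
                                           (parallel [1 0])))
     (set (reg:V2DF dest) (vec_select:V2DF (reg:V2DF tmp)
                                           (parallel [1 0])))

   The first insn stands for the doubleword-swapping lxvd2x and the
   second for an xxpermdi that swaps the halves back, leaving DEST in
   the expected element order.  The store case mirrors this with the
   permutes on the source side.  */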
7745 /* Emit a sequence representing a little-endian VSX load or store,
7746 moving data from SOURCE to DEST in mode MODE. This is done
7747 separately from rs6000_emit_move to ensure it is called only
7748 during expand. LE VSX loads and stores introduced later are
7749 handled with a split. The expand-time RTL generation allows
7750 us to optimize away redundant pairs of register-permutes. */
7751 void
7752 rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
7754 gcc_assert (!BYTES_BIG_ENDIAN
7755 && VECTOR_MEM_VSX_P (mode)
7756 && mode != TImode
7757 && (MEM_P (source) ^ MEM_P (dest)));
7759 if (MEM_P (source))
7761 gcc_assert (REG_P (dest));
7762 rs6000_emit_le_vsx_load (dest, source, mode);
7764 else
7766 if (!REG_P (source))
7767 source = force_reg (mode, source);
7768 rs6000_emit_le_vsx_store (dest, source, mode);
7772 /* Emit a move from SOURCE to DEST in mode MODE. */
7773 void
7774 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
7776 rtx operands[2];
7777 operands[0] = dest;
7778 operands[1] = source;
7780 if (TARGET_DEBUG_ADDR)
7782 fprintf (stderr,
7783 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
7784 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
7785 GET_MODE_NAME (mode),
7786 reload_in_progress,
7787 reload_completed,
7788 can_create_pseudo_p ());
7789 debug_rtx (dest);
7790 fprintf (stderr, "source:\n");
7791 debug_rtx (source);
7794 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
7795 if (GET_CODE (operands[1]) == CONST_DOUBLE
7796 && ! FLOAT_MODE_P (mode)
7797 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
7799 /* FIXME. This should never happen. */
7800 /* Since it seems that it does, do the safe thing and convert
7801 to a CONST_INT. */
7802 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
7804 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
7805 || FLOAT_MODE_P (mode)
7806 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
7807 || CONST_DOUBLE_LOW (operands[1]) < 0)
7808 && (CONST_DOUBLE_HIGH (operands[1]) != -1
7809 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
7811 /* Check if GCC is setting up a block move that will end up using FP
7812 registers as temporaries. We must make sure this is acceptable. */
7813 if (GET_CODE (operands[0]) == MEM
7814 && GET_CODE (operands[1]) == MEM
7815 && mode == DImode
7816 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
7817 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
7818 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
7819 ? 32 : MEM_ALIGN (operands[0])))
7820 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
7821 ? 32
7822 : MEM_ALIGN (operands[1]))))
7823 && ! MEM_VOLATILE_P (operands [0])
7824 && ! MEM_VOLATILE_P (operands [1]))
7826 emit_move_insn (adjust_address (operands[0], SImode, 0),
7827 adjust_address (operands[1], SImode, 0));
7828 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
7829 adjust_address (copy_rtx (operands[1]), SImode, 4));
7830 return;
7833 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
7834 && !gpc_reg_operand (operands[1], mode))
7835 operands[1] = force_reg (mode, operands[1]);
7837 /* Recognize the case where operands[1] is a reference to thread-local
7838 data and load its address to a register. */
7839 if (rs6000_tls_referenced_p (operands[1]))
7841 enum tls_model model;
7842 rtx tmp = operands[1];
7843 rtx addend = NULL;
7845 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
7847 addend = XEXP (XEXP (tmp, 0), 1);
7848 tmp = XEXP (XEXP (tmp, 0), 0);
7851 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
7852 model = SYMBOL_REF_TLS_MODEL (tmp);
7853 gcc_assert (model != 0);
7855 tmp = rs6000_legitimize_tls_address (tmp, model);
7856 if (addend)
7858 tmp = gen_rtx_PLUS (mode, tmp, addend);
7859 tmp = force_operand (tmp, operands[0]);
7861 operands[1] = tmp;
7864 /* Handle the case where reload calls us with an invalid address. */
7865 if (reload_in_progress && mode == Pmode
7866 && (! general_operand (operands[1], mode)
7867 || ! nonimmediate_operand (operands[0], mode)))
7868 goto emit_set;
7870 /* 128-bit constant floating-point values on Darwin should really be
7871 loaded as two parts. */
7872 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
7873 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
7875 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
7876 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7877 DFmode);
7878 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7879 GET_MODE_SIZE (DFmode)),
7880 simplify_gen_subreg (DFmode, operands[1], mode,
7881 GET_MODE_SIZE (DFmode)),
7882 DFmode);
7883 return;
7886 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7887 cfun->machine->sdmode_stack_slot =
7888 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7890 if (reload_in_progress
7891 && mode == SDmode
7892 && cfun->machine->sdmode_stack_slot != NULL_RTX
7893 && MEM_P (operands[0])
7894 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7895 && REG_P (operands[1]))
7897 if (FP_REGNO_P (REGNO (operands[1])))
7899 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7900 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7901 emit_insn (gen_movsd_store (mem, operands[1]));
7903 else if (INT_REGNO_P (REGNO (operands[1])))
7905 rtx mem = adjust_address_nv (operands[0], mode, 4);
7906 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7907 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7909 else
7910 gcc_unreachable ();
7911 return;
7913 if (reload_in_progress
7914 && mode == SDmode
7915 && REG_P (operands[0])
7916 && MEM_P (operands[1])
7917 && cfun->machine->sdmode_stack_slot != NULL_RTX
7918 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7920 if (FP_REGNO_P (REGNO (operands[0])))
7922 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7923 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7924 emit_insn (gen_movsd_load (operands[0], mem));
7926 else if (INT_REGNO_P (REGNO (operands[0])))
7928 rtx mem = adjust_address_nv (operands[1], mode, 4);
7929 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7930 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7932 else
7933 gcc_unreachable ();
7934 return;
7937 /* FIXME: In the long term, this switch statement should go away
7938 and be replaced by a sequence of tests based on things like
7939 mode == Pmode. */
7940 switch (mode)
7942 case HImode:
7943 case QImode:
7944 if (CONSTANT_P (operands[1])
7945 && GET_CODE (operands[1]) != CONST_INT)
7946 operands[1] = force_const_mem (mode, operands[1]);
7947 break;
7949 case TFmode:
7950 case TDmode:
7951 rs6000_eliminate_indexed_memrefs (operands);
7952 /* fall through */
7954 case DFmode:
7955 case DDmode:
7956 case SFmode:
7957 case SDmode:
7958 if (CONSTANT_P (operands[1])
7959 && ! easy_fp_constant (operands[1], mode))
7960 operands[1] = force_const_mem (mode, operands[1]);
7961 break;
7963 case V16QImode:
7964 case V8HImode:
7965 case V4SFmode:
7966 case V4SImode:
7967 case V4HImode:
7968 case V2SFmode:
7969 case V2SImode:
7970 case V1DImode:
7971 case V2DFmode:
7972 case V2DImode:
7973 if (CONSTANT_P (operands[1])
7974 && !easy_vector_constant (operands[1], mode))
7975 operands[1] = force_const_mem (mode, operands[1]);
7976 break;
7978 case SImode:
7979 case DImode:
7980 /* Use the default pattern for the address of ELF small data. */
7981 if (TARGET_ELF
7982 && mode == Pmode
7983 && DEFAULT_ABI == ABI_V4
7984 && (GET_CODE (operands[1]) == SYMBOL_REF
7985 || GET_CODE (operands[1]) == CONST)
7986 && small_data_operand (operands[1], mode))
7988 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7989 return;
7992 if (DEFAULT_ABI == ABI_V4
7993 && mode == Pmode && mode == SImode
7994 && flag_pic == 1 && got_operand (operands[1], mode))
7996 emit_insn (gen_movsi_got (operands[0], operands[1]));
7997 return;
8000 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8001 && TARGET_NO_TOC
8002 && ! flag_pic
8003 && mode == Pmode
8004 && CONSTANT_P (operands[1])
8005 && GET_CODE (operands[1]) != HIGH
8006 && GET_CODE (operands[1]) != CONST_INT)
8008 rtx target = (!can_create_pseudo_p ()
8009 ? operands[0]
8010 : gen_reg_rtx (mode));
8012 /* If this is a function address on -mcall-aixdesc,
8013 convert it to the address of the descriptor. */
8014 if (DEFAULT_ABI == ABI_AIX
8015 && GET_CODE (operands[1]) == SYMBOL_REF
8016 && XSTR (operands[1], 0)[0] == '.')
8018 const char *name = XSTR (operands[1], 0);
8019 rtx new_ref;
8020 while (*name == '.')
8021 name++;
8022 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8023 CONSTANT_POOL_ADDRESS_P (new_ref)
8024 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8025 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8026 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8027 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8028 operands[1] = new_ref;
8031 if (DEFAULT_ABI == ABI_DARWIN)
8033 #if TARGET_MACHO
8034 if (MACHO_DYNAMIC_NO_PIC_P)
8036 /* Take care of any required data indirection. */
8037 operands[1] = rs6000_machopic_legitimize_pic_address (
8038 operands[1], mode, operands[0]);
8039 if (operands[0] != operands[1])
8040 emit_insn (gen_rtx_SET (VOIDmode,
8041 operands[0], operands[1]));
8042 return;
8044 #endif
8045 emit_insn (gen_macho_high (target, operands[1]));
8046 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8047 return;
8050 emit_insn (gen_elf_high (target, operands[1]));
8051 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8052 return;
8055 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8056 and we have put it in the TOC, we just need to make a TOC-relative
8057 reference to it. */
8058 if (TARGET_TOC
8059 && GET_CODE (operands[1]) == SYMBOL_REF
8060 && use_toc_relative_ref (operands[1]))
8061 operands[1] = create_TOC_reference (operands[1], operands[0]);
8062 else if (mode == Pmode
8063 && CONSTANT_P (operands[1])
8064 && GET_CODE (operands[1]) != HIGH
8065 && ((GET_CODE (operands[1]) != CONST_INT
8066 && ! easy_fp_constant (operands[1], mode))
8067 || (GET_CODE (operands[1]) == CONST_INT
8068 && (num_insns_constant (operands[1], mode)
8069 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8070 || (GET_CODE (operands[0]) == REG
8071 && FP_REGNO_P (REGNO (operands[0]))))
8072 && !toc_relative_expr_p (operands[1], false)
8073 && (TARGET_CMODEL == CMODEL_SMALL
8074 || can_create_pseudo_p ()
8075 || (REG_P (operands[0])
8076 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8079 #if TARGET_MACHO
8080 /* Darwin uses a special PIC legitimizer. */
8081 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8083 operands[1] =
8084 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8085 operands[0]);
8086 if (operands[0] != operands[1])
8087 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8088 return;
8090 #endif
8092 /* If we are to limit the number of things we put in the TOC and
8093 this is a symbol plus a constant we can add in one insn,
8094 just put the symbol in the TOC and add the constant. Don't do
8095 this if reload is in progress. */
8096 if (GET_CODE (operands[1]) == CONST
8097 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8098 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8099 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8100 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8101 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8102 && ! side_effects_p (operands[0]))
8104 rtx sym =
8105 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8106 rtx other = XEXP (XEXP (operands[1], 0), 1);
8108 sym = force_reg (mode, sym);
8109 emit_insn (gen_add3_insn (operands[0], sym, other));
8110 return;
8113 operands[1] = force_const_mem (mode, operands[1]);
8115 if (TARGET_TOC
8116 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8117 && constant_pool_expr_p (XEXP (operands[1], 0))
8118 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8119 get_pool_constant (XEXP (operands[1], 0)),
8120 get_pool_mode (XEXP (operands[1], 0))))
8122 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8123 operands[0]);
8124 operands[1] = gen_const_mem (mode, tocref);
8125 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8128 break;
8130 case TImode:
8131 if (!VECTOR_MEM_VSX_P (TImode))
8132 rs6000_eliminate_indexed_memrefs (operands);
8133 break;
8135 case PTImode:
8136 rs6000_eliminate_indexed_memrefs (operands);
8137 break;
8139 default:
8140 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
8143 /* Above, we may have called force_const_mem which may have returned
8144 an invalid address. If we can, fix this up; otherwise, reload will
8145 have to deal with it. */
8146 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8147 operands[1] = validize_mem (operands[1]);
8149 emit_set:
8150 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8153 /* Return true if a structure, union or array containing FIELD should be
8154 accessed using `BLKmode'.
8156 For the SPE, simd types are V2SI, and gcc can be tempted to put the
8157 entire thing in a DI and use subregs to access the internals.
8158 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8159 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8160 best thing to do is set structs to BLKmode and avoid Severe Tire
8161 Damage.
8163 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
8164 fit into 1, whereas DI still needs two. */
8166 static bool
8167 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
8169 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8170 || (TARGET_E500_DOUBLE && mode == DFmode));
8173 /* Nonzero if we can use a floating-point register to pass this arg. */
8174 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
8175 (SCALAR_FLOAT_MODE_P (MODE) \
8176 && (CUM)->fregno <= FP_ARG_MAX_REG \
8177 && TARGET_HARD_FLOAT && TARGET_FPRS)
8179 /* Nonzero if we can use an AltiVec register to pass this arg. */
8180 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
8181 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8182 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8183 && TARGET_ALTIVEC_ABI \
8184 && (NAMED))
8186 /* Return a nonzero value to indicate that the function value should be
8187 returned in memory, just as large structures always are. TYPE will be
8188 the data type of the value, and FNTYPE will be the type of the
8189 function doing the returning, or @code{NULL} for libcalls.
8191 The AIX ABI for the RS/6000 specifies that all structures are
8192 returned in memory. The Darwin ABI does the same.
8194 For the Darwin 64 Bit ABI, a function result can be returned in
8195 registers or in memory, depending on the size of the return data
8196 type. If it is returned in registers, the value occupies the same
8197 registers as it would if it were the first and only function
8198 argument. Otherwise, the function places its result in memory at
8199 the location pointed to by GPR3.
8201 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8202 but a draft put them in memory, and GCC used to implement the draft
8203 instead of the final standard. Therefore, aix_struct_return
8204 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8205 compatibility can change DRAFT_V4_STRUCT_RET to override the
8206 default, and -m switches get the final word. See
8207 rs6000_option_override_internal for more details.
8209 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8210 long double support is enabled. These values are returned in memory.
8212 int_size_in_bytes returns -1 for variable size objects, which go in
8213 memory always. The cast to unsigned makes -1 > 8. */
8215 static bool
8216 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8218 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8219 if (TARGET_MACHO
8220 && rs6000_darwin64_abi
8221 && TREE_CODE (type) == RECORD_TYPE
8222 && int_size_in_bytes (type) > 0)
8224 CUMULATIVE_ARGS valcum;
8225 rtx valret;
8227 valcum.words = 0;
8228 valcum.fregno = FP_ARG_MIN_REG;
8229 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8230 /* Do a trial code generation as if this were going to be passed
8231 as an argument; if any part goes in memory, we return NULL. */
8232 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8233 if (valret)
8234 return false;
8235 /* Otherwise fall through to more conventional ABI rules. */
8238 if (AGGREGATE_TYPE_P (type)
8239 && (aix_struct_return
8240 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
8241 return true;
8243 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8244 modes only exist for GCC vector types if -maltivec. */
8245 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
8246 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
8247 return false;
8249 /* Return synthetic vectors in memory. */
8250 if (TREE_CODE (type) == VECTOR_TYPE
8251 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8253 static bool warned_for_return_big_vectors = false;
8254 if (!warned_for_return_big_vectors)
8256 warning (0, "GCC vector returned by reference: "
8257 "non-standard ABI extension with no compatibility guarantee");
8258 warned_for_return_big_vectors = true;
8260 return true;
8263 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
8264 return true;
8266 return false;
8269 #ifdef HAVE_AS_GNU_ATTRIBUTE
8270 /* Return TRUE if a call to function FNDECL may be one that
8271 potentially affects the function calling ABI of the object file. */
8273 static bool
8274 call_ABI_of_interest (tree fndecl)
8276 if (cgraph_state == CGRAPH_STATE_EXPANSION)
8278 struct cgraph_node *c_node;
8280 /* Libcalls are always interesting. */
8281 if (fndecl == NULL_TREE)
8282 return true;
8284 /* Any call to an external function is interesting. */
8285 if (DECL_EXTERNAL (fndecl))
8286 return true;
8288 /* Interesting functions that we are emitting in this object file. */
8289 c_node = cgraph_get_node (fndecl);
8290 c_node = cgraph_function_or_thunk_node (c_node, NULL);
8291 return !cgraph_only_called_directly_p (c_node);
8293 return false;
8295 #endif
8297 /* Initialize a variable CUM of type CUMULATIVE_ARGS
8298 for a call to a function whose data type is FNTYPE.
8299 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
8301 For incoming args we set the number of arguments in the prototype large
8302 so we never return a PARALLEL. */
8304 void
8305 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
8306 rtx libname ATTRIBUTE_UNUSED, int incoming,
8307 int libcall, int n_named_args,
8308 tree fndecl ATTRIBUTE_UNUSED,
8309 enum machine_mode return_mode ATTRIBUTE_UNUSED)
8311 static CUMULATIVE_ARGS zero_cumulative;
8313 *cum = zero_cumulative;
8314 cum->words = 0;
8315 cum->fregno = FP_ARG_MIN_REG;
8316 cum->vregno = ALTIVEC_ARG_MIN_REG;
8317 cum->prototype = (fntype && prototype_p (fntype));
8318 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
8319 ? CALL_LIBCALL : CALL_NORMAL);
8320 cum->sysv_gregno = GP_ARG_MIN_REG;
8321 cum->stdarg = stdarg_p (fntype);
8323 cum->nargs_prototype = 0;
8324 if (incoming || cum->prototype)
8325 cum->nargs_prototype = n_named_args;
8327 /* Check for a longcall attribute. */
8328 if ((!fntype && rs6000_default_long_calls)
8329 || (fntype
8330 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
8331 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
8332 cum->call_cookie |= CALL_LONG;
8334 if (TARGET_DEBUG_ARG)
8336 fprintf (stderr, "\ninit_cumulative_args:");
8337 if (fntype)
8339 tree ret_type = TREE_TYPE (fntype);
8340 fprintf (stderr, " ret code = %s,",
8341 get_tree_code_name (TREE_CODE (ret_type)));
8344 if (cum->call_cookie & CALL_LONG)
8345 fprintf (stderr, " longcall,");
8347 fprintf (stderr, " proto = %d, nargs = %d\n",
8348 cum->prototype, cum->nargs_prototype);
8351 #ifdef HAVE_AS_GNU_ATTRIBUTE
8352 if (DEFAULT_ABI == ABI_V4)
8354 cum->escapes = call_ABI_of_interest (fndecl);
8355 if (cum->escapes)
8357 tree return_type;
8359 if (fntype)
8361 return_type = TREE_TYPE (fntype);
8362 return_mode = TYPE_MODE (return_type);
8364 else
8365 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
8367 if (return_type != NULL)
8369 if (TREE_CODE (return_type) == RECORD_TYPE
8370 && TYPE_TRANSPARENT_AGGR (return_type))
8372 return_type = TREE_TYPE (first_field (return_type));
8373 return_mode = TYPE_MODE (return_type);
8375 if (AGGREGATE_TYPE_P (return_type)
8376 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
8377 <= 8))
8378 rs6000_returns_struct = true;
8380 if (SCALAR_FLOAT_MODE_P (return_mode))
8381 rs6000_passes_float = true;
8382 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
8383 || SPE_VECTOR_MODE (return_mode))
8384 rs6000_passes_vector = true;
8387 #endif
8389 if (fntype
8390 && !TARGET_ALTIVEC
8391 && TARGET_ALTIVEC_ABI
8392 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
8394 error ("cannot return value in vector register because"
8395 " altivec instructions are disabled, use -maltivec"
8396 " to enable them");
8400 /* Return true if TYPE must be passed on the stack and not in registers. */
8402 static bool
8403 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
8405 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
8406 return must_pass_in_stack_var_size (mode, type);
8407 else
8408 return must_pass_in_stack_var_size_or_pad (mode, type);
8411 /* If defined, a C expression which determines whether, and in which
8412 direction, to pad out an argument with extra space. The value
8413 should be of type `enum direction': either `upward' to pad above
8414 the argument, `downward' to pad below, or `none' to inhibit
8415 padding.
8417 For the AIX ABI structs are always stored left shifted in their
8418 argument slot. */
8420 enum direction
8421 function_arg_padding (enum machine_mode mode, const_tree type)
8423 #ifndef AGGREGATE_PADDING_FIXED
8424 #define AGGREGATE_PADDING_FIXED 0
8425 #endif
8426 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
8427 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
8428 #endif
8430 if (!AGGREGATE_PADDING_FIXED)
8432 /* GCC used to pass structures of the same size as integer types as
8433 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
8434 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
8435 passed padded downward, except that -mstrict-align further
8436 muddied the water in that multi-component structures of 2 and 4
8437 bytes in size were passed padded upward.
8439 The following arranges for best compatibility with previous
8440 versions of gcc, but removes the -mstrict-align dependency. */
8441 if (BYTES_BIG_ENDIAN)
8443 HOST_WIDE_INT size = 0;
8445 if (mode == BLKmode)
8447 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
8448 size = int_size_in_bytes (type);
8450 else
8451 size = GET_MODE_SIZE (mode);
8453 if (size == 1 || size == 2 || size == 4)
8454 return downward;
8456 return upward;
8459 if (AGGREGATES_PAD_UPWARD_ALWAYS)
8461 if (type != 0 && AGGREGATE_TYPE_P (type))
8462 return upward;
8465 /* Fall back to the default. */
8466 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
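/* Example of the rules above: on a big-endian target a 2-byte struct
   argument is padded downward, matching the old pass-as-integer
   behavior, while a 3-byte struct is padded upward.  */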
8469 /* If defined, a C expression that gives the alignment boundary, in bits,
8470 of an argument with the specified mode and type. If it is not defined,
8471 PARM_BOUNDARY is used for all arguments.
8473 V.4 wants long longs and doubles to be double word aligned. Just
8474 testing the mode size is a boneheaded way to do this as it means
8475 that other types such as complex int are also double word aligned.
8476 However, we're stuck with this because changing the ABI might break
8477 existing library interfaces.
8479 Doubleword align SPE vectors.
8480 Quadword align Altivec/VSX vectors.
8481 Quadword align large synthetic vector types. */
8483 static unsigned int
8484 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
8486 if (DEFAULT_ABI == ABI_V4
8487 && (GET_MODE_SIZE (mode) == 8
8488 || (TARGET_HARD_FLOAT
8489 && TARGET_FPRS
8490 && (mode == TFmode || mode == TDmode))))
8491 return 64;
8492 else if (SPE_VECTOR_MODE (mode)
8493 || (type && TREE_CODE (type) == VECTOR_TYPE
8494 && int_size_in_bytes (type) >= 8
8495 && int_size_in_bytes (type) < 16))
8496 return 64;
8497 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8498 || (type && TREE_CODE (type) == VECTOR_TYPE
8499 && int_size_in_bytes (type) >= 16))
8500 return 128;
8501 else if (((TARGET_MACHO && rs6000_darwin64_abi)
8502 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
8503 && mode == BLKmode
8504 && type && TYPE_ALIGN (type) > 64)
8505 return 128;
8506 else
8507 return PARM_BOUNDARY;
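/* Examples of the rules above: under the V.4 ABI a DImode or DFmode
   argument is aligned to 64 bits; an AltiVec V4SImode argument is
   aligned to 128 bits regardless of ABI; everything else falls back to
   PARM_BOUNDARY.  */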
8510 /* For a function parm of MODE and TYPE, return the starting word in
8511 the parameter area. NWORDS of the parameter area are already used. */
8513 static unsigned int
8514 rs6000_parm_start (enum machine_mode mode, const_tree type,
8515 unsigned int nwords)
8517 unsigned int align;
8518 unsigned int parm_offset;
8520 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
8521 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
8522 return nwords + (-(parm_offset + nwords) & align);
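/* Worked example of the computation above, assuming the V.4 ABI in
   32-bit mode (parm_offset == 2, PARM_BOUNDARY == 32): a 16-byte
   aligned vector gives align == 128/32 - 1 == 3, so with nwords == 1
   already used the result is 1 + (-(2 + 1) & 3) == 2, placing the
   vector at word 2, i.e. at 2 mod 4 as described in
   rs6000_function_arg_advance_1.  */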
8525 /* Compute the size (in words) of a function argument. */
8527 static unsigned long
8528 rs6000_arg_size (enum machine_mode mode, const_tree type)
8530 unsigned long size;
8532 if (mode != BLKmode)
8533 size = GET_MODE_SIZE (mode);
8534 else
8535 size = int_size_in_bytes (type);
8537 if (TARGET_32BIT)
8538 return (size + 3) >> 2;
8539 else
8540 return (size + 7) >> 3;
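/* Example: a 10-byte BLKmode argument occupies (10 + 3) >> 2 == 3 words
   on a 32-bit target and (10 + 7) >> 3 == 2 words on a 64-bit one.  */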
8543 /* Use this to flush pending int fields. */
8545 static void
8546 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
8547 HOST_WIDE_INT bitpos, int final)
8549 unsigned int startbit, endbit;
8550 int intregs, intoffset;
8551 enum machine_mode mode;
8553 /* Handle the situations where a float is taking up the first half
8554 of the GPR, and the other half is empty (typically due to
8555 alignment restrictions). We can detect this by an 8-byte-aligned
8556 int field, or by seeing that this is the final flush for this
8557 argument. Count the word and continue on. */
8558 if (cum->floats_in_gpr == 1
8559 && (cum->intoffset % 64 == 0
8560 || (cum->intoffset == -1 && final)))
8562 cum->words++;
8563 cum->floats_in_gpr = 0;
8566 if (cum->intoffset == -1)
8567 return;
8569 intoffset = cum->intoffset;
8570 cum->intoffset = -1;
8571 cum->floats_in_gpr = 0;
8573 if (intoffset % BITS_PER_WORD != 0)
8575 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8576 MODE_INT, 0);
8577 if (mode == BLKmode)
8579 /* We couldn't find an appropriate mode, which happens,
8580 e.g., in packed structs when there are 3 bytes to load.
8581 Move intoffset back to the beginning of the word in this
8582 case. */
8583 intoffset = intoffset & -BITS_PER_WORD;
8587 startbit = intoffset & -BITS_PER_WORD;
8588 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8589 intregs = (endbit - startbit) / BITS_PER_WORD;
8590 cum->words += intregs;
8591 /* words should be unsigned. */
8592 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
8594 int pad = (endbit/BITS_PER_WORD) - cum->words;
8595 cum->words += pad;
8599 /* The darwin64 ABI calls for us to recurse down through structs,
8600 looking for elements passed in registers. Unfortunately, we have
8601 to track int register count here also because of misalignments
8602 in powerpc alignment mode. */
8604 static void
8605 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
8606 const_tree type,
8607 HOST_WIDE_INT startbitpos)
8609 tree f;
8611 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8612 if (TREE_CODE (f) == FIELD_DECL)
8614 HOST_WIDE_INT bitpos = startbitpos;
8615 tree ftype = TREE_TYPE (f);
8616 enum machine_mode mode;
8617 if (ftype == error_mark_node)
8618 continue;
8619 mode = TYPE_MODE (ftype);
8621 if (DECL_SIZE (f) != 0
8622 && host_integerp (bit_position (f), 1))
8623 bitpos += int_bit_position (f);
8625 /* ??? FIXME: else assume zero offset. */
8627 if (TREE_CODE (ftype) == RECORD_TYPE)
8628 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
8629 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
8631 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
8632 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
8633 cum->fregno += n_fpregs;
8634 /* Single-precision floats present a special problem for
8635 us, because they are smaller than an 8-byte GPR, and so
8636 the structure-packing rules combined with the standard
8637 varargs behavior mean that we want to pack float/float
8638 and float/int combinations into a single register's
8639 space. This is complicated by the arg advance flushing,
8640 which works on arbitrarily large groups of int-type
8641 fields. */
8642 if (mode == SFmode)
8644 if (cum->floats_in_gpr == 1)
8646 /* Two floats in a word; count the word and reset
8647 the float count. */
8648 cum->words++;
8649 cum->floats_in_gpr = 0;
8651 else if (bitpos % 64 == 0)
8653 /* A float at the beginning of an 8-byte word;
8654 count it and put off adjusting cum->words until
8655 we see if an arg advance flush is going to do it
8656 for us. */
8657 cum->floats_in_gpr++;
8659 else
8661 /* The float is at the end of a word, preceded
8662 by integer fields, so the arg advance flush
8663 just above has already set cum->words and
8664 everything is taken care of. */
8667 else
8668 cum->words += n_fpregs;
8670 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
8672 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
8673 cum->vregno++;
8674 cum->words += 2;
8676 else if (cum->intoffset == -1)
8677 cum->intoffset = bitpos;
8681 /* Check for an item that needs to be considered specially under the
8682 Darwin 64-bit ABI. These are record types where the mode is BLKmode or
8683 the structure is 8 bytes in size. */
8684 static int
8685 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
8687 return rs6000_darwin64_abi
8688 && ((mode == BLKmode
8689 && TREE_CODE (type) == RECORD_TYPE
8690 && int_size_in_bytes (type) > 0)
8691 || (type && TREE_CODE (type) == RECORD_TYPE
8692 && int_size_in_bytes (type) == 8)) ? 1 : 0;
8695 /* Update the data in CUM to advance over an argument
8696 of mode MODE and data type TYPE.
8697 (TYPE is null for libcalls where that information may not be available.)
8699 Note that for args passed by reference, function_arg will be called
8700 with MODE and TYPE set to that of the pointer to the arg, not the arg
8701 itself. */
8703 static void
8704 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8705 const_tree type, bool named, int depth)
8707 /* Only tick off an argument if we're not recursing. */
8708 if (depth == 0)
8709 cum->nargs_prototype--;
8711 #ifdef HAVE_AS_GNU_ATTRIBUTE
8712 if (DEFAULT_ABI == ABI_V4
8713 && cum->escapes)
8715 if (SCALAR_FLOAT_MODE_P (mode))
8716 rs6000_passes_float = true;
8717 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
8718 rs6000_passes_vector = true;
8719 else if (SPE_VECTOR_MODE (mode)
8720 && !cum->stdarg
8721 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8722 rs6000_passes_vector = true;
8724 #endif
8726 if (TARGET_ALTIVEC_ABI
8727 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8728 || (type && TREE_CODE (type) == VECTOR_TYPE
8729 && int_size_in_bytes (type) == 16)))
8731 bool stack = false;
8733 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8735 cum->vregno++;
8736 if (!TARGET_ALTIVEC)
8737 error ("cannot pass argument in vector register because"
8738 " altivec instructions are disabled, use -maltivec"
8739 " to enable them");
8741 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
8742 even if it is going to be passed in a vector register.
8743 Darwin does the same for variable-argument functions. */
8744 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
8745 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
8746 stack = true;
8748 else
8749 stack = true;
8751 if (stack)
8753 int align;
8755 /* Vector parameters must be 16-byte aligned. This places
8756 them at 2 mod 4 in terms of words in 32-bit mode, since
8757 the parameter save area starts at offset 24 from the
8758 stack. In 64-bit mode, they just have to start on an
8759 even word, since the parameter save area is 16-byte
8760 aligned. Space for GPRs is reserved even if the argument
8761 will be passed in memory. */
8762 if (TARGET_32BIT)
8763 align = (2 - cum->words) & 3;
8764 else
8765 align = cum->words & 1;
8766 cum->words += align + rs6000_arg_size (mode, type);
8768 if (TARGET_DEBUG_ARG)
8770 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
8771 cum->words, align);
8772 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
8773 cum->nargs_prototype, cum->prototype,
8774 GET_MODE_NAME (mode));
8778 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
8779 && !cum->stdarg
8780 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8781 cum->sysv_gregno++;
8783 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8785 int size = int_size_in_bytes (type);
8786 /* Variable sized types have size == -1 and are
8787 treated as if consisting entirely of ints.
8788 Pad to 16 byte boundary if needed. */
8789 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8790 && (cum->words % 2) != 0)
8791 cum->words++;
8792 /* For varargs, we can just go up by the size of the struct. */
8793 if (!named)
8794 cum->words += (size + 7) / 8;
8795 else
8797 /* It is tempting to say int register count just goes up by
8798 sizeof(type)/8, but this is wrong in a case such as
8799 { int; double; int; } [powerpc alignment]. We have to
8800 grovel through the fields for these too. */
8801 cum->intoffset = 0;
8802 cum->floats_in_gpr = 0;
8803 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
8804 rs6000_darwin64_record_arg_advance_flush (cum,
8805 size * BITS_PER_UNIT, 1);
8807 if (TARGET_DEBUG_ARG)
8809 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
8810 cum->words, TYPE_ALIGN (type), size);
8811 fprintf (stderr,
8812 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
8813 cum->nargs_prototype, cum->prototype,
8814 GET_MODE_NAME (mode));
8817 else if (DEFAULT_ABI == ABI_V4)
8819 if (TARGET_HARD_FLOAT && TARGET_FPRS
8820 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8821 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8822 || (mode == TFmode && !TARGET_IEEEQUAD)
8823 || mode == SDmode || mode == DDmode || mode == TDmode))
8825 /* _Decimal128 must use an even/odd register pair. This assumes
8826 that the register number is odd when fregno is odd. */
8827 if (mode == TDmode && (cum->fregno % 2) == 1)
8828 cum->fregno++;
8830 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8831 <= FP_ARG_V4_MAX_REG)
8832 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8833 else
8835 cum->fregno = FP_ARG_V4_MAX_REG + 1;
8836 if (mode == DFmode || mode == TFmode
8837 || mode == DDmode || mode == TDmode)
8838 cum->words += cum->words & 1;
8839 cum->words += rs6000_arg_size (mode, type);
8842 else
8844 int n_words = rs6000_arg_size (mode, type);
8845 int gregno = cum->sysv_gregno;
8847 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8848 (r7,r8) or (r9,r10). As does any other 2 word item such
8849 as complex int due to a historical mistake. */
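/* ((1 - GREGNO) & 1) is 1 exactly when GREGNO is even, so the
   adjustment below rounds an even GREGNO up to the next odd register
   number.  */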
8850 if (n_words == 2)
8851 gregno += (1 - gregno) & 1;
8853 /* Multi-reg args are not split between registers and stack. */
8854 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8856 /* Long long and SPE vectors are aligned on the stack.
8857 So are other 2 word items such as complex int due to
8858 a historical mistake. */
8859 if (n_words == 2)
8860 cum->words += cum->words & 1;
8861 cum->words += n_words;
8864 /* Note: we keep accumulating gregno even after we have started
8865 spilling to the stack; this tells expand_builtin_saveregs that
8866 spilling has begun. */
8867 cum->sysv_gregno = gregno + n_words;
8870 if (TARGET_DEBUG_ARG)
8872 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8873 cum->words, cum->fregno);
8874 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
8875 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
8876 fprintf (stderr, "mode = %4s, named = %d\n",
8877 GET_MODE_NAME (mode), named);
8880 else
8882 int n_words = rs6000_arg_size (mode, type);
8883 int start_words = cum->words;
8884 int align_words = rs6000_parm_start (mode, type, start_words);
8886 cum->words = align_words + n_words;
8888 if (SCALAR_FLOAT_MODE_P (mode)
8889 && TARGET_HARD_FLOAT && TARGET_FPRS)
8891 /* _Decimal128 must be passed in an even/odd float register pair.
8892 This assumes that the register number is odd when fregno is
8893 odd. */
8894 if (mode == TDmode && (cum->fregno % 2) == 1)
8895 cum->fregno++;
8896 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8899 if (TARGET_DEBUG_ARG)
8901 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8902 cum->words, cum->fregno);
8903 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8904 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8905 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8906 named, align_words - start_words, depth);
8911 static void
8912 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8913 const_tree type, bool named)
8915 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8919 static rtx
8920 spe_build_register_parallel (enum machine_mode mode, int gregno)
8922 rtx r1, r3, r5, r7;
8924 switch (mode)
8926 case DFmode:
8927 r1 = gen_rtx_REG (DImode, gregno);
8928 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8929 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8931 case DCmode:
8932 case TFmode:
8933 r1 = gen_rtx_REG (DImode, gregno);
8934 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8935 r3 = gen_rtx_REG (DImode, gregno + 2);
8936 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8937 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8939 case TCmode:
8940 r1 = gen_rtx_REG (DImode, gregno);
8941 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8942 r3 = gen_rtx_REG (DImode, gregno + 2);
8943 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8944 r5 = gen_rtx_REG (DImode, gregno + 4);
8945 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8946 r7 = gen_rtx_REG (DImode, gregno + 6);
8947 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8948 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8950 default:
8951 gcc_unreachable ();
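/* Example: for DCmode starting at gregno 5 the function above builds,
   roughly,

     (parallel [(expr_list (reg:DI 5) (const_int 0))
                (expr_list (reg:DI 7) (const_int 8))])

   i.e. bytes 0-7 of the value live in the DImode register starting at
   r5 and bytes 8-15 in the one starting at r7.  */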
8955 /* Determine where to put a SIMD argument on the SPE. */
8956 static rtx
8957 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8958 const_tree type)
8960 int gregno = cum->sysv_gregno;
8962 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8963 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8964 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8965 || mode == DCmode || mode == TCmode))
8967 int n_words = rs6000_arg_size (mode, type);
8969 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8970 if (mode == DFmode)
8971 gregno += (1 - gregno) & 1;
8973 /* Multi-reg args are not split between registers and stack. */
8974 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8975 return NULL_RTX;
8977 return spe_build_register_parallel (mode, gregno);
8979 if (cum->stdarg)
8981 int n_words = rs6000_arg_size (mode, type);
8983 /* SPE vectors are put in odd registers. */
8984 if (n_words == 2 && (gregno & 1) == 0)
8985 gregno += 1;
8987 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8989 rtx r1, r2;
8990 enum machine_mode m = SImode;
8992 r1 = gen_rtx_REG (m, gregno);
8993 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8994 r2 = gen_rtx_REG (m, gregno + 1);
8995 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8996 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8998 else
8999 return NULL_RTX;
9001 else
9003 if (gregno <= GP_ARG_MAX_REG)
9004 return gen_rtx_REG (mode, gregno);
9005 else
9006 return NULL_RTX;
9010 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9011 structure between cum->intoffset and bitpos to integer registers. */
9013 static void
9014 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
9015 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
9017 enum machine_mode mode;
9018 unsigned int regno;
9019 unsigned int startbit, endbit;
9020 int this_regno, intregs, intoffset;
9021 rtx reg;
9023 if (cum->intoffset == -1)
9024 return;
9026 intoffset = cum->intoffset;
9027 cum->intoffset = -1;
9029 /* If this is the trailing part of a word, try to only load that
9030 much into the register. Otherwise load the whole register. Note
9031 that in the latter case we may pick up unwanted bits. It's not a
9032 problem at the moment, but we may wish to revisit this. */
9034 if (intoffset % BITS_PER_WORD != 0)
9036 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9037 MODE_INT, 0);
9038 if (mode == BLKmode)
9040 /* We couldn't find an appropriate mode, which happens,
9041 e.g., in packed structs when there are 3 bytes to load.
9042 Move intoffset back to the beginning of the word in this
9043 case. */
9044 intoffset = intoffset & -BITS_PER_WORD;
9045 mode = word_mode;
9048 else
9049 mode = word_mode;
9051 startbit = intoffset & -BITS_PER_WORD;
9052 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9053 intregs = (endbit - startbit) / BITS_PER_WORD;
9054 this_regno = cum->words + intoffset / BITS_PER_WORD;
9056 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
9057 cum->use_stack = 1;
9059 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
9060 if (intregs <= 0)
9061 return;
9063 intoffset /= BITS_PER_UNIT;
9066 regno = GP_ARG_MIN_REG + this_regno;
9067 reg = gen_rtx_REG (mode, regno);
9068 rvec[(*k)++] =
9069 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
9071 this_regno += 1;
9072 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
9073 mode = word_mode;
9074 intregs -= 1;
9076 while (intregs > 0);
9079 /* Recursive workhorse for the following. */
9081 static void
9082 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
9083 HOST_WIDE_INT startbitpos, rtx rvec[],
9084 int *k)
9086 tree f;
9088 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9089 if (TREE_CODE (f) == FIELD_DECL)
9091 HOST_WIDE_INT bitpos = startbitpos;
9092 tree ftype = TREE_TYPE (f);
9093 enum machine_mode mode;
9094 if (ftype == error_mark_node)
9095 continue;
9096 mode = TYPE_MODE (ftype);
9098 if (DECL_SIZE (f) != 0
9099 && host_integerp (bit_position (f), 1))
9100 bitpos += int_bit_position (f);
9102 /* ??? FIXME: else assume zero offset. */
9104 if (TREE_CODE (ftype) == RECORD_TYPE)
9105 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
9106 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
9108 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9109 #if 0
9110 switch (mode)
9112 case SCmode: mode = SFmode; break;
9113 case DCmode: mode = DFmode; break;
9114 case TCmode: mode = TFmode; break;
9115 default: break;
9117 #endif
9118 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9119 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9121 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9122 && (mode == TFmode || mode == TDmode));
9123 /* Long double or _Decimal128 split over regs and memory. */
9124 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
9125 cum->use_stack = 1;
9127 rvec[(*k)++]
9128 = gen_rtx_EXPR_LIST (VOIDmode,
9129 gen_rtx_REG (mode, cum->fregno++),
9130 GEN_INT (bitpos / BITS_PER_UNIT));
9131 if (mode == TFmode || mode == TDmode)
9132 cum->fregno++;
9134 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
9136 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9137 rvec[(*k)++]
9138 = gen_rtx_EXPR_LIST (VOIDmode,
9139 gen_rtx_REG (mode, cum->vregno++),
9140 GEN_INT (bitpos / BITS_PER_UNIT));
9142 else if (cum->intoffset == -1)
9143 cum->intoffset = bitpos;
9147 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9148 the register(s) to be used for each field and subfield of a struct
9149 being passed by value, along with the offset of where the
9150 register's value may be found in the block. FP fields go in FP
9151 registers, vector fields go in vector registers, and everything
9152 else goes in int registers, packed as in memory.
9154 This code is also used for function return values. RETVAL indicates
9155 whether this is the case.
9157 Much of this is taken from the SPARC V9 port, which has a similar
9158 calling convention. */
9160 static rtx
9161 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
9162 bool named, bool retval)
9164 rtx rvec[FIRST_PSEUDO_REGISTER];
9165 int k = 1, kbase = 1;
9166 HOST_WIDE_INT typesize = int_size_in_bytes (type);
9167 /* This is a copy; modifications are not visible to our caller. */
9168 CUMULATIVE_ARGS copy_cum = *orig_cum;
9169 CUMULATIVE_ARGS *cum = &copy_cum;
9171 /* Pad to 16-byte boundary if needed. */
9172 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9173 && (cum->words % 2) != 0)
9174 cum->words++;
9176 cum->intoffset = 0;
9177 cum->use_stack = 0;
9178 cum->named = named;
9180 /* Put entries into rvec[] for individual FP and vector fields, and
9181 for the chunks of memory that go in int regs. Note we start at
9182 element 1; 0 is reserved for an indication of using memory, and
9183 may or may not be filled in below. */
9184 rs6000_darwin64_record_arg_recurse (cum, type, /* startbitpos = */ 0, rvec, &k);
9185 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
9187 /* If any part of the struct went on the stack put all of it there.
9188 This hack is because the generic code for
9189 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
9190 parts of the struct are not at the beginning. */
9191 if (cum->use_stack)
9193 if (retval)
9194 return NULL_RTX; /* doesn't go in registers at all */
9195 kbase = 0;
9196 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9198 if (k > 1 || cum->use_stack)
9199 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
9200 else
9201 return NULL_RTX;
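/* As a hedged illustration (register numbers are indicative only, not
   from the original source): for a struct { double d; int i; } passed
   by value under darwin64, the PARALLEL built above would look roughly
   like

     (parallel:BLK [(expr_list (reg:DF 33) (const_int 0))
                    (expr_list (reg:DI 4) (const_int 8))])

   i.e. the double in the first FPR at byte offset 0 and the trailing
   integer word in a GPR at byte offset 8.  */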
9204 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
9206 static rtx
9207 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
9208 int align_words)
9210 int n_units;
9211 int i, k;
9212 rtx rvec[GP_ARG_NUM_REG + 1];
9214 if (align_words >= GP_ARG_NUM_REG)
9215 return NULL_RTX;
9217 n_units = rs6000_arg_size (mode, type);
9219 /* Optimize the simple case where the arg fits in one gpr, except in
9220 the case of BLKmode due to assign_parms assuming that registers are
9221 BITS_PER_WORD wide. */
9222 if (n_units == 0
9223 || (n_units == 1 && mode != BLKmode))
9224 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9226 k = 0;
9227 if (align_words + n_units > GP_ARG_NUM_REG)
9228 /* Not all of the arg fits in gprs. Say that it goes in memory too,
9229 using a magic NULL_RTX component.
9230 This is not strictly correct. Only some of the arg belongs in
9231 memory, not all of it. However, the normal scheme using
9232 function_arg_partial_nregs can result in unusual subregs, eg.
9233 (subreg:SI (reg:DF) 4), which are not handled well. The code to
9234 store the whole arg to memory is often more efficient than code
9235 to store pieces, and we know that space is available in the right
9236 place for the whole arg. */
9237 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9239 i = 0;
9242 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
9243 rtx off = GEN_INT (i++ * 4);
9244 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9246 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
9248 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
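/* A hedged example of the case above (assumed values, not from the
   original source): for -m32 on a 64-bit processor, a DImode argument
   with align_words == 7 spills past the 8 argument GPRs, giving roughly

     (parallel:DI [(expr_list (nil) (const_int 0))         -- memory marker
                   (expr_list (reg:SI 10) (const_int 0))])

   where the NULL_RTX first element records that the whole argument is
   also stored to memory.  */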
9251 /* Determine where to put an argument to a function.
9252 Value is zero to push the argument on the stack,
9253 or a hard register in which to store the argument.
9255 MODE is the argument's machine mode.
9256 TYPE is the data type of the argument (as a tree).
9257 This is null for libcalls where that information may
9258 not be available.
9259 CUM is a variable of type CUMULATIVE_ARGS which gives info about
9260 the preceding args and about the function being called. It is
9261 not modified in this routine.
9262 NAMED is nonzero if this argument is a named parameter
9263 (otherwise it is an extra parameter matching an ellipsis).
9265 On RS/6000 the first eight words of non-FP are normally in registers
9266 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
9267 Under V.4, the first 8 FP args are in registers.
9269 If this is floating-point and no prototype is specified, we use
9270 both an FP and integer register (or possibly FP reg and stack). Library
9271 functions (when CALL_LIBCALL is set) always have the proper types for args,
9272 so we can pass the FP value just in one register. emit_library_call
9273 doesn't support PARALLEL anyway.
9275 Note that for args passed by reference, function_arg will be called
9276 with MODE and TYPE set to that of the pointer to the arg, not the arg
9277 itself. */
9279 static rtx
9280 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9281 const_tree type, bool named)
9283 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9284 enum rs6000_abi abi = DEFAULT_ABI;
9286 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
9287 to say fp args were passed in registers needs to be set or cleared.
9288 Assume that we don't need the marker for software floating point,
9289 or compiler generated library calls. */
9290 if (mode == VOIDmode)
9292 if (abi == ABI_V4
9293 && (cum->call_cookie & CALL_LIBCALL) == 0
9294 && (cum->stdarg
9295 || (cum->nargs_prototype < 0
9296 && (cum->prototype || TARGET_NO_PROTOTYPE))))
9298 /* For the SPE, we need to crxor CR6 always. */
9299 if (TARGET_SPE_ABI)
9300 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
9301 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
9302 return GEN_INT (cum->call_cookie
9303 | ((cum->fregno == FP_ARG_MIN_REG)
9304 ? CALL_V4_SET_FP_ARGS
9305 : CALL_V4_CLEAR_FP_ARGS));
9308 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
9311 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9313 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
9314 if (rslt != NULL_RTX)
9315 return rslt;
9316 /* Else fall through to usual handling. */
9319 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
9320 if (TARGET_64BIT && ! cum->prototype)
9322 /* Vector parameters get passed in a vector register
9323 and also in GPRs or memory, in the absence of a prototype. */
9324 int align_words;
9325 rtx slot;
9326 align_words = (cum->words + 1) & ~1;
9328 if (align_words >= GP_ARG_NUM_REG)
9330 slot = NULL_RTX;
9332 else
9334 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9336 return gen_rtx_PARALLEL (mode,
9337 gen_rtvec (2,
9338 gen_rtx_EXPR_LIST (VOIDmode,
9339 slot, const0_rtx),
9340 gen_rtx_EXPR_LIST (VOIDmode,
9341 gen_rtx_REG (mode, cum->vregno),
9342 const0_rtx)));
9344 else
9345 return gen_rtx_REG (mode, cum->vregno);
9346 else if (TARGET_ALTIVEC_ABI
9347 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
9348 || (type && TREE_CODE (type) == VECTOR_TYPE
9349 && int_size_in_bytes (type) == 16)))
9351 if (named || abi == ABI_V4)
9352 return NULL_RTX;
9353 else
9355 /* Vector parameters to varargs functions under AIX or Darwin
9356 get passed in memory and possibly also in GPRs. */
9357 int align, align_words, n_words;
9358 enum machine_mode part_mode;
9360 /* Vector parameters must be 16-byte aligned. This places them at
9361 2 mod 4 in terms of words in 32-bit mode, since the parameter
9362 save area starts at offset 24 from the stack. In 64-bit mode,
9363 they just have to start on an even word, since the parameter
9364 save area is 16-byte aligned. */
9365 if (TARGET_32BIT)
9366 align = (2 - cum->words) & 3;
9367 else
9368 align = cum->words & 1;
9369 align_words = cum->words + align;
9371 /* Out of registers? Memory, then. */
9372 if (align_words >= GP_ARG_NUM_REG)
9373 return NULL_RTX;
9375 if (TARGET_32BIT && TARGET_POWERPC64)
9376 return rs6000_mixed_function_arg (mode, type, align_words);
9378 /* The vector value goes in GPRs. Only the part of the
9379 value in GPRs is reported here. */
9380 part_mode = mode;
9381 n_words = rs6000_arg_size (mode, type);
9382 if (align_words + n_words > GP_ARG_NUM_REG)
9383 /* Fortunately, there are only two possibilities: the value
9384 is either wholly in GPRs or half in GPRs and half not. */
9385 part_mode = DImode;
9387 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
9390 else if (TARGET_SPE_ABI && TARGET_SPE
9391 && (SPE_VECTOR_MODE (mode)
9392 || (TARGET_E500_DOUBLE && (mode == DFmode
9393 || mode == DCmode
9394 || mode == TFmode
9395 || mode == TCmode))))
9396 return rs6000_spe_function_arg (cum, mode, type);
9398 else if (abi == ABI_V4)
9400 if (TARGET_HARD_FLOAT && TARGET_FPRS
9401 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9402 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9403 || (mode == TFmode && !TARGET_IEEEQUAD)
9404 || mode == SDmode || mode == DDmode || mode == TDmode))
9406 /* _Decimal128 must use an even/odd register pair. This assumes
9407 that the register number is odd when fregno is odd. */
9408 if (mode == TDmode && (cum->fregno % 2) == 1)
9409 cum->fregno++;
9411 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9412 <= FP_ARG_V4_MAX_REG)
9413 return gen_rtx_REG (mode, cum->fregno);
9414 else
9415 return NULL_RTX;
9417 else
9419 int n_words = rs6000_arg_size (mode, type);
9420 int gregno = cum->sysv_gregno;
9422 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9423 (r7,r8) or (r9,r10), as is any other 2-word item such
9424 as complex int, due to a historical mistake. */
9425 if (n_words == 2)
9426 gregno += (1 - gregno) & 1;
9428 /* Multi-reg args are not split between registers and stack. */
9429 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9430 return NULL_RTX;
9432 if (TARGET_32BIT && TARGET_POWERPC64)
9433 return rs6000_mixed_function_arg (mode, type,
9434 gregno - GP_ARG_MIN_REG);
9435 return gen_rtx_REG (mode, gregno);
9438 else
9440 int align_words = rs6000_parm_start (mode, type, cum->words);
9442 /* _Decimal128 must be passed in an even/odd float register pair.
9443 This assumes that the register number is odd when fregno is odd. */
9444 if (mode == TDmode && (cum->fregno % 2) == 1)
9445 cum->fregno++;
9447 if (USE_FP_FOR_ARG_P (cum, mode, type))
9449 rtx rvec[GP_ARG_NUM_REG + 1];
9450 rtx r;
9451 int k;
9452 bool needs_psave;
9453 enum machine_mode fmode = mode;
9454 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9456 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9458 /* Currently, we only ever need one reg here because complex
9459 doubles are split. */
9460 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9461 && (fmode == TFmode || fmode == TDmode));
9463 /* Long double or _Decimal128 split over regs and memory. */
9464 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
9467 /* Do we also need to pass this arg in the parameter save
9468 area? */
9469 needs_psave = (type
9470 && (cum->nargs_prototype <= 0
9471 || (DEFAULT_ABI == ABI_AIX
9472 && TARGET_XL_COMPAT
9473 && align_words >= GP_ARG_NUM_REG)));
9475 if (!needs_psave && mode == fmode)
9476 return gen_rtx_REG (fmode, cum->fregno);
9478 k = 0;
9479 if (needs_psave)
9481 /* Describe the part that goes in gprs or the stack.
9482 This piece must come first, before the fprs. */
9483 if (align_words < GP_ARG_NUM_REG)
9485 unsigned long n_words = rs6000_arg_size (mode, type);
9487 if (align_words + n_words > GP_ARG_NUM_REG
9488 || (TARGET_32BIT && TARGET_POWERPC64))
9490 /* If this is partially on the stack, then we only
9491 include the portion actually in registers here. */
9492 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
9493 rtx off;
9494 int i = 0;
9495 if (align_words + n_words > GP_ARG_NUM_REG)
9496 /* Not all of the arg fits in gprs. Say that it
9497 goes in memory too, using a magic NULL_RTX
9498 component. Also see comment in
9499 rs6000_mixed_function_arg for why the normal
9500 function_arg_partial_nregs scheme doesn't work
9501 in this case. */
9502 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
9503 const0_rtx);
9506 r = gen_rtx_REG (rmode,
9507 GP_ARG_MIN_REG + align_words);
9508 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
9509 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9511 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
9513 else
9515 /* The whole arg fits in gprs. */
9516 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9517 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9520 else
9521 /* It's entirely in memory. */
9522 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9525 /* Describe where this piece goes in the fprs. */
9526 r = gen_rtx_REG (fmode, cum->fregno);
9527 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9529 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
9531 else if (align_words < GP_ARG_NUM_REG)
9533 if (TARGET_32BIT && TARGET_POWERPC64)
9534 return rs6000_mixed_function_arg (mode, type, align_words);
9536 if (mode == BLKmode)
9537 mode = Pmode;
9539 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9541 else
9542 return NULL_RTX;
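/* A hedged worked example (assumed call site, not from the original
   source): under AIX, the first double argument of an unprototyped
   call with two words of arguments already placed (align_words == 2)
   takes the USE_FP_FOR_ARG_P path with needs_psave set, producing
   approximately

     (parallel:DF [(expr_list (reg:DF 5) (const_int 0))    -- GPR copy
                   (expr_list (reg:DF 33) (const_int 0))]) -- FPR copy

   so the callee can find the value whether or not it expected it in
   an FPR.  */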
9546 /* For an arg passed partly in registers and partly in memory, this is
9547 the number of bytes passed in registers. For args passed entirely in
9548 registers or entirely in memory, zero. When an arg is described by a
9549 PARALLEL, perhaps using more than one register type, this function
9550 returns the number of bytes used by the first element of the PARALLEL. */
9552 static int
9553 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9554 tree type, bool named)
9556 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9557 int ret = 0;
9558 int align_words;
9560 if (DEFAULT_ABI == ABI_V4)
9561 return 0;
9563 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
9564 && cum->nargs_prototype >= 0)
9565 return 0;
9567 /* In this complicated case we just disable the partial_nregs code. */
9568 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9569 return 0;
9571 align_words = rs6000_parm_start (mode, type, cum->words);
9573 if (USE_FP_FOR_ARG_P (cum, mode, type))
9575 /* If we are passing this arg in the fixed parameter save area
9576 (gprs or memory) as well as fprs, then this function should
9577 return the number of partial bytes passed in the parameter
9578 save area rather than partial bytes passed in fprs. */
9579 if (type
9580 && (cum->nargs_prototype <= 0
9581 || (DEFAULT_ABI == ABI_AIX
9582 && TARGET_XL_COMPAT
9583 && align_words >= GP_ARG_NUM_REG)))
9584 return 0;
9585 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
9586 > FP_ARG_MAX_REG + 1)
9587 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
9588 else if (cum->nargs_prototype >= 0)
9589 return 0;
9592 if (align_words < GP_ARG_NUM_REG
9593 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
9594 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
9596 if (ret != 0 && TARGET_DEBUG_ARG)
9597 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
9599 return ret;
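/* A hedged arithmetic example (assumed sizes, not from the original
   source): on a 64-bit target, a 32-byte aggregate starting at
   align_words == 6 needs 4 words but only 2 GPRs remain, so the code
   above returns (8 - 6) * 8 == 16 -- the bytes passed in r9/r10 --
   and the remaining 16 bytes go to the stack.  */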
9602 /* A C expression that indicates when an argument must be passed by
9603 reference. If nonzero for an argument, a copy of that argument is
9604 made in memory and a pointer to the argument is passed instead of
9605 the argument itself. The pointer is passed in whatever way is
9606 appropriate for passing a pointer to that type.
9608 Under V.4, aggregates and long double are passed by reference.
9610 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
9611 reference unless the AltiVec vector extension ABI is in force.
9613 As an extension to all ABIs, variable sized types are passed by
9614 reference. */
9616 static bool
9617 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
9618 enum machine_mode mode, const_tree type,
9619 bool named ATTRIBUTE_UNUSED)
9621 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
9623 if (TARGET_DEBUG_ARG)
9624 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
9625 return 1;
9628 if (!type)
9629 return 0;
9631 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
9633 if (TARGET_DEBUG_ARG)
9634 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
9635 return 1;
9638 if (int_size_in_bytes (type) < 0)
9640 if (TARGET_DEBUG_ARG)
9641 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
9642 return 1;
9645 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9646 modes only exist for GCC vector types if -maltivec. */
9647 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
9649 if (TARGET_DEBUG_ARG)
9650 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
9651 return 1;
9654 /* Pass synthetic vectors in memory. */
9655 if (TREE_CODE (type) == VECTOR_TYPE
9656 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
9658 static bool warned_for_pass_big_vectors = false;
9659 if (TARGET_DEBUG_ARG)
9660 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
9661 if (!warned_for_pass_big_vectors)
9663 warning (0, "GCC vector passed by reference: "
9664 "non-standard ABI extension with no compatibility guarantee");
9665 warned_for_pass_big_vectors = true;
9667 return 1;
9670 return 0;
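/* A hedged example of the V.4 aggregate rule above (hypothetical type,
   not from the original source):

     struct big { char buf[64]; };
     void f (struct big b);    -- the caller copies b to memory and
                                  passes its address in a GPR

   which is the AGGREGATE_TYPE_P case returning 1.  */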
9673 static void
9674 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
9676 int i;
9677 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
9679 if (nregs == 0)
9680 return;
9682 for (i = 0; i < nregs; i++)
9684 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
9685 if (reload_completed)
9687 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
9688 tem = NULL_RTX;
9689 else
9690 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
9691 i * GET_MODE_SIZE (reg_mode));
9693 else
9694 tem = replace_equiv_address (tem, XEXP (tem, 0));
9696 gcc_assert (tem);
9698 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
9702 /* Perform any actions needed for a function that is receiving a
9703 variable number of arguments.
9705 CUM is as above.
9707 MODE and TYPE are the mode and type of the current parameter.
9709 PRETEND_SIZE is a variable that should be set to the amount of stack
9710 that must be pushed by the prolog to pretend that our caller pushed it.
9713 Normally, this macro will push all remaining incoming registers on the
9714 stack and set PRETEND_SIZE to the length of the registers pushed. */
9716 static void
9717 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
9718 tree type, int *pretend_size ATTRIBUTE_UNUSED,
9719 int no_rtl)
9721 CUMULATIVE_ARGS next_cum;
9722 int reg_size = TARGET_32BIT ? 4 : 8;
9723 rtx save_area = NULL_RTX, mem;
9724 int first_reg_offset;
9725 alias_set_type set;
9727 /* Skip the last named argument. */
9728 next_cum = *get_cumulative_args (cum);
9729 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
9731 if (DEFAULT_ABI == ABI_V4)
9733 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
9735 if (! no_rtl)
9737 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
9738 HOST_WIDE_INT offset = 0;
9740 /* Try to optimize the size of the varargs save area.
9741 The ABI requires that ap.reg_save_area is doubleword
9742 aligned, but we don't need to allocate space for all
9743 the bytes, only for those into which we will actually
9744 save anything. */
9745 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
9746 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
9747 if (TARGET_HARD_FLOAT && TARGET_FPRS
9748 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9749 && cfun->va_list_fpr_size)
9751 if (gpr_reg_num)
9752 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
9753 * UNITS_PER_FP_WORD;
9754 if (cfun->va_list_fpr_size
9755 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9756 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
9757 else
9758 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9759 * UNITS_PER_FP_WORD;
9761 if (gpr_reg_num)
9763 offset = -((first_reg_offset * reg_size) & ~7);
9764 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
9766 gpr_reg_num = cfun->va_list_gpr_size;
9767 if (reg_size == 4 && (first_reg_offset & 1))
9768 gpr_reg_num++;
9770 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
9772 else if (fpr_size)
9773 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
9774 * UNITS_PER_FP_WORD
9775 - (int) (GP_ARG_NUM_REG * reg_size);
9777 if (gpr_size + fpr_size)
9779 rtx reg_save_area
9780 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
9781 gcc_assert (GET_CODE (reg_save_area) == MEM);
9782 reg_save_area = XEXP (reg_save_area, 0);
9783 if (GET_CODE (reg_save_area) == PLUS)
9785 gcc_assert (XEXP (reg_save_area, 0)
9786 == virtual_stack_vars_rtx);
9787 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
9788 offset += INTVAL (XEXP (reg_save_area, 1));
9790 else
9791 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
9794 cfun->machine->varargs_save_offset = offset;
9795 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
9798 else
9800 first_reg_offset = next_cum.words;
9801 save_area = virtual_incoming_args_rtx;
9803 if (targetm.calls.must_pass_in_stack (mode, type))
9804 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
9807 set = get_varargs_alias_set ();
9808 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
9809 && cfun->va_list_gpr_size)
9811 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
9813 if (va_list_gpr_counter_field)
9814 /* V4 va_list_gpr_size counts number of registers needed. */
9815 n_gpr = cfun->va_list_gpr_size;
9816 else
9817 /* char * va_list instead counts number of bytes needed. */
9818 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
9820 if (nregs > n_gpr)
9821 nregs = n_gpr;
9823 mem = gen_rtx_MEM (BLKmode,
9824 plus_constant (Pmode, save_area,
9825 first_reg_offset * reg_size));
9826 MEM_NOTRAP_P (mem) = 1;
9827 set_mem_alias_set (mem, set);
9828 set_mem_align (mem, BITS_PER_WORD);
9830 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
9831 nregs);
9834 /* Save FP registers if needed. */
9835 if (DEFAULT_ABI == ABI_V4
9836 && TARGET_HARD_FLOAT && TARGET_FPRS
9837 && ! no_rtl
9838 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9839 && cfun->va_list_fpr_size)
9841 int fregno = next_cum.fregno, nregs;
9842 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
9843 rtx lab = gen_label_rtx ();
9844 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
9845 * UNITS_PER_FP_WORD);
9847 emit_jump_insn
9848 (gen_rtx_SET (VOIDmode,
9849 pc_rtx,
9850 gen_rtx_IF_THEN_ELSE (VOIDmode,
9851 gen_rtx_NE (VOIDmode, cr1,
9852 const0_rtx),
9853 gen_rtx_LABEL_REF (VOIDmode, lab),
9854 pc_rtx)));
9856 for (nregs = 0;
9857 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
9858 fregno++, off += UNITS_PER_FP_WORD, nregs++)
9860 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9861 ? DFmode : SFmode,
9862 plus_constant (Pmode, save_area, off));
9863 MEM_NOTRAP_P (mem) = 1;
9864 set_mem_alias_set (mem, set);
9865 set_mem_align (mem, GET_MODE_ALIGNMENT (
9866 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9867 ? DFmode : SFmode));
9868 emit_move_insn (mem, gen_rtx_REG (
9869 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9870 ? DFmode : SFmode, fregno));
9873 emit_label (lab);
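/* A hedged sketch of the resulting V.4 register save area (sizes assume
   32-bit reg_size == 4; not from the original source):

     offset  0 : r3..r10, 8 GPRs * 4 bytes == 32 bytes
     offset 32 : f1..f8,  8 FPRs * 8 bytes == 64 bytes

   with the FPR stores guarded at run time by the branch over 'lab'
   when the condition register says no FP args were passed.  */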
9877 /* Create the va_list data type. */
9879 static tree
9880 rs6000_build_builtin_va_list (void)
9882 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9884 /* For AIX, prefer 'char *' because that's what the system
9885 header files like. */
9886 if (DEFAULT_ABI != ABI_V4)
9887 return build_pointer_type (char_type_node);
9889 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9890 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9891 get_identifier ("__va_list_tag"), record);
9893 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9894 unsigned_char_type_node);
9895 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9896 unsigned_char_type_node);
9897 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9898 every user file. */
9899 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9900 get_identifier ("reserved"), short_unsigned_type_node);
9901 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9902 get_identifier ("overflow_arg_area"),
9903 ptr_type_node);
9904 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9905 get_identifier ("reg_save_area"),
9906 ptr_type_node);
9908 va_list_gpr_counter_field = f_gpr;
9909 va_list_fpr_counter_field = f_fpr;
9911 DECL_FIELD_CONTEXT (f_gpr) = record;
9912 DECL_FIELD_CONTEXT (f_fpr) = record;
9913 DECL_FIELD_CONTEXT (f_res) = record;
9914 DECL_FIELD_CONTEXT (f_ovf) = record;
9915 DECL_FIELD_CONTEXT (f_sav) = record;
9917 TYPE_STUB_DECL (record) = type_decl;
9918 TYPE_NAME (record) = type_decl;
9919 TYPE_FIELDS (record) = f_gpr;
9920 DECL_CHAIN (f_gpr) = f_fpr;
9921 DECL_CHAIN (f_fpr) = f_res;
9922 DECL_CHAIN (f_res) = f_ovf;
9923 DECL_CHAIN (f_ovf) = f_sav;
9925 layout_type (record);
9927 /* The correct type is an array type of one element. */
9928 return build_array_type (record, build_index_type (size_zero_node));
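/* A hedged view of the record built above, written as the roughly
   equivalent C declaration (field semantics inferred from this file,
   not quoted from a header):

     typedef struct __va_list_tag {
       unsigned char gpr;           -- GPR arg registers consumed
       unsigned char fpr;           -- FPR arg registers consumed
       unsigned short reserved;     -- named padding for -Wpadded
       void *overflow_arg_area;     -- arguments passed on the stack
       void *reg_save_area;         -- block spilled by the prologue
     } __gnuc_va_list[1];
*/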
9931 /* Implement va_start. */
9933 static void
9934 rs6000_va_start (tree valist, rtx nextarg)
9936 HOST_WIDE_INT words, n_gpr, n_fpr;
9937 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9938 tree gpr, fpr, ovf, sav, t;
9940 /* Only SVR4 needs something special. */
9941 if (DEFAULT_ABI != ABI_V4)
9943 std_expand_builtin_va_start (valist, nextarg);
9944 return;
9947 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9948 f_fpr = DECL_CHAIN (f_gpr);
9949 f_res = DECL_CHAIN (f_fpr);
9950 f_ovf = DECL_CHAIN (f_res);
9951 f_sav = DECL_CHAIN (f_ovf);
9953 valist = build_simple_mem_ref (valist);
9954 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9955 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9956 f_fpr, NULL_TREE);
9957 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9958 f_ovf, NULL_TREE);
9959 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9960 f_sav, NULL_TREE);
9962 /* Count number of gp and fp argument registers used. */
9963 words = crtl->args.info.words;
9964 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9965 GP_ARG_NUM_REG);
9966 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9967 FP_ARG_NUM_REG);
9969 if (TARGET_DEBUG_ARG)
9970 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9971 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9972 words, n_gpr, n_fpr);
9974 if (cfun->va_list_gpr_size)
9976 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9977 build_int_cst (NULL_TREE, n_gpr));
9978 TREE_SIDE_EFFECTS (t) = 1;
9979 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9982 if (cfun->va_list_fpr_size)
9984 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9985 build_int_cst (NULL_TREE, n_fpr));
9986 TREE_SIDE_EFFECTS (t) = 1;
9987 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9989 #ifdef HAVE_AS_GNU_ATTRIBUTE
9990 if (call_ABI_of_interest (cfun->decl))
9991 rs6000_passes_float = true;
9992 #endif
9995 /* Find the overflow area. */
9996 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9997 if (words != 0)
9998 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9999 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
10000 TREE_SIDE_EFFECTS (t) = 1;
10001 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10003 /* If there were no va_arg invocations, don't set up the register
10004 save area. */
10005 if (!cfun->va_list_gpr_size
10006 && !cfun->va_list_fpr_size
10007 && n_gpr < GP_ARG_NUM_REG
10008 && n_fpr < FP_ARG_V4_MAX_REG)
10009 return;
10011 /* Find the register save area. */
10012 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
10013 if (cfun->machine->varargs_save_offset)
10014 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
10015 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
10016 TREE_SIDE_EFFECTS (t) = 1;
10017 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
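/* Net effect, as a hedged pseudo-C sketch (the 'ap' field names follow
   the record above; the assignments are a paraphrase, not a quote):

     ap->gpr = min (gp regs used by named args, 8);
     ap->fpr = min (fp regs used by named args, 8);
     ap->overflow_arg_area = incoming_args + words * UNITS_PER_WORD;
     ap->reg_save_area = frame_base + cfun->machine->varargs_save_offset;
*/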
10020 /* Implement va_arg. */
10022 static tree
10023 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
10024 gimple_seq *post_p)
10026 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
10027 tree gpr, fpr, ovf, sav, reg, t, u;
10028 int size, rsize, n_reg, sav_ofs, sav_scale;
10029 tree lab_false, lab_over, addr;
10030 int align;
10031 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
10032 int regalign = 0;
10033 gimple stmt;
10035 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
10037 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
10038 return build_va_arg_indirect_ref (t);
10041 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
10042 earlier version of gcc, with the property that it always applied alignment
10043 adjustments to the va-args (even for zero-sized types). The cheapest way
10044 to deal with this is to replicate the effect of the part of
10045 std_gimplify_va_arg_expr that carries out the align adjust, for the case
10046 of relevance.
10047 We don't need to check for pass-by-reference because of the test above.
10048 We can return a simplified answer, since we know there's no offset to add. */
10050 if (((TARGET_MACHO
10051 && rs6000_darwin64_abi)
10052 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
10053 && integer_zerop (TYPE_SIZE (type)))
10055 unsigned HOST_WIDE_INT align, boundary;
10056 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
10057 align = PARM_BOUNDARY / BITS_PER_UNIT;
10058 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
10059 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
10060 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
10061 boundary /= BITS_PER_UNIT;
10062 if (boundary > align)
10064 tree t;
10065 /* This updates arg ptr by the amount that would be necessary
10066 to align the zero-sized (but not zero-alignment) item. */
10067 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
10068 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
10069 gimplify_and_add (t, pre_p);
10071 t = fold_convert (sizetype, valist_tmp);
10072 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
10073 fold_convert (TREE_TYPE (valist),
10074 fold_build2 (BIT_AND_EXPR, sizetype, t,
10075 size_int (-boundary))));
10076 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
10077 gimplify_and_add (t, pre_p);
10079 /* Since it is zero-sized there's no increment for the item itself. */
10080 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
10081 return build_va_arg_indirect_ref (valist_tmp);
10084 if (DEFAULT_ABI != ABI_V4)
10086 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
10088 tree elem_type = TREE_TYPE (type);
10089 enum machine_mode elem_mode = TYPE_MODE (elem_type);
10090 int elem_size = GET_MODE_SIZE (elem_mode);
10092 if (elem_size < UNITS_PER_WORD)
10094 tree real_part, imag_part;
10095 gimple_seq post = NULL;
10097 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
10098 &post);
10099 /* Copy the value into a temporary, lest the formal temporary
10100 be reused out from under us. */
10101 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
10102 gimple_seq_add_seq (pre_p, post);
10104 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
10105 post_p);
10107 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
10111 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
10114 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
10115 f_fpr = DECL_CHAIN (f_gpr);
10116 f_res = DECL_CHAIN (f_fpr);
10117 f_ovf = DECL_CHAIN (f_res);
10118 f_sav = DECL_CHAIN (f_ovf);
10120 valist = build_va_arg_indirect_ref (valist);
10121 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
10122 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
10123 f_fpr, NULL_TREE);
10124 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
10125 f_ovf, NULL_TREE);
10126 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
10127 f_sav, NULL_TREE);
10129 size = int_size_in_bytes (type);
10130 rsize = (size + 3) / 4;
10131 align = 1;
10133 if (TARGET_HARD_FLOAT && TARGET_FPRS
10134 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
10135 || (TARGET_DOUBLE_FLOAT
10136 && (TYPE_MODE (type) == DFmode
10137 || TYPE_MODE (type) == TFmode
10138 || TYPE_MODE (type) == SDmode
10139 || TYPE_MODE (type) == DDmode
10140 || TYPE_MODE (type) == TDmode))))
10142 /* FP args go in FP registers, if present. */
10143 reg = fpr;
10144 n_reg = (size + 7) / 8;
10145 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
10146 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
10147 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
10148 align = 8;
10150 else
10152 /* Otherwise into GP registers. */
10153 reg = gpr;
10154 n_reg = rsize;
10155 sav_ofs = 0;
10156 sav_scale = 4;
10157 if (n_reg == 2)
10158 align = 8;
10161 /* Pull the value out of the saved registers.... */
10163 lab_over = NULL;
10164 addr = create_tmp_var (ptr_type_node, "addr");
10166 /* AltiVec vectors never go in registers when -mabi=altivec. */
10167 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10168 align = 16;
10169 else
10171 lab_false = create_artificial_label (input_location);
10172 lab_over = create_artificial_label (input_location);
10174 /* Long long and SPE vectors are aligned in the registers.
10175 As is any other 2-gpr item, such as complex int, due to a
10176 historical mistake. */
10177 u = reg;
10178 if (n_reg == 2 && reg == gpr)
10180 regalign = 1;
10181 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
10182 build_int_cst (TREE_TYPE (reg), n_reg - 1));
10183 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
10184 unshare_expr (reg), u);
10186 /* _Decimal128 is passed in even/odd fpr pairs; the stored
10187 reg number is 0 for f1, so we want to make it odd. */
10188 else if (reg == fpr && TYPE_MODE (type) == TDmode)
10190 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
10191 build_int_cst (TREE_TYPE (reg), 1));
10192 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
10195 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
10196 t = build2 (GE_EXPR, boolean_type_node, u, t);
10197 u = build1 (GOTO_EXPR, void_type_node, lab_false);
10198 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
10199 gimplify_and_add (t, pre_p);
10201 t = sav;
10202 if (sav_ofs)
10203 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
10205 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
10206 build_int_cst (TREE_TYPE (reg), n_reg));
10207 u = fold_convert (sizetype, u);
10208 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
10209 t = fold_build_pointer_plus (t, u);
10211 /* _Decimal32 varargs are located in the second word of the 64-bit
10212 FP register for 32-bit binaries. */
10213 if (!TARGET_POWERPC64
10214 && TARGET_HARD_FLOAT && TARGET_FPRS
10215 && TYPE_MODE (type) == SDmode)
10216 t = fold_build_pointer_plus_hwi (t, size);
10218 gimplify_assign (addr, t, pre_p);
10220 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
10222 stmt = gimple_build_label (lab_false);
10223 gimple_seq_add_stmt (pre_p, stmt);
10225 if ((n_reg == 2 && !regalign) || n_reg > 2)
10227 /* Ensure that we don't find any more args in regs.
10228 Alignment has taken care of the special cases. */
10229 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
10233 /* ... otherwise out of the overflow area. */
10235 /* Care for on-stack alignment if needed. */
10236 t = ovf;
10237 if (align != 1)
10239 t = fold_build_pointer_plus_hwi (t, align - 1);
10240 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
10241 build_int_cst (TREE_TYPE (t), -align));
10243 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
10245 gimplify_assign (unshare_expr (addr), t, pre_p);
10247 t = fold_build_pointer_plus_hwi (t, size);
10248 gimplify_assign (unshare_expr (ovf), t, pre_p);
10250 if (lab_over)
10252 stmt = gimple_build_label (lab_over);
10253 gimple_seq_add_stmt (pre_p, stmt);
10256 if (STRICT_ALIGNMENT
10257 && (TYPE_ALIGN (type)
10258 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
10260 /* The value (of type complex double, for example) may not be
10261 aligned in memory in the saved registers, so copy via a
10262 temporary. (This is the same code as used for SPARC.) */
10263 tree tmp = create_tmp_var (type, "va_arg_tmp");
10264 tree dest_addr = build_fold_addr_expr (tmp);
10266 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
10267 3, dest_addr, addr, size_int (rsize * 4));
10269 gimplify_and_add (copy, pre_p);
10270 addr = dest_addr;
10273 addr = fold_convert (ptrtype, addr);
10274 return build_va_arg_indirect_ref (addr);
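/* A hedged pseudo-C rendition of the V.4 expansion above for a plain
   int argument (size 4, n_reg 1, align 1; variable names assumed):

     if (ap->gpr >= 8) goto lab_false;
     addr = ap->reg_save_area + ap->gpr++ * 4;
     goto lab_over;
   lab_false:
     addr = ap->overflow_arg_area;
     ap->overflow_arg_area = addr + 4;
   lab_over:
     result = *(int *) addr;
*/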
10277 /* Builtins. */
10279 static void
10280 def_builtin (const char *name, tree type, enum rs6000_builtins code)
10282 tree t;
10283 unsigned classify = rs6000_builtin_info[(int)code].attr;
10284 const char *attr_string = "";
10286 gcc_assert (name != NULL);
10287 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
10289 if (rs6000_builtin_decls[(int)code])
10290 fatal_error ("internal error: builtin function %s already processed", name);
10292 rs6000_builtin_decls[(int)code] = t =
10293 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
10295 /* Set any special attributes. */
10296 if ((classify & RS6000_BTC_CONST) != 0)
10298 /* const function: the result depends only on the inputs. */
10299 TREE_READONLY (t) = 1;
10300 TREE_NOTHROW (t) = 1;
10301 attr_string = ", const";
10303 else if ((classify & RS6000_BTC_PURE) != 0)
10305 /* pure function: can read global memory, but does not set any
10306 external state. */
10307 DECL_PURE_P (t) = 1;
10308 TREE_NOTHROW (t) = 1;
10309 attr_string = ", pure";
10311 else if ((classify & RS6000_BTC_FP) != 0)
10313 /* Function is a math function. If rounding mode is on, then treat the
10314 function as not reading global memory, but it can have arbitrary side
10315 effects. If it is off, then assume the function is a const function.
10316 This mimics the ATTR_MATHFN_FPROUNDING attribute in
10317 builtin-attrs.def that is used for the math functions. */
10318 TREE_NOTHROW (t) = 1;
10319 if (flag_rounding_math)
10321 DECL_PURE_P (t) = 1;
10322 DECL_IS_NOVOPS (t) = 1;
10323 attr_string = ", fp, pure";
10325 else
10327 TREE_READONLY (t) = 1;
10328 attr_string = ", fp, const";
10331 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
10332 gcc_unreachable ();
10334 if (TARGET_DEBUG_BUILTIN)
10335 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
10336 (int)code, name, attr_string);
10339 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
10341 #undef RS6000_BUILTIN_1
10342 #undef RS6000_BUILTIN_2
10343 #undef RS6000_BUILTIN_3
10344 #undef RS6000_BUILTIN_A
10345 #undef RS6000_BUILTIN_D
10346 #undef RS6000_BUILTIN_E
10347 #undef RS6000_BUILTIN_H
10348 #undef RS6000_BUILTIN_P
10349 #undef RS6000_BUILTIN_Q
10350 #undef RS6000_BUILTIN_S
10351 #undef RS6000_BUILTIN_X
10353 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10354 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10355 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
10356 { MASK, ICODE, NAME, ENUM },
10358 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10359 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10360 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10361 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10362 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10363 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10364 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10365 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10367 static const struct builtin_description bdesc_3arg[] =
10369 #include "rs6000-builtin.def"
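/* A hedged restatement of the X-macro idiom used for every table in
   this section (miniature, not a verbatim excerpt): all eleven
   RS6000_BUILTIN_* macros are first #undef'd, then exactly one is
   redefined to emit a table row, e.g.

     #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
       { MASK, ICODE, NAME, ENUM },

   with the other ten defined to nothing, so re-including
   rs6000-builtin.def materializes only the matching subset of rows.  */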
10372 /* DST operations: void foo (void *, const int, const char). */
10374 #undef RS6000_BUILTIN_1
10375 #undef RS6000_BUILTIN_2
10376 #undef RS6000_BUILTIN_3
10377 #undef RS6000_BUILTIN_A
10378 #undef RS6000_BUILTIN_D
10379 #undef RS6000_BUILTIN_E
10380 #undef RS6000_BUILTIN_H
10381 #undef RS6000_BUILTIN_P
10382 #undef RS6000_BUILTIN_Q
10383 #undef RS6000_BUILTIN_S
10384 #undef RS6000_BUILTIN_X
10386 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10387 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10388 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10389 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10390 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
10391 { MASK, ICODE, NAME, ENUM },
10393 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10394 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10395 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10396 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10397 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10398 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10400 static const struct builtin_description bdesc_dst[] =
10402 #include "rs6000-builtin.def"
10405 /* Simple binary operations: VECc = foo (VECa, VECb). */
10407 #undef RS6000_BUILTIN_1
10408 #undef RS6000_BUILTIN_2
10409 #undef RS6000_BUILTIN_3
10410 #undef RS6000_BUILTIN_A
10411 #undef RS6000_BUILTIN_D
10412 #undef RS6000_BUILTIN_E
10413 #undef RS6000_BUILTIN_H
10414 #undef RS6000_BUILTIN_P
10415 #undef RS6000_BUILTIN_Q
10416 #undef RS6000_BUILTIN_S
10417 #undef RS6000_BUILTIN_X
10419 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10420 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
10421 { MASK, ICODE, NAME, ENUM },
10423 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10424 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10425 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10426 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10427 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10428 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10429 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10430 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10431 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10433 static const struct builtin_description bdesc_2arg[] =
10435 #include "rs6000-builtin.def"
10438 #undef RS6000_BUILTIN_1
10439 #undef RS6000_BUILTIN_2
10440 #undef RS6000_BUILTIN_3
10441 #undef RS6000_BUILTIN_A
10442 #undef RS6000_BUILTIN_D
10443 #undef RS6000_BUILTIN_E
10444 #undef RS6000_BUILTIN_H
10445 #undef RS6000_BUILTIN_P
10446 #undef RS6000_BUILTIN_Q
10447 #undef RS6000_BUILTIN_S
10448 #undef RS6000_BUILTIN_X
10450 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10451 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10452 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10453 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10454 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10455 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10456 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10457 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
10458 { MASK, ICODE, NAME, ENUM },
10460 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10461 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10462 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10464 /* AltiVec predicates. */
10466 static const struct builtin_description bdesc_altivec_preds[] =
10468 #include "rs6000-builtin.def"
10471 /* SPE predicates. */
10472 #undef RS6000_BUILTIN_1
10473 #undef RS6000_BUILTIN_2
10474 #undef RS6000_BUILTIN_3
10475 #undef RS6000_BUILTIN_A
10476 #undef RS6000_BUILTIN_D
10477 #undef RS6000_BUILTIN_E
10478 #undef RS6000_BUILTIN_H
10479 #undef RS6000_BUILTIN_P
10480 #undef RS6000_BUILTIN_Q
10481 #undef RS6000_BUILTIN_S
10482 #undef RS6000_BUILTIN_X
10484 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10485 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10486 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10487 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10488 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10489 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10490 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10491 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10492 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10493 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
10494 { MASK, ICODE, NAME, ENUM },
10496 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10498 static const struct builtin_description bdesc_spe_predicates[] =
10500 #include "rs6000-builtin.def"
10503 /* SPE evsel predicates. */
10504 #undef RS6000_BUILTIN_1
10505 #undef RS6000_BUILTIN_2
10506 #undef RS6000_BUILTIN_3
10507 #undef RS6000_BUILTIN_A
10508 #undef RS6000_BUILTIN_D
10509 #undef RS6000_BUILTIN_E
10510 #undef RS6000_BUILTIN_H
10511 #undef RS6000_BUILTIN_P
10512 #undef RS6000_BUILTIN_Q
10513 #undef RS6000_BUILTIN_S
10514 #undef RS6000_BUILTIN_X
10516 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10517 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10518 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10519 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10520 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10521 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
10522 { MASK, ICODE, NAME, ENUM },
10524 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10525 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10526 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10527 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10528 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10530 static const struct builtin_description bdesc_spe_evsel[] =
10532 #include "rs6000-builtin.def"
10535 /* PAIRED predicates. */
10536 #undef RS6000_BUILTIN_1
10537 #undef RS6000_BUILTIN_2
10538 #undef RS6000_BUILTIN_3
10539 #undef RS6000_BUILTIN_A
10540 #undef RS6000_BUILTIN_D
10541 #undef RS6000_BUILTIN_E
10542 #undef RS6000_BUILTIN_H
10543 #undef RS6000_BUILTIN_P
10544 #undef RS6000_BUILTIN_Q
10545 #undef RS6000_BUILTIN_S
10546 #undef RS6000_BUILTIN_X
10548 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10549 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10550 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10551 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10552 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10553 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10554 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10555 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10556 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
10557 { MASK, ICODE, NAME, ENUM },
10559 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10560 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10562 static const struct builtin_description bdesc_paired_preds[] =
10564 #include "rs6000-builtin.def"
10567 /* ABS* operations. */
10569 #undef RS6000_BUILTIN_1
10570 #undef RS6000_BUILTIN_2
10571 #undef RS6000_BUILTIN_3
10572 #undef RS6000_BUILTIN_A
10573 #undef RS6000_BUILTIN_D
10574 #undef RS6000_BUILTIN_E
10575 #undef RS6000_BUILTIN_H
10576 #undef RS6000_BUILTIN_P
10577 #undef RS6000_BUILTIN_Q
10578 #undef RS6000_BUILTIN_S
10579 #undef RS6000_BUILTIN_X
10581 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10582 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10583 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10584 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
10585 { MASK, ICODE, NAME, ENUM },
10587 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10588 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10589 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10590 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10591 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10592 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10593 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10595 static const struct builtin_description bdesc_abs[] =
10597 #include "rs6000-builtin.def"
10600 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
10601 foo (VECa). */
10603 #undef RS6000_BUILTIN_1
10604 #undef RS6000_BUILTIN_2
10605 #undef RS6000_BUILTIN_3
10606 #undef RS6000_BUILTIN_A
10607 #undef RS6000_BUILTIN_D
10608 #undef RS6000_BUILTIN_E
10609 #undef RS6000_BUILTIN_H
10610 #undef RS6000_BUILTIN_P
10611 #undef RS6000_BUILTIN_Q
10612 #undef RS6000_BUILTIN_S
10613 #undef RS6000_BUILTIN_X
10615 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
10616 { MASK, ICODE, NAME, ENUM },
10618 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10619 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10620 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10621 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10622 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10623 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
10624 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10625 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10626 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10627 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10629 static const struct builtin_description bdesc_1arg[] =
10631 #include "rs6000-builtin.def"
10634 /* HTM builtins. */
10635 #undef RS6000_BUILTIN_1
10636 #undef RS6000_BUILTIN_2
10637 #undef RS6000_BUILTIN_3
10638 #undef RS6000_BUILTIN_A
10639 #undef RS6000_BUILTIN_D
10640 #undef RS6000_BUILTIN_E
10641 #undef RS6000_BUILTIN_H
10642 #undef RS6000_BUILTIN_P
10643 #undef RS6000_BUILTIN_Q
10644 #undef RS6000_BUILTIN_S
10645 #undef RS6000_BUILTIN_X
10647 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10648 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10649 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10650 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10651 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10652 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10653 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
10654 { MASK, ICODE, NAME, ENUM },
10656 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10657 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10658 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10659 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10661 static const struct builtin_description bdesc_htm[] =
10663 #include "rs6000-builtin.def"
10666 #undef RS6000_BUILTIN_1
10667 #undef RS6000_BUILTIN_2
10668 #undef RS6000_BUILTIN_3
10669 #undef RS6000_BUILTIN_A
10670 #undef RS6000_BUILTIN_D
10671 #undef RS6000_BUILTIN_E
10672 #undef RS6000_BUILTIN_H
10673 #undef RS6000_BUILTIN_P
10674 #undef RS6000_BUILTIN_Q
10675 #undef RS6000_BUILTIN_S
10677 /* Return true if a builtin function is overloaded. */
10678 bool
10679 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
10681 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
10684 /* Expand an expression EXP that calls a builtin without arguments. */
10685 static rtx
10686 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
10688 rtx pat;
10689 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10691 if (icode == CODE_FOR_nothing)
10692 /* Builtin not supported on this processor. */
10693 return 0;
10695 if (target == 0
10696 || GET_MODE (target) != tmode
10697 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10698 target = gen_reg_rtx (tmode);
10700 pat = GEN_FCN (icode) (target);
10701 if (! pat)
10702 return 0;
10703 emit_insn (pat);
10705 return target;
10709 static rtx
10710 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
10712 rtx pat;
10713 tree arg0 = CALL_EXPR_ARG (exp, 0);
10714 rtx op0 = expand_normal (arg0);
10715 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10716 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10718 if (icode == CODE_FOR_nothing)
10719 /* Builtin not supported on this processor. */
10720 return 0;
10722 /* If we got invalid arguments, bail out before generating bad rtl. */
10723 if (arg0 == error_mark_node)
10724 return const0_rtx;
10726 if (icode == CODE_FOR_altivec_vspltisb
10727 || icode == CODE_FOR_altivec_vspltish
10728 || icode == CODE_FOR_altivec_vspltisw
10729 || icode == CODE_FOR_spe_evsplatfi
10730 || icode == CODE_FOR_spe_evsplati)
10732 /* Only allow 5-bit *signed* literals. */
10733 if (GET_CODE (op0) != CONST_INT
10734 || INTVAL (op0) > 15
10735 || INTVAL (op0) < -16)
10737 error ("argument 1 must be a 5-bit signed literal");
10738 return const0_rtx;
10742 if (target == 0
10743 || GET_MODE (target) != tmode
10744 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10745 target = gen_reg_rtx (tmode);
10747 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10748 op0 = copy_to_mode_reg (mode0, op0);
10750 pat = GEN_FCN (icode) (target, op0);
10751 if (! pat)
10752 return 0;
10753 emit_insn (pat);
10755 return target;
10758 static rtx
10759 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
10761 rtx pat, scratch1, scratch2;
10762 tree arg0 = CALL_EXPR_ARG (exp, 0);
10763 rtx op0 = expand_normal (arg0);
10764 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10765 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10767 /* If we have invalid arguments, bail out before generating bad rtl. */
10768 if (arg0 == error_mark_node)
10769 return const0_rtx;
10771 if (target == 0
10772 || GET_MODE (target) != tmode
10773 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10774 target = gen_reg_rtx (tmode);
10776 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10777 op0 = copy_to_mode_reg (mode0, op0);
10779 scratch1 = gen_reg_rtx (mode0);
10780 scratch2 = gen_reg_rtx (mode0);
10782 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
10783 if (! pat)
10784 return 0;
10785 emit_insn (pat);
10787 return target;
10790 static rtx
10791 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
10793 rtx pat;
10794 tree arg0 = CALL_EXPR_ARG (exp, 0);
10795 tree arg1 = CALL_EXPR_ARG (exp, 1);
10796 rtx op0 = expand_normal (arg0);
10797 rtx op1 = expand_normal (arg1);
10798 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10799 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10800 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10802 if (icode == CODE_FOR_nothing)
10803 /* Builtin not supported on this processor. */
10804 return 0;
10806 /* If we got invalid arguments, bail out before generating bad rtl. */
10807 if (arg0 == error_mark_node || arg1 == error_mark_node)
10808 return const0_rtx;
10810 if (icode == CODE_FOR_altivec_vcfux
10811 || icode == CODE_FOR_altivec_vcfsx
10812 || icode == CODE_FOR_altivec_vctsxs
10813 || icode == CODE_FOR_altivec_vctuxs
10814 || icode == CODE_FOR_altivec_vspltb
10815 || icode == CODE_FOR_altivec_vsplth
10816 || icode == CODE_FOR_altivec_vspltw
10817 || icode == CODE_FOR_spe_evaddiw
10818 || icode == CODE_FOR_spe_evldd
10819 || icode == CODE_FOR_spe_evldh
10820 || icode == CODE_FOR_spe_evldw
10821 || icode == CODE_FOR_spe_evlhhesplat
10822 || icode == CODE_FOR_spe_evlhhossplat
10823 || icode == CODE_FOR_spe_evlhhousplat
10824 || icode == CODE_FOR_spe_evlwhe
10825 || icode == CODE_FOR_spe_evlwhos
10826 || icode == CODE_FOR_spe_evlwhou
10827 || icode == CODE_FOR_spe_evlwhsplat
10828 || icode == CODE_FOR_spe_evlwwsplat
10829 || icode == CODE_FOR_spe_evrlwi
10830 || icode == CODE_FOR_spe_evslwi
10831 || icode == CODE_FOR_spe_evsrwis
10832 || icode == CODE_FOR_spe_evsubifw
10833 || icode == CODE_FOR_spe_evsrwiu)
10835 /* Only allow 5-bit unsigned literals. */
10836 STRIP_NOPS (arg1);
10837 if (TREE_CODE (arg1) != INTEGER_CST
10838 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10840 error ("argument 2 must be a 5-bit unsigned literal");
10841 return const0_rtx;
10845 if (target == 0
10846 || GET_MODE (target) != tmode
10847 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10848 target = gen_reg_rtx (tmode);
10850 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10851 op0 = copy_to_mode_reg (mode0, op0);
10852 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10853 op1 = copy_to_mode_reg (mode1, op1);
10855 pat = GEN_FCN (icode) (target, op0, op1);
10856 if (! pat)
10857 return 0;
10858 emit_insn (pat);
10860 return target;
10863 static rtx
10864 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10866 rtx pat, scratch;
10867 tree cr6_form = CALL_EXPR_ARG (exp, 0);
10868 tree arg0 = CALL_EXPR_ARG (exp, 1);
10869 tree arg1 = CALL_EXPR_ARG (exp, 2);
10870 rtx op0 = expand_normal (arg0);
10871 rtx op1 = expand_normal (arg1);
10872 enum machine_mode tmode = SImode;
10873 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10874 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10875 int cr6_form_int;
10877 if (TREE_CODE (cr6_form) != INTEGER_CST)
10879 error ("argument 1 of __builtin_altivec_predicate must be a constant");
10880 return const0_rtx;
10882 else
10883 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
10885 gcc_assert (mode0 == mode1);
10887 /* If we have invalid arguments, bail out before generating bad rtl. */
10888 if (arg0 == error_mark_node || arg1 == error_mark_node)
10889 return const0_rtx;
10891 if (target == 0
10892 || GET_MODE (target) != tmode
10893 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10894 target = gen_reg_rtx (tmode);
10896 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10897 op0 = copy_to_mode_reg (mode0, op0);
10898 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10899 op1 = copy_to_mode_reg (mode1, op1);
10901 scratch = gen_reg_rtx (mode0);
10903 pat = GEN_FCN (icode) (scratch, op0, op1);
10904 if (! pat)
10905 return 0;
10906 emit_insn (pat);
10908 /* The vec_any* and vec_all* predicates use the same opcodes for two
10909 different operations, but the bits in CR6 will be different
10910 depending on what information we want. So we have to play tricks
10911 with CR6 to get the right bits out.
10913 If you think this is disgusting, look at the specs for the
10914 AltiVec predicates. */
10916 switch (cr6_form_int)
10918 case 0:
10919 emit_insn (gen_cr6_test_for_zero (target));
10920 break;
10921 case 1:
10922 emit_insn (gen_cr6_test_for_zero_reverse (target));
10923 break;
10924 case 2:
10925 emit_insn (gen_cr6_test_for_lt (target));
10926 break;
10927 case 3:
10928 emit_insn (gen_cr6_test_for_lt_reverse (target));
10929 break;
10930 default:
10931 error ("argument 1 of __builtin_altivec_predicate is out of range");
10932 break;
10935 return target;
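/* For illustration: altivec.h builds both the vec_all_* and vec_any_*
   interfaces on top of a single compare builtin, passing a leading
   integer constant that arrives here as CR6_FORM.  A hedged sketch of
   the shape (exact macro bodies vary by GCC version):

     vec_all_eq (a, b)  ==>  __builtin_altivec_vcmpequw_p (<form>, a, b)

   where <form> is 0..3 and selects one of the four CR6 tests emitted
   above.  */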
10938 static rtx
10939 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10941 rtx pat, addr;
10942 tree arg0 = CALL_EXPR_ARG (exp, 0);
10943 tree arg1 = CALL_EXPR_ARG (exp, 1);
10944 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10945 enum machine_mode mode0 = Pmode;
10946 enum machine_mode mode1 = Pmode;
10947 rtx op0 = expand_normal (arg0);
10948 rtx op1 = expand_normal (arg1);
10950 if (icode == CODE_FOR_nothing)
10951 /* Builtin not supported on this processor. */
10952 return 0;
10954 /* If we got invalid arguments, bail out before generating bad rtl. */
10955 if (arg0 == error_mark_node || arg1 == error_mark_node)
10956 return const0_rtx;
10958 if (target == 0
10959 || GET_MODE (target) != tmode
10960 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10961 target = gen_reg_rtx (tmode);
10963 op1 = copy_to_mode_reg (mode1, op1);
10965 if (op0 == const0_rtx)
10967 addr = gen_rtx_MEM (tmode, op1);
10969 else
10971 op0 = copy_to_mode_reg (mode0, op0);
10972 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10975 pat = GEN_FCN (icode) (target, addr);
10977 if (! pat)
10978 return 0;
10979 emit_insn (pat);
10981 return target;
10984 static rtx
10985 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10987 rtx pat, addr;
10988 tree arg0 = CALL_EXPR_ARG (exp, 0);
10989 tree arg1 = CALL_EXPR_ARG (exp, 1);
10990 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10991 enum machine_mode mode0 = Pmode;
10992 enum machine_mode mode1 = Pmode;
10993 rtx op0 = expand_normal (arg0);
10994 rtx op1 = expand_normal (arg1);
10996 if (icode == CODE_FOR_nothing)
10997 /* Builtin not supported on this processor. */
10998 return 0;
11000 /* If we got invalid arguments, bail out before generating bad rtl. */
11001 if (arg0 == error_mark_node || arg1 == error_mark_node)
11002 return const0_rtx;
11004 if (target == 0
11005 || GET_MODE (target) != tmode
11006 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11007 target = gen_reg_rtx (tmode);
11009 op1 = copy_to_mode_reg (mode1, op1);
11011 if (op0 == const0_rtx)
11013 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
11015 else
11017 op0 = copy_to_mode_reg (mode0, op0);
11018 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
11021 pat = GEN_FCN (icode) (target, addr);
11023 if (! pat)
11024 return 0;
11025 emit_insn (pat);
11027 return target;
11030 static rtx
11031 spe_expand_stv_builtin (enum insn_code icode, tree exp)
11033 tree arg0 = CALL_EXPR_ARG (exp, 0);
11034 tree arg1 = CALL_EXPR_ARG (exp, 1);
11035 tree arg2 = CALL_EXPR_ARG (exp, 2);
11036 rtx op0 = expand_normal (arg0);
11037 rtx op1 = expand_normal (arg1);
11038 rtx op2 = expand_normal (arg2);
11039 rtx pat;
11040 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
11041 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
11042 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
11044 /* Invalid arguments. Bail out before doing anything stupid! */
11045 if (arg0 == error_mark_node
11046 || arg1 == error_mark_node
11047 || arg2 == error_mark_node)
11048 return const0_rtx;
11050 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
11051 op0 = copy_to_mode_reg (mode2, op0);
11052 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
11053 op1 = copy_to_mode_reg (mode0, op1);
11054 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11055 op2 = copy_to_mode_reg (mode1, op2);
11057 pat = GEN_FCN (icode) (op1, op2, op0);
11058 if (pat)
11059 emit_insn (pat);
11060 return NULL_RTX;
11063 static rtx
11064 paired_expand_stv_builtin (enum insn_code icode, tree exp)
11066 tree arg0 = CALL_EXPR_ARG (exp, 0);
11067 tree arg1 = CALL_EXPR_ARG (exp, 1);
11068 tree arg2 = CALL_EXPR_ARG (exp, 2);
11069 rtx op0 = expand_normal (arg0);
11070 rtx op1 = expand_normal (arg1);
11071 rtx op2 = expand_normal (arg2);
11072 rtx pat, addr;
11073 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11074 enum machine_mode mode1 = Pmode;
11075 enum machine_mode mode2 = Pmode;
11077 /* Invalid arguments. Bail out before doing anything stupid! */
11078 if (arg0 == error_mark_node
11079 || arg1 == error_mark_node
11080 || arg2 == error_mark_node)
11081 return const0_rtx;
11083 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
11084 op0 = copy_to_mode_reg (tmode, op0);
11086 op2 = copy_to_mode_reg (mode2, op2);
11088 if (op1 == const0_rtx)
11090 addr = gen_rtx_MEM (tmode, op2);
11092 else
11094 op1 = copy_to_mode_reg (mode1, op1);
11095 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
11098 pat = GEN_FCN (icode) (addr, op0);
11099 if (pat)
11100 emit_insn (pat);
11101 return NULL_RTX;
11104 static rtx
11105 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
11107 tree arg0 = CALL_EXPR_ARG (exp, 0);
11108 tree arg1 = CALL_EXPR_ARG (exp, 1);
11109 tree arg2 = CALL_EXPR_ARG (exp, 2);
11110 rtx op0 = expand_normal (arg0);
11111 rtx op1 = expand_normal (arg1);
11112 rtx op2 = expand_normal (arg2);
11113 rtx pat, addr;
11114 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11115 enum machine_mode smode = insn_data[icode].operand[1].mode;
11116 enum machine_mode mode1 = Pmode;
11117 enum machine_mode mode2 = Pmode;
11119 /* Invalid arguments. Bail out before doing anything stupid! */
11120 if (arg0 == error_mark_node
11121 || arg1 == error_mark_node
11122 || arg2 == error_mark_node)
11123 return const0_rtx;
11125 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
11126 op0 = copy_to_mode_reg (smode, op0);
11128 op2 = copy_to_mode_reg (mode2, op2);
11130 if (op1 == const0_rtx)
11132 addr = gen_rtx_MEM (tmode, op2);
11134 else
11136 op1 = copy_to_mode_reg (mode1, op1);
11137 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
11140 pat = GEN_FCN (icode) (addr, op0);
11141 if (pat)
11142 emit_insn (pat);
11143 return NULL_RTX;
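/* For illustration: at the source level the two address shapes built
   above correspond to stvx-family calls such as (hypothetical values)

     vec_st (v, 0, p);     offset of zero:   MEM (tmode, p)
     vec_st (v, off, p);   nonzero offset:   MEM (tmode, p + off)

   with the offset and base operands forced into registers first.  */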
11146 /* Return the appropriate SPR number associated with the given builtin. */
11147 static inline HOST_WIDE_INT
11148 htm_spr_num (enum rs6000_builtins code)
11150 if (code == HTM_BUILTIN_GET_TFHAR
11151 || code == HTM_BUILTIN_SET_TFHAR)
11152 return TFHAR_SPR;
11153 else if (code == HTM_BUILTIN_GET_TFIAR
11154 || code == HTM_BUILTIN_SET_TFIAR)
11155 return TFIAR_SPR;
11156 else if (code == HTM_BUILTIN_GET_TEXASR
11157 || code == HTM_BUILTIN_SET_TEXASR)
11158 return TEXASR_SPR;
11159 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
11160 || code == HTM_BUILTIN_SET_TEXASRU);
11161 return TEXASRU_SPR;
11164 /* Return the appropriate SPR regno associated with the given builtin. */
11165 static inline HOST_WIDE_INT
11166 htm_spr_regno (enum rs6000_builtins code)
11168 if (code == HTM_BUILTIN_GET_TFHAR
11169 || code == HTM_BUILTIN_SET_TFHAR)
11170 return TFHAR_REGNO;
11171 else if (code == HTM_BUILTIN_GET_TFIAR
11172 || code == HTM_BUILTIN_SET_TFIAR)
11173 return TFIAR_REGNO;
11174 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
11175 || code == HTM_BUILTIN_SET_TEXASR
11176 || code == HTM_BUILTIN_GET_TEXASRU
11177 || code == HTM_BUILTIN_SET_TEXASRU);
11178 return TEXASR_REGNO;
11181 /* Return the correct ICODE value depending on whether we are
11182 setting or reading the HTM SPRs. */
11183 static inline enum insn_code
11184 rs6000_htm_spr_icode (bool nonvoid)
11186 if (nonvoid)
11187 return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
11188 else
11189 return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
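/* The icode choice above is a 2x2 decision: read vs. write, crossed
   with 64- vs. 32-bit.  A standalone sketch of the same table in plain
   C (hypothetical helper, guarded out of the build):  */
#if 0
#include <stdbool.h>

static const char *
htm_spr_insn_name (bool reads_result, bool is_64bit)
{
  if (reads_result)
    return is_64bit ? "htm_mfspr_di" : "htm_mfspr_si";  /* read SPR  */
  return is_64bit ? "htm_mtspr_di" : "htm_mtspr_si";    /* write SPR */
}
#endif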
11192 /* Expand the HTM builtin in EXP and store the result in TARGET.
11193 Store true in *EXPANDEDP if we found a builtin to expand. */
11194 static rtx
11195 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
11197 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11198 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11199 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11200 const struct builtin_description *d;
11201 size_t i;
11203 *expandedp = false;
11205 /* Expand the HTM builtins. */
11206 d = bdesc_htm;
11207 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
11208 if (d->code == fcode)
11210 rtx op[MAX_HTM_OPERANDS], pat;
11211 int nopnds = 0;
11212 tree arg;
11213 call_expr_arg_iterator iter;
11214 unsigned attr = rs6000_builtin_info[fcode].attr;
11215 enum insn_code icode = d->icode;
11217 if (attr & RS6000_BTC_SPR)
11218 icode = rs6000_htm_spr_icode (nonvoid);
11220 if (nonvoid)
11222 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11223 if (!target
11224 || GET_MODE (target) != tmode
11225 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
11226 target = gen_reg_rtx (tmode);
11227 op[nopnds++] = target;
11230 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11232 const struct insn_operand_data *insn_op;
11234 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
11235 return NULL_RTX;
11237 insn_op = &insn_data[icode].operand[nopnds];
11239 op[nopnds] = expand_normal (arg);
11241 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
11243 if (!strcmp (insn_op->constraint, "n"))
11245 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
11246 if (!CONST_INT_P (op[nopnds]))
11247 error ("argument %d must be an unsigned literal", arg_num);
11248 else
11249 error ("argument %d is an unsigned literal that is "
11250 "out of range", arg_num);
11251 return const0_rtx;
11253 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
11256 nopnds++;
11259 /* Handle the builtins for extended mnemonics. These accept
11260 no arguments, but map to builtins that take arguments. */
11261 switch (fcode)
11263 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
11264 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
11265 op[nopnds++] = GEN_INT (1);
11266 #ifdef ENABLE_CHECKING
11267 attr |= RS6000_BTC_UNARY;
11268 #endif
11269 break;
11270 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
11271 op[nopnds++] = GEN_INT (0);
11272 #ifdef ENABLE_CHECKING
11273 attr |= RS6000_BTC_UNARY;
11274 #endif
11275 break;
11276 default:
11277 break;
11280 /* If this builtin accesses SPRs, then pass in the appropriate
11281 SPR number and SPR regno as the last two operands. */
11282 if (attr & RS6000_BTC_SPR)
11284 op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
11285 op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
11288 #ifdef ENABLE_CHECKING
11289 int expected_nopnds = 0;
11290 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
11291 expected_nopnds = 1;
11292 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
11293 expected_nopnds = 2;
11294 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
11295 expected_nopnds = 3;
11296 if (!(attr & RS6000_BTC_VOID))
11297 expected_nopnds += 1;
11298 if (attr & RS6000_BTC_SPR)
11299 expected_nopnds += 2;
11301 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
11302 #endif
11304 switch (nopnds)
11306 case 1:
11307 pat = GEN_FCN (icode) (op[0]);
11308 break;
11309 case 2:
11310 pat = GEN_FCN (icode) (op[0], op[1]);
11311 break;
11312 case 3:
11313 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11314 break;
11315 case 4:
11316 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11317 break;
11318 default:
11319 gcc_unreachable ();
11321 if (!pat)
11322 return NULL_RTX;
11323 emit_insn (pat);
11325 *expandedp = true;
11326 if (nonvoid)
11327 return target;
11328 return const0_rtx;
11331 return NULL_RTX;
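/* For illustration: the extended-mnemonic handling above lets the
   no-argument convenience builtins reuse the unary patterns, e.g.

     HTM_BUILTIN_TENDALL   expands as tend. with operand 1
     HTM_BUILTIN_TRESUME   expands as tsr.  with operand 1
     HTM_BUILTIN_TSUSPEND  expands as tsr.  with operand 0

   so only the underlying tend./tsr. insns need define_insn patterns.  */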
11334 static rtx
11335 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
11337 rtx pat;
11338 tree arg0 = CALL_EXPR_ARG (exp, 0);
11339 tree arg1 = CALL_EXPR_ARG (exp, 1);
11340 tree arg2 = CALL_EXPR_ARG (exp, 2);
11341 rtx op0 = expand_normal (arg0);
11342 rtx op1 = expand_normal (arg1);
11343 rtx op2 = expand_normal (arg2);
11344 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11345 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11346 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11347 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
11349 if (icode == CODE_FOR_nothing)
11350 /* Builtin not supported on this processor. */
11351 return 0;
11353 /* If we got invalid arguments, bail out before generating bad rtl. */
11354 if (arg0 == error_mark_node
11355 || arg1 == error_mark_node
11356 || arg2 == error_mark_node)
11357 return const0_rtx;
11359 /* Check and prepare the argument depending on the instruction code.
11361 Note that a switch statement instead of this sequence of tests
11362 would be incorrect: many of the CODE_FOR values could be
11363 CODE_FOR_nothing, which would yield multiple case labels with
11364 identical values. We would never reach here at runtime in
11365 that case anyway. */
11366 if (icode == CODE_FOR_altivec_vsldoi_v4sf
11367 || icode == CODE_FOR_altivec_vsldoi_v4si
11368 || icode == CODE_FOR_altivec_vsldoi_v8hi
11369 || icode == CODE_FOR_altivec_vsldoi_v16qi)
11371 /* Only allow 4-bit unsigned literals. */
11372 STRIP_NOPS (arg2);
11373 if (TREE_CODE (arg2) != INTEGER_CST
11374 || TREE_INT_CST_LOW (arg2) & ~0xf)
11376 error ("argument 3 must be a 4-bit unsigned literal");
11377 return const0_rtx;
11380 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
11381 || icode == CODE_FOR_vsx_xxpermdi_v2di
11382 || icode == CODE_FOR_vsx_xxsldwi_v16qi
11383 || icode == CODE_FOR_vsx_xxsldwi_v8hi
11384 || icode == CODE_FOR_vsx_xxsldwi_v4si
11385 || icode == CODE_FOR_vsx_xxsldwi_v4sf
11386 || icode == CODE_FOR_vsx_xxsldwi_v2di
11387 || icode == CODE_FOR_vsx_xxsldwi_v2df)
11389 /* Only allow 2-bit unsigned literals. */
11390 STRIP_NOPS (arg2);
11391 if (TREE_CODE (arg2) != INTEGER_CST
11392 || TREE_INT_CST_LOW (arg2) & ~0x3)
11394 error ("argument 3 must be a 2-bit unsigned literal");
11395 return const0_rtx;
11398 else if (icode == CODE_FOR_vsx_set_v2df
11399 || icode == CODE_FOR_vsx_set_v2di)
11401 /* Only allow 1-bit unsigned literals. */
11402 STRIP_NOPS (arg2);
11403 if (TREE_CODE (arg2) != INTEGER_CST
11404 || TREE_INT_CST_LOW (arg2) & ~0x1)
11406 error ("argument 3 must be a 1-bit unsigned literal");
11407 return const0_rtx;
11410 else if (icode == CODE_FOR_crypto_vshasigmaw
11411 || icode == CODE_FOR_crypto_vshasigmad)
11413 /* Check that the 2nd and 3rd arguments are integer constants in
11414 range, and prepare the arguments. */
11415 STRIP_NOPS (arg1);
11416 if (TREE_CODE (arg1) != INTEGER_CST
11417 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
11419 error ("argument 2 must be 0 or 1");
11420 return const0_rtx;
11423 STRIP_NOPS (arg2);
11424 if (TREE_CODE (arg2) != INTEGER_CST
11425 || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15))
11427 error ("argument 3 must be in the range 0..15");
11428 return const0_rtx;
11432 if (target == 0
11433 || GET_MODE (target) != tmode
11434 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11435 target = gen_reg_rtx (tmode);
11437 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11438 op0 = copy_to_mode_reg (mode0, op0);
11439 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11440 op1 = copy_to_mode_reg (mode1, op1);
11441 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11442 op2 = copy_to_mode_reg (mode2, op2);
11444 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
11445 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
11446 else
11447 pat = GEN_FCN (icode) (target, op0, op1, op2);
11448 if (! pat)
11449 return 0;
11450 emit_insn (pat);
11452 return target;
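/* For illustration: the literal-range checks above surface directly in
   user code, e.g. (hypothetical snippet)

     vector int r = vec_sld (a, b, 3);    4-bit count: accepted
     vector int s = vec_sld (a, b, 16);   rejected: "argument 3 must
                                          be a 4-bit unsigned literal"

   with analogous 2-bit limits for xxpermdi/xxsldwi and a 1-bit limit
   for vsx_set.  */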
11455 /* Expand the lvx builtins. */
11456 static rtx
11457 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
11459 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11460 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
11461 tree arg0;
11462 enum machine_mode tmode, mode0;
11463 rtx pat, op0;
11464 enum insn_code icode;
11466 switch (fcode)
11468 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
11469 icode = CODE_FOR_vector_altivec_load_v16qi;
11470 break;
11471 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
11472 icode = CODE_FOR_vector_altivec_load_v8hi;
11473 break;
11474 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
11475 icode = CODE_FOR_vector_altivec_load_v4si;
11476 break;
11477 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
11478 icode = CODE_FOR_vector_altivec_load_v4sf;
11479 break;
11480 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
11481 icode = CODE_FOR_vector_altivec_load_v2df;
11482 break;
11483 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
11484 icode = CODE_FOR_vector_altivec_load_v2di;
11485 break;
11486 default:
11487 *expandedp = false;
11488 return NULL_RTX;
11491 *expandedp = true;
11493 arg0 = CALL_EXPR_ARG (exp, 0);
11494 op0 = expand_normal (arg0);
11495 tmode = insn_data[icode].operand[0].mode;
11496 mode0 = insn_data[icode].operand[1].mode;
11498 if (target == 0
11499 || GET_MODE (target) != tmode
11500 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11501 target = gen_reg_rtx (tmode);
11503 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11504 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11506 pat = GEN_FCN (icode) (target, op0);
11507 if (! pat)
11508 return 0;
11509 emit_insn (pat);
11510 return target;
11513 /* Expand the stvx builtins. */
11514 static rtx
11515 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
11516 bool *expandedp)
11518 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11519 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
11520 tree arg0, arg1;
11521 enum machine_mode mode0, mode1;
11522 rtx pat, op0, op1;
11523 enum insn_code icode;
11525 switch (fcode)
11527 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
11528 icode = CODE_FOR_vector_altivec_store_v16qi;
11529 break;
11530 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
11531 icode = CODE_FOR_vector_altivec_store_v8hi;
11532 break;
11533 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
11534 icode = CODE_FOR_vector_altivec_store_v4si;
11535 break;
11536 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
11537 icode = CODE_FOR_vector_altivec_store_v4sf;
11538 break;
11539 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
11540 icode = CODE_FOR_vector_altivec_store_v2df;
11541 break;
11542 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
11543 icode = CODE_FOR_vector_altivec_store_v2di;
11544 break;
11545 default:
11546 *expandedp = false;
11547 return NULL_RTX;
11550 arg0 = CALL_EXPR_ARG (exp, 0);
11551 arg1 = CALL_EXPR_ARG (exp, 1);
11552 op0 = expand_normal (arg0);
11553 op1 = expand_normal (arg1);
11554 mode0 = insn_data[icode].operand[0].mode;
11555 mode1 = insn_data[icode].operand[1].mode;
11557 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11558 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11559 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11560 op1 = copy_to_mode_reg (mode1, op1);
11562 pat = GEN_FCN (icode) (op0, op1);
11563 if (pat)
11564 emit_insn (pat);
11566 *expandedp = true;
11567 return NULL_RTX;
11570 /* Expand the dst builtins. */
11571 static rtx
11572 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
11573 bool *expandedp)
11575 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11576 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11577 tree arg0, arg1, arg2;
11578 enum machine_mode mode0, mode1;
11579 rtx pat, op0, op1, op2;
11580 const struct builtin_description *d;
11581 size_t i;
11583 *expandedp = false;
11585 /* Handle DST variants. */
11586 d = bdesc_dst;
11587 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
11588 if (d->code == fcode)
11590 arg0 = CALL_EXPR_ARG (exp, 0);
11591 arg1 = CALL_EXPR_ARG (exp, 1);
11592 arg2 = CALL_EXPR_ARG (exp, 2);
11593 op0 = expand_normal (arg0);
11594 op1 = expand_normal (arg1);
11595 op2 = expand_normal (arg2);
11596 mode0 = insn_data[d->icode].operand[0].mode;
11597 mode1 = insn_data[d->icode].operand[1].mode;
11599 /* Invalid arguments; bail out before generating bad rtl. */
11600 if (arg0 == error_mark_node
11601 || arg1 == error_mark_node
11602 || arg2 == error_mark_node)
11603 return const0_rtx;
11605 *expandedp = true;
11606 STRIP_NOPS (arg2);
11607 if (TREE_CODE (arg2) != INTEGER_CST
11608 || TREE_INT_CST_LOW (arg2) & ~0x3)
11610 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
11611 return const0_rtx;
11614 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
11615 op0 = copy_to_mode_reg (Pmode, op0);
11616 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
11617 op1 = copy_to_mode_reg (mode1, op1);
11619 pat = GEN_FCN (d->icode) (op0, op1, op2);
11620 if (pat != 0)
11621 emit_insn (pat);
11623 return NULL_RTX;
11626 return NULL_RTX;
11629 /* Expand vec_init builtin. */
11630 static rtx
11631 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
11633 enum machine_mode tmode = TYPE_MODE (type);
11634 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
11635 int i, n_elt = GET_MODE_NUNITS (tmode);
11636 rtvec v = rtvec_alloc (n_elt);
11638 gcc_assert (VECTOR_MODE_P (tmode));
11639 gcc_assert (n_elt == call_expr_nargs (exp));
11641 for (i = 0; i < n_elt; ++i)
11643 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
11644 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
11647 if (!target || !register_operand (target, tmode))
11648 target = gen_reg_rtx (tmode);
11650 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
11651 return target;
11654 /* Return the integer constant in ARG. Constrain it to be in the range
11655 of the subparts of VEC_TYPE; issue an error if not. */
11657 static int
11658 get_element_number (tree vec_type, tree arg)
11660 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
11662 if (!host_integerp (arg, 1)
11663 || (elt = tree_low_cst (arg, 1), elt > max))
11665 error ("selector must be an integer constant in the range 0..%wi", max);
11666 return 0;
11669 return elt;
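/* A standalone sketch of the selector check above, in plain C with a
   hypothetical name (for a 4-subpart vector, only 0..3 pass; guarded
   out of the build):  */
#if 0
/* Returns ELT when it indexes one of N_SUBPARTS lanes, else -1.  */
static int
checked_element_number (unsigned long elt, unsigned long n_subparts)
{
  return elt < n_subparts ? (int) elt : -1;
}
#endif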
11672 /* Expand vec_set builtin. */
11673 static rtx
11674 altivec_expand_vec_set_builtin (tree exp)
11676 enum machine_mode tmode, mode1;
11677 tree arg0, arg1, arg2;
11678 int elt;
11679 rtx op0, op1;
11681 arg0 = CALL_EXPR_ARG (exp, 0);
11682 arg1 = CALL_EXPR_ARG (exp, 1);
11683 arg2 = CALL_EXPR_ARG (exp, 2);
11685 tmode = TYPE_MODE (TREE_TYPE (arg0));
11686 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
11687 gcc_assert (VECTOR_MODE_P (tmode));
11689 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
11690 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
11691 elt = get_element_number (TREE_TYPE (arg0), arg2);
11693 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
11694 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
11696 op0 = force_reg (tmode, op0);
11697 op1 = force_reg (mode1, op1);
11699 rs6000_expand_vector_set (op0, op1, elt);
11701 return op0;
11704 /* Expand vec_ext builtin. */
11705 static rtx
11706 altivec_expand_vec_ext_builtin (tree exp, rtx target)
11708 enum machine_mode tmode, mode0;
11709 tree arg0, arg1;
11710 int elt;
11711 rtx op0;
11713 arg0 = CALL_EXPR_ARG (exp, 0);
11714 arg1 = CALL_EXPR_ARG (exp, 1);
11716 op0 = expand_normal (arg0);
11717 elt = get_element_number (TREE_TYPE (arg0), arg1);
11719 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
11720 mode0 = TYPE_MODE (TREE_TYPE (arg0));
11721 gcc_assert (VECTOR_MODE_P (mode0));
11723 op0 = force_reg (mode0, op0);
11725 if (optimize || !target || !register_operand (target, tmode))
11726 target = gen_reg_rtx (tmode);
11728 rs6000_expand_vector_extract (target, op0, elt);
11730 return target;
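/* For illustration: these two expanders sit behind the vec_insert and
   vec_extract interfaces, e.g. (hypothetical snippet)

     vector int v = ...;
     v = vec_insert (42, v, 1);      write lane 1
     int x = vec_extract (v, 1);     read lane 1

   with the lane number validated by get_element_number above.  */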
11733 /* Expand the builtin in EXP and store the result in TARGET. Store
11734 true in *EXPANDEDP if we found a builtin to expand. */
11735 static rtx
11736 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
11738 const struct builtin_description *d;
11739 size_t i;
11740 enum insn_code icode;
11741 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11742 tree arg0;
11743 rtx op0, pat;
11744 enum machine_mode tmode, mode0;
11745 enum rs6000_builtins fcode
11746 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11748 if (rs6000_overloaded_builtin_p (fcode))
11750 *expandedp = true;
11751 error ("unresolved overload for Altivec builtin %qF", fndecl);
11753 /* Given it is invalid, just generate a normal call. */
11754 return expand_call (exp, target, false);
11757 target = altivec_expand_ld_builtin (exp, target, expandedp);
11758 if (*expandedp)
11759 return target;
11761 target = altivec_expand_st_builtin (exp, target, expandedp);
11762 if (*expandedp)
11763 return target;
11765 target = altivec_expand_dst_builtin (exp, target, expandedp);
11766 if (*expandedp)
11767 return target;
11769 *expandedp = true;
11771 switch (fcode)
11773 case ALTIVEC_BUILTIN_STVX:
11774 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
11775 case ALTIVEC_BUILTIN_STVEBX:
11776 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
11777 case ALTIVEC_BUILTIN_STVEHX:
11778 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
11779 case ALTIVEC_BUILTIN_STVEWX:
11780 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
11781 case ALTIVEC_BUILTIN_STVXL:
11782 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
11784 case ALTIVEC_BUILTIN_STVLX:
11785 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
11786 case ALTIVEC_BUILTIN_STVLXL:
11787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
11788 case ALTIVEC_BUILTIN_STVRX:
11789 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
11790 case ALTIVEC_BUILTIN_STVRXL:
11791 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
11793 case VSX_BUILTIN_STXVD2X_V2DF:
11794 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
11795 case VSX_BUILTIN_STXVD2X_V2DI:
11796 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
11797 case VSX_BUILTIN_STXVW4X_V4SF:
11798 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
11799 case VSX_BUILTIN_STXVW4X_V4SI:
11800 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
11801 case VSX_BUILTIN_STXVW4X_V8HI:
11802 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
11803 case VSX_BUILTIN_STXVW4X_V16QI:
11804 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
11806 case ALTIVEC_BUILTIN_MFVSCR:
11807 icode = CODE_FOR_altivec_mfvscr;
11808 tmode = insn_data[icode].operand[0].mode;
11810 if (target == 0
11811 || GET_MODE (target) != tmode
11812 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11813 target = gen_reg_rtx (tmode);
11815 pat = GEN_FCN (icode) (target);
11816 if (! pat)
11817 return 0;
11818 emit_insn (pat);
11819 return target;
11821 case ALTIVEC_BUILTIN_MTVSCR:
11822 icode = CODE_FOR_altivec_mtvscr;
11823 arg0 = CALL_EXPR_ARG (exp, 0);
11824 op0 = expand_normal (arg0);
11825 mode0 = insn_data[icode].operand[0].mode;
11827 /* If we got invalid arguments, bail out before generating bad rtl. */
11828 if (arg0 == error_mark_node)
11829 return const0_rtx;
11831 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11832 op0 = copy_to_mode_reg (mode0, op0);
11834 pat = GEN_FCN (icode) (op0);
11835 if (pat)
11836 emit_insn (pat);
11837 return NULL_RTX;
11839 case ALTIVEC_BUILTIN_DSSALL:
11840 emit_insn (gen_altivec_dssall ());
11841 return NULL_RTX;
11843 case ALTIVEC_BUILTIN_DSS:
11844 icode = CODE_FOR_altivec_dss;
11845 arg0 = CALL_EXPR_ARG (exp, 0);
11846 STRIP_NOPS (arg0);
11847 op0 = expand_normal (arg0);
11848 mode0 = insn_data[icode].operand[0].mode;
11850 /* If we got invalid arguments, bail out before generating bad rtl. */
11851 if (arg0 == error_mark_node)
11852 return const0_rtx;
11854 if (TREE_CODE (arg0) != INTEGER_CST
11855 || TREE_INT_CST_LOW (arg0) & ~0x3)
11857 error ("argument to dss must be a 2-bit unsigned literal");
11858 return const0_rtx;
11861 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11862 op0 = copy_to_mode_reg (mode0, op0);
11864 emit_insn (gen_altivec_dss (op0));
11865 return NULL_RTX;
11867 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
11868 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
11869 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
11870 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
11871 case VSX_BUILTIN_VEC_INIT_V2DF:
11872 case VSX_BUILTIN_VEC_INIT_V2DI:
11873 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
11875 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
11876 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
11877 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
11878 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
11879 case VSX_BUILTIN_VEC_SET_V2DF:
11880 case VSX_BUILTIN_VEC_SET_V2DI:
11881 return altivec_expand_vec_set_builtin (exp);
11883 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
11884 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
11885 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
11886 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
11887 case VSX_BUILTIN_VEC_EXT_V2DF:
11888 case VSX_BUILTIN_VEC_EXT_V2DI:
11889 return altivec_expand_vec_ext_builtin (exp, target);
11891 default:
11892 break;
11896 /* Expand abs* operations. */
11897 d = bdesc_abs;
11898 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
11899 if (d->code == fcode)
11900 return altivec_expand_abs_builtin (d->icode, exp, target);
11902 /* Expand the AltiVec predicates. */
11903 d = bdesc_altivec_preds;
11904 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
11905 if (d->code == fcode)
11906 return altivec_expand_predicate_builtin (d->icode, exp, target);
11908 /* The LV* builtins are funky; they were initialized differently from the other builtins, so expand them by hand here. */
11909 switch (fcode)
11911 case ALTIVEC_BUILTIN_LVSL:
11912 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
11913 exp, target, false);
11914 case ALTIVEC_BUILTIN_LVSR:
11915 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
11916 exp, target, false);
11917 case ALTIVEC_BUILTIN_LVEBX:
11918 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
11919 exp, target, false);
11920 case ALTIVEC_BUILTIN_LVEHX:
11921 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
11922 exp, target, false);
11923 case ALTIVEC_BUILTIN_LVEWX:
11924 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
11925 exp, target, false);
11926 case ALTIVEC_BUILTIN_LVXL:
11927 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
11928 exp, target, false);
11929 case ALTIVEC_BUILTIN_LVX:
11930 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
11931 exp, target, false);
11932 case ALTIVEC_BUILTIN_LVLX:
11933 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
11934 exp, target, true);
11935 case ALTIVEC_BUILTIN_LVLXL:
11936 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
11937 exp, target, true);
11938 case ALTIVEC_BUILTIN_LVRX:
11939 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
11940 exp, target, true);
11941 case ALTIVEC_BUILTIN_LVRXL:
11942 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
11943 exp, target, true);
11944 case VSX_BUILTIN_LXVD2X_V2DF:
11945 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
11946 exp, target, false);
11947 case VSX_BUILTIN_LXVD2X_V2DI:
11948 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
11949 exp, target, false);
11950 case VSX_BUILTIN_LXVW4X_V4SF:
11951 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
11952 exp, target, false);
11953 case VSX_BUILTIN_LXVW4X_V4SI:
11954 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
11955 exp, target, false);
11956 case VSX_BUILTIN_LXVW4X_V8HI:
11957 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
11958 exp, target, false);
11959 case VSX_BUILTIN_LXVW4X_V16QI:
11960 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
11961 exp, target, false);
11962 break;
11963 default:
11964 break;
11968 *expandedp = false;
11969 return NULL_RTX;
11972 /* Expand the builtin in EXP and store the result in TARGET. Store
11973 true in *EXPANDEDP if we found a builtin to expand. */
11974 static rtx
11975 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
11977 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11978 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11979 const struct builtin_description *d;
11980 size_t i;
11982 *expandedp = true;
11984 switch (fcode)
11986 case PAIRED_BUILTIN_STX:
11987 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
11988 case PAIRED_BUILTIN_LX:
11989 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
11990 default:
11991 break;
11995 /* Expand the paired predicates. */
11996 d = bdesc_paired_preds;
11997 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
11998 if (d->code == fcode)
11999 return paired_expand_predicate_builtin (d->icode, exp, target);
12001 *expandedp = false;
12002 return NULL_RTX;
12005 /* Binops that need to be initialized manually, but can be expanded
12006 automagically by rs6000_expand_binop_builtin. */
12007 static const struct builtin_description bdesc_2arg_spe[] =
12009 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
12010 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
12011 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
12012 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
12013 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
12014 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
12015 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
12016 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
12017 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
12018 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
12019 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
12020 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
12021 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
12022 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
12023 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
12024 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
12025 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
12026 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
12027 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
12028 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
12029 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
12030 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
12033 /* Expand the builtin in EXP and store the result in TARGET. Store
12034 true in *EXPANDEDP if we found a builtin to expand.
12036 This expands the SPE builtins that are not simple unary and binary
12037 operations. */
12038 static rtx
12039 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
12041 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12042 tree arg1, arg0;
12043 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12044 enum insn_code icode;
12045 enum machine_mode tmode, mode0;
12046 rtx pat, op0;
12047 const struct builtin_description *d;
12048 size_t i;
12050 *expandedp = true;
12052 /* Syntax check for a 5-bit unsigned immediate. */
12053 switch (fcode)
12055 case SPE_BUILTIN_EVSTDD:
12056 case SPE_BUILTIN_EVSTDH:
12057 case SPE_BUILTIN_EVSTDW:
12058 case SPE_BUILTIN_EVSTWHE:
12059 case SPE_BUILTIN_EVSTWHO:
12060 case SPE_BUILTIN_EVSTWWE:
12061 case SPE_BUILTIN_EVSTWWO:
12062 arg1 = CALL_EXPR_ARG (exp, 2);
12063 if (TREE_CODE (arg1) != INTEGER_CST
12064 || TREE_INT_CST_LOW (arg1) & ~0x1f)
12066 error ("argument 2 must be a 5-bit unsigned literal");
12067 return const0_rtx;
12069 break;
12070 default:
12071 break;
12074 /* The evsplat*i instructions are not quite generic. */
12075 switch (fcode)
12077 case SPE_BUILTIN_EVSPLATFI:
12078 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
12079 exp, target);
12080 case SPE_BUILTIN_EVSPLATI:
12081 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
12082 exp, target);
12083 default:
12084 break;
12087 d = bdesc_2arg_spe;
12088 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
12089 if (d->code == fcode)
12090 return rs6000_expand_binop_builtin (d->icode, exp, target);
12092 d = bdesc_spe_predicates;
12093 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
12094 if (d->code == fcode)
12095 return spe_expand_predicate_builtin (d->icode, exp, target);
12097 d = bdesc_spe_evsel;
12098 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
12099 if (d->code == fcode)
12100 return spe_expand_evsel_builtin (d->icode, exp, target);
12102 switch (fcode)
12104 case SPE_BUILTIN_EVSTDDX:
12105 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
12106 case SPE_BUILTIN_EVSTDHX:
12107 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
12108 case SPE_BUILTIN_EVSTDWX:
12109 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
12110 case SPE_BUILTIN_EVSTWHEX:
12111 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
12112 case SPE_BUILTIN_EVSTWHOX:
12113 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
12114 case SPE_BUILTIN_EVSTWWEX:
12115 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
12116 case SPE_BUILTIN_EVSTWWOX:
12117 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
12118 case SPE_BUILTIN_EVSTDD:
12119 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
12120 case SPE_BUILTIN_EVSTDH:
12121 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
12122 case SPE_BUILTIN_EVSTDW:
12123 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
12124 case SPE_BUILTIN_EVSTWHE:
12125 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
12126 case SPE_BUILTIN_EVSTWHO:
12127 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
12128 case SPE_BUILTIN_EVSTWWE:
12129 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
12130 case SPE_BUILTIN_EVSTWWO:
12131 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
12132 case SPE_BUILTIN_MFSPEFSCR:
12133 icode = CODE_FOR_spe_mfspefscr;
12134 tmode = insn_data[icode].operand[0].mode;
12136 if (target == 0
12137 || GET_MODE (target) != tmode
12138 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12139 target = gen_reg_rtx (tmode);
12141 pat = GEN_FCN (icode) (target);
12142 if (! pat)
12143 return 0;
12144 emit_insn (pat);
12145 return target;
12146 case SPE_BUILTIN_MTSPEFSCR:
12147 icode = CODE_FOR_spe_mtspefscr;
12148 arg0 = CALL_EXPR_ARG (exp, 0);
12149 op0 = expand_normal (arg0);
12150 mode0 = insn_data[icode].operand[0].mode;
12152 if (arg0 == error_mark_node)
12153 return const0_rtx;
12155 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12156 op0 = copy_to_mode_reg (mode0, op0);
12158 pat = GEN_FCN (icode) (op0);
12159 if (pat)
12160 emit_insn (pat);
12161 return NULL_RTX;
12162 default:
12163 break;
12166 *expandedp = false;
12167 return NULL_RTX;
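/* A minimal sketch of the 5-bit *unsigned* check performed at the top
   of spe_expand_builtin, in plain C (hypothetical helper, guarded out
   of the build):  */
#if 0
#include <stdbool.h>

/* True iff VALUE fits in a 5-bit unsigned immediate field.  */
static bool
fits_unsigned_5bit (unsigned long value)
{
  return (value & ~0x1fUL) == 0;
}
/* fits_unsigned_5bit (31) holds; fits_unsigned_5bit (32) fails,
   matching "argument 2 must be a 5-bit unsigned literal" above.  */
#endif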
12170 static rtx
12171 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
12173 rtx pat, scratch, tmp;
12174 tree form = CALL_EXPR_ARG (exp, 0);
12175 tree arg0 = CALL_EXPR_ARG (exp, 1);
12176 tree arg1 = CALL_EXPR_ARG (exp, 2);
12177 rtx op0 = expand_normal (arg0);
12178 rtx op1 = expand_normal (arg1);
12179 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12180 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12181 int form_int;
12182 enum rtx_code code;
12184 if (TREE_CODE (form) != INTEGER_CST)
12186 error ("argument 1 of __builtin_paired_predicate must be a constant");
12187 return const0_rtx;
12189 else
12190 form_int = TREE_INT_CST_LOW (form);
12192 gcc_assert (mode0 == mode1);
12194 if (arg0 == error_mark_node || arg1 == error_mark_node)
12195 return const0_rtx;
12197 if (target == 0
12198 || GET_MODE (target) != SImode
12199 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
12200 target = gen_reg_rtx (SImode);
12201 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
12202 op0 = copy_to_mode_reg (mode0, op0);
12203 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
12204 op1 = copy_to_mode_reg (mode1, op1);
12206 scratch = gen_reg_rtx (CCFPmode);
12208 pat = GEN_FCN (icode) (scratch, op0, op1);
12209 if (!pat)
12210 return const0_rtx;
12212 emit_insn (pat);
12214 switch (form_int)
12216 /* LT bit. */
12217 case 0:
12218 code = LT;
12219 break;
12220 /* GT bit. */
12221 case 1:
12222 code = GT;
12223 break;
12224 /* EQ bit. */
12225 case 2:
12226 code = EQ;
12227 break;
12228 /* UN bit. */
12229 case 3:
12230 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
12231 return target;
12232 default:
12233 error ("argument 1 of __builtin_paired_predicate is out of range");
12234 return const0_rtx;
12237 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
12238 emit_move_insn (target, tmp);
12239 return target;
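/* For illustration: FORM_INT selects which CCFP bit the caller wants.
   Forms 0..2 become an LT/GT/EQ comparison of the scratch CC register
   against zero; form 3 (the unordered bit) is read with the dedicated
   move-from-CR pattern instead.  A table view of the same mapping
   (hypothetical, guarded out of the build):  */
#if 0
static const char *const paired_pred_bit[4] = { "LT", "GT", "EQ", "UN" };
#endif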
12242 static rtx
12243 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
12245 rtx pat, scratch, tmp;
12246 tree form = CALL_EXPR_ARG (exp, 0);
12247 tree arg0 = CALL_EXPR_ARG (exp, 1);
12248 tree arg1 = CALL_EXPR_ARG (exp, 2);
12249 rtx op0 = expand_normal (arg0);
12250 rtx op1 = expand_normal (arg1);
12251 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12252 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12253 int form_int;
12254 enum rtx_code code;
12256 if (TREE_CODE (form) != INTEGER_CST)
12258 error ("argument 1 of __builtin_spe_predicate must be a constant");
12259 return const0_rtx;
12261 else
12262 form_int = TREE_INT_CST_LOW (form);
12264 gcc_assert (mode0 == mode1);
12266 if (arg0 == error_mark_node || arg1 == error_mark_node)
12267 return const0_rtx;
12269 if (target == 0
12270 || GET_MODE (target) != SImode
12271 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
12272 target = gen_reg_rtx (SImode);
12274 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12275 op0 = copy_to_mode_reg (mode0, op0);
12276 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12277 op1 = copy_to_mode_reg (mode1, op1);
12279 scratch = gen_reg_rtx (CCmode);
12281 pat = GEN_FCN (icode) (scratch, op0, op1);
12282 if (! pat)
12283 return const0_rtx;
12284 emit_insn (pat);
12286 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
12287 _lower_. We use one compare, but look in different bits of the
12288 CR for each variant.
12290 There are 2 elements in each SPE simd type (upper/lower). The CR
12291 bits are set as follows:
12293 BIT 0 | BIT 1 | BIT 2 | BIT 3
12294 U | L | (U | L) | (U & L)
12296 So, for an "all" relationship, BIT 3 would be set.
12297 For an "any" relationship, BIT 2 would be set. Etc.
12299 Following traditional nomenclature, these bits map to:
12301 BIT 0 | BIT 1 | BIT 2 | BIT 3
12302 LT | GT | EQ | OV
12304 Later, we generate rtl to test the OV, EQ, LT or GT bit, as the form requires.
12307 switch (form_int)
12309 /* All variant. OV bit. */
12310 case 0:
12311 /* We need to get to the OV bit, which is the ORDERED bit. We
12312 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
12313 that's ugly and will make validate_condition_mode die.
12314 So let's just use another pattern. */
12315 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
12316 return target;
12317 /* Any variant. EQ bit. */
12318 case 1:
12319 code = EQ;
12320 break;
12321 /* Upper variant. LT bit. */
12322 case 2:
12323 code = LT;
12324 break;
12325 /* Lower variant. GT bit. */
12326 case 3:
12327 code = GT;
12328 break;
12329 default:
12330 error ("argument 1 of __builtin_spe_predicate is out of range");
12331 return const0_rtx;
12334 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
12335 emit_move_insn (target, tmp);
12337 return target;
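/* For illustration: with the CR bit layout documented above, a single
   hardware compare feeds all four SPE predicate variants:

     form 0  "all"    both halves satisfy the relation  (OV bit)
     form 1  "any"    at least one half does            (EQ bit)
     form 2  "upper"  the upper half does               (LT bit)
     form 3  "lower"  the lower half does               (GT bit)  */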
12340 /* The evsel builtins look like this:
12342 e = __builtin_spe_evsel_OP (a, b, c, d);
12344 and work like this:
12346 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
12347 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
12350 static rtx
12351 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
12353 rtx pat, scratch;
12354 tree arg0 = CALL_EXPR_ARG (exp, 0);
12355 tree arg1 = CALL_EXPR_ARG (exp, 1);
12356 tree arg2 = CALL_EXPR_ARG (exp, 2);
12357 tree arg3 = CALL_EXPR_ARG (exp, 3);
12358 rtx op0 = expand_normal (arg0);
12359 rtx op1 = expand_normal (arg1);
12360 rtx op2 = expand_normal (arg2);
12361 rtx op3 = expand_normal (arg3);
12362 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12363 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12365 gcc_assert (mode0 == mode1);
12367 if (arg0 == error_mark_node || arg1 == error_mark_node
12368 || arg2 == error_mark_node || arg3 == error_mark_node)
12369 return const0_rtx;
12371 if (target == 0
12372 || GET_MODE (target) != mode0
12373 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
12374 target = gen_reg_rtx (mode0);
12376 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12377 op0 = copy_to_mode_reg (mode0, op0);
12378 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12379 op1 = copy_to_mode_reg (mode0, op1);
12380 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12381 op2 = copy_to_mode_reg (mode0, op2);
12382 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
12383 op3 = copy_to_mode_reg (mode0, op3);
12385 /* Generate the compare. */
12386 scratch = gen_reg_rtx (CCmode);
12387 pat = GEN_FCN (icode) (scratch, op0, op1);
12388 if (! pat)
12389 return const0_rtx;
12390 emit_insn (pat);
12392 if (mode0 == V2SImode)
12393 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
12394 else
12395 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
12397 return target;
12400 /* Raise an error for a builtin function that is called without the
12401 appropriate target options being set. */
12403 static void
12404 rs6000_invalid_builtin (enum rs6000_builtins fncode)
12406 size_t uns_fncode = (size_t)fncode;
12407 const char *name = rs6000_builtin_info[uns_fncode].name;
12408 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
12410 gcc_assert (name != NULL);
12411 if ((fnmask & RS6000_BTM_CELL) != 0)
12412 error ("Builtin function %s is only valid for the cell processor", name);
12413 else if ((fnmask & RS6000_BTM_VSX) != 0)
12414 error ("Builtin function %s requires the -mvsx option", name);
12415 else if ((fnmask & RS6000_BTM_HTM) != 0)
12416 error ("Builtin function %s requires the -mhtm option", name);
12417 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
12418 error ("Builtin function %s requires the -maltivec option", name);
12419 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
12420 error ("Builtin function %s requires the -mpaired option", name);
12421 else if ((fnmask & RS6000_BTM_SPE) != 0)
12422 error ("Builtin function %s requires the -mspe option", name);
12423 else
12424 error ("Builtin function %s is not supported with the current options",
12425 name);
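/* For illustration: calling, say, an AltiVec builtin without -maltivec
   does not crash the compiler; expansion lands here and reports

     Builtin function __builtin_altivec_vaddubm requires the
     -maltivec option

   (builtin name chosen for the example) and the caller then falls back
   to expanding a normal call.  */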
12428 /* Expand an expression EXP that calls a built-in function,
12429 with result going to TARGET if that's convenient
12430 (and in mode MODE if that's convenient).
12431 SUBTARGET may be used as the target for computing one of EXP's operands.
12432 IGNORE is nonzero if the value is to be ignored. */
12434 static rtx
12435 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
12436 enum machine_mode mode ATTRIBUTE_UNUSED,
12437 int ignore ATTRIBUTE_UNUSED)
12439 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12440 enum rs6000_builtins fcode
12441 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
12442 size_t uns_fcode = (size_t)fcode;
12443 const struct builtin_description *d;
12444 size_t i;
12445 rtx ret;
12446 bool success;
12447 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
12448 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
12450 if (TARGET_DEBUG_BUILTIN)
12452 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
12453 const char *name1 = rs6000_builtin_info[uns_fcode].name;
12454 const char *name2 = ((icode != CODE_FOR_nothing)
12455 ? get_insn_name ((int)icode)
12456 : "nothing");
12457 const char *name3;
12459 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
12461 default: name3 = "unknown"; break;
12462 case RS6000_BTC_SPECIAL: name3 = "special"; break;
12463 case RS6000_BTC_UNARY: name3 = "unary"; break;
12464 case RS6000_BTC_BINARY: name3 = "binary"; break;
12465 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
12466 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
12467 case RS6000_BTC_ABS: name3 = "abs"; break;
12468 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
12469 case RS6000_BTC_DST: name3 = "dst"; break;
12473 fprintf (stderr,
12474 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
12475 (name1) ? name1 : "---", fcode,
12476 (name2) ? name2 : "---", (int)icode,
12477 name3,
12478 func_valid_p ? "" : ", not valid");
12481 if (!func_valid_p)
12483 rs6000_invalid_builtin (fcode);
12485 /* Given it is invalid, just generate a normal call. */
12486 return expand_call (exp, target, ignore);
12489 switch (fcode)
12491 case RS6000_BUILTIN_RECIP:
12492 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
12494 case RS6000_BUILTIN_RECIPF:
12495 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
12497 case RS6000_BUILTIN_RSQRTF:
12498 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
12500 case RS6000_BUILTIN_RSQRT:
12501 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
12503 case POWER7_BUILTIN_BPERMD:
12504 return rs6000_expand_binop_builtin (((TARGET_64BIT)
12505 ? CODE_FOR_bpermd_di
12506 : CODE_FOR_bpermd_si), exp, target);
12508 case RS6000_BUILTIN_GET_TB:
12509 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
12510 target);
12512 case RS6000_BUILTIN_MFTB:
12513 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
12514 ? CODE_FOR_rs6000_mftb_di
12515 : CODE_FOR_rs6000_mftb_si),
12516 target);
12518 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
12519 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
12521 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr
12522 : (int) CODE_FOR_altivec_lvsl);
12523 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12524 enum machine_mode mode = insn_data[icode].operand[1].mode;
12525 tree arg;
12526 rtx op, addr, pat;
12528 gcc_assert (TARGET_ALTIVEC);
12530 arg = CALL_EXPR_ARG (exp, 0);
12531 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
12532 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
12533 addr = memory_address (mode, op);
12534 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
12535 op = addr;
12536 else
12538 /* For the load case need to negate the address. */
12539 op = gen_reg_rtx (GET_MODE (addr));
12540 emit_insn (gen_rtx_SET (VOIDmode, op,
12541 gen_rtx_NEG (GET_MODE (addr), addr)));
12543 op = gen_rtx_MEM (mode, op);
12545 if (target == 0
12546 || GET_MODE (target) != tmode
12547 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12548 target = gen_reg_rtx (tmode);
12551 pat = GEN_FCN (icode) (target, op);
12552 if (!pat)
12553 return 0;
12554 emit_insn (pat);
12556 return target;
12559 case ALTIVEC_BUILTIN_VCFUX:
12560 case ALTIVEC_BUILTIN_VCFSX:
12561 case ALTIVEC_BUILTIN_VCTUXS:
12562 case ALTIVEC_BUILTIN_VCTSXS:
12563 /* FIXME: There's got to be a nicer way to handle this case than
12564 constructing a new CALL_EXPR. */
12565 if (call_expr_nargs (exp) == 1)
12567 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
12568 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
12570 break;
12572 default:
12573 break;
12576 if (TARGET_ALTIVEC)
12578 ret = altivec_expand_builtin (exp, target, &success);
12580 if (success)
12581 return ret;
12583 if (TARGET_SPE)
12585 ret = spe_expand_builtin (exp, target, &success);
12587 if (success)
12588 return ret;
12590 if (TARGET_PAIRED_FLOAT)
12592 ret = paired_expand_builtin (exp, target, &success);
12594 if (success)
12595 return ret;
12597 if (TARGET_HTM)
12599 ret = htm_expand_builtin (exp, target, &success);
12601 if (success)
12602 return ret;
12605 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
12607 /* Handle simple unary operations. */
12608 d = bdesc_1arg;
12609 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12610 if (d->code == fcode)
12611 return rs6000_expand_unop_builtin (d->icode, exp, target);
12613 /* Handle simple binary operations. */
12614 d = bdesc_2arg;
12615 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12616 if (d->code == fcode)
12617 return rs6000_expand_binop_builtin (d->icode, exp, target);
12619 /* Handle simple ternary operations. */
12620 d = bdesc_3arg;
12621 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12622 if (d->code == fcode)
12623 return rs6000_expand_ternop_builtin (d->icode, exp, target);
12625 gcc_unreachable ();
12628 static void
12629 rs6000_init_builtins (void)
12631 tree tdecl;
12632 tree ftype;
12633 enum machine_mode mode;
12635 if (TARGET_DEBUG_BUILTIN)
12636 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
12637 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
12638 (TARGET_SPE) ? ", spe" : "",
12639 (TARGET_ALTIVEC) ? ", altivec" : "",
12640 (TARGET_VSX) ? ", vsx" : "");
12642 V2SI_type_node = build_vector_type (intSI_type_node, 2);
12643 V2SF_type_node = build_vector_type (float_type_node, 2);
12644 V2DI_type_node = build_vector_type (intDI_type_node, 2);
12645 V2DF_type_node = build_vector_type (double_type_node, 2);
12646 V4HI_type_node = build_vector_type (intHI_type_node, 4);
12647 V4SI_type_node = build_vector_type (intSI_type_node, 4);
12648 V4SF_type_node = build_vector_type (float_type_node, 4);
12649 V8HI_type_node = build_vector_type (intHI_type_node, 8);
12650 V16QI_type_node = build_vector_type (intQI_type_node, 16);
12652 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
12653 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
12654 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
12655 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
12657 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
12658 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
12659 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
12660 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
12662 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
12663 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
12664 'vector unsigned short'. */
12666 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
12667 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
12668 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
12669 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
12670 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
12672 long_integer_type_internal_node = long_integer_type_node;
12673 long_unsigned_type_internal_node = long_unsigned_type_node;
12674 long_long_integer_type_internal_node = long_long_integer_type_node;
12675 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
12676 intQI_type_internal_node = intQI_type_node;
12677 uintQI_type_internal_node = unsigned_intQI_type_node;
12678 intHI_type_internal_node = intHI_type_node;
12679 uintHI_type_internal_node = unsigned_intHI_type_node;
12680 intSI_type_internal_node = intSI_type_node;
12681 uintSI_type_internal_node = unsigned_intSI_type_node;
12682 intDI_type_internal_node = intDI_type_node;
12683 uintDI_type_internal_node = unsigned_intDI_type_node;
12684 float_type_internal_node = float_type_node;
12685 double_type_internal_node = double_type_node;
12686 void_type_internal_node = void_type_node;
12688 /* Initialize the modes for builtin_function_type, mapping a machine mode to
12689 tree type node. */
12690 builtin_mode_to_type[QImode][0] = integer_type_node;
12691 builtin_mode_to_type[HImode][0] = integer_type_node;
12692 builtin_mode_to_type[SImode][0] = intSI_type_node;
12693 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
12694 builtin_mode_to_type[DImode][0] = intDI_type_node;
12695 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
12696 builtin_mode_to_type[SFmode][0] = float_type_node;
12697 builtin_mode_to_type[DFmode][0] = double_type_node;
12698 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
12699 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
12700 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
12701 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
12702 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
12703 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
12704 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
12705 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
12706 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
12707 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
12708 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
12709 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
12710 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
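/* Lookup sketch (editorial): builtin_function_type uses this table to
   map a (machine mode, unsignedness) pair to a tree type, e.g.
       builtin_mode_to_type[V4SImode][1]  is  unsigned_V4SI_type_node,
       builtin_mode_to_type[SFmode][0]    is  float_type_node.  */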
12712 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
12713 TYPE_NAME (bool_char_type_node) = tdecl;
12715 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
12716 TYPE_NAME (bool_short_type_node) = tdecl;
12718 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
12719 TYPE_NAME (bool_int_type_node) = tdecl;
12721 tdecl = add_builtin_type ("__pixel", pixel_type_node);
12722 TYPE_NAME (pixel_type_node) = tdecl;
12724 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
12725 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
12726 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
12727 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
12728 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
12730 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
12731 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
12733 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
12734 TYPE_NAME (V16QI_type_node) = tdecl;
12736 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
12737 TYPE_NAME (bool_V16QI_type_node) = tdecl;
12739 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
12740 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
12742 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
12743 TYPE_NAME (V8HI_type_node) = tdecl;
12745 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
12746 TYPE_NAME (bool_V8HI_type_node) = tdecl;
12748 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
12749 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
12751 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
12752 TYPE_NAME (V4SI_type_node) = tdecl;
12754 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
12755 TYPE_NAME (bool_V4SI_type_node) = tdecl;
12757 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
12758 TYPE_NAME (V4SF_type_node) = tdecl;
12760 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
12761 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
12763 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
12764 TYPE_NAME (V2DF_type_node) = tdecl;
12766 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
12767 TYPE_NAME (V2DI_type_node) = tdecl;
12769 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
12770 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
12772 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
12773 TYPE_NAME (bool_V2DI_type_node) = tdecl;
12775 /* Paired and SPE builtins are only available if you build a compiler with
12776 the appropriate options, so only create those builtins with the
12777 appropriate compiler option. Create Altivec and VSX builtins on machines
12778 with at least the general purpose extensions (970 and newer) to allow the
12779 use of the target attribute. */
12780 if (TARGET_PAIRED_FLOAT)
12781 paired_init_builtins ();
12782 if (TARGET_SPE)
12783 spe_init_builtins ();
12784 if (TARGET_EXTRA_BUILTINS)
12785 altivec_init_builtins ();
12786 if (TARGET_HTM)
12787 htm_init_builtins ();
12789 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
12790 rs6000_common_init_builtins ();
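/* Editorial sketch of why AltiVec/VSX builtins are created eagerly: with
   the target attribute, one translation unit can mix generic and AltiVec
   code (user-level example, assumed 970-or-newer default CPU):

     __attribute__ ((target ("altivec")))
     vector int vadd (vector int a, vector int b)
     {
       return __builtin_altivec_vadduwm (a, b);
     }
*/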
12792 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
12793 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
12794 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
12796 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
12797 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
12798 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
12800 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
12801 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
12802 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
12804 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
12805 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
12806 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
12808 mode = (TARGET_64BIT) ? DImode : SImode;
12809 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
12810 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
12811 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
12813 ftype = build_function_type_list (unsigned_intDI_type_node,
12814 NULL_TREE);
12815 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
12817 if (TARGET_64BIT)
12818 ftype = build_function_type_list (unsigned_intDI_type_node,
12819 NULL_TREE);
12820 else
12821 ftype = build_function_type_list (unsigned_intSI_type_node,
12822 NULL_TREE);
12823 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
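/* Usage sketch (editorial, user code; do_work is a placeholder):
     unsigned long long t0 = __builtin_ppc_get_timebase ();
     do_work ();
     unsigned long long dt = __builtin_ppc_get_timebase () - t0;
   __builtin_ppc_mftb returns the full time base on 64-bit targets but
   only a 32-bit value on 32-bit targets, matching the types above.  */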
12825 #if TARGET_XCOFF
12826 /* AIX libm provides clog as __clog. */
12827 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
12828 set_user_assembler_name (tdecl, "__clog");
12829 #endif
12831 #ifdef SUBTARGET_INIT_BUILTINS
12832 SUBTARGET_INIT_BUILTINS;
12833 #endif
12836 /* Returns the rs6000 builtin decl for CODE. */
12838 static tree
12839 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
12841 HOST_WIDE_INT fnmask;
12843 if (code >= RS6000_BUILTIN_COUNT)
12844 return error_mark_node;
12846 fnmask = rs6000_builtin_info[code].mask;
12847 if ((fnmask & rs6000_builtin_mask) != fnmask)
12849 rs6000_invalid_builtin ((enum rs6000_builtins)code);
12850 return error_mark_node;
12853 return rs6000_builtin_decls[code];
12856 static void
12857 spe_init_builtins (void)
12859 tree puint_type_node = build_pointer_type (unsigned_type_node);
12860 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
12861 const struct builtin_description *d;
12862 size_t i;
12864 tree v2si_ftype_4_v2si
12865 = build_function_type_list (opaque_V2SI_type_node,
12866 opaque_V2SI_type_node,
12867 opaque_V2SI_type_node,
12868 opaque_V2SI_type_node,
12869 opaque_V2SI_type_node,
12870 NULL_TREE);
12872 tree v2sf_ftype_4_v2sf
12873 = build_function_type_list (opaque_V2SF_type_node,
12874 opaque_V2SF_type_node,
12875 opaque_V2SF_type_node,
12876 opaque_V2SF_type_node,
12877 opaque_V2SF_type_node,
12878 NULL_TREE);
12880 tree int_ftype_int_v2si_v2si
12881 = build_function_type_list (integer_type_node,
12882 integer_type_node,
12883 opaque_V2SI_type_node,
12884 opaque_V2SI_type_node,
12885 NULL_TREE);
12887 tree int_ftype_int_v2sf_v2sf
12888 = build_function_type_list (integer_type_node,
12889 integer_type_node,
12890 opaque_V2SF_type_node,
12891 opaque_V2SF_type_node,
12892 NULL_TREE);
12894 tree void_ftype_v2si_puint_int
12895 = build_function_type_list (void_type_node,
12896 opaque_V2SI_type_node,
12897 puint_type_node,
12898 integer_type_node,
12899 NULL_TREE);
12901 tree void_ftype_v2si_puint_char
12902 = build_function_type_list (void_type_node,
12903 opaque_V2SI_type_node,
12904 puint_type_node,
12905 char_type_node,
12906 NULL_TREE);
12908 tree void_ftype_v2si_pv2si_int
12909 = build_function_type_list (void_type_node,
12910 opaque_V2SI_type_node,
12911 opaque_p_V2SI_type_node,
12912 integer_type_node,
12913 NULL_TREE);
12915 tree void_ftype_v2si_pv2si_char
12916 = build_function_type_list (void_type_node,
12917 opaque_V2SI_type_node,
12918 opaque_p_V2SI_type_node,
12919 char_type_node,
12920 NULL_TREE);
12922 tree void_ftype_int
12923 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
12925 tree int_ftype_void
12926 = build_function_type_list (integer_type_node, NULL_TREE);
12928 tree v2si_ftype_pv2si_int
12929 = build_function_type_list (opaque_V2SI_type_node,
12930 opaque_p_V2SI_type_node,
12931 integer_type_node,
12932 NULL_TREE);
12934 tree v2si_ftype_puint_int
12935 = build_function_type_list (opaque_V2SI_type_node,
12936 puint_type_node,
12937 integer_type_node,
12938 NULL_TREE);
12940 tree v2si_ftype_pushort_int
12941 = build_function_type_list (opaque_V2SI_type_node,
12942 pushort_type_node,
12943 integer_type_node,
12944 NULL_TREE);
12946 tree v2si_ftype_signed_char
12947 = build_function_type_list (opaque_V2SI_type_node,
12948 signed_char_type_node,
12949 NULL_TREE);
12951 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
12953 /* Initialize irregular SPE builtins. */
12955 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
12956 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
12957 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
12958 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
12959 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
12960 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
12961 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
12962 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
12963 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
12964 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
12965 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
12966 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
12967 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
12968 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
12969 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
12970 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
12971 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
12972 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
12974 /* Loads. */
12975 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
12976 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
12977 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
12978 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
12979 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
12980 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
12981 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
12982 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
12983 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
12984 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
12985 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
12986 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
12987 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
12988 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
12989 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
12990 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
12991 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
12992 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
12993 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
12994 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
12995 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
12996 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
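/* SPE usage sketch (editorial, user code; zero offsets assumed):
     __ev64_opaque__ v = __builtin_spe_evldd ((__ev64_opaque__ *) src, 0);
     __builtin_spe_evstdd (v, (__ev64_opaque__ *) dst, 0);
*/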
12998 /* Predicates. */
12999 d = bdesc_spe_predicates;
13000 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
13002 tree type;
13004 switch (insn_data[d->icode].operand[1].mode)
13006 case V2SImode:
13007 type = int_ftype_int_v2si_v2si;
13008 break;
13009 case V2SFmode:
13010 type = int_ftype_int_v2sf_v2sf;
13011 break;
13012 default:
13013 gcc_unreachable ();
13016 def_builtin (d->name, type, d->code);
13019 /* Evsel predicates. */
13020 d = bdesc_spe_evsel;
13021 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
13023 tree type;
13025 switch (insn_data[d->icode].operand[1].mode)
13027 case V2SImode:
13028 type = v2si_ftype_4_v2si;
13029 break;
13030 case V2SFmode:
13031 type = v2sf_ftype_4_v2sf;
13032 break;
13033 default:
13034 gcc_unreachable ();
13037 def_builtin (d->name, type, d->code);
13041 static void
13042 paired_init_builtins (void)
13044 const struct builtin_description *d;
13045 size_t i;
13047 tree int_ftype_int_v2sf_v2sf
13048 = build_function_type_list (integer_type_node,
13049 integer_type_node,
13050 V2SF_type_node,
13051 V2SF_type_node,
13052 NULL_TREE);
13053 tree pcfloat_type_node =
13054 build_pointer_type (build_qualified_type
13055 (float_type_node, TYPE_QUAL_CONST));
13057 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
13058 long_integer_type_node,
13059 pcfloat_type_node,
13060 NULL_TREE);
13061 tree void_ftype_v2sf_long_pcfloat =
13062 build_function_type_list (void_type_node,
13063 V2SF_type_node,
13064 long_integer_type_node,
13065 pcfloat_type_node,
13066 NULL_TREE);
13069 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
13070 PAIRED_BUILTIN_LX);
13073 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
13074 PAIRED_BUILTIN_STX);
13076 /* Predicates. */
13077 d = bdesc_paired_preds;
13078 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
13080 tree type;
13082 if (TARGET_DEBUG_BUILTIN)
13083 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
13084 (int)i, get_insn_name (d->icode), (int)d->icode,
13085 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
13087 switch (insn_data[d->icode].operand[1].mode)
13089 case V2SFmode:
13090 type = int_ftype_int_v2sf_v2sf;
13091 break;
13092 default:
13093 gcc_unreachable ();
13096 def_builtin (d->name, type, d->code);
13100 static void
13101 altivec_init_builtins (void)
13103 const struct builtin_description *d;
13104 size_t i;
13105 tree ftype;
13106 tree decl;
13108 tree pvoid_type_node = build_pointer_type (void_type_node);
13110 tree pcvoid_type_node
13111 = build_pointer_type (build_qualified_type (void_type_node,
13112 TYPE_QUAL_CONST));
13114 tree int_ftype_opaque
13115 = build_function_type_list (integer_type_node,
13116 opaque_V4SI_type_node, NULL_TREE);
13117 tree opaque_ftype_opaque
13118 = build_function_type_list (integer_type_node, NULL_TREE);
13119 tree opaque_ftype_opaque_int
13120 = build_function_type_list (opaque_V4SI_type_node,
13121 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
13122 tree opaque_ftype_opaque_opaque_int
13123 = build_function_type_list (opaque_V4SI_type_node,
13124 opaque_V4SI_type_node, opaque_V4SI_type_node,
13125 integer_type_node, NULL_TREE);
13126 tree int_ftype_int_opaque_opaque
13127 = build_function_type_list (integer_type_node,
13128 integer_type_node, opaque_V4SI_type_node,
13129 opaque_V4SI_type_node, NULL_TREE);
13130 tree int_ftype_int_v4si_v4si
13131 = build_function_type_list (integer_type_node,
13132 integer_type_node, V4SI_type_node,
13133 V4SI_type_node, NULL_TREE);
13134 tree int_ftype_int_v2di_v2di
13135 = build_function_type_list (integer_type_node,
13136 integer_type_node, V2DI_type_node,
13137 V2DI_type_node, NULL_TREE);
13138 tree void_ftype_v4si
13139 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
13140 tree v8hi_ftype_void
13141 = build_function_type_list (V8HI_type_node, NULL_TREE);
13142 tree void_ftype_void
13143 = build_function_type_list (void_type_node, NULL_TREE);
13144 tree void_ftype_int
13145 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
13147 tree opaque_ftype_long_pcvoid
13148 = build_function_type_list (opaque_V4SI_type_node,
13149 long_integer_type_node, pcvoid_type_node,
13150 NULL_TREE);
13151 tree v16qi_ftype_long_pcvoid
13152 = build_function_type_list (V16QI_type_node,
13153 long_integer_type_node, pcvoid_type_node,
13154 NULL_TREE);
13155 tree v8hi_ftype_long_pcvoid
13156 = build_function_type_list (V8HI_type_node,
13157 long_integer_type_node, pcvoid_type_node,
13158 NULL_TREE);
13159 tree v4si_ftype_long_pcvoid
13160 = build_function_type_list (V4SI_type_node,
13161 long_integer_type_node, pcvoid_type_node,
13162 NULL_TREE);
13163 tree v4sf_ftype_long_pcvoid
13164 = build_function_type_list (V4SF_type_node,
13165 long_integer_type_node, pcvoid_type_node,
13166 NULL_TREE);
13167 tree v2df_ftype_long_pcvoid
13168 = build_function_type_list (V2DF_type_node,
13169 long_integer_type_node, pcvoid_type_node,
13170 NULL_TREE);
13171 tree v2di_ftype_long_pcvoid
13172 = build_function_type_list (V2DI_type_node,
13173 long_integer_type_node, pcvoid_type_node,
13174 NULL_TREE);
13176 tree void_ftype_opaque_long_pvoid
13177 = build_function_type_list (void_type_node,
13178 opaque_V4SI_type_node, long_integer_type_node,
13179 pvoid_type_node, NULL_TREE);
13180 tree void_ftype_v4si_long_pvoid
13181 = build_function_type_list (void_type_node,
13182 V4SI_type_node, long_integer_type_node,
13183 pvoid_type_node, NULL_TREE);
13184 tree void_ftype_v16qi_long_pvoid
13185 = build_function_type_list (void_type_node,
13186 V16QI_type_node, long_integer_type_node,
13187 pvoid_type_node, NULL_TREE);
13188 tree void_ftype_v8hi_long_pvoid
13189 = build_function_type_list (void_type_node,
13190 V8HI_type_node, long_integer_type_node,
13191 pvoid_type_node, NULL_TREE);
13192 tree void_ftype_v4sf_long_pvoid
13193 = build_function_type_list (void_type_node,
13194 V4SF_type_node, long_integer_type_node,
13195 pvoid_type_node, NULL_TREE);
13196 tree void_ftype_v2df_long_pvoid
13197 = build_function_type_list (void_type_node,
13198 V2DF_type_node, long_integer_type_node,
13199 pvoid_type_node, NULL_TREE);
13200 tree void_ftype_v2di_long_pvoid
13201 = build_function_type_list (void_type_node,
13202 V2DI_type_node, long_integer_type_node,
13203 pvoid_type_node, NULL_TREE);
13204 tree int_ftype_int_v8hi_v8hi
13205 = build_function_type_list (integer_type_node,
13206 integer_type_node, V8HI_type_node,
13207 V8HI_type_node, NULL_TREE);
13208 tree int_ftype_int_v16qi_v16qi
13209 = build_function_type_list (integer_type_node,
13210 integer_type_node, V16QI_type_node,
13211 V16QI_type_node, NULL_TREE);
13212 tree int_ftype_int_v4sf_v4sf
13213 = build_function_type_list (integer_type_node,
13214 integer_type_node, V4SF_type_node,
13215 V4SF_type_node, NULL_TREE);
13216 tree int_ftype_int_v2df_v2df
13217 = build_function_type_list (integer_type_node,
13218 integer_type_node, V2DF_type_node,
13219 V2DF_type_node, NULL_TREE);
13220 tree v2di_ftype_v2di
13221 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
13222 tree v4si_ftype_v4si
13223 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
13224 tree v8hi_ftype_v8hi
13225 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
13226 tree v16qi_ftype_v16qi
13227 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
13228 tree v4sf_ftype_v4sf
13229 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13230 tree v2df_ftype_v2df
13231 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13232 tree void_ftype_pcvoid_int_int
13233 = build_function_type_list (void_type_node,
13234 pcvoid_type_node, integer_type_node,
13235 integer_type_node, NULL_TREE);
13237 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
13238 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
13239 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
13240 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
13241 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
13242 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
13243 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
13244 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
13245 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
13246 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
13247 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
13248 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
13249 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
13250 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
13251 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
13252 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
13253 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
13254 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
13255 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
13256 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
13257 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
13258 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
13259 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
13260 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
13261 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
13262 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
13263 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
13264 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
13265 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
13266 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
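/* altivec.h-level sketch (editorial): the generic forms resolve to these
   overloads by operand type, e.g.
     vector signed int v = vec_ld (0, p);    maps to __builtin_vec_ld,
     vec_st (v, 0, q);                       maps to __builtin_vec_st.  */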
13268 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
13269 VSX_BUILTIN_LXVD2X_V2DF);
13270 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
13271 VSX_BUILTIN_LXVD2X_V2DI);
13272 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
13273 VSX_BUILTIN_LXVW4X_V4SF);
13274 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
13275 VSX_BUILTIN_LXVW4X_V4SI);
13276 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
13277 VSX_BUILTIN_LXVW4X_V8HI);
13278 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
13279 VSX_BUILTIN_LXVW4X_V16QI);
13280 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
13281 VSX_BUILTIN_STXVD2X_V2DF);
13282 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
13283 VSX_BUILTIN_STXVD2X_V2DI);
13284 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
13285 VSX_BUILTIN_STXVW4X_V4SF);
13286 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
13287 VSX_BUILTIN_STXVW4X_V4SI);
13288 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
13289 VSX_BUILTIN_STXVW4X_V8HI);
13290 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
13291 VSX_BUILTIN_STXVW4X_V16QI);
13292 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
13293 VSX_BUILTIN_VEC_LD);
13294 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
13295 VSX_BUILTIN_VEC_ST);
13297 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
13298 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
13299 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
13301 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
13302 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
13303 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
13304 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
13305 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
13306 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
13307 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
13308 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
13309 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
13310 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
13311 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
13312 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
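/* Overload sketch (editorial, via altivec.h; the scale factor 5 is an
   assumed example value):
     vector float f      = vec_ctf (vsi, 5);   fixed point to float,
     vector signed int s = vec_cts (f, 5);     float to fixed point,
   resolving to the VEC_CTF/VEC_CTS entries defined above.  */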
13314 /* Cell builtins. */
13315 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
13316 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
13317 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
13318 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
13320 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
13321 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
13322 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
13323 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
13325 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
13326 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
13327 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
13328 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
13330 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
13331 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
13332 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
13333 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
13335 /* Add the DST variants. */
13336 d = bdesc_dst;
13337 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
13338 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
13340 /* Initialize the predicates. */
13341 d = bdesc_altivec_preds;
13342 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
13344 enum machine_mode mode1;
13345 tree type;
13347 if (rs6000_overloaded_builtin_p (d->code))
13348 mode1 = VOIDmode;
13349 else
13350 mode1 = insn_data[d->icode].operand[1].mode;
13352 switch (mode1)
13354 case VOIDmode:
13355 type = int_ftype_int_opaque_opaque;
13356 break;
13357 case V2DImode:
13358 type = int_ftype_int_v2di_v2di;
13359 break;
13360 case V4SImode:
13361 type = int_ftype_int_v4si_v4si;
13362 break;
13363 case V8HImode:
13364 type = int_ftype_int_v8hi_v8hi;
13365 break;
13366 case V16QImode:
13367 type = int_ftype_int_v16qi_v16qi;
13368 break;
13369 case V4SFmode:
13370 type = int_ftype_int_v4sf_v4sf;
13371 break;
13372 case V2DFmode:
13373 type = int_ftype_int_v2df_v2df;
13374 break;
13375 default:
13376 gcc_unreachable ();
13379 def_builtin (d->name, type, d->code);
13382 /* Initialize the abs* operators. */
13383 d = bdesc_abs;
13384 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
13386 enum machine_mode mode0;
13387 tree type;
13389 mode0 = insn_data[d->icode].operand[0].mode;
13391 switch (mode0)
13393 case V2DImode:
13394 type = v2di_ftype_v2di;
13395 break;
13396 case V4SImode:
13397 type = v4si_ftype_v4si;
13398 break;
13399 case V8HImode:
13400 type = v8hi_ftype_v8hi;
13401 break;
13402 case V16QImode:
13403 type = v16qi_ftype_v16qi;
13404 break;
13405 case V4SFmode:
13406 type = v4sf_ftype_v4sf;
13407 break;
13408 case V2DFmode:
13409 type = v2df_ftype_v2df;
13410 break;
13411 default:
13412 gcc_unreachable ();
13415 def_builtin (d->name, type, d->code);
13418 /* Initialize target builtin that implements
13419 targetm.vectorize.builtin_mask_for_load. */
13421 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
13422 v16qi_ftype_long_pcvoid,
13423 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
13424 BUILT_IN_MD, NULL, NULL_TREE);
13425 TREE_READONLY (decl) = 1;
13426 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
13427 altivec_builtin_mask_for_load = decl;
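/* Editorial sketch of the consumer (realignment scheme assumed,
   simplified): the vectorizer expands a misaligned load roughly as
     msk = __builtin_altivec_mask_for_load (ptr);
     lo  = vec_ld (0, ptr);
     hi  = vec_ld (15, ptr);
     val = vperm of lo/hi selected by msk;
   hence the decl recorded just above.  */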
13429 /* Access to the vec_init patterns. */
13430 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
13431 integer_type_node, integer_type_node,
13432 integer_type_node, NULL_TREE);
13433 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
13435 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
13436 short_integer_type_node,
13437 short_integer_type_node,
13438 short_integer_type_node,
13439 short_integer_type_node,
13440 short_integer_type_node,
13441 short_integer_type_node,
13442 short_integer_type_node, NULL_TREE);
13443 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
13445 ftype = build_function_type_list (V16QI_type_node, char_type_node,
13446 char_type_node, char_type_node,
13447 char_type_node, char_type_node,
13448 char_type_node, char_type_node,
13449 char_type_node, char_type_node,
13450 char_type_node, char_type_node,
13451 char_type_node, char_type_node,
13452 char_type_node, char_type_node,
13453 char_type_node, NULL_TREE);
13454 def_builtin ("__builtin_vec_init_v16qi", ftype,
13455 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
13457 ftype = build_function_type_list (V4SF_type_node, float_type_node,
13458 float_type_node, float_type_node,
13459 float_type_node, NULL_TREE);
13460 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
13462 /* VSX builtins. */
13463 ftype = build_function_type_list (V2DF_type_node, double_type_node,
13464 double_type_node, NULL_TREE);
13465 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
13467 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
13468 intDI_type_node, NULL_TREE);
13469 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
13471 /* Access to the vec_set patterns. */
13472 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
13473 intSI_type_node,
13474 integer_type_node, NULL_TREE);
13475 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
13477 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
13478 intHI_type_node,
13479 integer_type_node, NULL_TREE);
13480 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
13482 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
13483 intQI_type_node,
13484 integer_type_node, NULL_TREE);
13485 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
13487 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
13488 float_type_node,
13489 integer_type_node, NULL_TREE);
13490 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
13492 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
13493 double_type_node,
13494 integer_type_node, NULL_TREE);
13495 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
13497 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
13498 intDI_type_node,
13499 integer_type_node, NULL_TREE);
13500 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
13502 /* Access to the vec_extract patterns. */
13503 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
13504 integer_type_node, NULL_TREE);
13505 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
13507 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
13508 integer_type_node, NULL_TREE);
13509 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
13511 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
13512 integer_type_node, NULL_TREE);
13513 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
13515 ftype = build_function_type_list (float_type_node, V4SF_type_node,
13516 integer_type_node, NULL_TREE);
13517 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
13519 ftype = build_function_type_list (double_type_node, V2DF_type_node,
13520 integer_type_node, NULL_TREE);
13521 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
13523 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
13524 integer_type_node, NULL_TREE);
13525 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
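/* Usage sketch (editorial, values assumed):
     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
     v = __builtin_vec_set_v4si (v, 99, 2);    element 2 becomes 99
     int e = __builtin_vec_ext_v4si (v, 2);    e is now 99
*/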
13528 static void
13529 htm_init_builtins (void)
13531 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
13532 const struct builtin_description *d;
13533 size_t i;
13535 d = bdesc_htm;
13536 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13538 tree op[MAX_HTM_OPERANDS], type;
13539 HOST_WIDE_INT mask = d->mask;
13540 unsigned attr = rs6000_builtin_info[d->code].attr;
13541 bool void_func = (attr & RS6000_BTC_VOID);
13542 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
13543 int nopnds = 0;
13544 tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
13545 : unsigned_type_node;
13547 if ((mask & builtin_mask) != mask)
13549 if (TARGET_DEBUG_BUILTIN)
13550 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
13551 continue;
13554 if (d->name == 0)
13556 if (TARGET_DEBUG_BUILTIN)
13557 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
13558 (long unsigned) i);
13559 continue;
13562 op[nopnds++] = (void_func) ? void_type_node : argtype;
13564 if (attr_args == RS6000_BTC_UNARY)
13565 op[nopnds++] = argtype;
13566 else if (attr_args == RS6000_BTC_BINARY)
13568 op[nopnds++] = argtype;
13569 op[nopnds++] = argtype;
13571 else if (attr_args == RS6000_BTC_TERNARY)
13573 op[nopnds++] = argtype;
13574 op[nopnds++] = argtype;
13575 op[nopnds++] = argtype;
13578 switch (nopnds)
13580 case 1:
13581 type = build_function_type_list (op[0], NULL_TREE);
13582 break;
13583 case 2:
13584 type = build_function_type_list (op[0], op[1], NULL_TREE);
13585 break;
13586 case 3:
13587 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
13588 break;
13589 case 4:
13590 type = build_function_type_list (op[0], op[1], op[2], op[3],
13591 NULL_TREE);
13592 break;
13593 default:
13594 gcc_unreachable ();
13597 def_builtin (d->name, type, d->code);
13601 /* Hash function for builtin functions with up to 3 arguments and a return
13602 type. */
13603 static unsigned
13604 builtin_hash_function (const void *hash_entry)
13606 unsigned ret = 0;
13607 int i;
13608 const struct builtin_hash_struct *bh =
13609 (const struct builtin_hash_struct *) hash_entry;
13611 for (i = 0; i < 4; i++)
13613 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
13614 ret = (ret * 2) + bh->uns_p[i];
13617 return ret;
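/* E.g. (editorial note): a V4SI unary builtin and its unsigned variant
   share modes but differ in the uns_p[] bits, so they hash to different
   slots and receive distinct function types.  */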
13620 /* Compare builtin hash entries H1 and H2 for equivalence. */
13621 static int
13622 builtin_hash_eq (const void *h1, const void *h2)
13624 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
13625 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
13627 return ((p1->mode[0] == p2->mode[0])
13628 && (p1->mode[1] == p2->mode[1])
13629 && (p1->mode[2] == p2->mode[2])
13630 && (p1->mode[3] == p2->mode[3])
13631 && (p1->uns_p[0] == p2->uns_p[0])
13632 && (p1->uns_p[1] == p2->uns_p[1])
13633 && (p1->uns_p[2] == p2->uns_p[2])
13634 && (p1->uns_p[3] == p2->uns_p[3]));
13637 /* Map types for builtin functions with an explicit return type and up to 3
13638 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
13639 of the missing arguments. */
13640 static tree
13641 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
13642 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
13643 enum rs6000_builtins builtin, const char *name)
13645 struct builtin_hash_struct h;
13646 struct builtin_hash_struct *h2;
13647 void **found;
13648 int num_args = 3;
13649 int i;
13650 tree ret_type = NULL_TREE;
13651 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
13653 /* Create builtin_hash_table. */
13654 if (builtin_hash_table == NULL)
13655 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
13656 builtin_hash_eq, NULL);
13658 h.type = NULL_TREE;
13659 h.mode[0] = mode_ret;
13660 h.mode[1] = mode_arg0;
13661 h.mode[2] = mode_arg1;
13662 h.mode[3] = mode_arg2;
13663 h.uns_p[0] = 0;
13664 h.uns_p[1] = 0;
13665 h.uns_p[2] = 0;
13666 h.uns_p[3] = 0;
13668 /* If the builtin produces unsigned results or takes unsigned arguments,
13669 and it is returned as a decl for the vectorizer (such as widening
13670 multiplies or permutes), make sure the arguments and return value
13671 are typed correctly. */
13672 switch (builtin)
13674 /* unsigned 1 argument functions. */
13675 case CRYPTO_BUILTIN_VSBOX:
13676 case P8V_BUILTIN_VGBBD:
13677 h.uns_p[0] = 1;
13678 h.uns_p[1] = 1;
13679 break;
13681 /* unsigned 2 argument functions. */
13682 case ALTIVEC_BUILTIN_VMULEUB_UNS:
13683 case ALTIVEC_BUILTIN_VMULEUH_UNS:
13684 case ALTIVEC_BUILTIN_VMULOUB_UNS:
13685 case ALTIVEC_BUILTIN_VMULOUH_UNS:
13686 case CRYPTO_BUILTIN_VCIPHER:
13687 case CRYPTO_BUILTIN_VCIPHERLAST:
13688 case CRYPTO_BUILTIN_VNCIPHER:
13689 case CRYPTO_BUILTIN_VNCIPHERLAST:
13690 case CRYPTO_BUILTIN_VPMSUMB:
13691 case CRYPTO_BUILTIN_VPMSUMH:
13692 case CRYPTO_BUILTIN_VPMSUMW:
13693 case CRYPTO_BUILTIN_VPMSUMD:
13694 case CRYPTO_BUILTIN_VPMSUM:
13695 h.uns_p[0] = 1;
13696 h.uns_p[1] = 1;
13697 h.uns_p[2] = 1;
13698 break;
13700 /* unsigned 3 argument functions. */
13701 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
13702 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
13703 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
13704 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
13705 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
13706 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
13707 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
13708 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
13709 case VSX_BUILTIN_VPERM_16QI_UNS:
13710 case VSX_BUILTIN_VPERM_8HI_UNS:
13711 case VSX_BUILTIN_VPERM_4SI_UNS:
13712 case VSX_BUILTIN_VPERM_2DI_UNS:
13713 case VSX_BUILTIN_XXSEL_16QI_UNS:
13714 case VSX_BUILTIN_XXSEL_8HI_UNS:
13715 case VSX_BUILTIN_XXSEL_4SI_UNS:
13716 case VSX_BUILTIN_XXSEL_2DI_UNS:
13717 case CRYPTO_BUILTIN_VPERMXOR:
13718 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
13719 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
13720 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
13721 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
13722 case CRYPTO_BUILTIN_VSHASIGMAW:
13723 case CRYPTO_BUILTIN_VSHASIGMAD:
13724 case CRYPTO_BUILTIN_VSHASIGMA:
13725 h.uns_p[0] = 1;
13726 h.uns_p[1] = 1;
13727 h.uns_p[2] = 1;
13728 h.uns_p[3] = 1;
13729 break;
13731 /* signed permute functions with unsigned char mask. */
13732 case ALTIVEC_BUILTIN_VPERM_16QI:
13733 case ALTIVEC_BUILTIN_VPERM_8HI:
13734 case ALTIVEC_BUILTIN_VPERM_4SI:
13735 case ALTIVEC_BUILTIN_VPERM_4SF:
13736 case ALTIVEC_BUILTIN_VPERM_2DI:
13737 case ALTIVEC_BUILTIN_VPERM_2DF:
13738 case VSX_BUILTIN_VPERM_16QI:
13739 case VSX_BUILTIN_VPERM_8HI:
13740 case VSX_BUILTIN_VPERM_4SI:
13741 case VSX_BUILTIN_VPERM_4SF:
13742 case VSX_BUILTIN_VPERM_2DI:
13743 case VSX_BUILTIN_VPERM_2DF:
13744 h.uns_p[3] = 1;
13745 break;
13747 /* unsigned args, signed return. */
13748 case VSX_BUILTIN_XVCVUXDDP_UNS:
13749 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
13750 h.uns_p[1] = 1;
13751 break;
13753 /* signed args, unsigned return. */
13754 case VSX_BUILTIN_XVCVDPUXDS_UNS:
13755 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
13756 h.uns_p[0] = 1;
13757 break;
13759 default:
13760 break;
13763 /* Figure out how many args are present. */
13764 while (num_args > 0 && h.mode[num_args] == VOIDmode)
13765 num_args--;
13767 if (num_args == 0)
13768 fatal_error ("internal error: builtin function %s had no type", name);
13770 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
13771 if (!ret_type && h.uns_p[0])
13772 ret_type = builtin_mode_to_type[h.mode[0]][0];
13774 if (!ret_type)
13775 fatal_error ("internal error: builtin function %s had an unexpected "
13776 "return type %s", name, GET_MODE_NAME (h.mode[0]));
13778 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
13779 arg_type[i] = NULL_TREE;
13781 for (i = 0; i < num_args; i++)
13783 int m = (int) h.mode[i+1];
13784 int uns_p = h.uns_p[i+1];
13786 arg_type[i] = builtin_mode_to_type[m][uns_p];
13787 if (!arg_type[i] && uns_p)
13788 arg_type[i] = builtin_mode_to_type[m][0];
13790 if (!arg_type[i])
13791 fatal_error ("internal error: builtin function %s, argument %d "
13792 "had unexpected argument type %s", name, i,
13793 GET_MODE_NAME (m));
13796 found = htab_find_slot (builtin_hash_table, &h, INSERT);
13797 if (*found == NULL)
13799 h2 = ggc_alloc_builtin_hash_struct ();
13800 *h2 = h;
13801 *found = (void *)h2;
13803 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
13804 arg_type[2], NULL_TREE);
13807 return ((struct builtin_hash_struct *)(*found))->type;
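/* Call sketch (editorial, using the recip builtin defined earlier):
     builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                            RS6000_BUILTIN_RECIP, "__builtin_recipdiv")
   returns the type "double (double, double)"; a later request with the
   same modes and signedness hits the hash table and reuses that type.  */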
13810 static void
13811 rs6000_common_init_builtins (void)
13813 const struct builtin_description *d;
13814 size_t i;
13816 tree opaque_ftype_opaque = NULL_TREE;
13817 tree opaque_ftype_opaque_opaque = NULL_TREE;
13818 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
13819 tree v2si_ftype_qi = NULL_TREE;
13820 tree v2si_ftype_v2si_qi = NULL_TREE;
13821 tree v2si_ftype_int_qi = NULL_TREE;
13822 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
13824 if (!TARGET_PAIRED_FLOAT)
13826 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
13827 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
13830 /* Paired and SPE builtins are only available if you build a compiler with
13831 the appropriate options, so only create those builtins with the
13832 appropriate compiler option. Create Altivec and VSX builtins on machines
13833 with at least the general purpose extensions (970 and newer) to allow the
13834 use of the target attribute. */
13836 if (TARGET_EXTRA_BUILTINS)
13837 builtin_mask |= RS6000_BTM_COMMON;
13839 /* Add the ternary operators. */
13840 d = bdesc_3arg;
13841 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
13843 tree type;
13844 HOST_WIDE_INT mask = d->mask;
13846 if ((mask & builtin_mask) != mask)
13848 if (TARGET_DEBUG_BUILTIN)
13849 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
13850 continue;
13853 if (rs6000_overloaded_builtin_p (d->code))
13855 if (! (type = opaque_ftype_opaque_opaque_opaque))
13856 type = opaque_ftype_opaque_opaque_opaque
13857 = build_function_type_list (opaque_V4SI_type_node,
13858 opaque_V4SI_type_node,
13859 opaque_V4SI_type_node,
13860 opaque_V4SI_type_node,
13861 NULL_TREE);
13863 else
13865 enum insn_code icode = d->icode;
13866 if (d->name == 0)
13868 if (TARGET_DEBUG_BUILTIN)
13869 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
13870 (long unsigned)i);
13872 continue;
13875 if (icode == CODE_FOR_nothing)
13877 if (TARGET_DEBUG_BUILTIN)
13878 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
13879 d->name);
13881 continue;
13884 type = builtin_function_type (insn_data[icode].operand[0].mode,
13885 insn_data[icode].operand[1].mode,
13886 insn_data[icode].operand[2].mode,
13887 insn_data[icode].operand[3].mode,
13888 d->code, d->name);
13891 def_builtin (d->name, type, d->code);
13894 /* Add the binary operators. */
13895 d = bdesc_2arg;
13896 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13898 enum machine_mode mode0, mode1, mode2;
13899 tree type;
13900 HOST_WIDE_INT mask = d->mask;
13902 if ((mask & builtin_mask) != mask)
13904 if (TARGET_DEBUG_BUILTIN)
13905 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
13906 continue;
13909 if (rs6000_overloaded_builtin_p (d->code))
13911 if (! (type = opaque_ftype_opaque_opaque))
13912 type = opaque_ftype_opaque_opaque
13913 = build_function_type_list (opaque_V4SI_type_node,
13914 opaque_V4SI_type_node,
13915 opaque_V4SI_type_node,
13916 NULL_TREE);
13918 else
13920 enum insn_code icode = d->icode;
13921 if (d->name == 0)
13923 if (TARGET_DEBUG_BUILTIN)
13924 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
13925 (long unsigned)i);
13927 continue;
13930 if (icode == CODE_FOR_nothing)
13932 if (TARGET_DEBUG_BUILTIN)
13933 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
13934 d->name);
13936 continue;
13939 mode0 = insn_data[icode].operand[0].mode;
13940 mode1 = insn_data[icode].operand[1].mode;
13941 mode2 = insn_data[icode].operand[2].mode;
13943 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
13945 if (! (type = v2si_ftype_v2si_qi))
13946 type = v2si_ftype_v2si_qi
13947 = build_function_type_list (opaque_V2SI_type_node,
13948 opaque_V2SI_type_node,
13949 char_type_node,
13950 NULL_TREE);
13953 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
13954 && mode2 == QImode)
13956 if (! (type = v2si_ftype_int_qi))
13957 type = v2si_ftype_int_qi
13958 = build_function_type_list (opaque_V2SI_type_node,
13959 integer_type_node,
13960 char_type_node,
13961 NULL_TREE);
13964 else
13965 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
13966 d->code, d->name);
13969 def_builtin (d->name, type, d->code);
13972 /* Add the simple unary operators. */
13973 d = bdesc_1arg;
13974 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13976 enum machine_mode mode0, mode1;
13977 tree type;
13978 HOST_WIDE_INT mask = d->mask;
13980 if ((mask & builtin_mask) != mask)
13982 if (TARGET_DEBUG_BUILTIN)
13983 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
13984 continue;
13987 if (rs6000_overloaded_builtin_p (d->code))
13989 if (! (type = opaque_ftype_opaque))
13990 type = opaque_ftype_opaque
13991 = build_function_type_list (opaque_V4SI_type_node,
13992 opaque_V4SI_type_node,
13993 NULL_TREE);
13995 else
13997 enum insn_code icode = d->icode;
13998 if (d->name == 0)
14000 if (TARGET_DEBUG_BUILTIN)
14001 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
14002 (long unsigned)i);
14004 continue;
14007 if (icode == CODE_FOR_nothing)
14009 if (TARGET_DEBUG_BUILTIN)
14010 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
14011 d->name);
14013 continue;
14016 mode0 = insn_data[icode].operand[0].mode;
14017 mode1 = insn_data[icode].operand[1].mode;
14019 if (mode0 == V2SImode && mode1 == QImode)
14021 if (! (type = v2si_ftype_qi))
14022 type = v2si_ftype_qi
14023 = build_function_type_list (opaque_V2SI_type_node,
14024 char_type_node,
14025 NULL_TREE);
14028 else
14029 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
14030 d->code, d->name);
14033 def_builtin (d->name, type, d->code);
14037 static void
14038 rs6000_init_libfuncs (void)
14040 if (!TARGET_IEEEQUAD)
14041 /* AIX/Darwin/64-bit Linux quad floating point routines. */
14042 if (!TARGET_XL_COMPAT)
14044 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
14045 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
14046 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
14047 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
14049 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
14051 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
14052 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
14053 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
14054 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
14055 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
14056 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
14057 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
14059 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
14060 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
14061 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
14062 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
14063 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
14064 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
14065 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
14066 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
14069 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
14070 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
14072 else
14074 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
14075 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
14076 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
14077 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
14079 else
14081 /* 32-bit SVR4 quad floating point routines. */
14083 set_optab_libfunc (add_optab, TFmode, "_q_add");
14084 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
14085 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
14086 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
14087 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
14088 if (TARGET_PPC_GPOPT)
14089 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
14091 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
14092 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
14093 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
14094 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
14095 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
14096 set_optab_libfunc (le_optab, TFmode, "_q_fle");
14098 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
14099 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
14100 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
14101 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
14102 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
14103 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
14104 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
14105 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
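/* Effect sketch (editorial): with the default IBM long double on
   AIX/Darwin/64-bit Linux, "a + b" for long double lowers to a call to
   __gcc_qadd (_xlqadd under -mxl-compat); when TFmode is IEEE quad
   (the 32-bit SVR4 case above) it becomes _q_add.  */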
14110 /* Expand a block clear operation, and return 1 if successful. Return 0
14111 if we should let the compiler generate normal code.
14113 operands[0] is the destination
14114 operands[1] is the length
14115 operands[3] is the alignment */
14118 expand_block_clear (rtx operands[])
14120 rtx orig_dest = operands[0];
14121 rtx bytes_rtx = operands[1];
14122 rtx align_rtx = operands[3];
14123 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
14124 HOST_WIDE_INT align;
14125 HOST_WIDE_INT bytes;
14126 int offset;
14127 int clear_bytes;
14128 int clear_step;
14130 /* If this is not a fixed size clear, just call memset. */
14131 if (! constp)
14132 return 0;
14134 /* This must be a fixed size alignment. */
14135 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
14136 align = INTVAL (align_rtx) * BITS_PER_UNIT;
14138 /* Anything to clear? */
14139 bytes = INTVAL (bytes_rtx);
14140 if (bytes <= 0)
14141 return 1;
14143 /* Use the builtin memset after a point, to avoid huge code bloat.
14144 When optimize_size, avoid any significant code bloat; calling
14145 memset is about 4 instructions, so allow for one instruction to
14146 load zero and three to do clearing. */
14147 if (TARGET_ALTIVEC && align >= 128)
14148 clear_step = 16;
14149 else if (TARGET_POWERPC64 && align >= 32)
14150 clear_step = 8;
14151 else if (TARGET_SPE && align >= 64)
14152 clear_step = 8;
14153 else
14154 clear_step = 4;
14156 if (optimize_size && bytes > 3 * clear_step)
14157 return 0;
14158 if (! optimize_size && bytes > 8 * clear_step)
14159 return 0;
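/* Worked limits (editorial, from the steps above): with AltiVec and
   128-bit alignment clear_step is 16, so at most 3*16 = 48 bytes are
   cleared inline at -Os and 8*16 = 128 bytes otherwise; larger clears
   return 0 and fall back to a memset call.  */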
14161 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
14163 enum machine_mode mode = BLKmode;
14164 rtx dest;
14166 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
14168 clear_bytes = 16;
14169 mode = V4SImode;
14171 else if (bytes >= 8 && TARGET_SPE && align >= 64)
14173 clear_bytes = 8;
14174 mode = V2SImode;
14176 else if (bytes >= 8 && TARGET_POWERPC64
14177 /* 64-bit loads and stores require word-aligned
14178 displacements. */
14179 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
14181 clear_bytes = 8;
14182 mode = DImode;
14184 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
14185 { /* clear 4 bytes */
14186 clear_bytes = 4;
14187 mode = SImode;
14189 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
14190 { /* clear 2 bytes */
14191 clear_bytes = 2;
14192 mode = HImode;
14194 else /* clear 1 byte at a time */
14196 clear_bytes = 1;
14197 mode = QImode;
14200 dest = adjust_address (orig_dest, mode, offset);
14202 emit_move_insn (dest, CONST0_RTX (mode));
14205 return 1;
14209 /* Expand a block move operation, and return 1 if successful. Return 0
14210 if we should let the compiler generate normal code.
14212 operands[0] is the destination
14213 operands[1] is the source
14214 operands[2] is the length
14215 operands[3] is the alignment */
14217 #define MAX_MOVE_REG 4
14220 expand_block_move (rtx operands[])
14222 rtx orig_dest = operands[0];
14223 rtx orig_src = operands[1];
14224 rtx bytes_rtx = operands[2];
14225 rtx align_rtx = operands[3];
14226 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
14227 int align;
14228 int bytes;
14229 int offset;
14230 int move_bytes;
14231 rtx stores[MAX_MOVE_REG];
14232 int num_reg = 0;
14234 /* If this is not a fixed size move, just call memcpy */
14235 if (! constp)
14236 return 0;
14238 /* This must be a fixed size alignment */
14239 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
14240 align = INTVAL (align_rtx) * BITS_PER_UNIT;
14242 /* Anything to move? */
14243 bytes = INTVAL (bytes_rtx);
14244 if (bytes <= 0)
14245 return 1;
14247 if (bytes > rs6000_block_move_inline_limit)
14248 return 0;
14250 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
14252 union {
14253 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
14254 rtx (*mov) (rtx, rtx);
14255 } gen_func;
14256 enum machine_mode mode = BLKmode;
14257 rtx src, dest;
14259 /* Altivec first, since it will be faster than a string move
14260 when it applies, and usually not significantly larger. */
14261 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
14263 move_bytes = 16;
14264 mode = V4SImode;
14265 gen_func.mov = gen_movv4si;
14267 else if (TARGET_SPE && bytes >= 8 && align >= 64)
14269 move_bytes = 8;
14270 mode = V2SImode;
14271 gen_func.mov = gen_movv2si;
14273 else if (TARGET_STRING
14274 && bytes > 24 /* move up to 32 bytes at a time */
14275 && ! fixed_regs[5]
14276 && ! fixed_regs[6]
14277 && ! fixed_regs[7]
14278 && ! fixed_regs[8]
14279 && ! fixed_regs[9]
14280 && ! fixed_regs[10]
14281 && ! fixed_regs[11]
14282 && ! fixed_regs[12])
14284 move_bytes = (bytes > 32) ? 32 : bytes;
14285 gen_func.movmemsi = gen_movmemsi_8reg;
14287 else if (TARGET_STRING
14288 && bytes > 16 /* move up to 24 bytes at a time */
14289 && ! fixed_regs[5]
14290 && ! fixed_regs[6]
14291 && ! fixed_regs[7]
14292 && ! fixed_regs[8]
14293 && ! fixed_regs[9]
14294 && ! fixed_regs[10])
14296 move_bytes = (bytes > 24) ? 24 : bytes;
14297 gen_func.movmemsi = gen_movmemsi_6reg;
14299 else if (TARGET_STRING
14300 && bytes > 8 /* move up to 16 bytes at a time */
14301 && ! fixed_regs[5]
14302 && ! fixed_regs[6]
14303 && ! fixed_regs[7]
14304 && ! fixed_regs[8])
14306 move_bytes = (bytes > 16) ? 16 : bytes;
14307 gen_func.movmemsi = gen_movmemsi_4reg;
14309 else if (bytes >= 8 && TARGET_POWERPC64
14310 /* 64-bit loads and stores require word-aligned
14311 displacements. */
14312 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
14314 move_bytes = 8;
14315 mode = DImode;
14316 gen_func.mov = gen_movdi;
14318 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
14319 { /* move up to 8 bytes at a time */
14320 move_bytes = (bytes > 8) ? 8 : bytes;
14321 gen_func.movmemsi = gen_movmemsi_2reg;
14323 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
14324 { /* move 4 bytes */
14325 move_bytes = 4;
14326 mode = SImode;
14327 gen_func.mov = gen_movsi;
14329 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
14330 { /* move 2 bytes */
14331 move_bytes = 2;
14332 mode = HImode;
14333 gen_func.mov = gen_movhi;
14335 else if (TARGET_STRING && bytes > 1)
14336 { /* move up to 4 bytes at a time */
14337 move_bytes = (bytes > 4) ? 4 : bytes;
14338 gen_func.movmemsi = gen_movmemsi_1reg;
14340 else /* move 1 byte at a time */
14342 move_bytes = 1;
14343 mode = QImode;
14344 gen_func.mov = gen_movqi;
14347 src = adjust_address (orig_src, mode, offset);
14348 dest = adjust_address (orig_dest, mode, offset);
14350 if (mode != BLKmode)
14352 rtx tmp_reg = gen_reg_rtx (mode);
14354 emit_insn ((*gen_func.mov) (tmp_reg, src));
14355 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
14358 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
14360 int i;
14361 for (i = 0; i < num_reg; i++)
14362 emit_insn (stores[i]);
14363 num_reg = 0;
14366 if (mode == BLKmode)
14368 /* Move the address into scratch registers. The movmemsi
14369 patterns require zero offset. */
14370 if (!REG_P (XEXP (src, 0)))
14372 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
14373 src = replace_equiv_address (src, src_reg);
14375 set_mem_size (src, move_bytes);
14377 if (!REG_P (XEXP (dest, 0)))
14379 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
14380 dest = replace_equiv_address (dest, dest_reg);
14382 set_mem_size (dest, move_bytes);
14384 emit_insn ((*gen_func.movmemsi) (dest, src,
14385 GEN_INT (move_bytes & 31),
14386 align_rtx));
14390 return 1;
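/* As a worked example, a 13-byte copy with 32-bit alignment and no string
   instructions expands to three SImode register moves followed by one
   QImode move; the buffered stores are flushed whenever MAX_MOVE_REG loads
   are pending or the final chunk has been loaded. */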
14394 /* Return a string to perform a load_multiple operation.
14395 operands[0] is the vector.
14396 operands[1] is the source address.
14397 operands[2] is the first destination register. */
14399 const char *
14400 rs6000_output_load_multiple (rtx operands[3])
14402 /* We have to handle the case where the pseudo used to contain the address
14403 is assigned to one of the output registers. */
14404 int i, j;
14405 int words = XVECLEN (operands[0], 0);
14406 rtx xop[10];
14408 if (XVECLEN (operands[0], 0) == 1)
14409 return "lwz %2,0(%1)";
14411 for (i = 0; i < words; i++)
14412 if (refers_to_regno_p (REGNO (operands[2]) + i,
14413 REGNO (operands[2]) + i + 1, operands[1], 0))
14415 if (i == words-1)
14417 xop[0] = GEN_INT (4 * (words-1));
14418 xop[1] = operands[1];
14419 xop[2] = operands[2];
14420 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
14421 return "";
14423 else if (i == 0)
14425 xop[0] = GEN_INT (4 * (words-1));
14426 xop[1] = operands[1];
14427 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
14428 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
14429 return "";
14431 else
14433 for (j = 0; j < words; j++)
14434 if (j != i)
14436 xop[0] = GEN_INT (j * 4);
14437 xop[1] = operands[1];
14438 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
14439 output_asm_insn ("lwz %2,%0(%1)", xop);
14441 xop[0] = GEN_INT (i * 4);
14442 xop[1] = operands[1];
14443 output_asm_insn ("lwz %1,%0(%1)", xop);
14444 return "";
14448 return "lswi %2,%1,%N0";
14452 /* A validation routine: say whether CODE, a condition code, and MODE
14453 match. The other alternatives either don't make sense or should
14454 never be generated. */
14456 void
14457 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
14459 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
14460 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
14461 && GET_MODE_CLASS (mode) == MODE_CC);
14463 /* These don't make sense. */
14464 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
14465 || mode != CCUNSmode);
14467 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
14468 || mode == CCUNSmode);
14470 gcc_assert (mode == CCFPmode
14471 || (code != ORDERED && code != UNORDERED
14472 && code != UNEQ && code != LTGT
14473 && code != UNGT && code != UNLT
14474 && code != UNGE && code != UNLE));
14476 /* These should never be generated except for
14477 flag_finite_math_only. */
14478 gcc_assert (mode != CCFPmode
14479 || flag_finite_math_only
14480 || (code != LE && code != GE
14481 && code != UNEQ && code != LTGT
14482 && code != UNGT && code != UNLT));
14484 /* These are invalid; the information is not there. */
14485 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
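/* For example, GTU requires CCUNSmode, an unordered test such as UNLT
   requires CCFPmode, and CCEQmode carries information only for EQ and NE;
   any other pairing trips one of the assertions above. */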
14489 /* Return 1 if ANDOP is a mask that has no bits on that are not in the
14490 mask required to convert the result of a rotate insn into a shift
14491 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
14494 includes_lshift_p (rtx shiftop, rtx andop)
14496 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
14498 shift_mask <<= INTVAL (shiftop);
14500 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
14503 /* Similar, but for right shift. */
14506 includes_rshift_p (rtx shiftop, rtx andop)
14508 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
14510 shift_mask >>= INTVAL (shiftop);
14512 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
14515 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
14516 to perform a left shift. It must have exactly SHIFTOP least
14517 significant 0's, then one or more 1's, then zero or more 0's. */
14520 includes_rldic_lshift_p (rtx shiftop, rtx andop)
14522 if (GET_CODE (andop) == CONST_INT)
14524 HOST_WIDE_INT c, lsb, shift_mask;
14526 c = INTVAL (andop);
14527 if (c == 0 || c == ~0)
14528 return 0;
14530 shift_mask = ~0;
14531 shift_mask <<= INTVAL (shiftop);
14533 /* Find the least significant one bit. */
14534 lsb = c & -c;
14536 /* It must coincide with the LSB of the shift mask. */
14537 if (-lsb != shift_mask)
14538 return 0;
14540 /* Invert to look for the next transition (if any). */
14541 c = ~c;
14543 /* Remove the low group of ones (originally low group of zeros). */
14544 c &= -lsb;
14546 /* Again find the lsb, and check we have all 1's above. */
14547 lsb = c & -c;
14548 return c == -lsb;
14550 else
14551 return 0;
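/* For example, with SHIFTOP = 4 the mask 0xff0 is accepted (exactly four
   low zeros, then one contiguous block of ones), while 0xff8 is rejected
   because it has only three low zeros. */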
14554 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
14555 to perform a left shift. It must have SHIFTOP or more least
14556 significant 0's, with the remainder of the word 1's. */
14559 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
14561 if (GET_CODE (andop) == CONST_INT)
14563 HOST_WIDE_INT c, lsb, shift_mask;
14565 shift_mask = ~0;
14566 shift_mask <<= INTVAL (shiftop);
14567 c = INTVAL (andop);
14569 /* Find the least significant one bit. */
14570 lsb = c & -c;
14572 /* It must be covered by the shift mask.
14573 This test also rejects c == 0. */
14574 if ((lsb & shift_mask) == 0)
14575 return 0;
14577 /* Check we have all 1's above the transition, and reject all 1's. */
14578 return c == -lsb && lsb != 1;
14580 else
14581 return 0;
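/* For example, with SHIFTOP = 4 a mask with the low four bits clear and
   every higher bit set is accepted, while a mask of all ones is rejected
   because its lowest set bit lies below the shift amount. */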
14584 /* Return 1 if the operands will generate valid arguments to the rlwimi
14585 instruction for an insert with a right shift in 64-bit mode. The mask may
14586 not start on the first bit or stop on the last bit because the wrap-around
14587 effects of the instruction do not correspond to the semantics of the RTL insn. */
14590 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
14592 if (INTVAL (startop) > 32
14593 && INTVAL (startop) < 64
14594 && INTVAL (sizeop) > 1
14595 && INTVAL (sizeop) + INTVAL (startop) < 64
14596 && INTVAL (shiftop) > 0
14597 && INTVAL (sizeop) + INTVAL (shiftop) < 32
14598 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
14599 return 1;
14601 return 0;
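/* For example, SIZEOP = 8, STARTOP = 40 and SHIFTOP = 10 pass every test
   above, so an 8-bit insert at bit 40 with a right shift of 10 can be done
   with rlwimi. */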
14604 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
14605 for lfq and stfq insns iff the registers are hard registers. */
14608 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
14610 /* We might have been passed a SUBREG. */
14611 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
14612 return 0;
14614 /* We might have been passed non floating point registers. */
14615 if (!FP_REGNO_P (REGNO (reg1))
14616 || !FP_REGNO_P (REGNO (reg2)))
14617 return 0;
14619 return (REGNO (reg1) == REGNO (reg2) - 1);
14622 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
14623 addr1 and addr2 must be in consecutive memory locations
14624 (addr2 == addr1 + 8). */
14627 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
14629 rtx addr1, addr2;
14630 unsigned int reg1, reg2;
14631 int offset1, offset2;
14633 /* The mems cannot be volatile. */
14634 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
14635 return 0;
14637 addr1 = XEXP (mem1, 0);
14638 addr2 = XEXP (mem2, 0);
14640 /* Extract an offset (if used) from the first addr. */
14641 if (GET_CODE (addr1) == PLUS)
14643 /* If not a REG, return zero. */
14644 if (GET_CODE (XEXP (addr1, 0)) != REG)
14645 return 0;
14646 else
14648 reg1 = REGNO (XEXP (addr1, 0));
14649 /* The offset must be constant! */
14650 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
14651 return 0;
14652 offset1 = INTVAL (XEXP (addr1, 1));
14655 else if (GET_CODE (addr1) != REG)
14656 return 0;
14657 else
14659 reg1 = REGNO (addr1);
14660 /* This was a simple (mem (reg)) expression. Offset is 0. */
14661 offset1 = 0;
14664 /* And now for the second addr. */
14665 if (GET_CODE (addr2) == PLUS)
14667 /* If not a REG, return zero. */
14668 if (GET_CODE (XEXP (addr2, 0)) != REG)
14669 return 0;
14670 else
14672 reg2 = REGNO (XEXP (addr2, 0));
14673 /* The offset must be constant. */
14674 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
14675 return 0;
14676 offset2 = INTVAL (XEXP (addr2, 1));
14679 else if (GET_CODE (addr2) != REG)
14680 return 0;
14681 else
14683 reg2 = REGNO (addr2);
14684 /* This was a simple (mem (reg)) expression. Offset is 0. */
14685 offset2 = 0;
14688 /* Both of these must have the same base register. */
14689 if (reg1 != reg2)
14690 return 0;
14692 /* The offset for the second addr must be 8 more than the first addr. */
14693 if (offset2 != offset1 + 8)
14694 return 0;
14696 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
14697 instructions. */
14698 return 1;
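/* For example, (mem (plus r3 16)) and (mem (plus r3 24)) qualify: both use
   base register r3 and the offsets differ by exactly 8. */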
14703 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
14705 static bool eliminated = false;
14706 rtx ret;
14708 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
14709 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14710 else
14712 rtx mem = cfun->machine->sdmode_stack_slot;
14713 gcc_assert (mem != NULL_RTX);
14715 if (!eliminated)
14717 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
14718 cfun->machine->sdmode_stack_slot = mem;
14719 eliminated = true;
14721 ret = mem;
14724 if (TARGET_DEBUG_ADDR)
14726 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
14727 GET_MODE_NAME (mode));
14728 if (!ret)
14729 fprintf (stderr, "\tNULL_RTX\n");
14730 else
14731 debug_rtx (ret);
14734 return ret;
14737 static tree
14738 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
14740 /* Don't walk into types. */
14741 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
14743 *walk_subtrees = 0;
14744 return NULL_TREE;
14747 switch (TREE_CODE (*tp))
14749 case VAR_DECL:
14750 case PARM_DECL:
14751 case FIELD_DECL:
14752 case RESULT_DECL:
14753 case SSA_NAME:
14754 case REAL_CST:
14755 case MEM_REF:
14756 case VIEW_CONVERT_EXPR:
14757 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
14758 return *tp;
14759 break;
14760 default:
14761 break;
14764 return NULL_TREE;
14767 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
14768 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
14769 only work on the traditional altivec registers, note if an altivec register
14770 was chosen. */
14772 static enum rs6000_reg_type
14773 register_to_reg_type (rtx reg, bool *is_altivec)
14775 HOST_WIDE_INT regno;
14776 enum reg_class rclass;
14778 if (GET_CODE (reg) == SUBREG)
14779 reg = SUBREG_REG (reg);
14781 if (!REG_P (reg))
14782 return NO_REG_TYPE;
14784 regno = REGNO (reg);
14785 if (regno >= FIRST_PSEUDO_REGISTER)
14787 if (!lra_in_progress && !reload_in_progress && !reload_completed)
14788 return PSEUDO_REG_TYPE;
14790 regno = true_regnum (reg);
14791 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
14792 return PSEUDO_REG_TYPE;
14795 gcc_assert (regno >= 0);
14797 if (is_altivec && ALTIVEC_REGNO_P (regno))
14798 *is_altivec = true;
14800 rclass = rs6000_regno_regclass[regno];
14801 return reg_class_to_reg_type[(int)rclass];
14804 /* Helper function for rs6000_secondary_reload to return true if a move to a
14805 different register class is really a simple move. */
14807 static bool
14808 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
14809 enum rs6000_reg_type from_type,
14810 enum machine_mode mode)
14812 int size;
14814 /* Add support for various direct moves available. In this function, we only
14815 look at cases where we don't need any extra registers, and one or more
14816 simple move insns are issued. At present, 32-bit integers are not allowed
14817 in FPR/VSX registers. Single precision binary floating point is not a simple
14818 move because we need to convert to the single precision memory layout.
14819 The 4-byte SDmode can be moved. */
14820 size = GET_MODE_SIZE (mode);
14821 if (TARGET_DIRECT_MOVE
14822 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
14823 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14824 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
14825 return true;
14827 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
14828 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
14829 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
14830 return true;
14832 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
14833 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
14834 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
14835 return true;
14837 return false;
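/* For example, a DImode move between a GPR and a VSX register on a 64-bit
   target with direct moves enabled is a single mtvsrd or mfvsrd, so the
   first test above treats it as a simple move needing no scratch. */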
14840 /* Power8 helper function for rs6000_secondary_reload; handle all of the
14841 special direct moves that involve allocating an extra register. Return
14842 true if such a move is available, recording the helper's insn code and
14843 extra cost in SRI, or false if not. */
14845 static bool
14846 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
14847 enum rs6000_reg_type from_type,
14848 enum machine_mode mode,
14849 secondary_reload_info *sri,
14850 bool altivec_p)
14852 bool ret = false;
14853 enum insn_code icode = CODE_FOR_nothing;
14854 int cost = 0;
14855 int size = GET_MODE_SIZE (mode);
14857 if (TARGET_POWERPC64)
14859 if (size == 16)
14861 /* Handle moving 128-bit values from GPRs to VSX registers on
14862 power8 when running in 64-bit mode using XXPERMDI to glue the two
14863 64-bit values back together. */
14864 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
14866 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
14867 icode = reg_addr[mode].reload_vsx_gpr;
14870 /* Handle moving 128-bit values from VSX registers to GPRs on
14871 power8 when running in 64-bit mode using XXPERMDI to get access to the
14872 bottom 64-bit value. */
14873 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14875 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
14876 icode = reg_addr[mode].reload_gpr_vsx;
14880 else if (mode == SFmode)
14882 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14884 cost = 3; /* xscvdpspn, mfvsrd, and. */
14885 icode = reg_addr[mode].reload_gpr_vsx;
14888 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
14890 cost = 2; /* mtvsrwz, xscvspdpn. */
14891 icode = reg_addr[mode].reload_vsx_gpr;
14896 if (TARGET_POWERPC64 && size == 16)
14898 /* Handle moving 128-bit values from GPRs to VSX registers on
14899 power8 when running in 64-bit mode using XXPERMDI to glue the two
14900 64-bit values back together. */
14901 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
14903 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
14904 icode = reg_addr[mode].reload_vsx_gpr;
14907 /* Handle moving 128-bit values from VSX registers to GPRs on
14908 power8 when running in 64-bit mode using XXPERMDI to get access to the
14909 bottom 64-bit value. */
14910 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14912 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
14913 icode = reg_addr[mode].reload_gpr_vsx;
14917 else if (!TARGET_POWERPC64 && size == 8)
14919 /* Handle moving 64-bit values from GPRs to floating point registers on
14920 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
14921 values back together. Altivec register classes must be handled
14922 specially since a different instruction is used, and the secondary
14923 reload support requires a single instruction class in the scratch
14924 register constraint. However, right now TFmode is not allowed in
14925 Altivec registers, so the pattern will never match. */
14926 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
14928 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
14929 icode = reg_addr[mode].reload_fpr_gpr;
14933 if (icode != CODE_FOR_nothing)
14935 ret = true;
14936 if (sri)
14938 sri->icode = icode;
14939 sri->extra_cost = cost;
14943 return ret;
14946 /* Return whether a move between two register classes can be done either
14947 directly (simple move) or via a pattern that uses a single extra temporary
14948 (using power8's direct move in this case). */
14950 static bool
14951 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
14952 enum rs6000_reg_type from_type,
14953 enum machine_mode mode,
14954 secondary_reload_info *sri,
14955 bool altivec_p)
14957 /* Fall back to load/store reloads if either type is not a register. */
14958 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
14959 return false;
14961 /* If we haven't allocated registers yet, assume the move can be done for the
14962 standard register types. */
14963 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
14964 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
14965 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
14966 return true;
14968 /* A move within the same set of registers is a simple move for
14969 non-specialized registers. */
14970 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
14971 return true;
14973 /* Check whether a simple move can be done directly. */
14974 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
14976 if (sri)
14978 sri->icode = CODE_FOR_nothing;
14979 sri->extra_cost = 0;
14981 return true;
14984 /* Now check if we can do it in a few steps. */
14985 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
14986 altivec_p);
14989 /* Inform reload about cases where moving X with a mode MODE to a register in
14990 RCLASS requires an extra scratch or immediate register. Return the class
14991 needed for the immediate register.
14993 For VSX and Altivec, we may need a register to convert sp+offset into
14994 reg+sp.
14996 For misaligned 64-bit gpr loads and stores we need a register to
14997 convert an offset address to indirect. */
14999 static reg_class_t
15000 rs6000_secondary_reload (bool in_p,
15001 rtx x,
15002 reg_class_t rclass_i,
15003 enum machine_mode mode,
15004 secondary_reload_info *sri)
15006 enum reg_class rclass = (enum reg_class) rclass_i;
15007 reg_class_t ret = ALL_REGS;
15008 enum insn_code icode;
15009 bool default_p = false;
15011 sri->icode = CODE_FOR_nothing;
15012 icode = ((in_p)
15013 ? reg_addr[mode].reload_load
15014 : reg_addr[mode].reload_store);
15016 if (REG_P (x) || register_operand (x, mode))
15018 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
15019 bool altivec_p = (rclass == ALTIVEC_REGS);
15020 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
15022 if (!in_p)
15024 enum rs6000_reg_type exchange = to_type;
15025 to_type = from_type;
15026 from_type = exchange;
15029 /* Can we do a direct move of some sort? */
15030 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
15031 altivec_p))
15033 icode = (enum insn_code)sri->icode;
15034 default_p = false;
15035 ret = NO_REGS;
15039 /* Handle vector moves with reload helper functions. */
15040 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
15042 ret = NO_REGS;
15043 sri->icode = CODE_FOR_nothing;
15044 sri->extra_cost = 0;
15046 if (GET_CODE (x) == MEM)
15048 rtx addr = XEXP (x, 0);
15050 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
15051 an extra register in that case, but it would need an extra
15052 register if the addressing is reg+reg or (reg+reg)&(-16). Special
15053 case load/store quad. */
15054 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
15056 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
15057 && GET_MODE_SIZE (mode) == 16
15058 && quad_memory_operand (x, mode))
15060 sri->icode = icode;
15061 sri->extra_cost = 2;
15064 else if (!legitimate_indirect_address_p (addr, false)
15065 && !rs6000_legitimate_offset_address_p (PTImode, addr,
15066 false, true))
15068 sri->icode = icode;
15069 /* Account for splitting the loads, and converting the
15070 address from reg+reg to reg. */
15071 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
15072 + ((GET_CODE (addr) == AND) ? 1 : 0));
15075 /* Allow scalar loads to/from the traditional floating point
15076 registers, even if VSX memory is set. */
15077 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
15078 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
15079 && (legitimate_indirect_address_p (addr, false)
15080 || legitimate_indexed_address_p (addr, false)
15081 || rs6000_legitimate_offset_address_p (mode, addr,
15082 false, true)))
15085 /* Loads to and stores from vector registers can only do reg+reg
15086 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
15087 scalar modes loading up the traditional floating point registers
15088 to use offset addresses. */
15089 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
15090 || rclass == FLOAT_REGS || rclass == NO_REGS)
15092 if (!VECTOR_MEM_ALTIVEC_P (mode)
15093 && GET_CODE (addr) == AND
15094 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15095 && INTVAL (XEXP (addr, 1)) == -16
15096 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
15097 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
15099 sri->icode = icode;
15100 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
15101 ? 2 : 1);
15103 else if (!legitimate_indirect_address_p (addr, false)
15104 && (rclass == NO_REGS
15105 || !legitimate_indexed_address_p (addr, false)))
15107 sri->icode = icode;
15108 sri->extra_cost = 1;
15110 else
15111 icode = CODE_FOR_nothing;
15113 /* Any other loads, including to pseudo registers which haven't been
15114 assigned to a register yet, default to require a scratch
15115 register. */
15116 else
15118 sri->icode = icode;
15119 sri->extra_cost = 2;
15122 else if (REG_P (x))
15124 int regno = true_regnum (x);
15126 icode = CODE_FOR_nothing;
15127 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
15128 default_p = true;
15129 else
15131 enum reg_class xclass = REGNO_REG_CLASS (regno);
15132 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
15133 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
15135 /* If memory is needed, use default_secondary_reload to create the
15136 stack slot. */
15137 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
15138 default_p = true;
15139 else
15140 ret = NO_REGS;
15143 else
15144 default_p = true;
15146 else if (TARGET_POWERPC64
15147 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
15148 && MEM_P (x)
15149 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
15151 rtx addr = XEXP (x, 0);
15152 rtx off = address_offset (addr);
15154 if (off != NULL_RTX)
15156 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
15157 unsigned HOST_WIDE_INT offset = INTVAL (off);
15159 /* We need a secondary reload when our legitimate_address_p
15160 says the address is good (as otherwise the entire address
15161 will be reloaded), and the offset is not a multiple of
15162 four or we have an address wrap. Address wrap will only
15163 occur for LO_SUMs since legitimate_offset_address_p
15164 rejects addresses for 16-byte mems that will wrap. */
15165 if (GET_CODE (addr) == LO_SUM
15166 ? (1 /* legitimate_address_p allows any offset for lo_sum */
15167 && ((offset & 3) != 0
15168 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
15169 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
15170 && (offset & 3) != 0))
15172 if (in_p)
15173 sri->icode = CODE_FOR_reload_di_load;
15174 else
15175 sri->icode = CODE_FOR_reload_di_store;
15176 sri->extra_cost = 2;
15177 ret = NO_REGS;
15179 else
15180 default_p = true;
15182 else
15183 default_p = true;
15185 else if (!TARGET_POWERPC64
15186 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
15187 && MEM_P (x)
15188 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
15190 rtx addr = XEXP (x, 0);
15191 rtx off = address_offset (addr);
15193 if (off != NULL_RTX)
15195 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
15196 unsigned HOST_WIDE_INT offset = INTVAL (off);
15198 /* We need a secondary reload when our legitimate_address_p
15199 says the address is good (as otherwise the entire address
15200 will be reloaded), and we have a wrap.
15202 legitimate_lo_sum_address_p allows LO_SUM addresses to
15203 have any offset so test for wrap in the low 16 bits.
15205 legitimate_offset_address_p checks for the range
15206 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
15207 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
15208 [0x7ff4,0x7fff] respectively, so test for the
15209 intersection of these ranges, [0x7ffc,0x7fff] and
15210 [0x7ff4,0x7ff7] respectively.
15212 Note that the address we see here may have been
15213 manipulated by legitimize_reload_address. */
15214 if (GET_CODE (addr) == LO_SUM
15215 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
15216 : offset - (0x8000 - extra) < UNITS_PER_WORD)
15218 if (in_p)
15219 sri->icode = CODE_FOR_reload_si_load;
15220 else
15221 sri->icode = CODE_FOR_reload_si_store;
15222 sri->extra_cost = 2;
15223 ret = NO_REGS;
15225 else
15226 default_p = true;
15228 else
15229 default_p = true;
15231 else
15232 default_p = true;
15234 if (default_p)
15235 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
15237 gcc_assert (ret != ALL_REGS);
15239 if (TARGET_DEBUG_ADDR)
15241 fprintf (stderr,
15242 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
15243 "mode = %s",
15244 reg_class_names[ret],
15245 in_p ? "true" : "false",
15246 reg_class_names[rclass],
15247 GET_MODE_NAME (mode));
15249 if (default_p)
15250 fprintf (stderr, ", default secondary reload");
15252 if (sri->icode != CODE_FOR_nothing)
15253 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
15254 insn_data[sri->icode].name, sri->extra_cost);
15255 else
15256 fprintf (stderr, "\n");
15258 debug_rtx (x);
15261 return ret;
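/* For example, a 64-bit gpr load from the address r1+9 is accepted by
   legitimate_address_p, but the offset is not a multiple of four, so the
   code above returns CODE_FOR_reload_di_load with an extra cost of 2 to
   convert the address to indirect form. */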
15264 /* Better tracing for rs6000_secondary_reload_inner. */
15266 static void
15267 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
15268 bool store_p)
15270 rtx set, clobber;
15272 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
15274 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
15275 store_p ? "store" : "load");
15277 if (store_p)
15278 set = gen_rtx_SET (VOIDmode, mem, reg);
15279 else
15280 set = gen_rtx_SET (VOIDmode, reg, mem);
15282 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
15283 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
15286 static void
15287 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
15288 bool store_p)
15290 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
15291 gcc_unreachable ();
15294 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
15295 to SP+reg addressing. */
15297 void
15298 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
15300 int regno = true_regnum (reg);
15301 enum machine_mode mode = GET_MODE (reg);
15302 enum reg_class rclass;
15303 rtx addr;
15304 rtx and_op2 = NULL_RTX;
15305 rtx addr_op1;
15306 rtx addr_op2;
15307 rtx scratch_or_premodify = scratch;
15308 rtx and_rtx;
15309 rtx cc_clobber;
15311 if (TARGET_DEBUG_ADDR)
15312 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
15314 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
15315 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15317 if (GET_CODE (mem) != MEM)
15318 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15320 rclass = REGNO_REG_CLASS (regno);
15321 addr = XEXP (mem, 0);
15323 switch (rclass)
15325 /* GPRs can handle reg + small constant, all other addresses need to use
15326 the scratch register. */
15327 case GENERAL_REGS:
15328 case BASE_REGS:
15329 if (GET_CODE (addr) == AND)
15331 and_op2 = XEXP (addr, 1);
15332 addr = XEXP (addr, 0);
15335 if (GET_CODE (addr) == PRE_MODIFY)
15337 scratch_or_premodify = XEXP (addr, 0);
15338 if (!REG_P (scratch_or_premodify))
15339 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15341 if (GET_CODE (XEXP (addr, 1)) != PLUS)
15342 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15344 addr = XEXP (addr, 1);
15347 if (GET_CODE (addr) == PLUS
15348 && (and_op2 != NULL_RTX
15349 || !rs6000_legitimate_offset_address_p (PTImode, addr,
15350 false, true)))
15352 addr_op1 = XEXP (addr, 0);
15353 addr_op2 = XEXP (addr, 1);
15354 if (!legitimate_indirect_address_p (addr_op1, false))
15355 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15357 if (!REG_P (addr_op2)
15358 && (GET_CODE (addr_op2) != CONST_INT
15359 || !satisfies_constraint_I (addr_op2)))
15361 if (TARGET_DEBUG_ADDR)
15363 fprintf (stderr,
15364 "\nMove plus addr to register %s, mode = %s: ",
15365 rs6000_reg_names[REGNO (scratch)],
15366 GET_MODE_NAME (mode));
15367 debug_rtx (addr_op2);
15369 rs6000_emit_move (scratch, addr_op2, Pmode);
15370 addr_op2 = scratch;
15373 emit_insn (gen_rtx_SET (VOIDmode,
15374 scratch_or_premodify,
15375 gen_rtx_PLUS (Pmode,
15376 addr_op1,
15377 addr_op2)));
15379 addr = scratch_or_premodify;
15380 scratch_or_premodify = scratch;
15382 else if (!legitimate_indirect_address_p (addr, false)
15383 && !rs6000_legitimate_offset_address_p (PTImode, addr,
15384 false, true))
15386 if (TARGET_DEBUG_ADDR)
15388 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
15389 rs6000_reg_names[REGNO (scratch_or_premodify)],
15390 GET_MODE_NAME (mode));
15391 debug_rtx (addr);
15393 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
15394 addr = scratch_or_premodify;
15395 scratch_or_premodify = scratch;
15397 break;
15399 /* Float registers can do offset+reg addressing for scalar types. */
15400 case FLOAT_REGS:
15401 if (legitimate_indirect_address_p (addr, false) /* reg */
15402 || legitimate_indexed_address_p (addr, false) /* reg+reg */
15403 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
15404 && and_op2 == NULL_RTX
15405 && scratch_or_premodify == scratch
15406 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
15407 break;
15409 /* If this isn't a legacy floating point load/store, fall through to the
15410 VSX defaults. */
15412 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
15413 addresses into a scratch register. */
15414 case VSX_REGS:
15415 case ALTIVEC_REGS:
15417 /* With float regs, we need to handle the AND ourselves, since we can't
15418 use the Altivec instruction with an implicit AND -16. Allow scalar
15419 loads to float registers to use reg+offset even if VSX. */
15420 if (GET_CODE (addr) == AND
15421 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
15422 || GET_CODE (XEXP (addr, 1)) != CONST_INT
15423 || INTVAL (XEXP (addr, 1)) != -16
15424 || !VECTOR_MEM_ALTIVEC_P (mode)))
15426 and_op2 = XEXP (addr, 1);
15427 addr = XEXP (addr, 0);
15430 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
15431 as the address later. */
15432 if (GET_CODE (addr) == PRE_MODIFY
15433 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
15434 && (rclass != FLOAT_REGS
15435 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
15436 || and_op2 != NULL_RTX
15437 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
15439 scratch_or_premodify = XEXP (addr, 0);
15440 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
15441 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15443 if (GET_CODE (XEXP (addr, 1)) != PLUS)
15444 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15446 addr = XEXP (addr, 1);
15449 if (legitimate_indirect_address_p (addr, false) /* reg */
15450 || legitimate_indexed_address_p (addr, false) /* reg+reg */
15451 || (GET_CODE (addr) == AND /* Altivec memory */
15452 && rclass == ALTIVEC_REGS
15453 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15454 && INTVAL (XEXP (addr, 1)) == -16
15455 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
15456 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
15459 else if (GET_CODE (addr) == PLUS)
15461 addr_op1 = XEXP (addr, 0);
15462 addr_op2 = XEXP (addr, 1);
15463 if (!REG_P (addr_op1))
15464 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15466 if (TARGET_DEBUG_ADDR)
15468 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
15469 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
15470 debug_rtx (addr_op2);
15472 rs6000_emit_move (scratch, addr_op2, Pmode);
15473 emit_insn (gen_rtx_SET (VOIDmode,
15474 scratch_or_premodify,
15475 gen_rtx_PLUS (Pmode,
15476 addr_op1,
15477 scratch)));
15478 addr = scratch_or_premodify;
15479 scratch_or_premodify = scratch;
15482 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
15483 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
15484 || REG_P (addr))
15486 if (TARGET_DEBUG_ADDR)
15488 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
15489 rs6000_reg_names[REGNO (scratch_or_premodify)],
15490 GET_MODE_NAME (mode));
15491 debug_rtx (addr);
15494 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
15495 addr = scratch_or_premodify;
15496 scratch_or_premodify = scratch;
15499 else
15500 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15502 break;
15504 default:
15505 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
15508 /* If the original address involved a PRE_MODIFY that we couldn't use with a
15509 VSX memory instruction with update, and we haven't already taken care of
15510 it, store the address in the pre-modify register and use that as the
15511 address. */
15512 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
15514 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
15515 addr = scratch_or_premodify;
15518 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
15519 memory instruction, recreate the AND now, including the clobber which is
15520 generated by the general ANDSI3/ANDDI3 patterns for the
15521 andi. instruction. */
15522 if (and_op2 != NULL_RTX)
15524 if (! legitimate_indirect_address_p (addr, false))
15526 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
15527 addr = scratch;
15530 if (TARGET_DEBUG_ADDR)
15532 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
15533 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
15534 debug_rtx (and_op2);
15537 and_rtx = gen_rtx_SET (VOIDmode,
15538 scratch,
15539 gen_rtx_AND (Pmode,
15540 addr,
15541 and_op2));
15543 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
15544 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15545 gen_rtvec (2, and_rtx, cc_clobber)));
15546 addr = scratch;
15549 /* Adjust the address if it changed. */
15550 if (addr != XEXP (mem, 0))
15552 mem = replace_equiv_address_nv (mem, addr);
15553 if (TARGET_DEBUG_ADDR)
15554 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
15557 /* Now create the move. */
15558 if (store_p)
15559 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
15560 else
15561 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
15563 return;
15566 /* Convert reloads involving 64-bit gprs and misaligned offset
15567 addressing, or multiple 32-bit gprs and offsets that are too large,
15568 to use indirect addressing. */
15570 void
15571 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
15573 int regno = true_regnum (reg);
15574 enum reg_class rclass;
15575 rtx addr;
15576 rtx scratch_or_premodify = scratch;
15578 if (TARGET_DEBUG_ADDR)
15580 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
15581 store_p ? "store" : "load");
15582 fprintf (stderr, "reg:\n");
15583 debug_rtx (reg);
15584 fprintf (stderr, "mem:\n");
15585 debug_rtx (mem);
15586 fprintf (stderr, "scratch:\n");
15587 debug_rtx (scratch);
15590 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
15591 gcc_assert (GET_CODE (mem) == MEM);
15592 rclass = REGNO_REG_CLASS (regno);
15593 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
15594 addr = XEXP (mem, 0);
15596 if (GET_CODE (addr) == PRE_MODIFY)
15598 scratch_or_premodify = XEXP (addr, 0);
15599 gcc_assert (REG_P (scratch_or_premodify));
15600 addr = XEXP (addr, 1);
15602 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
15604 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
15606 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
15608 /* Now create the move. */
15609 if (store_p)
15610 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
15611 else
15612 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
15614 return;
15617 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
15618 this function has any SDmode references. If we are on a power7 or later, we
15619 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
15620 can load/store the value. */
15622 static void
15623 rs6000_alloc_sdmode_stack_slot (void)
15625 tree t;
15626 basic_block bb;
15627 gimple_stmt_iterator gsi;
15629 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
15631 if (TARGET_NO_SDMODE_STACK)
15632 return;
15634 FOR_EACH_BB (bb)
15635 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
15637 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
15638 if (ret)
15640 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
15641 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
15642 SDmode, 0);
15643 return;
15647 /* Check for any SDmode parameters of the function. */
15648 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
15650 if (TREE_TYPE (t) == error_mark_node)
15651 continue;
15653 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
15654 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
15656 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
15657 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
15658 SDmode, 0);
15659 return;
15664 static void
15665 rs6000_instantiate_decls (void)
15667 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
15668 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
15671 /* Given an rtx X being reloaded into a reg required to be
15672 in class CLASS, return the class of reg to actually use.
15673 In general this is just CLASS; but on some machines
15674 in some cases it is preferable to use a more restrictive class.
15676 On the RS/6000, we have to return NO_REGS when we want to reload a
15677 floating-point CONST_DOUBLE to force it to be copied to memory.
15679 We also don't want to reload integer values into floating-point
15680 registers if we can at all help it. In fact, this can
15681 cause reload to die, if it tries to generate a reload of CTR
15682 into a FP register and discovers it doesn't have the memory location
15683 required.
15685 ??? Would it be a good idea to have reload do the converse, that is
15686 try to reload floating modes into FP registers if possible?
15689 static enum reg_class
15690 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
15692 enum machine_mode mode = GET_MODE (x);
15694 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
15695 return rclass;
15697 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
15698 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
15699 && easy_vector_constant (x, mode))
15700 return ALTIVEC_REGS;
15702 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
15703 return NO_REGS;
15705 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
15706 return GENERAL_REGS;
15708 /* For VSX, prefer the traditional registers for 64-bit values because we can
15709 use the non-VSX loads. Prefer the Altivec registers if Altivec is
15710 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
15711 prefer Altivec loads. */
15712 if (rclass == VSX_REGS)
15714 if (GET_MODE_SIZE (mode) <= 8)
15715 return FLOAT_REGS;
15717 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
15718 return ALTIVEC_REGS;
15720 return rclass;
15723 return rclass;
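/* For example, a zero vector constant may stay in VSX_REGS since xxlxor
   can materialize it, any other constant destined for FLOAT_REGS returns
   NO_REGS so it is pushed out to memory, and 8-byte values in VSX_REGS are
   steered to FLOAT_REGS so the non-VSX loads can be used. */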
15726 /* Debug version of rs6000_preferred_reload_class. */
15727 static enum reg_class
15728 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
15730 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
15732 fprintf (stderr,
15733 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
15734 "mode = %s, x:\n",
15735 reg_class_names[ret], reg_class_names[rclass],
15736 GET_MODE_NAME (GET_MODE (x)));
15737 debug_rtx (x);
15739 return ret;
15742 /* If we are copying between FP or AltiVec registers and anything else, we need
15743 a memory location. The exception is when we are targeting ppc64 and the
15744 instructions to move directly between the FPRs and GPRs are available. Also, under VSX, you
15745 can copy vector registers from the FP register set to the Altivec register
15746 set and vice versa. */
15748 static bool
15749 rs6000_secondary_memory_needed (enum reg_class from_class,
15750 enum reg_class to_class,
15751 enum machine_mode mode)
15753 enum rs6000_reg_type from_type, to_type;
15754 bool altivec_p = ((from_class == ALTIVEC_REGS)
15755 || (to_class == ALTIVEC_REGS));
15757 /* If a simple/direct move is available, we don't need secondary memory */
15758 from_type = reg_class_to_reg_type[(int)from_class];
15759 to_type = reg_class_to_reg_type[(int)to_class];
15761 if (rs6000_secondary_reload_move (to_type, from_type, mode,
15762 (secondary_reload_info *)0, altivec_p))
15763 return false;
15765 /* If we have a floating point or vector register class, we need to use
15766 memory to transfer the data. */
15767 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
15768 return true;
15770 return false;
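/* For example, copying DFmode between FLOAT_REGS and GENERAL_REGS on a
   target without mfpgpr or direct moves requires going through memory,
   whereas a GPR to GPR copy never does. */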
15773 /* Debug version of rs6000_secondary_memory_needed. */
15774 static bool
15775 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
15776 enum reg_class to_class,
15777 enum machine_mode mode)
15779 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
15781 fprintf (stderr,
15782 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
15783 "to_class = %s, mode = %s\n",
15784 ret ? "true" : "false",
15785 reg_class_names[from_class],
15786 reg_class_names[to_class],
15787 GET_MODE_NAME (mode));
15789 return ret;
15792 /* Return the register class of a scratch register needed to copy IN into
15793 or out of a register in RCLASS in MODE. If it can be done directly,
15794 NO_REGS is returned. */
15796 static enum reg_class
15797 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
15798 rtx in)
15800 int regno;
15802 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
15803 #if TARGET_MACHO
15804 && MACHOPIC_INDIRECT
15805 #endif
15808 /* We cannot copy a symbolic operand directly into anything
15809 other than BASE_REGS for TARGET_ELF. So indicate that a
15810 register from BASE_REGS is needed as an intermediate
15811 register.
15813 On Darwin, pic addresses require a load from memory, which
15814 needs a base register. */
15815 if (rclass != BASE_REGS
15816 && (GET_CODE (in) == SYMBOL_REF
15817 || GET_CODE (in) == HIGH
15818 || GET_CODE (in) == LABEL_REF
15819 || GET_CODE (in) == CONST))
15820 return BASE_REGS;
15823 if (GET_CODE (in) == REG)
15825 regno = REGNO (in);
15826 if (regno >= FIRST_PSEUDO_REGISTER)
15828 regno = true_regnum (in);
15829 if (regno >= FIRST_PSEUDO_REGISTER)
15830 regno = -1;
15833 else if (GET_CODE (in) == SUBREG)
15835 regno = true_regnum (in);
15836 if (regno >= FIRST_PSEUDO_REGISTER)
15837 regno = -1;
15839 else
15840 regno = -1;
15842 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
15843 into anything. */
15844 if (rclass == GENERAL_REGS || rclass == BASE_REGS
15845 || (regno >= 0 && INT_REGNO_P (regno)))
15846 return NO_REGS;
15848 /* Constants, memory, and FP registers can go into FP registers. */
15849 if ((regno == -1 || FP_REGNO_P (regno))
15850 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
15851 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
15853 /* Memory, and FP/altivec registers can go into fp/altivec registers under
15854 VSX. However, for scalar variables, use the traditional floating point
15855 registers so that we can use offset+register addressing. */
15856 if (TARGET_VSX
15857 && (regno == -1 || VSX_REGNO_P (regno))
15858 && VSX_REG_CLASS_P (rclass))
15860 if (GET_MODE_SIZE (mode) < 16)
15861 return FLOAT_REGS;
15863 return NO_REGS;
15866 /* Memory, and AltiVec registers can go into AltiVec registers. */
15867 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
15868 && rclass == ALTIVEC_REGS)
15869 return NO_REGS;
15871 /* We can copy among the CR registers. */
15872 if ((rclass == CR_REGS || rclass == CR0_REGS)
15873 && regno >= 0 && CR_REGNO_P (regno))
15874 return NO_REGS;
15876 /* Otherwise, we need GENERAL_REGS. */
15877 return GENERAL_REGS;
15880 /* Debug version of rs6000_secondary_reload_class. */
15881 static enum reg_class
15882 rs6000_debug_secondary_reload_class (enum reg_class rclass,
15883 enum machine_mode mode, rtx in)
15885 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
15886 fprintf (stderr,
15887 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
15888 "mode = %s, input rtx:\n",
15889 reg_class_names[ret], reg_class_names[rclass],
15890 GET_MODE_NAME (mode));
15891 debug_rtx (in);
15893 return ret;
15896 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
15898 static bool
15899 rs6000_cannot_change_mode_class (enum machine_mode from,
15900 enum machine_mode to,
15901 enum reg_class rclass)
15903 unsigned from_size = GET_MODE_SIZE (from);
15904 unsigned to_size = GET_MODE_SIZE (to);
15906 if (from_size != to_size)
15908 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
15910 if (reg_classes_intersect_p (xclass, rclass))
15912 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
15913 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
15915 /* Don't allow 64-bit types to overlap with 128-bit types that take a
15916 single register under VSX because the scalar part of the register
15917 is in the upper 64-bits, and not the lower 64-bits. Types like
15918 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
15919 IEEE floating point can't overlap, and neither can small
15920 values. */
15922 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
15923 return true;
15925 if (from_size < 8 || to_size < 8)
15926 return true;
15928 if (from_size == 8 && (8 * to_nregs) != to_size)
15929 return true;
15931 if (to_size == 8 && (8 * from_nregs) != from_size)
15932 return true;
15934 return false;
15936 else
15937 return false;
15940 if (TARGET_E500_DOUBLE
15941 && ((((to) == DFmode) + ((from) == DFmode)) == 1
15942 || (((to) == TFmode) + ((from) == TFmode)) == 1
15943 || (((to) == DDmode) + ((from) == DDmode)) == 1
15944 || (((to) == TDmode) + ((from) == TDmode)) == 1
15945 || (((to) == DImode) + ((from) == DImode)) == 1))
15946 return true;
15948 /* Since the VSX register set includes traditional floating point registers
15949 and altivec registers, just check for the size being different instead of
15950 trying to check whether the modes are vector modes. Otherwise it won't
15951 allow say DF and DI to change classes. For types like TFmode and TDmode
15952 that take 2 64-bit registers, rather than a single 128-bit register, don't
15953 allow subregs of those types to other 128 bit types. */
15954 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
15956 unsigned num_regs = (from_size + 15) / 16;
15957 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
15958 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
15959 return true;
15961 return (from_size != 8 && from_size != 16);
15964 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
15965 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
15966 return true;
15968 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
15969 && reg_classes_intersect_p (GENERAL_REGS, rclass))
15970 return true;
15972 return false;
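/* For example, under VSX a DImode subreg of a V2DImode value is rejected
   above: the 8-byte scalar lives in the upper 64 bits of the 16-byte
   register, so the overlap a subreg assumes does not hold. */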
15975 /* Debug version of rs6000_cannot_change_mode_class. */
15976 static bool
15977 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
15978 enum machine_mode to,
15979 enum reg_class rclass)
15981 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
15983 fprintf (stderr,
15984 "rs6000_cannot_change_mode_class, return %s, from = %s, "
15985 "to = %s, rclass = %s\n",
15986 ret ? "true" : "false",
15987 GET_MODE_NAME (from), GET_MODE_NAME (to),
15988 reg_class_names[rclass]);
15990 return ret;
15993 /* Return a string to do a move operation of 128 bits of data. */
15995 const char *
15996 rs6000_output_move_128bit (rtx operands[])
15998 rtx dest = operands[0];
15999 rtx src = operands[1];
16000 enum machine_mode mode = GET_MODE (dest);
16001 int dest_regno;
16002 int src_regno;
16003 bool dest_gpr_p, dest_fp_p, dest_av_p, dest_vsx_p;
16004 bool src_gpr_p, src_fp_p, src_av_p, src_vsx_p;
16006 if (REG_P (dest))
16008 dest_regno = REGNO (dest);
16009 dest_gpr_p = INT_REGNO_P (dest_regno);
16010 dest_fp_p = FP_REGNO_P (dest_regno);
16011 dest_av_p = ALTIVEC_REGNO_P (dest_regno);
16012 dest_vsx_p = dest_fp_p | dest_av_p;
16014 else
16016 dest_regno = -1;
16017 dest_gpr_p = dest_fp_p = dest_av_p = dest_vsx_p = false;
16020 if (REG_P (src))
16022 src_regno = REGNO (src);
16023 src_gpr_p = INT_REGNO_P (src_regno);
16024 src_fp_p = FP_REGNO_P (src_regno);
16025 src_av_p = ALTIVEC_REGNO_P (src_regno);
16026 src_vsx_p = src_fp_p | src_av_p;
16028 else
16030 src_regno = -1;
16031 src_gpr_p = src_fp_p = src_av_p = src_vsx_p = false;
16034 /* Register moves. */
16035 if (dest_regno >= 0 && src_regno >= 0)
16037 if (dest_gpr_p)
16039 if (src_gpr_p)
16040 return "#";
16042 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
16043 return "#";
16046 else if (TARGET_VSX && dest_vsx_p)
16048 if (src_vsx_p)
16049 return "xxlor %x0,%x1,%x1";
16051 else if (TARGET_DIRECT_MOVE && src_gpr_p)
16052 return "#";
16055 else if (TARGET_ALTIVEC && dest_av_p && src_av_p)
16056 return "vor %0,%1,%1";
16058 else if (dest_fp_p && src_fp_p)
16059 return "#";
16062 /* Loads. */
16063 else if (dest_regno >= 0 && MEM_P (src))
16065 if (dest_gpr_p)
16067 if (TARGET_QUAD_MEMORY && (dest_regno & 1) == 0
16068 && quad_memory_operand (src, mode)
16069 && !reg_overlap_mentioned_p (dest, src))
16071 /* lq/stq only has DQ-form, so avoid X-form that %y produces. */
16072 return REG_P (XEXP (src, 0)) ? "lq %0,%1" : "lq %0,%y1";
16074 else
16075 return "#";
16078 else if (TARGET_ALTIVEC && dest_av_p
16079 && altivec_indexed_or_indirect_operand (src, mode))
16080 return "lvx %0,%y1";
16082 else if (TARGET_VSX && dest_vsx_p)
16084 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
16085 return "lxvw4x %x0,%y1";
16086 else
16087 return "lxvd2x %x0,%y1";
16090 else if (TARGET_ALTIVEC && dest_av_p)
16091 return "lvx %0,%y1";
16093 else if (dest_fp_p)
16094 return "#";
16097 /* Stores. */
16098 else if (src_regno >= 0 && MEM_P (dest))
16100 if (src_gpr_p)
16102 if (TARGET_QUAD_MEMORY && (src_regno & 1) == 0
16103 && quad_memory_operand (dest, mode))
16105 /* lq/stq only has DQ-form, so avoid X-form that %y produces. */
16106 return REG_P (XEXP (dest, 0)) ? "stq %1,%0" : "stq %1,%y0";
16108 else
16109 return "#";
16112 else if (TARGET_ALTIVEC && src_av_p
16113 && altivec_indexed_or_indirect_operand (dest, mode))
16114 return "stvx %1,%y0";
16116 else if (TARGET_VSX && src_vsx_p)
16118 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
16119 return "stxvw4x %x1,%y0";
16120 else
16121 return "stxvd2x %x1,%y0";
16124 else if (TARGET_ALTIVEC && src_av_p)
16125 return "stvx %1,%y0";
16127 else if (src_fp_p)
16128 return "#";
16131 /* Constants. */
16132 else if (dest_regno >= 0
16133 && (GET_CODE (src) == CONST_INT
16134 || GET_CODE (src) == CONST_DOUBLE
16135 || GET_CODE (src) == CONST_VECTOR))
16137 if (dest_gpr_p)
16138 return "#";
16140 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
16141 return "xxlxor %x0,%x0,%x0";
16143 else if (TARGET_ALTIVEC && dest_av_p)
16144 return output_vec_const_move (operands);
16147 if (TARGET_DEBUG_ADDR)
16149 fprintf (stderr, "\n===== Bad 128 bit move:\n");
16150 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
16153 gcc_unreachable ();
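/* For example, a V4SImode copy between two VSX registers emits
   xxlor %x0,%x1,%x1, a 16-byte load into an Altivec register emits lvx,
   and a 128-bit GPR to GPR move returns "#" so a splitter can break it
   into word-sized moves. */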
16157 /* Given a comparison operation, return the bit number in CCR to test. We
16158 know this is a valid comparison.
16160 SCC_P is 1 if this is for an scc. That means that %D will have been
16161 used instead of %C, so the bits will be in different places.
16163 Return -1 if OP isn't a valid comparison for some reason. */
16165 int
16166 ccr_bit (rtx op, int scc_p)
16168 enum rtx_code code = GET_CODE (op);
16169 enum machine_mode cc_mode;
16170 int cc_regnum;
16171 int base_bit;
16172 rtx reg;
16174 if (!COMPARISON_P (op))
16175 return -1;
16177 reg = XEXP (op, 0);
16179 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
16181 cc_mode = GET_MODE (reg);
16182 cc_regnum = REGNO (reg);
16183 base_bit = 4 * (cc_regnum - CR0_REGNO);
16185 validate_condition_mode (code, cc_mode);
16187 /* When generating a sCOND operation, only positive conditions are
16188 allowed. */
16189 gcc_assert (!scc_p
16190 || code == EQ || code == GT || code == LT || code == UNORDERED
16191 || code == GTU || code == LTU);
16193 switch (code)
16195 case NE:
16196 return scc_p ? base_bit + 3 : base_bit + 2;
16197 case EQ:
16198 return base_bit + 2;
16199 case GT: case GTU: case UNLE:
16200 return base_bit + 1;
16201 case LT: case LTU: case UNGE:
16202 return base_bit;
16203 case ORDERED: case UNORDERED:
16204 return base_bit + 3;
16206 case GE: case GEU:
16207 /* If scc, we will have done a cror to put the bit in the
16208 unordered position. So test that bit. For integer, this is ! LT
16209 unless this is an scc insn. */
16210 return scc_p ? base_bit + 3 : base_bit;
16212 case LE: case LEU:
16213 return scc_p ? base_bit + 3 : base_bit + 1;
16215 default:
16216 gcc_unreachable ();
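/* Worked example (illustrative, not from the original source): for a
   GT comparison held in CR1, base_bit is 4 * (CR1 - CR0) = 4, so a
   plain branch tests CR bit 5, the GT bit of CR1.  For GE with SCC_P
   set, the result has already been cror'ed into the unordered slot,
   so bit base_bit + 3 = 7 is tested instead.  */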
16220 /* Return the GOT register. */
16222 rtx
16223 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
16225 /* The second flow pass currently (June 1999) can't update
16226 regs_ever_live without disturbing other parts of the compiler, so
16227 update it here to make the prolog/epilogue code happy. */
16228 if (!can_create_pseudo_p ()
16229 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
16230 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
16232 crtl->uses_pic_offset_table = 1;
16234 return pic_offset_table_rtx;
16237 static rs6000_stack_t stack_info;
16239 /* Function to init struct machine_function.
16240 This will be called, via a pointer variable,
16241 from push_function_context. */
16243 static struct machine_function *
16244 rs6000_init_machine_status (void)
16246 stack_info.reload_completed = 0;
16247 return ggc_alloc_cleared_machine_function ();
16250 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
16252 int
16253 extract_MB (rtx op)
16255 int i;
16256 unsigned long val = INTVAL (op);
16258 /* If the high bit is zero, the value is the first 1 bit we find
16259 from the left. */
16260 if ((val & 0x80000000) == 0)
16262 gcc_assert (val & 0xffffffff);
16264 i = 1;
16265 while (((val <<= 1) & 0x80000000) == 0)
16266 ++i;
16267 return i;
16270 /* If the high bit is set and the low bit is not, or the mask is all
16271 1's, the value is zero. */
16272 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
16273 return 0;
16275 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
16276 from the right. */
16277 i = 31;
16278 while (((val >>= 1) & 1) != 0)
16279 --i;
16281 return i;
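/* Worked example (illustrative): for OP = 0x00ffff00 the high bit is
   clear, so the loop shifts left until bit 0x80000000 is set, eight
   shifts in total, and extract_MB returns 8, the IBM bit number of
   the leftmost 1 in the mask.  */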
16284 int
16285 extract_ME (rtx op)
16287 int i;
16288 unsigned long val = INTVAL (op);
16290 /* If the low bit is zero, the value is the first 1 bit we find from
16291 the right. */
16292 if ((val & 1) == 0)
16294 gcc_assert (val & 0xffffffff);
16296 i = 30;
16297 while (((val >>= 1) & 1) == 0)
16298 --i;
16300 return i;
16303 /* If the low bit is set and the high bit is not, or the mask is all
16304 1's, the value is 31. */
16305 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
16306 return 31;
16308 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
16309 from the left. */
16310 i = 0;
16311 while (((val <<= 1) & 0x80000000) != 0)
16312 ++i;
16314 return i;
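/* Worked examples (illustrative): for OP = 0x00ffff00 the low bit is
   clear, the rightmost 1 is reached after eight right shifts, and
   extract_ME returns 30 - 7 = 23; with extract_MB above this is the
   rlwinm mask MB=8, ME=23.  For the wrap-around mask 0xff0000ff,
   extract_MB returns 24 and extract_ME returns 7.  */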
16317 /* Locate some local-dynamic symbol still in use by this function
16318 so that we can print its name in some tls_ld pattern. */
16320 static const char *
16321 rs6000_get_some_local_dynamic_name (void)
16323 rtx insn;
16325 if (cfun->machine->some_ld_name)
16326 return cfun->machine->some_ld_name;
16328 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
16329 if (INSN_P (insn)
16330 && for_each_rtx (&PATTERN (insn),
16331 rs6000_get_some_local_dynamic_name_1, 0))
16332 return cfun->machine->some_ld_name;
16334 gcc_unreachable ();
16337 /* Helper function for rs6000_get_some_local_dynamic_name. */
16339 static int
16340 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
16342 rtx x = *px;
16344 if (GET_CODE (x) == SYMBOL_REF)
16346 const char *str = XSTR (x, 0);
16347 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
16349 cfun->machine->some_ld_name = str;
16350 return 1;
16354 return 0;
16357 /* Write out a function code label. */
16359 void
16360 rs6000_output_function_entry (FILE *file, const char *fname)
16362 if (fname[0] != '.')
16364 switch (DEFAULT_ABI)
16366 default:
16367 gcc_unreachable ();
16369 case ABI_AIX:
16370 if (DOT_SYMBOLS)
16371 putc ('.', file);
16372 else
16373 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
16374 break;
16376 case ABI_V4:
16377 case ABI_DARWIN:
16378 break;
16382 RS6000_OUTPUT_BASENAME (file, fname);
16385 /* Print an operand. Recognize special options, documented below. */
16387 #if TARGET_ELF
16388 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
16389 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
16390 #else
16391 #define SMALL_DATA_RELOC "sda21"
16392 #define SMALL_DATA_REG 0
16393 #endif
16395 void
16396 print_operand (FILE *file, rtx x, int code)
16398 int i;
16399 unsigned HOST_WIDE_INT uval;
16401 switch (code)
16403 /* %a is output_address. */
16405 case 'b':
16406 /* If constant, low-order 16 bits of constant, unsigned.
16407 Otherwise, write normally. */
16408 if (INT_P (x))
16409 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
16410 else
16411 print_operand (file, x, 0);
16412 return;
16414 case 'B':
16415 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
16416 for 64-bit mask direction. */
16417 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
16418 return;
16420 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
16421 output_operand. */
16423 case 'D':
16424 /* Like 'J' but get to the GT bit only. */
16425 gcc_assert (REG_P (x));
16427 /* Bit 1 is GT bit. */
16428 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
16430 /* Add one for shift count in rlinm for scc. */
16431 fprintf (file, "%d", i + 1);
16432 return;
16434 case 'E':
16435 /* X is a CR register. Print the number of the EQ bit of the CR. */
16436 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
16437 output_operand_lossage ("invalid %%E value");
16438 else
16439 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
16440 return;
16442 case 'f':
16443 /* X is a CR register. Print the shift count needed to move it
16444 to the high-order four bits. */
16445 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
16446 output_operand_lossage ("invalid %%f value");
16447 else
16448 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
16449 return;
16451 case 'F':
16452 /* Similar, but print the count for the rotate in the opposite
16453 direction. */
16454 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
16455 output_operand_lossage ("invalid %%F value");
16456 else
16457 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
16458 return;
16460 case 'G':
16461 /* X is a constant integer. If it is negative, print "m",
16462 otherwise print "z". This is to make an aze or ame insn. */
16463 if (GET_CODE (x) != CONST_INT)
16464 output_operand_lossage ("invalid %%G value");
16465 else if (INTVAL (x) >= 0)
16466 putc ('z', file);
16467 else
16468 putc ('m', file);
16469 return;
16471 case 'h':
16472 /* If constant, output low-order five bits. Otherwise, write
16473 normally. */
16474 if (INT_P (x))
16475 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
16476 else
16477 print_operand (file, x, 0);
16478 return;
16480 case 'H':
16481 /* If constant, output low-order six bits. Otherwise, write
16482 normally. */
16483 if (INT_P (x))
16484 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
16485 else
16486 print_operand (file, x, 0);
16487 return;
16489 case 'I':
16490 /* Print `i' if this is a constant, else nothing. */
16491 if (INT_P (x))
16492 putc ('i', file);
16493 return;
16495 case 'j':
16496 /* Write the bit number in CCR for jump. */
16497 i = ccr_bit (x, 0);
16498 if (i == -1)
16499 output_operand_lossage ("invalid %%j code");
16500 else
16501 fprintf (file, "%d", i);
16502 return;
16504 case 'J':
16505 /* Similar, but add one for shift count in rlinm for scc and pass
16506 scc flag to `ccr_bit'. */
16507 i = ccr_bit (x, 1);
16508 if (i == -1)
16509 output_operand_lossage ("invalid %%J code");
16510 else
16511 /* If we want bit 31, write a shift count of zero, not 32. */
16512 fprintf (file, "%d", i == 31 ? 0 : i + 1);
16513 return;
16515 case 'k':
16516 /* X must be a constant. Write the 1's complement of the
16517 constant. */
16518 if (! INT_P (x))
16519 output_operand_lossage ("invalid %%k value");
16520 else
16521 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
16522 return;
16524 case 'K':
16525 /* X must be a symbolic constant on ELF. Write an
16526 expression suitable for an 'addi' that adds in the low 16
16527 bits of the MEM. */
16528 if (GET_CODE (x) == CONST)
16530 if (GET_CODE (XEXP (x, 0)) != PLUS
16531 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
16532 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
16533 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
16534 output_operand_lossage ("invalid %%K value");
16536 print_operand_address (file, x);
16537 fputs ("@l", file);
16538 return;
16540 /* %l is output_asm_label. */
16542 case 'L':
16543 /* Write second word of DImode or DFmode reference. Works on register
16544 or non-indexed memory only. */
16545 if (REG_P (x))
16546 fputs (reg_names[REGNO (x) + 1], file);
16547 else if (MEM_P (x))
16549 /* Handle possible auto-increment. Since it is pre-increment and
16550 we have already done it, we can just use an offset of word. */
16551 if (GET_CODE (XEXP (x, 0)) == PRE_INC
16552 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
16553 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
16554 UNITS_PER_WORD));
16555 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16556 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
16557 UNITS_PER_WORD));
16558 else
16559 output_address (XEXP (adjust_address_nv (x, SImode,
16560 UNITS_PER_WORD),
16561 0));
16563 if (small_data_operand (x, GET_MODE (x)))
16564 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16565 reg_names[SMALL_DATA_REG]);
16567 return;
16569 case 'm':
16570 /* MB value for a mask operand. */
16571 if (! mask_operand (x, SImode))
16572 output_operand_lossage ("invalid %%m value");
16574 fprintf (file, "%d", extract_MB (x));
16575 return;
16577 case 'M':
16578 /* ME value for a mask operand. */
16579 if (! mask_operand (x, SImode))
16580 output_operand_lossage ("invalid %%M value");
16582 fprintf (file, "%d", extract_ME (x));
16583 return;
16585 /* %n outputs the negative of its operand. */
16587 case 'N':
16588 /* Write the number of elements in the vector times 4. */
16589 if (GET_CODE (x) != PARALLEL)
16590 output_operand_lossage ("invalid %%N value");
16591 else
16592 fprintf (file, "%d", XVECLEN (x, 0) * 4);
16593 return;
16595 case 'O':
16596 /* Similar, but subtract 1 first. */
16597 if (GET_CODE (x) != PARALLEL)
16598 output_operand_lossage ("invalid %%O value");
16599 else
16600 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
16601 return;
16603 case 'p':
16604 /* X is a CONST_INT that is a power of two. Output the logarithm. */
16605 if (! INT_P (x)
16606 || INTVAL (x) < 0
16607 || (i = exact_log2 (INTVAL (x))) < 0)
16608 output_operand_lossage ("invalid %%p value");
16609 else
16610 fprintf (file, "%d", i);
16611 return;
16613 case 'P':
16614 /* The operand must be an indirect memory reference. The result
16615 is the register name. */
16616 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
16617 || REGNO (XEXP (x, 0)) >= 32)
16618 output_operand_lossage ("invalid %%P value");
16619 else
16620 fputs (reg_names[REGNO (XEXP (x, 0))], file);
16621 return;
16623 case 'q':
16624 /* This outputs the logical code corresponding to a boolean
16625 expression. The expression may have one or both operands
16626 negated (if one, only the first one). For condition register
16627 logical operations, it will also treat the negated
16628 CR codes as NOTs, but not handle NOTs of them. */
16630 const char *const *t = 0;
16631 const char *s;
16632 enum rtx_code code = GET_CODE (x);
16633 static const char * const tbl[3][3] = {
16634 { "and", "andc", "nor" },
16635 { "or", "orc", "nand" },
16636 { "xor", "eqv", "xor" } };
16638 if (code == AND)
16639 t = tbl[0];
16640 else if (code == IOR)
16641 t = tbl[1];
16642 else if (code == XOR)
16643 t = tbl[2];
16644 else
16645 output_operand_lossage ("invalid %%q value");
16647 if (GET_CODE (XEXP (x, 0)) != NOT)
16648 s = t[0];
16649 else
16651 if (GET_CODE (XEXP (x, 1)) == NOT)
16652 s = t[2];
16653 else
16654 s = t[1];
16657 fputs (s, file);
16659 return;
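/* Example of the %q mapping (illustrative): (and (reg) (reg)) prints
   "and"; (and (not (reg)) (reg)) prints "andc"; with both operands
   negated, De Morgan gives "nor".  The IOR row similarly yields
   "or"/"orc"/"nand", and the XOR row "xor"/"eqv"/"xor".  */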
16661 case 'Q':
16662 if (! TARGET_MFCRF)
16663 return;
16664 fputc (',', file);
16665 /* FALLTHRU */
16667 case 'R':
16668 /* X is a CR register. Print the mask for `mtcrf'. */
16669 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
16670 output_operand_lossage ("invalid %%R value");
16671 else
16672 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
16673 return;
16675 case 's':
16676 /* Low 5 bits of 32 - value. */
16677 if (! INT_P (x))
16678 output_operand_lossage ("invalid %%s value");
16679 else
16680 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
16681 return;
16683 case 'S':
16684 /* PowerPC64 mask position. All 0's is excluded.
16685 CONST_INT 32-bit mask is considered sign-extended so any
16686 transition must occur within the CONST_INT, not on the boundary. */
16687 if (! mask64_operand (x, DImode))
16688 output_operand_lossage ("invalid %%S value");
16690 uval = INTVAL (x);
16692 if (uval & 1) /* Clear Left */
16694 #if HOST_BITS_PER_WIDE_INT > 64
16695 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
16696 #endif
16697 i = 64;
16699 else /* Clear Right */
16701 uval = ~uval;
16702 #if HOST_BITS_PER_WIDE_INT > 64
16703 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
16704 #endif
16705 i = 63;
16707 while (uval != 0)
16708 --i, uval >>= 1;
16709 gcc_assert (i >= 0);
16710 fprintf (file, "%d", i);
16711 return;
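/* Worked example for %S (illustrative): the clear-left mask
   0x00000000ffffffff has its low bit set, so the loop counts down
   from 64 and prints 32, the number of leading zeros cleared by an
   rldicl; the clear-right mask 0xffffffff00000000 is complemented
   first, the count starts at 63, and 31 is printed, the last mask
   bit for an rldicr.  */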
16713 case 't':
16714 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
16715 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
16717 /* Bit 3 is OV bit. */
16718 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
16720 /* If we want bit 31, write a shift count of zero, not 32. */
16721 fprintf (file, "%d", i == 31 ? 0 : i + 1);
16722 return;
16724 case 'T':
16725 /* Print the symbolic name of a branch target register. */
16726 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
16727 && REGNO (x) != CTR_REGNO))
16728 output_operand_lossage ("invalid %%T value");
16729 else if (REGNO (x) == LR_REGNO)
16730 fputs ("lr", file);
16731 else
16732 fputs ("ctr", file);
16733 return;
16735 case 'u':
16736 /* High-order 16 bits of constant for use in unsigned operand. */
16737 if (! INT_P (x))
16738 output_operand_lossage ("invalid %%u value");
16739 else
16740 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
16741 (INTVAL (x) >> 16) & 0xffff);
16742 return;
16744 case 'v':
16745 /* High-order 16 bits of constant for use in signed operand. */
16746 if (! INT_P (x))
16747 output_operand_lossage ("invalid %%v value");
16748 else
16749 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
16750 (INTVAL (x) >> 16) & 0xffff);
16751 return;
16753 case 'U':
16754 /* Print `u' if this has an auto-increment or auto-decrement. */
16755 if (MEM_P (x)
16756 && (GET_CODE (XEXP (x, 0)) == PRE_INC
16757 || GET_CODE (XEXP (x, 0)) == PRE_DEC
16758 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
16759 putc ('u', file);
16760 return;
16762 case 'V':
16763 /* Print the trap code for this operand. */
16764 switch (GET_CODE (x))
16766 case EQ:
16767 fputs ("eq", file); /* 4 */
16768 break;
16769 case NE:
16770 fputs ("ne", file); /* 24 */
16771 break;
16772 case LT:
16773 fputs ("lt", file); /* 16 */
16774 break;
16775 case LE:
16776 fputs ("le", file); /* 20 */
16777 break;
16778 case GT:
16779 fputs ("gt", file); /* 8 */
16780 break;
16781 case GE:
16782 fputs ("ge", file); /* 12 */
16783 break;
16784 case LTU:
16785 fputs ("llt", file); /* 2 */
16786 break;
16787 case LEU:
16788 fputs ("lle", file); /* 6 */
16789 break;
16790 case GTU:
16791 fputs ("lgt", file); /* 1 */
16792 break;
16793 case GEU:
16794 fputs ("lge", file); /* 5 */
16795 break;
16796 default:
16797 gcc_unreachable ();
16799 break;
16801 case 'w':
16802 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
16803 normally. */
16804 if (INT_P (x))
16805 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
16806 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
16807 else
16808 print_operand (file, x, 0);
16809 return;
16811 case 'W':
16812 /* MB value for a PowerPC64 rldic operand. */
16813 i = clz_hwi (INTVAL (x));
16815 fprintf (file, "%d", i);
16816 return;
16818 case 'x':
16819 /* X is a FPR or Altivec register used in a VSX context. */
16820 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
16821 output_operand_lossage ("invalid %%x value");
16822 else
16824 int reg = REGNO (x);
16825 int vsx_reg = (FP_REGNO_P (reg)
16826 ? reg - 32
16827 : reg - FIRST_ALTIVEC_REGNO + 32);
16829 #ifdef TARGET_REGNAMES
16830 if (TARGET_REGNAMES)
16831 fprintf (file, "%%vs%d", vsx_reg);
16832 else
16833 #endif
16834 fprintf (file, "%d", vsx_reg);
16836 return;
16838 case 'X':
16839 if (MEM_P (x)
16840 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
16841 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
16842 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
16843 putc ('x', file);
16844 return;
16846 case 'Y':
16847 /* Like 'L', for third word of TImode/PTImode. */
16848 if (REG_P (x))
16849 fputs (reg_names[REGNO (x) + 2], file);
16850 else if (MEM_P (x))
16852 if (GET_CODE (XEXP (x, 0)) == PRE_INC
16853 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
16854 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
16855 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16856 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
16857 else
16858 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
16859 if (small_data_operand (x, GET_MODE (x)))
16860 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16861 reg_names[SMALL_DATA_REG]);
16863 return;
16865 case 'z':
16866 /* X is a SYMBOL_REF. Write out the name preceded by a
16867 period and without any trailing data in brackets. Used for function
16868 names. If we are configured for System V (or the embedded ABI) on
16869 the PowerPC, do not emit the period, since those systems do not use
16870 TOCs and the like. */
16871 gcc_assert (GET_CODE (x) == SYMBOL_REF);
16873 /* For macho, check to see if we need a stub. */
16874 if (TARGET_MACHO)
16876 const char *name = XSTR (x, 0);
16877 #if TARGET_MACHO
16878 if (darwin_emit_branch_islands
16879 && MACHOPIC_INDIRECT
16880 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
16881 name = machopic_indirection_name (x, /*stub_p=*/true);
16882 #endif
16883 assemble_name (file, name);
16885 else if (!DOT_SYMBOLS)
16886 assemble_name (file, XSTR (x, 0));
16887 else
16888 rs6000_output_function_entry (file, XSTR (x, 0));
16889 return;
16891 case 'Z':
16892 /* Like 'L', for last word of TImode/PTImode. */
16893 if (REG_P (x))
16894 fputs (reg_names[REGNO (x) + 3], file);
16895 else if (MEM_P (x))
16897 if (GET_CODE (XEXP (x, 0)) == PRE_INC
16898 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
16899 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
16900 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16901 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
16902 else
16903 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
16904 if (small_data_operand (x, GET_MODE (x)))
16905 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16906 reg_names[SMALL_DATA_REG]);
16908 return;
16910 /* Print AltiVec or SPE memory operand. */
16911 case 'y':
16913 rtx tmp;
16915 gcc_assert (MEM_P (x));
16917 tmp = XEXP (x, 0);
16919 /* Ugly hack because %y is overloaded. */
16920 if ((TARGET_SPE || TARGET_E500_DOUBLE)
16921 && (GET_MODE_SIZE (GET_MODE (x)) == 8
16922 || GET_MODE (x) == TFmode
16923 || GET_MODE (x) == TImode
16924 || GET_MODE (x) == PTImode))
16926 /* Handle [reg]. */
16927 if (REG_P (tmp))
16929 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
16930 break;
16932 /* Handle [reg+UIMM]. */
16933 else if (GET_CODE (tmp) == PLUS &&
16934 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
16936 int x;
16938 gcc_assert (REG_P (XEXP (tmp, 0)));
16940 x = INTVAL (XEXP (tmp, 1));
16941 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
16942 break;
16945 /* Fall through. Must be [reg+reg]. */
16947 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
16948 && GET_CODE (tmp) == AND
16949 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
16950 && INTVAL (XEXP (tmp, 1)) == -16)
16951 tmp = XEXP (tmp, 0);
16952 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
16953 && GET_CODE (tmp) == PRE_MODIFY)
16954 tmp = XEXP (tmp, 1);
16955 if (REG_P (tmp))
16956 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
16957 else
16959 if (GET_CODE (tmp) != PLUS
16960 || !REG_P (XEXP (tmp, 0))
16961 || !REG_P (XEXP (tmp, 1)))
16963 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
16964 break;
16967 if (REGNO (XEXP (tmp, 0)) == 0)
16968 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
16969 reg_names[ REGNO (XEXP (tmp, 0)) ]);
16970 else
16971 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
16972 reg_names[ REGNO (XEXP (tmp, 1)) ]);
16974 break;
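/* Example for %y (illustrative): an AltiVec access such as
   (mem:V4SI (and (plus (reg 9) (reg 10)) (const_int -16))) has the
   alignment AND stripped above, so it prints as "9,10", ready for
   the RA,RB slots of lvx/stvx.  */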
16977 case 0:
16978 if (REG_P (x))
16979 fprintf (file, "%s", reg_names[REGNO (x)]);
16980 else if (MEM_P (x))
16982 /* We need to handle PRE_INC and PRE_DEC here, since we need to
16983 know the width from the mode. */
16984 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
16985 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
16986 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
16987 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
16988 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
16989 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
16990 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16991 output_address (XEXP (XEXP (x, 0), 1));
16992 else
16993 output_address (XEXP (x, 0));
16995 else
16997 if (toc_relative_expr_p (x, false))
16998 /* This hack along with a corresponding hack in
16999 rs6000_output_addr_const_extra arranges to output addends
17000 where the assembler expects to find them. eg.
17001 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
17002 without this hack would be output as "x@toc+4". We
17003 want "x+4@toc". */
17004 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
17005 else
17006 output_addr_const (file, x);
17008 return;
17010 case '&':
17011 assemble_name (file, rs6000_get_some_local_dynamic_name ());
17012 return;
17014 default:
17015 output_operand_lossage ("invalid %%xn code");
17019 /* Print the address of an operand. */
17021 void
17022 print_operand_address (FILE *file, rtx x)
17024 if (REG_P (x))
17025 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
17026 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
17027 || GET_CODE (x) == LABEL_REF)
17029 output_addr_const (file, x);
17030 if (small_data_operand (x, GET_MODE (x)))
17031 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
17032 reg_names[SMALL_DATA_REG]);
17033 else
17034 gcc_assert (!TARGET_TOC);
17036 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
17037 && REG_P (XEXP (x, 1)))
17039 if (REGNO (XEXP (x, 0)) == 0)
17040 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
17041 reg_names[ REGNO (XEXP (x, 0)) ]);
17042 else
17043 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
17044 reg_names[ REGNO (XEXP (x, 1)) ]);
17046 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
17047 && GET_CODE (XEXP (x, 1)) == CONST_INT)
17048 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
17049 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
17050 #if TARGET_MACHO
17051 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
17052 && CONSTANT_P (XEXP (x, 1)))
17054 fprintf (file, "lo16(");
17055 output_addr_const (file, XEXP (x, 1));
17056 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
17058 #endif
17059 #if TARGET_ELF
17060 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
17061 && CONSTANT_P (XEXP (x, 1)))
17063 output_addr_const (file, XEXP (x, 1));
17064 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
17066 #endif
17067 else if (toc_relative_expr_p (x, false))
17069 /* This hack along with a corresponding hack in
17070 rs6000_output_addr_const_extra arranges to output addends
17071 where the assembler expects to find them. eg.
17072 (lo_sum (reg 9)
17073 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
17074 without this hack would be output as "x@toc+8@l(9)". We
17075 want "x+8@toc@l(9)". */
17076 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
17077 if (GET_CODE (x) == LO_SUM)
17078 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
17079 else
17080 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
17082 else
17083 gcc_unreachable ();
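/* Example outputs (illustrative, with the default numeric register
   names): a bare (reg 9) prints "0(9)"; (plus (reg 9) (const_int 16))
   prints "16(9)"; an indexed (plus (reg 9) (reg 10)) prints "9,10",
   with the operands swapped whenever the first register is r0, since
   r0 in the base-register slot reads as the literal value zero.  */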
17086 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
17088 static bool
17089 rs6000_output_addr_const_extra (FILE *file, rtx x)
17091 if (GET_CODE (x) == UNSPEC)
17092 switch (XINT (x, 1))
17094 case UNSPEC_TOCREL:
17095 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
17096 && REG_P (XVECEXP (x, 0, 1))
17097 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
17098 output_addr_const (file, XVECEXP (x, 0, 0));
17099 if (x == tocrel_base && tocrel_offset != const0_rtx)
17101 if (INTVAL (tocrel_offset) >= 0)
17102 fprintf (file, "+");
17103 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
17105 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
17107 putc ('-', file);
17108 assemble_name (file, toc_label_name);
17110 else if (TARGET_ELF)
17111 fputs ("@toc", file);
17112 return true;
17114 #if TARGET_MACHO
17115 case UNSPEC_MACHOPIC_OFFSET:
17116 output_addr_const (file, XVECEXP (x, 0, 0));
17117 putc ('-', file);
17118 machopic_output_function_base_name (file);
17119 return true;
17120 #endif
17122 return false;
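/* Example (illustrative): a tocrel UNSPEC for symbol "x" with offset
   4 prints "x+4@toc" on 64-bit ELF, matching the hack described in
   print_operand above; on targets where the TOC base must be
   subtracted explicitly, it prints "x+4-" followed by the TOC label.  */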
17125 /* Target hook for assembling integer objects. The PowerPC version has
17126 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
17127 is defined. It also needs to handle DI-mode objects on 64-bit
17128 targets. */
17130 static bool
17131 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
17133 #ifdef RELOCATABLE_NEEDS_FIXUP
17134 /* Special handling for SI values. */
17135 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
17137 static int recurse = 0;
17139 /* For -mrelocatable, we mark all addresses that need to be fixed up in
17140 the .fixup section. Since the TOC section is already relocated, we
17141 don't need to mark it here. We used to skip the text section, but it
17142 should never be valid for relocated addresses to be placed in the text
17143 section. */
17144 if (TARGET_RELOCATABLE
17145 && in_section != toc_section
17146 && !recurse
17147 && GET_CODE (x) != CONST_INT
17148 && GET_CODE (x) != CONST_DOUBLE
17149 && CONSTANT_P (x))
17151 char buf[256];
17153 recurse = 1;
17154 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
17155 fixuplabelno++;
17156 ASM_OUTPUT_LABEL (asm_out_file, buf);
17157 fprintf (asm_out_file, "\t.long\t(");
17158 output_addr_const (asm_out_file, x);
17159 fprintf (asm_out_file, ")@fixup\n");
17160 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
17161 ASM_OUTPUT_ALIGN (asm_out_file, 2);
17162 fprintf (asm_out_file, "\t.long\t");
17163 assemble_name (asm_out_file, buf);
17164 fprintf (asm_out_file, "\n\t.previous\n");
17165 recurse = 0;
17166 return true;
17168 /* Remove initial .'s to turn a -mcall-aixdesc function
17169 address into the address of the descriptor, not the function
17170 itself. */
17171 else if (GET_CODE (x) == SYMBOL_REF
17172 && XSTR (x, 0)[0] == '.'
17173 && DEFAULT_ABI == ABI_AIX)
17175 const char *name = XSTR (x, 0);
17176 while (*name == '.')
17177 name++;
17179 fprintf (asm_out_file, "\t.long\t%s\n", name);
17180 return true;
17183 #endif /* RELOCATABLE_NEEDS_FIXUP */
17184 return default_assemble_integer (x, size, aligned_p);
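/* Sketch of the -mrelocatable fixup emitted above (illustrative; the
   label name is an assumption):

       .LCP0:
               .long   (sym)@fixup
               .section ".fixup","aw"
               .align  2
               .long   .LCP0
               .previous

   The word itself is emitted via @fixup and its own address is
   recorded in the .fixup section for run-time relocation.  */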
17187 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
17188 /* Emit an assembler directive to set symbol visibility for DECL to
17189 VISIBILITY_TYPE. */
17191 static void
17192 rs6000_assemble_visibility (tree decl, int vis)
17194 if (TARGET_XCOFF)
17195 return;
17197 /* Functions need to have their entry point symbol visibility set as
17198 well as their descriptor symbol visibility. */
17199 if (DEFAULT_ABI == ABI_AIX
17200 && DOT_SYMBOLS
17201 && TREE_CODE (decl) == FUNCTION_DECL)
17203 static const char * const visibility_types[] = {
17204 NULL, "internal", "hidden", "protected"
17207 const char *name, *type;
17209 name = ((* targetm.strip_name_encoding)
17210 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
17211 type = visibility_types[vis];
17213 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
17214 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
17216 else
17217 default_assemble_visibility (decl, vis);
17219 #endif
17221 enum rtx_code
17222 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
17224 /* Reversal of FP compares takes care -- an ordered compare
17225 becomes an unordered compare and vice versa. */
17226 if (mode == CCFPmode
17227 && (!flag_finite_math_only
17228 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
17229 || code == UNEQ || code == LTGT))
17230 return reverse_condition_maybe_unordered (code);
17231 else
17232 return reverse_condition (code);
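/* Example (illustrative): reversing GE in CCFPmode yields UNLT rather
   than LT, so a NaN input still takes the reversed branch, as the
   failing original test would require.  With -ffinite-math-only the
   plain reversal is used for the ordered codes instead.  */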
17235 /* Generate a compare for CODE. Return a brand-new rtx that
17236 represents the result of the compare. */
17238 static rtx
17239 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
17241 enum machine_mode comp_mode;
17242 rtx compare_result;
17243 enum rtx_code code = GET_CODE (cmp);
17244 rtx op0 = XEXP (cmp, 0);
17245 rtx op1 = XEXP (cmp, 1);
17247 if (FLOAT_MODE_P (mode))
17248 comp_mode = CCFPmode;
17249 else if (code == GTU || code == LTU
17250 || code == GEU || code == LEU)
17251 comp_mode = CCUNSmode;
17252 else if ((code == EQ || code == NE)
17253 && unsigned_reg_p (op0)
17254 && (unsigned_reg_p (op1)
17255 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
17256 /* These are unsigned values, perhaps there will be a later
17257 ordering compare that can be shared with this one. */
17258 comp_mode = CCUNSmode;
17259 else
17260 comp_mode = CCmode;
17262 /* If we have an unsigned compare, make sure we don't have a signed value as
17263 an immediate. */
17264 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
17265 && INTVAL (op1) < 0)
17267 op0 = copy_rtx_if_shared (op0);
17268 op1 = force_reg (GET_MODE (op0), op1);
17269 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
17272 /* First, the compare. */
17273 compare_result = gen_reg_rtx (comp_mode);
17275 /* E500 FP compare instructions on the GPRs. Yuck! */
17276 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
17277 && FLOAT_MODE_P (mode))
17279 rtx cmp, or_result, compare_result2;
17280 enum machine_mode op_mode = GET_MODE (op0);
17281 bool reverse_p;
17283 if (op_mode == VOIDmode)
17284 op_mode = GET_MODE (op1);
17286 /* First reverse the condition codes that aren't directly supported. */
17287 switch (code)
17289 case NE:
17290 case UNLT:
17291 case UNLE:
17292 case UNGT:
17293 case UNGE:
17294 code = reverse_condition_maybe_unordered (code);
17295 reverse_p = true;
17296 break;
17298 case EQ:
17299 case LT:
17300 case LE:
17301 case GT:
17302 case GE:
17303 reverse_p = false;
17304 break;
17306 default:
17307 gcc_unreachable ();
17310 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
17311 This explains the following mess. */
17313 switch (code)
17315 case EQ:
17316 switch (op_mode)
17318 case SFmode:
17319 cmp = (flag_finite_math_only && !flag_trapping_math)
17320 ? gen_tstsfeq_gpr (compare_result, op0, op1)
17321 : gen_cmpsfeq_gpr (compare_result, op0, op1);
17322 break;
17324 case DFmode:
17325 cmp = (flag_finite_math_only && !flag_trapping_math)
17326 ? gen_tstdfeq_gpr (compare_result, op0, op1)
17327 : gen_cmpdfeq_gpr (compare_result, op0, op1);
17328 break;
17330 case TFmode:
17331 cmp = (flag_finite_math_only && !flag_trapping_math)
17332 ? gen_tsttfeq_gpr (compare_result, op0, op1)
17333 : gen_cmptfeq_gpr (compare_result, op0, op1);
17334 break;
17336 default:
17337 gcc_unreachable ();
17339 break;
17341 case GT:
17342 case GE:
17343 switch (op_mode)
17345 case SFmode:
17346 cmp = (flag_finite_math_only && !flag_trapping_math)
17347 ? gen_tstsfgt_gpr (compare_result, op0, op1)
17348 : gen_cmpsfgt_gpr (compare_result, op0, op1);
17349 break;
17351 case DFmode:
17352 cmp = (flag_finite_math_only && !flag_trapping_math)
17353 ? gen_tstdfgt_gpr (compare_result, op0, op1)
17354 : gen_cmpdfgt_gpr (compare_result, op0, op1);
17355 break;
17357 case TFmode:
17358 cmp = (flag_finite_math_only && !flag_trapping_math)
17359 ? gen_tsttfgt_gpr (compare_result, op0, op1)
17360 : gen_cmptfgt_gpr (compare_result, op0, op1);
17361 break;
17363 default:
17364 gcc_unreachable ();
17366 break;
17368 case LT:
17369 case LE:
17370 switch (op_mode)
17372 case SFmode:
17373 cmp = (flag_finite_math_only && !flag_trapping_math)
17374 ? gen_tstsflt_gpr (compare_result, op0, op1)
17375 : gen_cmpsflt_gpr (compare_result, op0, op1);
17376 break;
17378 case DFmode:
17379 cmp = (flag_finite_math_only && !flag_trapping_math)
17380 ? gen_tstdflt_gpr (compare_result, op0, op1)
17381 : gen_cmpdflt_gpr (compare_result, op0, op1);
17382 break;
17384 case TFmode:
17385 cmp = (flag_finite_math_only && !flag_trapping_math)
17386 ? gen_tsttflt_gpr (compare_result, op0, op1)
17387 : gen_cmptflt_gpr (compare_result, op0, op1);
17388 break;
17390 default:
17391 gcc_unreachable ();
17393 break;
17395 default:
17396 gcc_unreachable ();
17399 /* Synthesize LE and GE from LT/GT || EQ. */
17400 if (code == LE || code == GE)
17402 emit_insn (cmp);
17404 compare_result2 = gen_reg_rtx (CCFPmode);
17406 /* Do the EQ. */
17407 switch (op_mode)
17409 case SFmode:
17410 cmp = (flag_finite_math_only && !flag_trapping_math)
17411 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
17412 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
17413 break;
17415 case DFmode:
17416 cmp = (flag_finite_math_only && !flag_trapping_math)
17417 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
17418 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
17419 break;
17421 case TFmode:
17422 cmp = (flag_finite_math_only && !flag_trapping_math)
17423 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
17424 : gen_cmptfeq_gpr (compare_result2, op0, op1);
17425 break;
17427 default:
17428 gcc_unreachable ();
17431 emit_insn (cmp);
17433 /* OR them together. */
17434 or_result = gen_reg_rtx (CCFPmode);
17435 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
17436 compare_result2);
17437 compare_result = or_result;
17440 code = reverse_p ? NE : EQ;
17442 emit_insn (cmp);
17444 else
17446 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
17447 CLOBBERs to match cmptf_internal2 pattern. */
17448 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
17449 && GET_MODE (op0) == TFmode
17450 && !TARGET_IEEEQUAD
17451 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
17452 emit_insn (gen_rtx_PARALLEL (VOIDmode,
17453 gen_rtvec (10,
17454 gen_rtx_SET (VOIDmode,
17455 compare_result,
17456 gen_rtx_COMPARE (comp_mode, op0, op1)),
17457 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17458 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17459 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17460 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17461 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17462 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17463 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17464 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
17465 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
17466 else if (GET_CODE (op1) == UNSPEC
17467 && XINT (op1, 1) == UNSPEC_SP_TEST)
17469 rtx op1b = XVECEXP (op1, 0, 0);
17470 comp_mode = CCEQmode;
17471 compare_result = gen_reg_rtx (CCEQmode);
17472 if (TARGET_64BIT)
17473 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
17474 else
17475 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
17477 else
17478 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
17479 gen_rtx_COMPARE (comp_mode, op0, op1)));
17482 /* Some kinds of FP comparisons need an OR operation;
17483 under flag_finite_math_only we don't bother. */
17484 if (FLOAT_MODE_P (mode)
17485 && !flag_finite_math_only
17486 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
17487 && (code == LE || code == GE
17488 || code == UNEQ || code == LTGT
17489 || code == UNGT || code == UNLT))
17491 enum rtx_code or1, or2;
17492 rtx or1_rtx, or2_rtx, compare2_rtx;
17493 rtx or_result = gen_reg_rtx (CCEQmode);
17495 switch (code)
17497 case LE: or1 = LT; or2 = EQ; break;
17498 case GE: or1 = GT; or2 = EQ; break;
17499 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
17500 case LTGT: or1 = LT; or2 = GT; break;
17501 case UNGT: or1 = UNORDERED; or2 = GT; break;
17502 case UNLT: or1 = UNORDERED; or2 = LT; break;
17503 default: gcc_unreachable ();
17505 validate_condition_mode (or1, comp_mode);
17506 validate_condition_mode (or2, comp_mode);
17507 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
17508 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
17509 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
17510 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
17511 const_true_rtx);
17512 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
17514 compare_result = or_result;
17515 code = EQ;
17518 validate_condition_mode (code, GET_MODE (compare_result));
17520 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
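/* Example of the OR lowering above (illustrative): a floating-point
   LE becomes a single compare followed by a cror that folds the LT
   and EQ bits into the CCEQ result, and the returned rtx then tests
   EQ of that combined bit.  */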
17524 /* Emit the RTL for an sISEL pattern. */
17526 void
17527 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
17529 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
17532 void
17533 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
17535 rtx condition_rtx;
17536 enum machine_mode op_mode;
17537 enum rtx_code cond_code;
17538 rtx result = operands[0];
17540 if (TARGET_ISEL && (mode == SImode || mode == DImode))
17542 rs6000_emit_sISEL (mode, operands);
17543 return;
17546 condition_rtx = rs6000_generate_compare (operands[1], mode);
17547 cond_code = GET_CODE (condition_rtx);
17549 if (FLOAT_MODE_P (mode)
17550 && !TARGET_FPRS && TARGET_HARD_FLOAT)
17552 rtx t;
17554 PUT_MODE (condition_rtx, SImode);
17555 t = XEXP (condition_rtx, 0);
17557 gcc_assert (cond_code == NE || cond_code == EQ);
17559 if (cond_code == NE)
17560 emit_insn (gen_e500_flip_gt_bit (t, t));
17562 emit_insn (gen_move_from_CR_gt_bit (result, t));
17563 return;
17566 if (cond_code == NE
17567 || cond_code == GE || cond_code == LE
17568 || cond_code == GEU || cond_code == LEU
17569 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
17571 rtx not_result = gen_reg_rtx (CCEQmode);
17572 rtx not_op, rev_cond_rtx;
17573 enum machine_mode cc_mode;
17575 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
17577 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
17578 SImode, XEXP (condition_rtx, 0), const0_rtx);
17579 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
17580 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
17581 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
17584 op_mode = GET_MODE (XEXP (operands[1], 0));
17585 if (op_mode == VOIDmode)
17586 op_mode = GET_MODE (XEXP (operands[1], 1));
17588 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
17590 PUT_MODE (condition_rtx, DImode);
17591 convert_move (result, condition_rtx, 0);
17593 else
17595 PUT_MODE (condition_rtx, SImode);
17596 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
17600 /* Emit a branch of kind CODE to location LOC. */
17602 void
17603 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
17605 rtx condition_rtx, loc_ref;
17607 condition_rtx = rs6000_generate_compare (operands[0], mode);
17608 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
17609 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
17610 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
17611 loc_ref, pc_rtx)));
17614 /* Return the string to output a conditional branch to LABEL, which is
17615 the operand template of the label, or NULL if the branch is really a
17616 conditional return.
17618 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
17619 condition code register and its mode specifies what kind of
17620 comparison we made.
17622 REVERSED is nonzero if we should reverse the sense of the comparison.
17624 INSN is the insn. */
17626 char *
17627 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
17629 static char string[64];
17630 enum rtx_code code = GET_CODE (op);
17631 rtx cc_reg = XEXP (op, 0);
17632 enum machine_mode mode = GET_MODE (cc_reg);
17633 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
17634 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
17635 int really_reversed = reversed ^ need_longbranch;
17636 char *s = string;
17637 const char *ccode;
17638 const char *pred;
17639 rtx note;
17641 validate_condition_mode (code, mode);
17643 /* Work out which way this really branches. We could use
17644 reverse_condition_maybe_unordered here always but this
17645 makes the resulting assembler clearer. */
17646 if (really_reversed)
17648 /* Reversal of FP compares takes care -- an ordered compare
17649 becomes an unordered compare and vice versa. */
17650 if (mode == CCFPmode)
17651 code = reverse_condition_maybe_unordered (code);
17652 else
17653 code = reverse_condition (code);
17656 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
17658 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
17659 to the GT bit. */
17660 switch (code)
17662 case EQ:
17663 /* Opposite of GT. */
17664 code = GT;
17665 break;
17667 case NE:
17668 code = UNLE;
17669 break;
17671 default:
17672 gcc_unreachable ();
17676 switch (code)
17678 /* Not all of these are actually distinct opcodes, but
17679 we distinguish them for clarity of the resulting assembler. */
17680 case NE: case LTGT:
17681 ccode = "ne"; break;
17682 case EQ: case UNEQ:
17683 ccode = "eq"; break;
17684 case GE: case GEU:
17685 ccode = "ge"; break;
17686 case GT: case GTU: case UNGT:
17687 ccode = "gt"; break;
17688 case LE: case LEU:
17689 ccode = "le"; break;
17690 case LT: case LTU: case UNLT:
17691 ccode = "lt"; break;
17692 case UNORDERED: ccode = "un"; break;
17693 case ORDERED: ccode = "nu"; break;
17694 case UNGE: ccode = "nl"; break;
17695 case UNLE: ccode = "ng"; break;
17696 default:
17697 gcc_unreachable ();
17700 /* Maybe we have a guess as to how likely the branch is. */
17701 pred = "";
17702 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
17703 if (note != NULL_RTX)
17705 /* PROB is the difference from 50%. */
17706 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
17708 /* Only hint for highly probable/improbable branches on newer
17709 cpus as static prediction overrides processor dynamic
17710 prediction. For older cpus we may as well always hint, but
17711 assume not taken for branches that are very close to 50% as a
17712 mispredicted taken branch is more expensive than a
17713 mispredicted not-taken branch. */
17714 if (rs6000_always_hint
17715 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
17716 && br_prob_note_reliable_p (note)))
17718 if (abs (prob) > REG_BR_PROB_BASE / 20
17719 && ((prob > 0) ^ need_longbranch))
17720 pred = "+";
17721 else
17722 pred = "-";
17726 if (label == NULL)
17727 s += sprintf (s, "b%slr%s ", ccode, pred);
17728 else
17729 s += sprintf (s, "b%s%s ", ccode, pred);
17731 /* We need to escape any '%' characters in the reg_names string.
17732 Assume they'd only be the first character.... */
17733 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
17734 *s++ = '%';
17735 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
17737 if (label != NULL)
17739 /* If the branch distance was too far, we may have to use an
17740 unconditional branch to go the distance. */
17741 if (need_longbranch)
17742 s += sprintf (s, ",$+8\n\tb %s", label);
17743 else
17744 s += sprintf (s, ",%s", label);
17747 return string;
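/* Example strings built above (illustrative; label and CR names are
   assumptions): a likely-taken EQ test of cr0 yields "beq+ 0,.L2",
   while an out-of-range branch is inverted and lengthened to
   "bne 0,$+8\n\tb .L2".  */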
17750 /* Return the string to flip the GT bit on a CR. */
17751 char *
17752 output_e500_flip_gt_bit (rtx dst, rtx src)
17754 static char string[64];
17755 int a, b;
17757 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
17758 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
17760 /* GT bit. */
17761 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
17762 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
17764 sprintf (string, "crnot %d,%d", a, b);
17765 return string;
17768 /* Return insn for VSX or Altivec comparisons. */
17770 static rtx
17771 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
17773 rtx mask;
17774 enum machine_mode mode = GET_MODE (op0);
17776 switch (code)
17778 default:
17779 break;
17781 case GE:
17782 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
17783 return NULL_RTX;
17785 case EQ:
17786 case GT:
17787 case GTU:
17788 case ORDERED:
17789 case UNORDERED:
17790 case UNEQ:
17791 case LTGT:
17792 mask = gen_reg_rtx (mode);
17793 emit_insn (gen_rtx_SET (VOIDmode,
17794 mask,
17795 gen_rtx_fmt_ee (code, mode, op0, op1)));
17796 return mask;
17799 return NULL_RTX;
17802 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
17803 DMODE is expected destination mode. This is a recursive function. */
17805 static rtx
17806 rs6000_emit_vector_compare (enum rtx_code rcode,
17807 rtx op0, rtx op1,
17808 enum machine_mode dmode)
17810 rtx mask;
17811 bool swap_operands = false;
17812 bool try_again = false;
17814 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
17815 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
17817 /* See if the comparison works as is. */
17818 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
17819 if (mask)
17820 return mask;
17822 switch (rcode)
17824 case LT:
17825 rcode = GT;
17826 swap_operands = true;
17827 try_again = true;
17828 break;
17829 case LTU:
17830 rcode = GTU;
17831 swap_operands = true;
17832 try_again = true;
17833 break;
17834 case NE:
17835 case UNLE:
17836 case UNLT:
17837 case UNGE:
17838 case UNGT:
17839 /* Invert condition and try again.
17840 e.g., A != B becomes ~(A==B). */
17842 enum rtx_code rev_code;
17843 enum insn_code nor_code;
17844 rtx mask2;
17846 rev_code = reverse_condition_maybe_unordered (rcode);
17847 if (rev_code == UNKNOWN)
17848 return NULL_RTX;
17850 nor_code = optab_handler (one_cmpl_optab, dmode);
17851 if (nor_code == CODE_FOR_nothing)
17852 return NULL_RTX;
17854 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
17855 if (!mask2)
17856 return NULL_RTX;
17858 mask = gen_reg_rtx (dmode);
17859 emit_insn (GEN_FCN (nor_code) (mask, mask2));
17860 return mask;
17862 break;
17863 case GE:
17864 case GEU:
17865 case LE:
17866 case LEU:
17867 /* Try GT/GTU/LT/LTU OR EQ. */
17869 rtx c_rtx, eq_rtx;
17870 enum insn_code ior_code;
17871 enum rtx_code new_code;
17873 switch (rcode)
17875 case GE:
17876 new_code = GT;
17877 break;
17879 case GEU:
17880 new_code = GTU;
17881 break;
17883 case LE:
17884 new_code = LT;
17885 break;
17887 case LEU:
17888 new_code = LTU;
17889 break;
17891 default:
17892 gcc_unreachable ();
17895 ior_code = optab_handler (ior_optab, dmode);
17896 if (ior_code == CODE_FOR_nothing)
17897 return NULL_RTX;
17899 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
17900 if (!c_rtx)
17901 return NULL_RTX;
17903 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
17904 if (!eq_rtx)
17905 return NULL_RTX;
17907 mask = gen_reg_rtx (dmode);
17908 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
17909 return mask;
17911 break;
17912 default:
17913 return NULL_RTX;
17916 if (try_again)
17918 if (swap_operands)
17920 rtx tmp;
17921 tmp = op0;
17922 op0 = op1;
17923 op1 = tmp;
17926 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
17927 if (mask)
17928 return mask;
17931 /* You only get two chances. */
17932 return NULL_RTX;
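/* Examples of the recursion above (illustrative): a vector LE is
   built as (LT op0 op1) IOR (EQ op0 op1); NE is built by emitting EQ
   and complementing the mask; LT simply swaps its operands and
   retries as GT.  */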
17935 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
17936 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
17937 operands for the relation operation COND. */
17939 int
17940 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
17941 rtx cond, rtx cc_op0, rtx cc_op1)
17943 enum machine_mode dest_mode = GET_MODE (dest);
17944 enum machine_mode mask_mode = GET_MODE (cc_op0);
17945 enum rtx_code rcode = GET_CODE (cond);
17946 enum machine_mode cc_mode = CCmode;
17947 rtx mask;
17948 rtx cond2;
17949 rtx tmp;
17950 bool invert_move = false;
17952 if (VECTOR_UNIT_NONE_P (dest_mode))
17953 return 0;
17955 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
17956 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
17958 switch (rcode)
17960 /* Swap operands if we can, and fall back to doing the operation as
17961 specified, and doing a NOR to invert the test. */
17962 case NE:
17963 case UNLE:
17964 case UNLT:
17965 case UNGE:
17966 case UNGT:
17967 /* Invert condition and try again.
17968 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
17969 invert_move = true;
17970 rcode = reverse_condition_maybe_unordered (rcode);
17971 if (rcode == UNKNOWN)
17972 return 0;
17973 break;
17975 /* Mark unsigned tests with CCUNSmode. */
17976 case GTU:
17977 case GEU:
17978 case LTU:
17979 case LEU:
17980 cc_mode = CCUNSmode;
17981 break;
17983 default:
17984 break;
17987 /* Get the vector mask for the given relational operations. */
17988 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
17990 if (!mask)
17991 return 0;
17993 if (invert_move)
17995 tmp = op_true;
17996 op_true = op_false;
17997 op_false = tmp;
18000 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
18001 CONST0_RTX (dest_mode));
18002 emit_insn (gen_rtx_SET (VOIDmode,
18003 dest,
18004 gen_rtx_IF_THEN_ELSE (dest_mode,
18005 cond2,
18006 op_true,
18007 op_false)));
18008 return 1;
18011 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
18012 operands of the last comparison is nonzero/true, FALSE_COND if it
18013 is zero/false. Return 0 if the hardware has no such operation. */
18015 int
18016 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
18018 enum rtx_code code = GET_CODE (op);
18019 rtx op0 = XEXP (op, 0);
18020 rtx op1 = XEXP (op, 1);
18021 REAL_VALUE_TYPE c1;
18022 enum machine_mode compare_mode = GET_MODE (op0);
18023 enum machine_mode result_mode = GET_MODE (dest);
18024 rtx temp;
18025 bool is_against_zero;
18027 /* These modes should always match. */
18028 if (GET_MODE (op1) != compare_mode
18029 /* In the isel case however, we can use a compare immediate, so
18030 op1 may be a small constant. */
18031 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
18032 return 0;
18033 if (GET_MODE (true_cond) != result_mode)
18034 return 0;
18035 if (GET_MODE (false_cond) != result_mode)
18036 return 0;
18038 /* Don't allow using floating point comparisons for integer results for
18039 now. */
18040 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
18041 return 0;
18043 /* First, work out if the hardware can do this at all, or
18044 if it's too slow.... */
18045 if (!FLOAT_MODE_P (compare_mode))
18047 if (TARGET_ISEL)
18048 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
18049 return 0;
18051 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
18052 && SCALAR_FLOAT_MODE_P (compare_mode))
18053 return 0;
18055 is_against_zero = op1 == CONST0_RTX (compare_mode);
18057 /* A floating-point subtract might overflow, underflow, or produce
18058 an inexact result, thus changing the floating-point flags, so it
18059 can't be generated if we care about that. It's safe if one side
18060 of the construct is zero, since then no subtract will be
18061 generated. */
18062 if (SCALAR_FLOAT_MODE_P (compare_mode)
18063 && flag_trapping_math && ! is_against_zero)
18064 return 0;
18066 /* Eliminate half of the comparisons by switching operands, this
18067 makes the remaining code simpler. */
18068 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
18069 || code == LTGT || code == LT || code == UNLE)
18071 code = reverse_condition_maybe_unordered (code);
18072 temp = true_cond;
18073 true_cond = false_cond;
18074 false_cond = temp;
18077 /* UNEQ and LTGT take four instructions for a comparison with zero,
18078 it'll probably be faster to use a branch here too. */
18079 if (code == UNEQ && HONOR_NANS (compare_mode))
18080 return 0;
18082 if (GET_CODE (op1) == CONST_DOUBLE)
18083 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
18085 /* We're going to try to implement comparisons by performing
18086 a subtract, then comparing against zero. Unfortunately,
18087 Inf - Inf is NaN which is not zero, and so if we don't
18088 know that the operand is finite and the comparison
18089 would treat EQ different to UNORDERED, we can't do it. */
18090 if (HONOR_INFINITIES (compare_mode)
18091 && code != GT && code != UNGE
18092 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
18093 /* Constructs of the form (a OP b ? a : b) are safe. */
18094 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
18095 || (! rtx_equal_p (op0, true_cond)
18096 && ! rtx_equal_p (op1, true_cond))))
18097 return 0;
18099 /* At this point we know we can use fsel. */
18101 /* Reduce the comparison to a comparison against zero. */
18102 if (! is_against_zero)
18104 temp = gen_reg_rtx (compare_mode);
18105 emit_insn (gen_rtx_SET (VOIDmode, temp,
18106 gen_rtx_MINUS (compare_mode, op0, op1)));
18107 op0 = temp;
18108 op1 = CONST0_RTX (compare_mode);
18111 /* If we don't care about NaNs we can reduce some of the comparisons
18112 down to faster ones. */
18113 if (! HONOR_NANS (compare_mode))
18114 switch (code)
18116 case GT:
18117 code = LE;
18118 temp = true_cond;
18119 true_cond = false_cond;
18120 false_cond = temp;
18121 break;
18122 case UNGE:
18123 code = GE;
18124 break;
18125 case UNEQ:
18126 code = EQ;
18127 break;
18128 default:
18129 break;
18132 /* Now, reduce everything down to a GE. */
18133 switch (code)
18135 case GE:
18136 break;
18138 case LE:
18139 temp = gen_reg_rtx (compare_mode);
18140 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
18141 op0 = temp;
18142 break;
18144 case ORDERED:
18145 temp = gen_reg_rtx (compare_mode);
18146 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
18147 op0 = temp;
18148 break;
18150 case EQ:
18151 temp = gen_reg_rtx (compare_mode);
18152 emit_insn (gen_rtx_SET (VOIDmode, temp,
18153 gen_rtx_NEG (compare_mode,
18154 gen_rtx_ABS (compare_mode, op0))));
18155 op0 = temp;
18156 break;
18158 case UNGE:
18159 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
18160 temp = gen_reg_rtx (result_mode);
18161 emit_insn (gen_rtx_SET (VOIDmode, temp,
18162 gen_rtx_IF_THEN_ELSE (result_mode,
18163 gen_rtx_GE (VOIDmode,
18164 op0, op1),
18165 true_cond, false_cond)));
18166 false_cond = true_cond;
18167 true_cond = temp;
18169 temp = gen_reg_rtx (compare_mode);
18170 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
18171 op0 = temp;
18172 break;
18174 case GT:
18175 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
18176 temp = gen_reg_rtx (result_mode);
18177 emit_insn (gen_rtx_SET (VOIDmode, temp,
18178 gen_rtx_IF_THEN_ELSE (result_mode,
18179 gen_rtx_GE (VOIDmode,
18180 op0, op1),
18181 true_cond, false_cond)));
18182 true_cond = false_cond;
18183 false_cond = temp;
18185 temp = gen_reg_rtx (compare_mode);
18186 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
18187 op0 = temp;
18188 break;
18190 default:
18191 gcc_unreachable ();
18194 emit_insn (gen_rtx_SET (VOIDmode, dest,
18195 gen_rtx_IF_THEN_ELSE (result_mode,
18196 gen_rtx_GE (VOIDmode,
18197 op0, op1),
18198 true_cond, false_cond)));
18199 return 1;
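/* Example of the fsel reduction above (illustrative): for
   "a <= b ? x : y" the comparison is first rewritten against zero as
   "t = a - b", then LE becomes GE by negating t, so the final insn
   is an fsel selecting x when -t >= 0.  */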
18202 /* Same as above, but for ints (isel). */
18204 static int
18205 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
18207 rtx condition_rtx, cr;
18208 enum machine_mode mode = GET_MODE (dest);
18209 enum rtx_code cond_code;
18210 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
18211 bool signedp;
18213 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
18214 return 0;
18216 /* We still have to do the compare, because isel doesn't do a
18217 compare; it just looks at the CRx bits set by a previous compare
18218 instruction. */
18219 condition_rtx = rs6000_generate_compare (op, mode);
18220 cond_code = GET_CODE (condition_rtx);
18221 cr = XEXP (condition_rtx, 0);
18222 signedp = GET_MODE (cr) == CCmode;
18224 isel_func = (mode == SImode
18225 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
18226 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
18228 switch (cond_code)
18230 case LT: case GT: case LTU: case GTU: case EQ:
18231 /* isel handles these directly. */
18232 break;
18234 default:
18235 /* We need to swap the sense of the comparison. */
18237 rtx t = true_cond;
18238 true_cond = false_cond;
18239 false_cond = t;
18240 PUT_CODE (condition_rtx, reverse_condition (cond_code));
18242 break;
18245 false_cond = force_reg (mode, false_cond);
18246 if (true_cond != const0_rtx)
18247 true_cond = force_reg (mode, true_cond);
18249 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
18251 return 1;
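/* Usage sketch (assumes an -misel target; register assignments are
   illustrative): for
       int f (int a, int b, int c, int d) { return a < b ? c : d; }
   this expander emits approximately
       cmpw  cr0,r3,r4
       isel  r3,r5,r6,0        ; r3 = CR0.LT ? r5 : r6
   replacing a conditional branch with a single conditional select.  */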
18254 const char *
18255 output_isel (rtx *operands)
18257 enum rtx_code code;
18259 code = GET_CODE (operands[1]);
18261 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
18263 gcc_assert (GET_CODE (operands[2]) == REG
18264 && GET_CODE (operands[3]) == REG);
18265 PUT_CODE (operands[1], reverse_condition (code));
18266 return "isel %0,%3,%2,%j1";
18269 return "isel %0,%2,%3,%j1";
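/* For example, a GE condition is printed by reversing it to LT and
   swapping the two source operands, because isel itself can only test
   a single CR bit being set.  */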
18272 void
18273 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
18275 enum machine_mode mode = GET_MODE (op0);
18276 enum rtx_code c;
18277 rtx target;
18279 /* VSX/altivec have direct min/max insns. */
18280 if ((code == SMAX || code == SMIN)
18281 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
18282 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
18284 emit_insn (gen_rtx_SET (VOIDmode,
18285 dest,
18286 gen_rtx_fmt_ee (code, mode, op0, op1)));
18287 return;
18290 if (code == SMAX || code == SMIN)
18291 c = GE;
18292 else
18293 c = GEU;
18295 if (code == SMAX || code == UMAX)
18296 target = emit_conditional_move (dest, c, op0, op1, mode,
18297 op0, op1, mode, 0);
18298 else
18299 target = emit_conditional_move (dest, c, op0, op1, mode,
18300 op1, op0, mode, 0);
18301 gcc_assert (target);
18302 if (target != dest)
18303 emit_move_insn (dest, target);
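/* Worked example: SMAX lowers "dest = smax (a, b)" to the conditional
   move "dest = (a >= b) ? a : b", while SMIN swaps the arms to
   "dest = (a >= b) ? b : a".  UMAX/UMIN do the same with the unsigned
   comparison GEU.  */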
18306 /* A subroutine of the atomic operation splitters. Jump to LABEL if
18307 COND is true. Mark the jump as unlikely to be taken. */
18309 static void
18310 emit_unlikely_jump (rtx cond, rtx label)
18312 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
18313 rtx x;
18315 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
18316 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
18317 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
18320 /* A subroutine of the atomic operation splitters. Emit a load-locked
18321 instruction in MODE. For QI/HImode, possibly use a pattern that includes
18322 the zero_extend operation. */
18324 static void
18325 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
18327 rtx (*fn) (rtx, rtx) = NULL;
18329 switch (mode)
18331 case QImode:
18332 fn = gen_load_lockedqi;
18333 break;
18334 case HImode:
18335 fn = gen_load_lockedhi;
18336 break;
18337 case SImode:
18338 if (GET_MODE (mem) == QImode)
18339 fn = gen_load_lockedqi_si;
18340 else if (GET_MODE (mem) == HImode)
18341 fn = gen_load_lockedhi_si;
18342 else
18343 fn = gen_load_lockedsi;
18344 break;
18345 case DImode:
18346 fn = gen_load_lockeddi;
18347 break;
18348 case TImode:
18349 fn = gen_load_lockedti;
18350 break;
18351 default:
18352 gcc_unreachable ();
18354 emit_insn (fn (reg, mem));
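/* Assumed instruction mapping (a reference sketch, not guaranteed by
   this function itself): the patterns above are expected to expand to
   the load-and-reserve family
       QImode -> lbarx   (with TARGET_SYNC_HI_QI, i.e. power8)
       HImode -> lharx   (likewise power8)
       SImode -> lwarx
       DImode -> ldarx
       TImode -> lqarx   (power8)
   where the *_si variants zero-extend the byte/halfword into SImode.  */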
18357 /* A subroutine of the atomic operation splitters. Emit a store-conditional
18358 instruction in MODE. */
18360 static void
18361 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
18363 rtx (*fn) (rtx, rtx, rtx) = NULL;
18365 switch (mode)
18367 case QImode:
18368 fn = gen_store_conditionalqi;
18369 break;
18370 case HImode:
18371 fn = gen_store_conditionalhi;
18372 break;
18373 case SImode:
18374 fn = gen_store_conditionalsi;
18375 break;
18376 case DImode:
18377 fn = gen_store_conditionaldi;
18378 break;
18379 case TImode:
18380 fn = gen_store_conditionalti;
18381 break;
18382 default:
18383 gcc_unreachable ();
18386 /* Emit sync before stwcx. to address PPC405 Erratum. */
18387 if (PPC405_ERRATUM77)
18388 emit_insn (gen_hwsync ());
18390 emit_insn (fn (res, mem, val));
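/* Assumed counterpart mapping (reference sketch): stbcx., sthcx.,
   stwcx., stdcx. and stqcx. respectively.  Each sets CR0.EQ when the
   reservation taken by the matching load-and-reserve still holds,
   which is the bit the retry loops below branch on.  */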
18393 /* Expand barriers before and after a load_locked/store_cond sequence. */
18395 static rtx
18396 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
18398 rtx addr = XEXP (mem, 0);
18399 int strict_p = (reload_in_progress || reload_completed);
18401 if (!legitimate_indirect_address_p (addr, strict_p)
18402 && !legitimate_indexed_address_p (addr, strict_p))
18404 addr = force_reg (Pmode, addr);
18405 mem = replace_equiv_address_nv (mem, addr);
18408 switch (model)
18410 case MEMMODEL_RELAXED:
18411 case MEMMODEL_CONSUME:
18412 case MEMMODEL_ACQUIRE:
18413 break;
18414 case MEMMODEL_RELEASE:
18415 case MEMMODEL_ACQ_REL:
18416 emit_insn (gen_lwsync ());
18417 break;
18418 case MEMMODEL_SEQ_CST:
18419 emit_insn (gen_hwsync ());
18420 break;
18421 default:
18422 gcc_unreachable ();
18424 return mem;
18427 static void
18428 rs6000_post_atomic_barrier (enum memmodel model)
18430 switch (model)
18432 case MEMMODEL_RELAXED:
18433 case MEMMODEL_CONSUME:
18434 case MEMMODEL_RELEASE:
18435 break;
18436 case MEMMODEL_ACQUIRE:
18437 case MEMMODEL_ACQ_REL:
18438 case MEMMODEL_SEQ_CST:
18439 emit_insn (gen_isync ());
18440 break;
18441 default:
18442 gcc_unreachable ();
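/* Barrier placement sketch (derived from the two helpers above): a
   seq-cst read-modify-write is bracketed as
       hwsync                                   ; pre-barrier, SEQ_CST
       loop: lwarx ... ; op ... ; stwcx. ... ; bne- loop
       isync                                    ; post-barrier, SEQ_CST
   RELEASE gets only an lwsync before, and ACQUIRE only an isync
   after.  */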
18446 /* A subroutine of the various atomic expanders. For sub-word operations,
18447 we must adjust things to operate on SImode. Given the original MEM,
18448 return a new aligned memory. Also build and return the quantities by
18449 which to shift and mask. */
18451 static rtx
18452 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
18454 rtx addr, align, shift, mask, mem;
18455 HOST_WIDE_INT shift_mask;
18456 enum machine_mode mode = GET_MODE (orig_mem);
18458 /* For smaller modes, we have to implement this via SImode. */
18459 shift_mask = (mode == QImode ? 0x18 : 0x10);
18461 addr = XEXP (orig_mem, 0);
18462 addr = force_reg (GET_MODE (addr), addr);
18464 /* Generate a new SImode memory for the aligned word containing the subword. We
18465 do not want any of the existing MEM_ATTR data, as we're now
18466 accessing memory outside the original object. */
18467 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
18468 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18469 mem = gen_rtx_MEM (SImode, align);
18470 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
18471 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
18472 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
18474 /* Shift amount for subword relative to aligned word. */
18475 shift = gen_reg_rtx (SImode);
18476 addr = gen_lowpart (SImode, addr);
18477 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
18478 if (WORDS_BIG_ENDIAN)
18479 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
18480 shift, 1, OPTAB_LIB_WIDEN);
18481 *pshift = shift;
18483 /* Mask for insertion. */
18484 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
18485 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
18486 *pmask = mask;
18488 return mem;
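/* Worked example (little-endian assumed for concreteness): for a
   QImode access at address A the code above computes
       mem   = *(SImode *) (A & -4)    ; aligned containing word
       shift = (A & 3) << 3            ; 0, 8, 16 or 24
       mask  = 0xff << shift
   On big-endian the shift is XORed with 24 so that the lowest address
   maps to the most-significant byte of the word.  */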
18491 /* A subroutine of the various atomic expanders. For sub-word operands,
18492 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
18494 static rtx
18495 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
18497 rtx x;
18499 x = gen_reg_rtx (SImode);
18500 emit_insn (gen_rtx_SET (VOIDmode, x,
18501 gen_rtx_AND (SImode,
18502 gen_rtx_NOT (SImode, mask),
18503 oldval)));
18505 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
18507 return x;
18510 /* A subroutine of the various atomic expanders. For sub-word operands,
18511 extract WIDE to NARROW via SHIFT. */
18513 static void
18514 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
18516 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
18517 wide, 1, OPTAB_LIB_WIDEN);
18518 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
18521 /* Expand an atomic compare and swap operation. */
18523 void
18524 rs6000_expand_atomic_compare_and_swap (rtx operands[])
18526 rtx boolval, retval, mem, oldval, newval, cond;
18527 rtx label1, label2, x, mask, shift;
18528 enum machine_mode mode, orig_mode;
18529 enum memmodel mod_s, mod_f;
18530 bool is_weak;
18532 boolval = operands[0];
18533 retval = operands[1];
18534 mem = operands[2];
18535 oldval = operands[3];
18536 newval = operands[4];
18537 is_weak = (INTVAL (operands[5]) != 0);
18538 mod_s = (enum memmodel) INTVAL (operands[6]);
18539 mod_f = (enum memmodel) INTVAL (operands[7]);
18540 orig_mode = mode = GET_MODE (mem);
18542 mask = shift = NULL_RTX;
18543 if (mode == QImode || mode == HImode)
18545 /* Before power8, we didn't have access to lbarx/lharx, so generate a
18546 lwarx and shift/mask operations. With power8, we need to do the
18547 comparison in SImode, but the store is still done in QI/HImode. */
18548 oldval = convert_modes (SImode, mode, oldval, 1);
18550 if (!TARGET_SYNC_HI_QI)
18552 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
18554 /* Shift and mask OLDVAL into position within the word. */
18555 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
18556 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18558 /* Shift and mask NEWVAL into position within the word. */
18559 newval = convert_modes (SImode, mode, newval, 1);
18560 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
18561 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18564 /* Prepare to adjust the return value. */
18565 retval = gen_reg_rtx (SImode);
18566 mode = SImode;
18568 else if (reg_overlap_mentioned_p (retval, oldval))
18569 oldval = copy_to_reg (oldval);
18571 mem = rs6000_pre_atomic_barrier (mem, mod_s);
18573 label1 = NULL_RTX;
18574 if (!is_weak)
18576 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
18577 emit_label (XEXP (label1, 0));
18579 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
18581 emit_load_locked (mode, retval, mem);
18583 x = retval;
18584 if (mask)
18586 x = expand_simple_binop (SImode, AND, retval, mask,
18587 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18590 cond = gen_reg_rtx (CCmode);
18591 /* If we have TImode, synthesize a comparison. */
18592 if (mode != TImode)
18593 x = gen_rtx_COMPARE (CCmode, x, oldval);
18594 else
18596 rtx xor1_result = gen_reg_rtx (DImode);
18597 rtx xor2_result = gen_reg_rtx (DImode);
18598 rtx or_result = gen_reg_rtx (DImode);
18599 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
18600 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
18601 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
18602 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
18604 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
18605 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
18606 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
18607 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
18610 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
18612 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18613 emit_unlikely_jump (x, label2);
18615 x = newval;
18616 if (mask)
18617 x = rs6000_mask_atomic_subword (retval, newval, mask);
18619 emit_store_conditional (orig_mode, cond, mem, x);
18621 if (!is_weak)
18623 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18624 emit_unlikely_jump (x, label1);
18627 if (mod_f != MEMMODEL_RELAXED)
18628 emit_label (XEXP (label2, 0));
18630 rs6000_post_atomic_barrier (mod_s);
18632 if (mod_f == MEMMODEL_RELAXED)
18633 emit_label (XEXP (label2, 0));
18635 if (shift)
18636 rs6000_finish_atomic_subword (operands[1], retval, shift);
18637 else if (mode != GET_MODE (operands[1]))
18638 convert_move (operands[1], retval, 1);
18640 /* In all cases, CR0 contains EQ on success, and NE on failure. */
18641 x = gen_rtx_EQ (SImode, cond, const0_rtx);
18642 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
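/* Overall shape of the emitted sequence for a strong, word-sized
   compare-and-swap (a sketch; memory-model barriers omitted):
       loop: lwarx   ret,0,mem
             cmpw    cr0,ret,oldval
             bne-    cr0,fail
             stwcx.  newval,0,mem
             bne-    cr0,loop
       fail:                            ; CR0.EQ = success
   The weak form omits the backwards branch, so it may fail spuriously
   when the reservation is lost.  */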
18645 /* Expand an atomic exchange operation. */
18647 void
18648 rs6000_expand_atomic_exchange (rtx operands[])
18650 rtx retval, mem, val, cond;
18651 enum machine_mode mode;
18652 enum memmodel model;
18653 rtx label, x, mask, shift;
18655 retval = operands[0];
18656 mem = operands[1];
18657 val = operands[2];
18658 model = (enum memmodel) INTVAL (operands[3]);
18659 mode = GET_MODE (mem);
18661 mask = shift = NULL_RTX;
18662 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
18664 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
18666 /* Shift and mask VAL into position within the word. */
18667 val = convert_modes (SImode, mode, val, 1);
18668 val = expand_simple_binop (SImode, ASHIFT, val, shift,
18669 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18671 /* Prepare to adjust the return value. */
18672 retval = gen_reg_rtx (SImode);
18673 mode = SImode;
18676 mem = rs6000_pre_atomic_barrier (mem, model);
18678 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
18679 emit_label (XEXP (label, 0));
18681 emit_load_locked (mode, retval, mem);
18683 x = val;
18684 if (mask)
18685 x = rs6000_mask_atomic_subword (retval, val, mask);
18687 cond = gen_reg_rtx (CCmode);
18688 emit_store_conditional (mode, cond, mem, x);
18690 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18691 emit_unlikely_jump (x, label);
18693 rs6000_post_atomic_barrier (model);
18695 if (shift)
18696 rs6000_finish_atomic_subword (operands[0], retval, shift);
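/* Sketch of the word-sized exchange loop this expands to:
       loop: lwarx   ret,0,mem
             stwcx.  val,0,mem
             bne-    cr0,loop
   bracketed by whatever barriers MODEL selects above.  */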
18699 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
18700 to perform. MEM is the memory on which to operate. VAL is the second
18701 operand of the binary operator. BEFORE and AFTER are optional locations to
18702 return the value of MEM either before or after the operation. MODEL_RTX
18703 is a CONST_INT containing the memory model to use. */
18705 void
18706 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
18707 rtx orig_before, rtx orig_after, rtx model_rtx)
18709 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
18710 enum machine_mode mode = GET_MODE (mem);
18711 enum machine_mode store_mode = mode;
18712 rtx label, x, cond, mask, shift;
18713 rtx before = orig_before, after = orig_after;
18715 mask = shift = NULL_RTX;
18716 /* On power8, we want to use SImode for the operation. On previous systems,
18717 use the operation in a subword and shift/mask to get the proper byte or
18718 halfword. */
18719 if (mode == QImode || mode == HImode)
18721 if (TARGET_SYNC_HI_QI)
18723 val = convert_modes (SImode, mode, val, 1);
18725 /* Prepare to adjust the return value. */
18726 before = gen_reg_rtx (SImode);
18727 if (after)
18728 after = gen_reg_rtx (SImode);
18729 mode = SImode;
18731 else
18733 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
18735 /* Shift and mask VAL into position within the word. */
18736 val = convert_modes (SImode, mode, val, 1);
18737 val = expand_simple_binop (SImode, ASHIFT, val, shift,
18738 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18740 switch (code)
18742 case IOR:
18743 case XOR:
18744 /* We've already zero-extended VAL. That is sufficient to
18745 make certain that it does not affect other bits. */
18746 mask = NULL;
18747 break;
18749 case AND:
18750 /* If we make certain that all of the other bits in VAL are
18751 set, that will be sufficient to not affect other bits. */
18752 x = gen_rtx_NOT (SImode, mask);
18753 x = gen_rtx_IOR (SImode, x, val);
18754 emit_insn (gen_rtx_SET (VOIDmode, val, x));
18755 mask = NULL;
18756 break;
18758 case NOT:
18759 case PLUS:
18760 case MINUS:
18761 /* These will all affect bits outside the field and need
18762 adjustment via MASK within the loop. */
18763 break;
18765 default:
18766 gcc_unreachable ();
18769 /* Prepare to adjust the return value. */
18770 before = gen_reg_rtx (SImode);
18771 if (after)
18772 after = gen_reg_rtx (SImode);
18773 store_mode = mode = SImode;
18777 mem = rs6000_pre_atomic_barrier (mem, model);
18779 label = gen_label_rtx ();
18780 emit_label (label);
18781 label = gen_rtx_LABEL_REF (VOIDmode, label);
18783 if (before == NULL_RTX)
18784 before = gen_reg_rtx (mode);
18786 emit_load_locked (mode, before, mem);
18788 if (code == NOT)
18790 x = expand_simple_binop (mode, AND, before, val,
18791 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18792 after = expand_simple_unop (mode, NOT, x, after, 1);
18794 else
18796 after = expand_simple_binop (mode, code, before, val,
18797 after, 1, OPTAB_LIB_WIDEN);
18800 x = after;
18801 if (mask)
18803 x = expand_simple_binop (SImode, AND, after, mask,
18804 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18805 x = rs6000_mask_atomic_subword (before, x, mask);
18807 else if (store_mode != mode)
18808 x = convert_modes (store_mode, mode, x, 1);
18810 cond = gen_reg_rtx (CCmode);
18811 emit_store_conditional (store_mode, cond, mem, x);
18813 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18814 emit_unlikely_jump (x, label);
18816 rs6000_post_atomic_barrier (model);
18818 if (shift)
18820 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
18821 then do the calculations in an SImode register. */
18822 if (orig_before)
18823 rs6000_finish_atomic_subword (orig_before, before, shift);
18824 if (orig_after)
18825 rs6000_finish_atomic_subword (orig_after, after, shift);
18827 else if (store_mode != mode)
18829 /* QImode/HImode on machines with lbarx/lharx where we do the native
18830 operation and then do the calculations in an SImode register. */
18831 if (orig_before)
18832 convert_move (orig_before, before, 1);
18833 if (orig_after)
18834 convert_move (orig_after, after, 1);
18836 else if (orig_after && after != orig_after)
18837 emit_move_insn (orig_after, after);
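/* Sketch for a word-sized __atomic_fetch_add (barriers omitted):
       loop: lwarx   before,0,mem
             add     after,before,val
             stwcx.  after,0,mem
             bne-    cr0,loop
   NOT implements fetch-nand, i.e. after = ~(before & val), matching
   the __atomic_fetch_nand semantics; sub-word cases additionally go
   through the shift/mask machinery above.  */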
18840 /* Emit instructions to move SRC to DST. Called by splitters for
18841 multi-register moves. It will emit at most one instruction for
18842 each register that is accessed; that is, it won't emit li/lis pairs
18843 (or equivalent for 64-bit code). One of SRC or DST must be a hard
18844 register. */
18846 void
18847 rs6000_split_multireg_move (rtx dst, rtx src)
18849 /* The register number of the first register being moved. */
18850 int reg;
18851 /* The mode that is to be moved. */
18852 enum machine_mode mode;
18853 /* The mode that the move is being done in, and its size. */
18854 enum machine_mode reg_mode;
18855 int reg_mode_size;
18856 /* The number of registers that will be moved. */
18857 int nregs;
18859 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
18860 mode = GET_MODE (dst);
18861 nregs = hard_regno_nregs[reg][mode];
18862 if (FP_REGNO_P (reg))
18863 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
18864 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
18865 else if (ALTIVEC_REGNO_P (reg))
18866 reg_mode = V16QImode;
18867 else if (TARGET_E500_DOUBLE && mode == TFmode)
18868 reg_mode = DFmode;
18869 else
18870 reg_mode = word_mode;
18871 reg_mode_size = GET_MODE_SIZE (reg_mode);
18873 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
18875 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
18877 /* Move register range backwards, if we might have destructive
18878 overlap. */
18879 int i;
18880 for (i = nregs - 1; i >= 0; i--)
18881 emit_insn (gen_rtx_SET (VOIDmode,
18882 simplify_gen_subreg (reg_mode, dst, mode,
18883 i * reg_mode_size),
18884 simplify_gen_subreg (reg_mode, src, mode,
18885 i * reg_mode_size)));
18887 else
18889 int i;
18890 int j = -1;
18891 bool used_update = false;
18892 rtx restore_basereg = NULL_RTX;
18894 if (MEM_P (src) && INT_REGNO_P (reg))
18896 rtx breg;
18898 if (GET_CODE (XEXP (src, 0)) == PRE_INC
18899 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
18901 rtx delta_rtx;
18902 breg = XEXP (XEXP (src, 0), 0);
18903 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
18904 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
18905 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
18906 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
18907 src = replace_equiv_address (src, breg);
18909 else if (! rs6000_offsettable_memref_p (src, reg_mode))
18911 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
18913 rtx basereg = XEXP (XEXP (src, 0), 0);
18914 if (TARGET_UPDATE)
18916 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
18917 emit_insn (gen_rtx_SET (VOIDmode, ndst,
18918 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
18919 used_update = true;
18921 else
18922 emit_insn (gen_rtx_SET (VOIDmode, basereg,
18923 XEXP (XEXP (src, 0), 1)));
18924 src = replace_equiv_address (src, basereg);
18926 else
18928 rtx basereg = gen_rtx_REG (Pmode, reg);
18929 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
18930 src = replace_equiv_address (src, basereg);
18934 breg = XEXP (src, 0);
18935 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
18936 breg = XEXP (breg, 0);
18938 /* If the base register we are using to address memory is
18939 also a destination reg, then change that register last. */
18940 if (REG_P (breg)
18941 && REGNO (breg) >= REGNO (dst)
18942 && REGNO (breg) < REGNO (dst) + nregs)
18943 j = REGNO (breg) - REGNO (dst);
18945 else if (MEM_P (dst) && INT_REGNO_P (reg))
18947 rtx breg;
18949 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
18950 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
18952 rtx delta_rtx;
18953 breg = XEXP (XEXP (dst, 0), 0);
18954 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
18955 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
18956 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
18958 /* We have to update the breg before doing the store.
18959 Use store with update, if available. */
18961 if (TARGET_UPDATE)
18963 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
18964 emit_insn (TARGET_32BIT
18965 ? (TARGET_POWERPC64
18966 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
18967 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
18968 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
18969 used_update = true;
18971 else
18972 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
18973 dst = replace_equiv_address (dst, breg);
18975 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
18976 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
18978 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
18980 rtx basereg = XEXP (XEXP (dst, 0), 0);
18981 if (TARGET_UPDATE)
18983 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
18984 emit_insn (gen_rtx_SET (VOIDmode,
18985 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
18986 used_update = true;
18988 else
18989 emit_insn (gen_rtx_SET (VOIDmode, basereg,
18990 XEXP (XEXP (dst, 0), 1)));
18991 dst = replace_equiv_address (dst, basereg);
18993 else
18995 rtx basereg = XEXP (XEXP (dst, 0), 0);
18996 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
18997 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
18998 && REG_P (basereg)
18999 && REG_P (offsetreg)
19000 && REGNO (basereg) != REGNO (offsetreg));
19001 if (REGNO (basereg) == 0)
19003 rtx tmp = offsetreg;
19004 offsetreg = basereg;
19005 basereg = tmp;
19007 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
19008 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
19009 dst = replace_equiv_address (dst, basereg);
19012 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
19013 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
19016 for (i = 0; i < nregs; i++)
19018 /* Calculate index to next subword. */
19019 ++j;
19020 if (j == nregs)
19021 j = 0;
19023 /* If the compiler already emitted the move of the first word by
19024 store with update, no need to do anything. */
19025 if (j == 0 && used_update)
19026 continue;
19028 emit_insn (gen_rtx_SET (VOIDmode,
19029 simplify_gen_subreg (reg_mode, dst, mode,
19030 j * reg_mode_size),
19031 simplify_gen_subreg (reg_mode, src, mode,
19032 j * reg_mode_size)));
19034 if (restore_basereg != NULL_RTX)
19035 emit_insn (restore_basereg);
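/* Example of the overlap handling above: splitting a TImode copy from
   r3:r4 into r4:r5 on a 64-bit target must move backwards,
       mr r5,r4
       mr r4,r3
   since writing r4 first would overwrite a source word that has not
   been copied yet.  */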
19040 /* This page contains routines that are used to determine what the
19041 function prologue and epilogue code will do and write them out. */
19043 static inline bool
19044 save_reg_p (int r)
19046 return !call_used_regs[r] && df_regs_ever_live_p (r);
19049 /* Return the first fixed-point register that is required to be
19050 saved. 32 if none. */
19052 static int
19053 first_reg_to_save (void)
19055 int first_reg;
19057 /* Find lowest numbered live register. */
19058 for (first_reg = 13; first_reg <= 31; first_reg++)
19059 if (save_reg_p (first_reg))
19060 break;
19062 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
19063 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19064 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
19065 || (TARGET_TOC && TARGET_MINIMAL_TOC))
19066 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
19067 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
19069 #if TARGET_MACHO
19070 if (flag_pic
19071 && crtl->uses_pic_offset_table
19072 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
19073 return RS6000_PIC_OFFSET_TABLE_REGNUM;
19074 #endif
19076 return first_reg;
19079 /* Similar, for FP regs. */
19081 static int
19082 first_fp_reg_to_save (void)
19084 int first_reg;
19086 /* Find lowest numbered live register. */
19087 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
19088 if (save_reg_p (first_reg))
19089 break;
19091 return first_reg;
19094 /* Similar, for AltiVec regs. */
19096 static int
19097 first_altivec_reg_to_save (void)
19099 int i;
19101 /* Stack frame remains as is unless we are in AltiVec ABI. */
19102 if (! TARGET_ALTIVEC_ABI)
19103 return LAST_ALTIVEC_REGNO + 1;
19105 /* On Darwin, the unwind routines are compiled without
19106 TARGET_ALTIVEC, and use save_world to save/restore the
19107 altivec registers when necessary. */
19108 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
19109 && ! TARGET_ALTIVEC)
19110 return FIRST_ALTIVEC_REGNO + 20;
19112 /* Find lowest numbered live register. */
19113 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
19114 if (save_reg_p (i))
19115 break;
19117 return i;
19120 /* Return a 32-bit mask of the AltiVec registers we need to set in
19121 VRSAVE. Bit n of the return value is 1 if Vn is live; bit numbering
19122 is IBM-style, with the MSB of the 32-bit word being bit 0. */
19124 static unsigned int
19125 compute_vrsave_mask (void)
19127 unsigned int i, mask = 0;
19129 /* On Darwin, the unwind routines are compiled without
19130 TARGET_ALTIVEC, and use save_world to save/restore the
19131 call-saved altivec registers when necessary. */
19132 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
19133 && ! TARGET_ALTIVEC)
19134 mask |= 0xFFF;
19136 /* First, find out if we use _any_ altivec registers. */
19137 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19138 if (df_regs_ever_live_p (i))
19139 mask |= ALTIVEC_REG_BIT (i);
19141 if (mask == 0)
19142 return mask;
19144 /* Next, remove the argument registers from the set. These must
19145 be in the VRSAVE mask set by the caller, so we don't need to add
19146 them in again. More importantly, the mask we compute here is
19147 used to generate CLOBBERs in the set_vrsave insn, and we do not
19148 wish the argument registers to die. */
19149 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
19150 mask &= ~ALTIVEC_REG_BIT (i);
19152 /* Similarly, remove the return value from the set. */
19154 bool yes = false;
19155 diddle_return_value (is_altivec_return_reg, &yes);
19156 if (yes)
19157 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
19160 return mask;
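/* Example: a function whose only live vector registers are V20 and V21
   yields a mask of 0x00000c00 (with the MSB as bit 0, bits 20 and 21
   are set), and the Darwin save_world case above, 0xFFF, covers
   exactly the call-saved V20..V31.  */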
19163 /* For a very restricted set of circumstances, we can cut down the
19164 size of prologues/epilogues by calling our own save/restore-the-world
19165 routines. */
19167 static void
19168 compute_save_world_info (rs6000_stack_t *info_ptr)
19170 info_ptr->world_save_p = 1;
19171 info_ptr->world_save_p
19172 = (WORLD_SAVE_P (info_ptr)
19173 && DEFAULT_ABI == ABI_DARWIN
19174 && !cfun->has_nonlocal_label
19175 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
19176 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
19177 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
19178 && info_ptr->cr_save_p);
19180 /* This will not work in conjunction with sibcalls. Make sure there
19181 are none. (This check is expensive, but seldom executed.) */
19182 if (WORLD_SAVE_P (info_ptr))
19184 rtx insn;
19185 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
19186 if (CALL_P (insn) && SIBLING_CALL_P (insn))
19188 info_ptr->world_save_p = 0;
19189 break;
19193 if (WORLD_SAVE_P (info_ptr))
19195 /* Even if we're not touching VRsave, make sure there's room on the
19196 stack for it, if it looks like we're calling SAVE_WORLD, which
19197 will attempt to save it. */
19198 info_ptr->vrsave_size = 4;
19200 /* If we are going to save the world, we need to save the link register too. */
19201 info_ptr->lr_save_p = 1;
19203 /* "Save" the VRsave register too if we're saving the world. */
19204 if (info_ptr->vrsave_mask == 0)
19205 info_ptr->vrsave_mask = compute_vrsave_mask ();
19207 /* Because the Darwin register save/restore routines only handle
19208 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
19209 check. */
19210 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
19211 && (info_ptr->first_altivec_reg_save
19212 >= FIRST_SAVED_ALTIVEC_REGNO));
19214 return;
19218 static void
19219 is_altivec_return_reg (rtx reg, void *xyes)
19221 bool *yes = (bool *) xyes;
19222 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
19223 *yes = true;
19227 /* Look for user-defined global regs in the range FIRST to LAST-1.
19228 We should not restore these, and so cannot use lmw or out-of-line
19229 restore functions if there are any. We also can't save them
19230 (well, emit frame notes for them), because frame unwinding during
19231 exception handling will restore saved registers. */
19233 static bool
19234 global_regs_p (unsigned first, unsigned last)
19236 while (first < last)
19237 if (global_regs[first++])
19238 return true;
19239 return false;
19242 /* Determine the strategy for saving/restoring registers. */
19244 enum {
19245 SAVRES_MULTIPLE = 0x1,
19246 SAVE_INLINE_FPRS = 0x2,
19247 SAVE_INLINE_GPRS = 0x4,
19248 REST_INLINE_FPRS = 0x8,
19249 REST_INLINE_GPRS = 0x10,
19250 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
19251 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
19252 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
19253 SAVE_INLINE_VRS = 0x100,
19254 REST_INLINE_VRS = 0x200
19257 static int
19258 rs6000_savres_strategy (rs6000_stack_t *info,
19259 bool using_static_chain_p)
19261 int strategy = 0;
19262 bool lr_save_p;
19264 if (TARGET_MULTIPLE
19265 && !TARGET_POWERPC64
19266 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
19267 && info->first_gp_reg_save < 31
19268 && !global_regs_p (info->first_gp_reg_save, 32))
19269 strategy |= SAVRES_MULTIPLE;
19271 if (crtl->calls_eh_return
19272 || cfun->machine->ra_need_lr)
19273 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
19274 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
19275 | SAVE_INLINE_VRS | REST_INLINE_VRS);
19277 if (info->first_fp_reg_save == 64
19278 /* The out-of-line FP routines use double-precision stores;
19279 we can't use those routines if we don't have such stores. */
19280 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
19281 || global_regs_p (info->first_fp_reg_save, 64))
19282 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
19284 if (info->first_gp_reg_save == 32
19285 || (!(strategy & SAVRES_MULTIPLE)
19286 && global_regs_p (info->first_gp_reg_save, 32)))
19287 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
19289 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
19290 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
19291 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
19293 /* Define cutoff for using out-of-line functions to save registers. */
19294 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
19296 if (!optimize_size)
19298 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
19299 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
19300 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
19302 else
19304 /* Prefer out-of-line restore routines; their "exit" variants also handle the function return. */
19305 if (info->first_fp_reg_save > 61)
19306 strategy |= SAVE_INLINE_FPRS;
19307 if (info->first_gp_reg_save > 29)
19309 if (info->first_fp_reg_save == 64)
19310 strategy |= SAVE_INLINE_GPRS;
19311 else
19312 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
19314 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
19315 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
19318 else if (DEFAULT_ABI == ABI_DARWIN)
19320 if (info->first_fp_reg_save > 60)
19321 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
19322 if (info->first_gp_reg_save > 29)
19323 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
19324 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
19326 else
19328 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
19329 if (info->first_fp_reg_save > 61)
19330 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
19331 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
19332 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
19335 /* Don't bother to try to save things out-of-line if r11 is occupied
19336 by the static chain. It would require too much fiddling and the
19337 static chain is rarely used anyway. FPRs are saved w.r.t the stack
19338 pointer on Darwin, and AIX uses r1 or r12. */
19339 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
19340 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
19341 | SAVE_INLINE_GPRS
19342 | SAVE_INLINE_VRS | REST_INLINE_VRS);
19344 /* We can only use the out-of-line routines to restore if we've
19345 saved all the registers from first_fp_reg_save in the prologue.
19346 Otherwise, we risk loading garbage. */
19347 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
19349 int i;
19351 for (i = info->first_fp_reg_save; i < 64; i++)
19352 if (!save_reg_p (i))
19354 strategy |= REST_INLINE_FPRS;
19355 break;
19359 /* If we are going to use store multiple, then don't even bother
19360 with the out-of-line routines, since the store-multiple
19361 instruction will always be smaller. */
19362 if ((strategy & SAVRES_MULTIPLE))
19363 strategy |= SAVE_INLINE_GPRS;
19365 /* info->lr_save_p isn't yet set if the only reason lr needs to be
19366 saved is an out-of-line save or restore. Set up the value for
19367 the next test (excluding out-of-line gpr restore). */
19368 lr_save_p = (info->lr_save_p
19369 || !(strategy & SAVE_INLINE_GPRS)
19370 || !(strategy & SAVE_INLINE_FPRS)
19371 || !(strategy & SAVE_INLINE_VRS)
19372 || !(strategy & REST_INLINE_FPRS)
19373 || !(strategy & REST_INLINE_VRS));
19375 /* The situation is more complicated with load multiple. We'd
19376 prefer to use the out-of-line routines for restores, since the
19377 "exit" out-of-line routines can handle the restore of LR and the
19378 frame teardown. However, it doesn't make sense to use the
19379 out-of-line routine if that is the only reason we'd need to save
19380 LR, and we can't use the "exit" out-of-line gpr restore if we
19381 have saved some fprs; in those cases it is advantageous to use
19382 load multiple when available. */
19383 if ((strategy & SAVRES_MULTIPLE)
19384 && (!lr_save_p
19385 || info->first_fp_reg_save != 64))
19386 strategy |= REST_INLINE_GPRS;
19388 /* Saving CR interferes with the exit routines used on the SPE, so
19389 just punt here. */
19390 if (TARGET_SPE_ABI
19391 && info->spe_64bit_regs_used
19392 && info->cr_save_p)
19393 strategy |= REST_INLINE_GPRS;
19395 /* We can only use load multiple or the out-of-line routines to
19396 restore if we've used store multiple or out-of-line routines
19397 in the prologue, i.e. if we've saved all the registers from
19398 first_gp_reg_save. Otherwise, we risk loading garbage. */
19399 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
19400 == SAVE_INLINE_GPRS)
19402 int i;
19404 for (i = info->first_gp_reg_save; i < 32; i++)
19405 if (!save_reg_p (i))
19407 strategy |= REST_INLINE_GPRS;
19408 break;
19412 if (TARGET_ELF && TARGET_64BIT)
19414 if (!(strategy & SAVE_INLINE_FPRS))
19415 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
19416 else if (!(strategy & SAVE_INLINE_GPRS)
19417 && info->first_fp_reg_save == 64)
19418 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
19420 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
19421 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
19423 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
19424 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
19426 return strategy;
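/* Example reading of the flag word (a sketch of one plausible case): a
   32-bit SVR4 leaf function compiled with -Os that saves only r28..r31
   should end up with SAVRES_MULTIPLE plus all of the *_INLINE_* bits,
   i.e. "use stmw/lmw, call no out-of-line routines", since the
   store-multiple instruction always beats the out-of-line saves on
   size.  */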
19429 /* Calculate the stack information for the current function. This is
19430 complicated by having two separate calling sequences, the AIX calling
19431 sequence and the V.4 calling sequence.
19433 AIX (and Darwin/Mac OS X) stack frames look like:
19434 32-bit 64-bit
19435 SP----> +---------------------------------------+
19436 | back chain to caller | 0 0
19437 +---------------------------------------+
19438 | saved CR | 4 8 (8-11)
19439 +---------------------------------------+
19440 | saved LR | 8 16
19441 +---------------------------------------+
19442 | reserved for compilers | 12 24
19443 +---------------------------------------+
19444 | reserved for binders | 16 32
19445 +---------------------------------------+
19446 | saved TOC pointer | 20 40
19447 +---------------------------------------+
19448 | Parameter save area (P) | 24 48
19449 +---------------------------------------+
19450 | Alloca space (A) | 24+P etc.
19451 +---------------------------------------+
19452 | Local variable space (L) | 24+P+A
19453 +---------------------------------------+
19454 | Float/int conversion temporary (X) | 24+P+A+L
19455 +---------------------------------------+
19456 | Save area for AltiVec registers (W) | 24+P+A+L+X
19457 +---------------------------------------+
19458 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
19459 +---------------------------------------+
19460 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
19461 +---------------------------------------+
19462 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
19463 +---------------------------------------+
19464 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
19465 +---------------------------------------+
19466 old SP->| back chain to caller's caller |
19467 +---------------------------------------+
19469 The required alignment for AIX configurations is two words (i.e., 8
19470 or 16 bytes).
19473 V.4 stack frames look like:
19475 SP----> +---------------------------------------+
19476 | back chain to caller | 0
19477 +---------------------------------------+
19478 | caller's saved LR | 4
19479 +---------------------------------------+
19480 | Parameter save area (P) | 8
19481 +---------------------------------------+
19482 | Alloca space (A) | 8+P
19483 +---------------------------------------+
19484 | Varargs save area (V) | 8+P+A
19485 +---------------------------------------+
19486 | Local variable space (L) | 8+P+A+V
19487 +---------------------------------------+
19488 | Float/int conversion temporary (X) | 8+P+A+V+L
19489 +---------------------------------------+
19490 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
19491 +---------------------------------------+
19492 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
19493 +---------------------------------------+
19494 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
19495 +---------------------------------------+
19496 | SPE: area for 64-bit GP registers |
19497 +---------------------------------------+
19498 | SPE alignment padding |
19499 +---------------------------------------+
19500 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
19501 +---------------------------------------+
19502 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
19503 +---------------------------------------+
19504 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
19505 +---------------------------------------+
19506 old SP->| back chain to caller's caller |
19507 +---------------------------------------+
19509 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
19510 given. (But note below and in sysv4.h that we require only 8 and
19511 may round up the size of our stack frame anyway. The historical
19512 reason is early versions of powerpc-linux which didn't properly
19513 align the stack at program startup. A happy side-effect is that
19514 -mno-eabi libraries can be used with -meabi programs.)
19516 The EABI configuration defaults to the V.4 layout. However,
19517 the stack alignment requirements may differ. If -mno-eabi is not
19518 given, the required stack alignment is 8 bytes; if -mno-eabi is
19519 given, the required alignment is 16 bytes. (But see V.4 comment
19520 above.) */
19522 #ifndef ABI_STACK_BOUNDARY
19523 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
19524 #endif
19526 static rs6000_stack_t *
19527 rs6000_stack_info (void)
19529 rs6000_stack_t *info_ptr = &stack_info;
19530 int reg_size = TARGET_32BIT ? 4 : 8;
19531 int ehrd_size;
19532 int save_align;
19533 int first_gp;
19534 HOST_WIDE_INT non_fixed_size;
19535 bool using_static_chain_p;
19537 if (reload_completed && info_ptr->reload_completed)
19538 return info_ptr;
19540 memset (info_ptr, 0, sizeof (*info_ptr));
19541 info_ptr->reload_completed = reload_completed;
19543 if (TARGET_SPE)
19545 /* Cache value so we don't rescan instruction chain over and over. */
19546 if (cfun->machine->insn_chain_scanned_p == 0)
19547 cfun->machine->insn_chain_scanned_p
19548 = spe_func_has_64bit_regs_p () + 1;
19549 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
19552 /* Select which calling sequence. */
19553 info_ptr->abi = DEFAULT_ABI;
19555 /* Calculate which registers need to be saved & save area size. */
19556 info_ptr->first_gp_reg_save = first_reg_to_save ();
19557 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
19558 even if it currently looks like we won't. Reload may need it to
19559 get at a constant; if so, it will have already created a constant
19560 pool entry for it. */
19561 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
19562 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
19563 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
19564 && crtl->uses_const_pool
19565 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
19566 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
19567 else
19568 first_gp = info_ptr->first_gp_reg_save;
19570 info_ptr->gp_size = reg_size * (32 - first_gp);
19572 /* For the SPE, we have an additional upper 32-bits on each GPR.
19573 Ideally we should save the entire 64-bits only when the upper
19574 half is used in SIMD instructions. Since we only record
19575 registers live (not the size they are used in), this proves
19576 difficult because we'd have to traverse the instruction chain at
19577 the right time, taking reload into account. This is a real pain,
19578 so we opt to always save the GPRs in 64-bits if even one register
19579 gets used in 64-bits. Otherwise, all the registers in the frame
19580 get saved in 32-bits.
19582 So, when we save all GPRs (except the SP) in 64-bits, the
19583 traditional GP save area will be empty. */
19584 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
19585 info_ptr->gp_size = 0;
19587 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
19588 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
19590 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
19591 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
19592 - info_ptr->first_altivec_reg_save);
19594 /* Does this function call anything? */
19595 info_ptr->calls_p = (! crtl->is_leaf
19596 || cfun->machine->ra_needs_full_frame);
19598 /* Determine if we need to save the condition code registers. */
19599 if (df_regs_ever_live_p (CR2_REGNO)
19600 || df_regs_ever_live_p (CR3_REGNO)
19601 || df_regs_ever_live_p (CR4_REGNO))
19603 info_ptr->cr_save_p = 1;
19604 if (DEFAULT_ABI == ABI_V4)
19605 info_ptr->cr_size = reg_size;
19608 /* If the current function calls __builtin_eh_return, then we need
19609 to allocate stack space for registers that will hold data for
19610 the exception handler. */
19611 if (crtl->calls_eh_return)
19613 unsigned int i;
19614 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
19615 continue;
19617 /* SPE saves EH registers in 64-bits. */
19618 ehrd_size = i * (TARGET_SPE_ABI
19619 && info_ptr->spe_64bit_regs_used != 0
19620 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
19622 else
19623 ehrd_size = 0;
19625 /* Determine various sizes. */
19626 info_ptr->reg_size = reg_size;
19627 info_ptr->fixed_size = RS6000_SAVE_AREA;
19628 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
19629 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
19630 TARGET_ALTIVEC ? 16 : 8);
19631 if (FRAME_GROWS_DOWNWARD)
19632 info_ptr->vars_size
19633 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
19634 + info_ptr->parm_size,
19635 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
19636 - (info_ptr->fixed_size + info_ptr->vars_size
19637 + info_ptr->parm_size);
19639 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
19640 info_ptr->spe_gp_size = 8 * (32 - first_gp);
19641 else
19642 info_ptr->spe_gp_size = 0;
19644 if (TARGET_ALTIVEC_ABI)
19645 info_ptr->vrsave_mask = compute_vrsave_mask ();
19646 else
19647 info_ptr->vrsave_mask = 0;
19649 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
19650 info_ptr->vrsave_size = 4;
19651 else
19652 info_ptr->vrsave_size = 0;
19654 compute_save_world_info (info_ptr);
19656 /* Calculate the offsets. */
19657 switch (DEFAULT_ABI)
19659 case ABI_NONE:
19660 default:
19661 gcc_unreachable ();
19663 case ABI_AIX:
19664 case ABI_DARWIN:
19665 info_ptr->fp_save_offset = - info_ptr->fp_size;
19666 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
19668 if (TARGET_ALTIVEC_ABI)
19670 info_ptr->vrsave_save_offset
19671 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
19673 /* Align stack so vector save area is on a quadword boundary.
19674 The padding goes above the vectors. */
19675 if (info_ptr->altivec_size != 0)
19676 info_ptr->altivec_padding_size
19677 = info_ptr->vrsave_save_offset & 0xF;
19678 else
19679 info_ptr->altivec_padding_size = 0;
19681 info_ptr->altivec_save_offset
19682 = info_ptr->vrsave_save_offset
19683 - info_ptr->altivec_padding_size
19684 - info_ptr->altivec_size;
19685 gcc_assert (info_ptr->altivec_size == 0
19686 || info_ptr->altivec_save_offset % 16 == 0);
19688 /* Adjust for AltiVec case. */
19689 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
19691 else
19692 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
19693 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
19694 info_ptr->lr_save_offset = 2*reg_size;
19695 break;
19697 case ABI_V4:
19698 info_ptr->fp_save_offset = - info_ptr->fp_size;
19699 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
19700 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
19702 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
19704 /* Align stack so SPE GPR save area is aligned on a
19705 double-word boundary. */
19706 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
19707 info_ptr->spe_padding_size
19708 = 8 - (-info_ptr->cr_save_offset % 8);
19709 else
19710 info_ptr->spe_padding_size = 0;
19712 info_ptr->spe_gp_save_offset
19713 = info_ptr->cr_save_offset
19714 - info_ptr->spe_padding_size
19715 - info_ptr->spe_gp_size;
19717 /* Adjust for SPE case. */
19718 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
19720 else if (TARGET_ALTIVEC_ABI)
19722 info_ptr->vrsave_save_offset
19723 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
19725 /* Align stack so vector save area is on a quadword boundary. */
19726 if (info_ptr->altivec_size != 0)
19727 info_ptr->altivec_padding_size
19728 = 16 - (-info_ptr->vrsave_save_offset % 16);
19729 else
19730 info_ptr->altivec_padding_size = 0;
19732 info_ptr->altivec_save_offset
19733 = info_ptr->vrsave_save_offset
19734 - info_ptr->altivec_padding_size
19735 - info_ptr->altivec_size;
19737 /* Adjust for AltiVec case. */
19738 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
19740 else
19741 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
19742 info_ptr->ehrd_offset -= ehrd_size;
19743 info_ptr->lr_save_offset = reg_size;
19744 break;
19747 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
19748 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
19749 + info_ptr->gp_size
19750 + info_ptr->altivec_size
19751 + info_ptr->altivec_padding_size
19752 + info_ptr->spe_gp_size
19753 + info_ptr->spe_padding_size
19754 + ehrd_size
19755 + info_ptr->cr_size
19756 + info_ptr->vrsave_size,
19757 save_align);
19759 non_fixed_size = (info_ptr->vars_size
19760 + info_ptr->parm_size
19761 + info_ptr->save_size);
19763 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
19764 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
19766 /* Determine if we need to save the link register. */
19767 if (info_ptr->calls_p
19768 || (DEFAULT_ABI == ABI_AIX
19769 && crtl->profile
19770 && !TARGET_PROFILE_KERNEL)
19771 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
19772 #ifdef TARGET_RELOCATABLE
19773 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
19774 #endif
19775 || rs6000_ra_ever_killed ())
19776 info_ptr->lr_save_p = 1;
19778 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19779 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19780 && call_used_regs[STATIC_CHAIN_REGNUM]);
19781 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
19782 using_static_chain_p);
19784 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
19785 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
19786 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
19787 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
19788 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
19789 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
19790 info_ptr->lr_save_p = 1;
19792 if (info_ptr->lr_save_p)
19793 df_set_regs_ever_live (LR_REGNO, true);
19795 /* Determine if we need to allocate any stack frame:
19797 For AIX we need to push the stack if a frame pointer is needed
19798 (because the stack might be dynamically adjusted), if we are
19799 debugging, if we make calls, or if the sum of fp_save, gp_save,
19800 and local variables is more than the space needed to save all
19801 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
19802 + 18*8 = 288 (GPR13 reserved).
19804 For V.4 we don't have the stack cushion that AIX uses, but assume
19805 that the debugger can handle stackless frames. */
19807 if (info_ptr->calls_p)
19808 info_ptr->push_p = 1;
19810 else if (DEFAULT_ABI == ABI_V4)
19811 info_ptr->push_p = non_fixed_size != 0;
19813 else if (frame_pointer_needed)
19814 info_ptr->push_p = 1;
19816 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
19817 info_ptr->push_p = 1;
19819 else
19820 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
19822 /* Zero offsets if we're not saving those registers. */
19823 if (info_ptr->fp_size == 0)
19824 info_ptr->fp_save_offset = 0;
19826 if (info_ptr->gp_size == 0)
19827 info_ptr->gp_save_offset = 0;
19829 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
19830 info_ptr->altivec_save_offset = 0;
19832 /* Zero VRSAVE offset if not saved and restored. */
19833 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
19834 info_ptr->vrsave_save_offset = 0;
19836 if (! TARGET_SPE_ABI
19837 || info_ptr->spe_64bit_regs_used == 0
19838 || info_ptr->spe_gp_size == 0)
19839 info_ptr->spe_gp_save_offset = 0;
19841 if (! info_ptr->lr_save_p)
19842 info_ptr->lr_save_offset = 0;
19844 if (! info_ptr->cr_save_p)
19845 info_ptr->cr_save_offset = 0;
19847 return info_ptr;
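/* Worked example (assumes 32-bit SVR4, no AltiVec/SPE, and that CR
   needs saving): a function that saves f31, r30-r31 and CR gets
       fp_save_offset = -8        (f31)
       gp_save_offset = -16       (r30, r31)
       cr_save_offset = -20
       lr_save_offset = +4        (the ABI's LR save word)
   with the negative offsets measured from the frame base, i.e. the
   stack pointer value on entry.  */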
19850 /* Return true if the current function uses any GPRs in 64-bit SIMD
19851 mode. */
19853 static bool
19854 spe_func_has_64bit_regs_p (void)
19856 rtx insns, insn;
19858 /* Functions that save and restore all the call-saved registers will
19859 need to save/restore the registers in 64-bits. */
19860 if (crtl->calls_eh_return
19861 || cfun->calls_setjmp
19862 || crtl->has_nonlocal_goto)
19863 return true;
19865 insns = get_insns ();
19867 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
19869 if (INSN_P (insn))
19871 rtx i;
19873 /* FIXME: This should be implemented with attributes...
19875 (set_attr "spe64" "true")....then,
19876 if (get_spe64(insn)) return true;
19878 It's the only reliable way to do the stuff below. */
19880 i = PATTERN (insn);
19881 if (GET_CODE (i) == SET)
19883 enum machine_mode mode = GET_MODE (SET_SRC (i));
19885 if (SPE_VECTOR_MODE (mode))
19886 return true;
19887 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
19888 return true;
19893 return false;
19896 static void
19897 debug_stack_info (rs6000_stack_t *info)
19899 const char *abi_string;
19901 if (! info)
19902 info = rs6000_stack_info ();
19904 fprintf (stderr, "\nStack information for function %s:\n",
19905 ((current_function_decl && DECL_NAME (current_function_decl))
19906 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
19907 : "<unknown>"));
19909 switch (info->abi)
19911 default: abi_string = "Unknown"; break;
19912 case ABI_NONE: abi_string = "NONE"; break;
19913 case ABI_AIX: abi_string = "AIX"; break;
19914 case ABI_DARWIN: abi_string = "Darwin"; break;
19915 case ABI_V4: abi_string = "V.4"; break;
19918 fprintf (stderr, "\tABI = %5s\n", abi_string);
19920 if (TARGET_ALTIVEC_ABI)
19921 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
19923 if (TARGET_SPE_ABI)
19924 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
19926 if (info->first_gp_reg_save != 32)
19927 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
19929 if (info->first_fp_reg_save != 64)
19930 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
19932 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
19933 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
19934 info->first_altivec_reg_save);
19936 if (info->lr_save_p)
19937 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
19939 if (info->cr_save_p)
19940 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
19942 if (info->vrsave_mask)
19943 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
19945 if (info->push_p)
19946 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
19948 if (info->calls_p)
19949 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
19951 if (info->gp_save_offset)
19952 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
19954 if (info->fp_save_offset)
19955 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
19957 if (info->altivec_save_offset)
19958 fprintf (stderr, "\taltivec_save_offset = %5d\n",
19959 info->altivec_save_offset);
19961 if (info->spe_gp_save_offset)
19962 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
19963 info->spe_gp_save_offset);
19965 if (info->vrsave_save_offset)
19966 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
19967 info->vrsave_save_offset);
19969 if (info->lr_save_offset)
19970 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
19972 if (info->cr_save_offset)
19973 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
19975 if (info->varargs_save_offset)
19976 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
19978 if (info->total_size)
19979 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
19980 info->total_size);
19982 if (info->vars_size)
19983 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
19984 info->vars_size);
19986 if (info->parm_size)
19987 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
19989 if (info->fixed_size)
19990 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
19992 if (info->gp_size)
19993 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
19995 if (info->spe_gp_size)
19996 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
19998 if (info->fp_size)
19999 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
20001 if (info->altivec_size)
20002 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
20004 if (info->vrsave_size)
20005 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
20007 if (info->altivec_padding_size)
20008 fprintf (stderr, "\taltivec_padding_size= %5d\n",
20009 info->altivec_padding_size);
20011 if (info->spe_padding_size)
20012 fprintf (stderr, "\tspe_padding_size = %5d\n",
20013 info->spe_padding_size);
20015 if (info->cr_size)
20016 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
20018 if (info->save_size)
20019 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
20021 if (info->reg_size != 4)
20022 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
20024 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
20026 fprintf (stderr, "\n");
20030 rs6000_return_addr (int count, rtx frame)
20032 /* Currently we don't optimize very well between prolog and body
20033 code, and for PIC code the result can actually be quite bad, so
20034 don't try to be too clever here. */
20035 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
20037 cfun->machine->ra_needs_full_frame = 1;
20039 return
20040 gen_rtx_MEM
20041 (Pmode,
20042 memory_address
20043 (Pmode,
20044 plus_constant (Pmode,
20045 copy_to_reg
20046 (gen_rtx_MEM (Pmode,
20047 memory_address (Pmode, frame))),
20048 RETURN_ADDRESS_OFFSET)));
20051 cfun->machine->ra_need_lr = 1;
20052 return get_hard_reg_initial_val (Pmode, LR_REGNO);
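/* To recap the two paths above (an explanatory note, not part of the
   original source): for COUNT > 0, or for non-AIX PIC code, the return
   address is recovered by following the back chain word stored at FRAME
   and loading from RETURN_ADDRESS_OFFSET past it; otherwise we use the
   value LR had on entry to the function, which forces LR to be saved.  */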
20055 /* Say whether a function is a candidate for sibcall handling or not. */
20057 static bool
20058 rs6000_function_ok_for_sibcall (tree decl, tree exp)
20060 tree fntype;
20062 if (decl)
20063 fntype = TREE_TYPE (decl);
20064 else
20065 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
20067 /* We can't do it if the called function has more vector parameters
20068 than the current function; there's nowhere to put the VRsave code. */
20069 if (TARGET_ALTIVEC_ABI
20070 && TARGET_ALTIVEC_VRSAVE
20071 && !(decl && decl == current_function_decl))
20073 function_args_iterator args_iter;
20074 tree type;
20075 int nvreg = 0;
20077 /* Functions with vector parameters are required to have a
20078 prototype, so the argument type info must be available
20079 here. */
20080 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
20081 if (TREE_CODE (type) == VECTOR_TYPE
20082 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
20083 nvreg++;
20085 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
20086 if (TREE_CODE (type) == VECTOR_TYPE
20087 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
20088 nvreg--;
20090 if (nvreg > 0)
20091 return false;
20094 /* Under the AIX ABI we can't allow calls to non-local functions,
20095 because the callee may have a different TOC pointer from the
20096 caller's, and there's no way to ensure we restore the TOC when we
20097 return. With the secure-plt SYSV ABI we can't make non-local
20098 calls under -fpic/-fPIC because the PLT call stubs use r30. */
20099 if (DEFAULT_ABI == ABI_DARWIN
20100 || (DEFAULT_ABI == ABI_AIX
20101 && decl
20102 && !DECL_EXTERNAL (decl)
20103 && (*targetm.binds_local_p) (decl))
20104 || (DEFAULT_ABI == ABI_V4
20105 && (!TARGET_SECURE_PLT
20106 || !flag_pic
20107 || (decl
20108 && (*targetm.binds_local_p) (decl)))))
20110 tree attr_list = TYPE_ATTRIBUTES (fntype);
20112 if (!lookup_attribute ("longcall", attr_list)
20113 || lookup_attribute ("shortcall", attr_list))
20114 return true;
20117 return false;
20120 static int
20121 rs6000_ra_ever_killed (void)
20123 rtx top;
20124 rtx reg;
20125 rtx insn;
20127 if (cfun->is_thunk)
20128 return 0;
20130 if (cfun->machine->lr_save_state)
20131 return cfun->machine->lr_save_state - 1;
20133 /* regs_ever_live has LR marked as used if any sibcalls are present,
20134 but this should not force saving and restoring in the
20135 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
20136 clobbers LR, so using it here would give the wrong answer. */
20138 /* Also, the prologue can generate a store into LR that
20139 doesn't really count, like this:
20141 move LR->R0
20142 bcl to set PIC register
20143 move LR->R31
20144 move R0->LR
20146 When we're called from the epilogue, we need to avoid counting
20147 this as a store. */
20149 push_topmost_sequence ();
20150 top = get_insns ();
20151 pop_topmost_sequence ();
20152 reg = gen_rtx_REG (Pmode, LR_REGNO);
20154 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
20156 if (INSN_P (insn))
20158 if (CALL_P (insn))
20160 if (!SIBLING_CALL_P (insn))
20161 return 1;
20163 else if (find_regno_note (insn, REG_INC, LR_REGNO))
20164 return 1;
20165 else if (set_of (reg, insn) != NULL_RTX
20166 && !prologue_epilogue_contains (insn))
20167 return 1;
20170 return 0;
20173 /* Emit instructions needed to load the TOC register.
20174 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
20175 and there is a constant pool, or for SVR4 -fpic. */
20177 void
20178 rs6000_emit_load_toc_table (int fromprolog)
20180 rtx dest;
20181 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
20183 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
20185 char buf[30];
20186 rtx lab, tmp1, tmp2, got;
20188 lab = gen_label_rtx ();
20189 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
20190 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
20191 if (flag_pic == 2)
20192 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
20193 else
20194 got = rs6000_got_sym ();
20195 tmp1 = tmp2 = dest;
20196 if (!fromprolog)
20198 tmp1 = gen_reg_rtx (Pmode);
20199 tmp2 = gen_reg_rtx (Pmode);
20201 emit_insn (gen_load_toc_v4_PIC_1 (lab));
20202 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
20203 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
20204 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
20206 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
20208 emit_insn (gen_load_toc_v4_pic_si ());
20209 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
20211 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
20213 char buf[30];
20214 rtx temp0 = (fromprolog
20215 ? gen_rtx_REG (Pmode, 0)
20216 : gen_reg_rtx (Pmode));
20218 if (fromprolog)
20220 rtx symF, symL;
20222 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
20223 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
20225 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
20226 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
20228 emit_insn (gen_load_toc_v4_PIC_1 (symF));
20229 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
20230 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
20232 else
20234 rtx tocsym, lab;
20236 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
20237 lab = gen_label_rtx ();
20238 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
20239 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
20240 if (TARGET_LINK_STACK)
20241 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
20242 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
20244 emit_insn (gen_addsi3 (dest, temp0, dest));
20246 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
20248 /* This is for AIX code running in non-PIC ELF32. */
20249 char buf[30];
20250 rtx realsym;
20251 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
20252 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
20254 emit_insn (gen_elf_high (dest, realsym));
20255 emit_insn (gen_elf_low (dest, dest, realsym));
20257 else
20259 gcc_assert (DEFAULT_ABI == ABI_AIX);
20261 if (TARGET_32BIT)
20262 emit_insn (gen_load_toc_aix_si (dest));
20263 else
20264 emit_insn (gen_load_toc_aix_di (dest));
20268 /* Emit instructions to restore the link register after determining where
20269 its value has been stored. */
20271 void
20272 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
20274 rs6000_stack_t *info = rs6000_stack_info ();
20275 rtx operands[2];
20277 operands[0] = source;
20278 operands[1] = scratch;
20280 if (info->lr_save_p)
20282 rtx frame_rtx = stack_pointer_rtx;
20283 HOST_WIDE_INT sp_offset = 0;
20284 rtx tmp;
20286 if (frame_pointer_needed
20287 || cfun->calls_alloca
20288 || info->total_size > 32767)
20290 tmp = gen_frame_mem (Pmode, frame_rtx);
20291 emit_move_insn (operands[1], tmp);
20292 frame_rtx = operands[1];
20294 else if (info->push_p)
20295 sp_offset = info->total_size;
20297 tmp = plus_constant (Pmode, frame_rtx,
20298 info->lr_save_offset + sp_offset);
20299 tmp = gen_frame_mem (Pmode, tmp);
20300 emit_move_insn (tmp, operands[0]);
20302 else
20303 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
20305 /* Freeze lr_save_p. We've just emitted rtl that depends on the
20306 state of lr_save_p so any change from here on would be a bug. In
20307 particular, stop rs6000_ra_ever_killed from considering the SET
20308 of lr we may have added just above. */
20309 cfun->machine->lr_save_state = info->lr_save_p + 1;
20312 static GTY(()) alias_set_type set = -1;
20314 alias_set_type
20315 get_TOC_alias_set (void)
20317 if (set == -1)
20318 set = new_alias_set ();
20319 return set;
20322 /* This returns nonzero if the current function uses the TOC. This is
20323 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
20324 is generated by the ABI_V4 load_toc_* patterns. */
20325 #if TARGET_ELF
20326 static int
20327 uses_TOC (void)
20329 rtx insn;
20331 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
20332 if (INSN_P (insn))
20334 rtx pat = PATTERN (insn);
20335 int i;
20337 if (GET_CODE (pat) == PARALLEL)
20338 for (i = 0; i < XVECLEN (pat, 0); i++)
20340 rtx sub = XVECEXP (pat, 0, i);
20341 if (GET_CODE (sub) == USE)
20343 sub = XEXP (sub, 0);
20344 if (GET_CODE (sub) == UNSPEC
20345 && XINT (sub, 1) == UNSPEC_TOC)
20346 return 1;
20350 return 0;
20352 #endif
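/* For reference, the insn shape matched above is, schematically,
     (parallel [... (use (unspec [...] UNSPEC_TOC)) ...])
   i.e. a USE of an UNSPEC_TOC buried in a PARALLEL, as emitted by the
   ABI_V4 load_toc_* patterns mentioned in the comment.  (A sketch of
   the rtl form, not a complete pattern.)  */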
20355 create_TOC_reference (rtx symbol, rtx largetoc_reg)
20357 rtx tocrel, tocreg, hi;
20359 if (TARGET_DEBUG_ADDR)
20361 if (GET_CODE (symbol) == SYMBOL_REF)
20362 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
20363 XSTR (symbol, 0));
20364 else
20366 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
20367 GET_RTX_NAME (GET_CODE (symbol)));
20368 debug_rtx (symbol);
20372 if (!can_create_pseudo_p ())
20373 df_set_regs_ever_live (TOC_REGISTER, true);
20375 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
20376 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
20377 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
20378 return tocrel;
20380 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
20381 if (largetoc_reg != NULL)
20383 emit_move_insn (largetoc_reg, hi);
20384 hi = largetoc_reg;
20386 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
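/* An illustrative sketch of the rtl produced above: for the small code
   model (or while pseudos can still be created) the reference is just
     (unspec [(symbol_ref) (reg 2)] UNSPEC_TOCREL)
   while for the larger code models it becomes
     (lo_sum (high (unspec ...)) (unspec ...))
   with the HIGH part optionally staged through LARGETOC_REG.  */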
20389 /* Issue assembly directives that create a reference to the given DWARF
20390 FRAME_TABLE_LABEL from the current function section. */
20391 void
20392 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
20394 fprintf (asm_out_file, "\t.ref %s\n",
20395 (* targetm.strip_name_encoding) (frame_table_label));
20398 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
20399 and the change to the stack pointer. */
20401 static void
20402 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
20404 rtvec p;
20405 int i;
20406 rtx regs[3];
20408 i = 0;
20409 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
20410 if (hard_frame_needed)
20411 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
20412 if (!(REGNO (fp) == STACK_POINTER_REGNUM
20413 || (hard_frame_needed
20414 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
20415 regs[i++] = fp;
20417 p = rtvec_alloc (i);
20418 while (--i >= 0)
20420 rtx mem = gen_frame_mem (BLKmode, regs[i]);
20421 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
20424 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
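/* The tie emitted above is, schematically,
     (parallel [(set (mem:BLK (reg 1)) (const_int 0)) ...])
   with one BLKmode store per base register collected in REGS.  Its only
   purpose is to create a dependence so the scheduler cannot move frame
   accesses across the stack pointer update.  */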
20427 /* Emit the correct code for allocating stack space, as insns.
20428 If COPY_REG, make sure a copy of the old frame is left in it.
20429 The generated code may use hard register 0 as a temporary. */
20431 static void
20432 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
20434 rtx insn;
20435 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
20436 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
20437 rtx todec = gen_int_mode (-size, Pmode);
20438 rtx par, set, mem;
20440 if (INTVAL (todec) != -size)
20442 warning (0, "stack frame too large");
20443 emit_insn (gen_trap ());
20444 return;
20447 if (crtl->limit_stack)
20449 if (REG_P (stack_limit_rtx)
20450 && REGNO (stack_limit_rtx) > 1
20451 && REGNO (stack_limit_rtx) <= 31)
20453 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
20454 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
20455 const0_rtx));
20457 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
20458 && TARGET_32BIT
20459 && DEFAULT_ABI == ABI_V4)
20461 rtx toload = gen_rtx_CONST (VOIDmode,
20462 gen_rtx_PLUS (Pmode,
20463 stack_limit_rtx,
20464 GEN_INT (size)));
20466 emit_insn (gen_elf_high (tmp_reg, toload));
20467 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
20468 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
20469 const0_rtx));
20471 else
20472 warning (0, "stack limit expression is not supported");
20475 if (copy_reg)
20477 if (copy_off != 0)
20478 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
20479 else
20480 emit_move_insn (copy_reg, stack_reg);
20483 if (size > 32767)
20485 /* Need a note here so that try_split doesn't get confused. */
20486 if (get_last_insn () == NULL_RTX)
20487 emit_note (NOTE_INSN_DELETED);
20488 insn = emit_move_insn (tmp_reg, todec);
20489 try_split (PATTERN (insn), insn, 0);
20490 todec = tmp_reg;
20493 insn = emit_insn (TARGET_32BIT
20494 ? gen_movsi_update_stack (stack_reg, stack_reg,
20495 todec, stack_reg)
20496 : gen_movdi_di_update_stack (stack_reg, stack_reg,
20497 todec, stack_reg));
20498 /* Since we didn't use gen_frame_mem to generate the MEM, grab
20499 it now and set the alias set/attributes. The above gen_*_update
20500 calls will generate a PARALLEL with the MEM set being the first
20501 operation. */
20502 par = PATTERN (insn);
20503 gcc_assert (GET_CODE (par) == PARALLEL);
20504 set = XVECEXP (par, 0, 0);
20505 gcc_assert (GET_CODE (set) == SET);
20506 mem = SET_DEST (set);
20507 gcc_assert (MEM_P (mem));
20508 MEM_NOTRAP_P (mem) = 1;
20509 set_mem_alias_set (mem, get_frame_alias_set ());
20511 RTX_FRAME_RELATED_P (insn) = 1;
20512 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
20513 gen_rtx_SET (VOIDmode, stack_reg,
20514 gen_rtx_PLUS (Pmode, stack_reg,
20515 GEN_INT (-size))));
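/* Whichever path was taken above (direct update, or staging a large
   -SIZE through the r0 temporary first), the REG_FRAME_RELATED_EXPR
   note presents the allocation to the unwinder uniformly as
     (set (reg 1) (plus (reg 1) (const_int -SIZE)))  */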
20518 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
20520 #if PROBE_INTERVAL > 32768
20521 #error Cannot use indexed addressing mode for stack probing
20522 #endif
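/* For example, with STACK_CHECK_PROBE_INTERVAL_EXP at its usual value
   of 12 (an assumption; the value is target-configurable),
   PROBE_INTERVAL is 4096, well inside the 32768 limit imposed by the
   16-bit signed displacement of d-form storage instructions.  */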
20524 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
20525 inclusive. These are offsets from the current stack pointer. */
20527 static void
20528 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
20530 /* See if we have a constant small number of probes to generate. If so,
20531 that's the easy case. */
20532 if (first + size <= 32768)
20534 HOST_WIDE_INT i;
20536 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
20537 it exceeds SIZE. If only one probe is needed, this will not
20538 generate any code. Then probe at FIRST + SIZE. */
20539 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
20540 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
20541 -(first + i)));
20543 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
20544 -(first + size)));
20547 /* Otherwise, do the same as above, but in a loop. Note that we must be
20548 extra careful with variables wrapping around because we might be at
20549 the very top (or the very bottom) of the address space and we have
20550 to be able to handle this case properly; in particular, we use an
20551 equality test for the loop condition. */
20552 else
20554 HOST_WIDE_INT rounded_size;
20555 rtx r12 = gen_rtx_REG (Pmode, 12);
20556 rtx r0 = gen_rtx_REG (Pmode, 0);
20558 /* Sanity check for the addressing mode we're going to use. */
20559 gcc_assert (first <= 32768);
20561 /* Step 1: round SIZE to the previous multiple of the interval. */
20563 rounded_size = size & -PROBE_INTERVAL;
20566 /* Step 2: compute initial and final value of the loop counter. */
20568 /* TEST_ADDR = SP + FIRST. */
20569 emit_insn (gen_rtx_SET (VOIDmode, r12,
20570 plus_constant (Pmode, stack_pointer_rtx,
20571 -first)));
20573 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
20574 if (rounded_size > 32768)
20576 emit_move_insn (r0, GEN_INT (-rounded_size));
20577 emit_insn (gen_rtx_SET (VOIDmode, r0,
20578 gen_rtx_PLUS (Pmode, r12, r0)));
20580 else
20581 emit_insn (gen_rtx_SET (VOIDmode, r0,
20582 plus_constant (Pmode, r12, -rounded_size)));
20585 /* Step 3: the loop
20587 while (TEST_ADDR != LAST_ADDR)
20589 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
20590 probe at TEST_ADDR
20593 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
20594 until it is equal to ROUNDED_SIZE. */
20596 if (TARGET_64BIT)
20597 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
20598 else
20599 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
20602 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
20603 that SIZE is equal to ROUNDED_SIZE. */
20605 if (size != rounded_size)
20606 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
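/* Worked example, assuming PROBE_INTERVAL == 4096: FIRST == 16 and
   SIZE == 10000 takes the first path, probing sp-4112, sp-8208 and
   finally sp-10016.  FIRST == 16 and SIZE == 100000 takes the loop
   path: rounded_size == 98304, r12 = sp-16, r0 = r12-98304, the loop
   probes every 4096 bytes, and the residual probe at r12-1696 lands
   on sp-100016.  */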
20610 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
20611 absolute addresses. */
20613 const char *
20614 output_probe_stack_range (rtx reg1, rtx reg2)
20616 static int labelno = 0;
20617 char loop_lab[32], end_lab[32];
20618 rtx xops[2];
20620 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
20621 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
20623 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
20625 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
20626 xops[0] = reg1;
20627 xops[1] = reg2;
20628 if (TARGET_64BIT)
20629 output_asm_insn ("cmpd 0,%0,%1", xops);
20630 else
20631 output_asm_insn ("cmpw 0,%0,%1", xops);
20633 fputs ("\tbeq 0,", asm_out_file);
20634 assemble_name_raw (asm_out_file, end_lab);
20635 fputc ('\n', asm_out_file);
20637 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
20638 xops[1] = GEN_INT (-PROBE_INTERVAL);
20639 output_asm_insn ("addi %0,%0,%1", xops);
20641 /* Probe at TEST_ADDR and branch. */
20642 xops[1] = gen_rtx_REG (Pmode, 0);
20643 output_asm_insn ("stw %1,0(%0)", xops);
20644 fprintf (asm_out_file, "\tb ");
20645 assemble_name_raw (asm_out_file, loop_lab);
20646 fputc ('\n', asm_out_file);
20648 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
20650 return "";
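/* The 32-bit loop emitted above looks like this (a sketch, assuming
   PROBE_INTERVAL == 4096 and the r12/r0 registers the caller passes):
       .LPSRL0: cmpw 0,r12,r0
                beq 0,.LPSRE0
                addi r12,r12,-4096
                stw r0,0(r12)
                b .LPSRL0
       .LPSRE0:
   64-bit targets use cmpd for the compare; the probe itself stays a
   stw, since touching the page is all that is required.  */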
20653 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
20654 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
20655 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
20656 deduce these equivalences by itself so it wasn't necessary to hold
20657 its hand so much. Don't be tempted to always supply d2_f_d_e with
20658 the actual cfa register, i.e. r31 when we are using a hard frame
20659 pointer. That fails when saving regs off r1, and sched moves the
20660 r31 setup past the reg saves. */
20662 static rtx
20663 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
20664 rtx reg2, rtx rreg)
20666 rtx real, temp;
20668 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
20670 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
20671 int i;
20673 gcc_checking_assert (val == 0);
20674 real = PATTERN (insn);
20675 if (GET_CODE (real) == PARALLEL)
20676 for (i = 0; i < XVECLEN (real, 0); i++)
20677 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
20679 rtx set = XVECEXP (real, 0, i);
20681 RTX_FRAME_RELATED_P (set) = 1;
20683 RTX_FRAME_RELATED_P (insn) = 1;
20684 return insn;
20687 /* copy_rtx will not make unique copies of registers, so we need to
20688 ensure we don't have unwanted sharing here. */
20689 if (reg == reg2)
20690 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
20692 if (reg == rreg)
20693 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
20695 real = copy_rtx (PATTERN (insn));
20697 if (reg2 != NULL_RTX)
20698 real = replace_rtx (real, reg2, rreg);
20700 if (REGNO (reg) == STACK_POINTER_REGNUM)
20701 gcc_checking_assert (val == 0);
20702 else
20703 real = replace_rtx (real, reg,
20704 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
20705 STACK_POINTER_REGNUM),
20706 GEN_INT (val)));
20708 /* We expect that 'real' is either a SET or a PARALLEL containing
20709 SETs (and possibly other stuff). In a PARALLEL, all the SETs
20710 are important so they all have to be marked RTX_FRAME_RELATED_P. */
20712 if (GET_CODE (real) == SET)
20714 rtx set = real;
20716 temp = simplify_rtx (SET_SRC (set));
20717 if (temp)
20718 SET_SRC (set) = temp;
20719 temp = simplify_rtx (SET_DEST (set));
20720 if (temp)
20721 SET_DEST (set) = temp;
20722 if (GET_CODE (SET_DEST (set)) == MEM)
20724 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
20725 if (temp)
20726 XEXP (SET_DEST (set), 0) = temp;
20729 else
20731 int i;
20733 gcc_assert (GET_CODE (real) == PARALLEL);
20734 for (i = 0; i < XVECLEN (real, 0); i++)
20735 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
20737 rtx set = XVECEXP (real, 0, i);
20739 temp = simplify_rtx (SET_SRC (set));
20740 if (temp)
20741 SET_SRC (set) = temp;
20742 temp = simplify_rtx (SET_DEST (set));
20743 if (temp)
20744 SET_DEST (set) = temp;
20745 if (GET_CODE (SET_DEST (set)) == MEM)
20747 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
20748 if (temp)
20749 XEXP (SET_DEST (set), 0) = temp;
20751 RTX_FRAME_RELATED_P (set) = 1;
20755 RTX_FRAME_RELATED_P (insn) = 1;
20756 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
20758 return insn;
20761 /* Returns an insn that has a vrsave set operation with the
20762 appropriate CLOBBERs. */
20764 static rtx
20765 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
20767 int nclobs, i;
20768 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
20769 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20771 clobs[0]
20772 = gen_rtx_SET (VOIDmode,
20773 vrsave,
20774 gen_rtx_UNSPEC_VOLATILE (SImode,
20775 gen_rtvec (2, reg, vrsave),
20776 UNSPECV_SET_VRSAVE));
20778 nclobs = 1;
20780 /* We need to clobber the registers in the mask so the scheduler
20781 does not move sets to VRSAVE before sets of AltiVec registers.
20783 However, if the function receives nonlocal gotos, reload will set
20784 all call saved registers live. We will end up with:
20786 (set (reg 999) (mem))
20787 (parallel [ (set (reg vrsave) (unspec blah))
20788 (clobber (reg 999))])
20790 The clobber will cause the store into reg 999 to be dead, and
20791 flow will attempt to delete an epilogue insn. In this case, we
20792 need an unspec use/set of the register. */
20794 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20795 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20797 if (!epiloguep || call_used_regs [i])
20798 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
20799 gen_rtx_REG (V4SImode, i));
20800 else
20802 rtx reg = gen_rtx_REG (V4SImode, i);
20804 clobs[nclobs++]
20805 = gen_rtx_SET (VOIDmode,
20806 reg,
20807 gen_rtx_UNSPEC (V4SImode,
20808 gen_rtvec (1, reg), 27));
20812 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
20814 for (i = 0; i < nclobs; ++i)
20815 XVECEXP (insn, 0, i) = clobs[i];
20817 return insn;
20820 static rtx
20821 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
20823 rtx addr, mem;
20825 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
20826 mem = gen_frame_mem (GET_MODE (reg), addr);
20827 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
20830 static rtx
20831 gen_frame_load (rtx reg, rtx frame_reg, int offset)
20833 return gen_frame_set (reg, frame_reg, offset, false);
20836 static rtx
20837 gen_frame_store (rtx reg, rtx frame_reg, int offset)
20839 return gen_frame_set (reg, frame_reg, offset, true);
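/* Usage sketch: gen_frame_store (gen_rtx_REG (Pmode, 0), sp, 16)
   yields (set (mem (plus sp (const_int 16))) (reg 0)) with the MEM in
   the frame alias set; gen_frame_load simply swaps source and
   destination.  The mode of the MEM follows the REG operand.  */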
20842 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
20843 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
20845 static rtx
20846 emit_frame_save (rtx frame_reg, enum machine_mode mode,
20847 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
20849 rtx reg, insn;
20851 /* Assert against the cases that would need register indexed addressing. */
20852 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
20853 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
20854 || (TARGET_E500_DOUBLE && mode == DFmode)
20855 || (TARGET_SPE_ABI
20856 && SPE_VECTOR_MODE (mode)
20857 && !SPE_CONST_OFFSET_OK (offset))));
20859 reg = gen_rtx_REG (mode, regno);
20860 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
20861 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
20862 NULL_RTX, NULL_RTX);
20865 /* Emit an offset memory reference suitable for a frame store, while
20866 converting to a valid addressing mode. */
20868 static rtx
20869 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
20871 rtx int_rtx, offset_rtx;
20873 int_rtx = GEN_INT (offset);
20875 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
20876 || (TARGET_E500_DOUBLE && mode == DFmode))
20878 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
20879 emit_move_insn (offset_rtx, int_rtx);
20881 else
20882 offset_rtx = int_rtx;
20884 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
20887 #ifndef TARGET_FIX_AND_CONTINUE
20888 #define TARGET_FIX_AND_CONTINUE 0
20889 #endif
20891 /* The first register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
20892 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
20893 #define LAST_SAVRES_REGISTER 31
20894 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
20896 enum {
20897 SAVRES_LR = 0x1,
20898 SAVRES_SAVE = 0x2,
20899 SAVRES_REG = 0x0c,
20900 SAVRES_GPR = 0,
20901 SAVRES_FPR = 4,
20902 SAVRES_VR = 8
20905 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
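/* Example selector: an out-of-line FPR save that also saves LR is
   SAVRES_SAVE | SAVRES_FPR | SAVRES_LR == 0x7.  The two-bit SAVRES_REG
   field (mask 0x0c) picks GPR/FPR/VR, so all valid selectors fit the
   12-entry second dimension of savres_routine_syms above.  */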
20907 /* Temporary holding space for an out-of-line register save/restore
20908 routine name. */
20909 static char savres_routine_name[30];
20911 /* Return the name for an out-of-line register save/restore routine.
20912 SEL selects the register class and the save/restore variant. */
20914 static char *
20915 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
20917 const char *prefix = "";
20918 const char *suffix = "";
20920 /* Different targets are supposed to define
20921 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
20922 routine name could be defined with:
20924 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
20926 This is a nice idea in theory, but in reality things are
20927 complicated in several ways:
20929 - ELF targets have save/restore routines for GPRs.
20931 - SPE targets use different prefixes for 32/64-bit registers, and
20932 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
20934 - PPC64 ELF targets have routines for save/restore of GPRs that
20935 differ in what they do with the link register, so having a set
20936 prefix doesn't work. (We only use one of the save routines at
20937 the moment, though.)
20939 - PPC32 ELF targets have "exit" versions of the restore routines
20940 that restore the link register and can save some extra space.
20941 These require an extra suffix. (There are also "tail" versions
20942 of the restore routines and "GOT" versions of the save routines,
20943 but we don't generate those at present. Same problems apply,
20944 though.)
20946 We deal with all this by synthesizing our own prefix/suffix and
20947 using that for the simple sprintf call shown above. */
20948 if (TARGET_SPE)
20950 /* No floating point saves on the SPE. */
20951 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
20953 if ((sel & SAVRES_SAVE))
20954 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
20955 else
20956 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
20958 if ((sel & SAVRES_LR))
20959 suffix = "_x";
20961 else if (DEFAULT_ABI == ABI_V4)
20963 if (TARGET_64BIT)
20964 goto aix_names;
20966 if ((sel & SAVRES_REG) == SAVRES_GPR)
20967 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
20968 else if ((sel & SAVRES_REG) == SAVRES_FPR)
20969 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
20970 else if ((sel & SAVRES_REG) == SAVRES_VR)
20971 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
20972 else
20973 abort ();
20975 if ((sel & SAVRES_LR))
20976 suffix = "_x";
20978 else if (DEFAULT_ABI == ABI_AIX)
20980 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
20981 /* No out-of-line save/restore routines for GPRs on AIX. */
20982 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
20983 #endif
20985 aix_names:
20986 if ((sel & SAVRES_REG) == SAVRES_GPR)
20987 prefix = ((sel & SAVRES_SAVE)
20988 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
20989 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
20990 else if ((sel & SAVRES_REG) == SAVRES_FPR)
20992 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
20993 if ((sel & SAVRES_LR))
20994 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
20995 else
20996 #endif
20998 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
20999 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
21002 else if ((sel & SAVRES_REG) == SAVRES_VR)
21003 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
21004 else
21005 abort ();
21008 if (DEFAULT_ABI == ABI_DARWIN)
21010 /* The Darwin approach is (slightly) different, in order to be
21011 compatible with code generated by the system toolchain. There is a
21012 single symbol for the start of the save sequence, and the code here
21013 embeds an offset into that code on the basis of the first register
21014 to be saved. */
21015 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
21016 if ((sel & SAVRES_REG) == SAVRES_GPR)
21017 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
21018 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
21019 (regno - 13) * 4, prefix, regno);
21020 else if ((sel & SAVRES_REG) == SAVRES_FPR)
21021 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
21022 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
21023 else if ((sel & SAVRES_REG) == SAVRES_VR)
21024 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
21025 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
21026 else
21027 abort ();
21029 else
21030 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
21032 return savres_routine_name;
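/* Illustrative results: a 32-bit SVR4 GPR save starting at r29 is
   named "_savegpr_29"; the AIX/PPC64 GPR restore that also restores LR
   is "_restgpr0_29"; Darwin instead emits an offset into its single
   save/rest sequences, e.g. "*saveFP+60 ; save f29-f31".  */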
21035 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
21036 SEL selects the register class and the save/restore variant. */
21038 static rtx
21039 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
21041 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
21042 ? info->first_gp_reg_save
21043 : (sel & SAVRES_REG) == SAVRES_FPR
21044 ? info->first_fp_reg_save - 32
21045 : (sel & SAVRES_REG) == SAVRES_VR
21046 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
21047 : -1);
21048 rtx sym;
21049 int select = sel;
21051 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
21052 versions of the gpr routines. */
21053 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
21054 && info->spe_64bit_regs_used)
21055 select ^= SAVRES_FPR ^ SAVRES_GPR;
21057 /* Don't generate bogus routine names. */
21058 gcc_assert (FIRST_SAVRES_REGISTER <= regno
21059 && regno <= LAST_SAVRES_REGISTER
21060 && select >= 0 && select <= 12);
21062 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
21064 if (sym == NULL)
21066 char *name;
21068 name = rs6000_savres_routine_name (info, regno, sel);
21070 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
21071 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
21072 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
21075 return sym;
21078 /* Emit a sequence of insns, including a stack tie if needed, for
21079 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
21080 reset the stack pointer, but move the base of the frame into
21081 reg UPDT_REGNO for use by out-of-line register restore routines. */
21083 static rtx
21084 rs6000_emit_stack_reset (rs6000_stack_t *info,
21085 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
21086 unsigned updt_regno)
21088 rtx updt_reg_rtx;
21090 /* This blockage is needed so that sched doesn't decide to move
21091 the sp change before the register restores. */
21092 if (DEFAULT_ABI == ABI_V4
21093 || (TARGET_SPE_ABI
21094 && info->spe_64bit_regs_used != 0
21095 && info->first_gp_reg_save != 32))
21096 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
21098 /* If we are restoring registers out-of-line, we will be using the
21099 "exit" variants of the restore routines, which will reset the
21100 stack for us. But we do need to point updt_reg into the
21101 right place for those routines. */
21102 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
21104 if (frame_off != 0)
21105 return emit_insn (gen_add3_insn (updt_reg_rtx,
21106 frame_reg_rtx, GEN_INT (frame_off)));
21107 else if (REGNO (frame_reg_rtx) != updt_regno)
21108 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
21110 return NULL_RTX;
21113 /* Return the register number used as a pointer by out-of-line
21114 save/restore functions. */
21116 static inline unsigned
21117 ptr_regno_for_savres (int sel)
21119 if (DEFAULT_ABI == ABI_AIX)
21120 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
21121 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
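/* In tabular form: AIX uses r1 for the FPR routines or when LR is
   involved, and r12 otherwise; Darwin uses r1 for FPRs and r11
   otherwise; V4 always uses r11.  */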
21124 /* Construct a parallel rtx describing the effect of a call to an
21125 out-of-line register save/restore routine, and emit the insn
21126 or jump_insn as appropriate. */
21128 static rtx
21129 rs6000_emit_savres_rtx (rs6000_stack_t *info,
21130 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
21131 enum machine_mode reg_mode, int sel)
21133 int i;
21134 int offset, start_reg, end_reg, n_regs, use_reg;
21135 int reg_size = GET_MODE_SIZE (reg_mode);
21136 rtx sym;
21137 rtvec p;
21138 rtx par, insn;
21140 offset = 0;
21141 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
21142 ? info->first_gp_reg_save
21143 : (sel & SAVRES_REG) == SAVRES_FPR
21144 ? info->first_fp_reg_save
21145 : (sel & SAVRES_REG) == SAVRES_VR
21146 ? info->first_altivec_reg_save
21147 : -1);
21148 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
21149 ? 32
21150 : (sel & SAVRES_REG) == SAVRES_FPR
21151 ? 64
21152 : (sel & SAVRES_REG) == SAVRES_VR
21153 ? LAST_ALTIVEC_REGNO + 1
21154 : -1);
21155 n_regs = end_reg - start_reg;
21156 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
21157 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
21158 + n_regs);
21160 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
21161 RTVEC_ELT (p, offset++) = ret_rtx;
21163 RTVEC_ELT (p, offset++)
21164 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
21166 sym = rs6000_savres_routine_sym (info, sel);
21167 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
21169 use_reg = ptr_regno_for_savres (sel);
21170 if ((sel & SAVRES_REG) == SAVRES_VR)
21172 /* Vector regs are saved/restored using [reg+reg] addressing. */
21173 RTVEC_ELT (p, offset++)
21174 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
21175 RTVEC_ELT (p, offset++)
21176 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
21178 else
21179 RTVEC_ELT (p, offset++)
21180 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
21182 for (i = 0; i < end_reg - start_reg; i++)
21183 RTVEC_ELT (p, i + offset)
21184 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
21185 frame_reg_rtx, save_area_offset + reg_size * i,
21186 (sel & SAVRES_SAVE) != 0);
21188 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
21189 RTVEC_ELT (p, i + offset)
21190 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
21192 par = gen_rtx_PARALLEL (VOIDmode, p);
21194 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
21196 insn = emit_jump_insn (par);
21197 JUMP_LABEL (insn) = ret_rtx;
21199 else
21200 insn = emit_insn (par);
21201 return insn;
21204 /* Determine whether the gp REG is really used. */
21206 static bool
21207 rs6000_reg_live_or_pic_offset_p (int reg)
21209 /* If the function calls eh_return, treat as used all the registers
21210 that would otherwise be checked for liveness. This is required for the PIC offset
21211 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
21212 register allocation purposes in this case. */
21214 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
21215 && (!call_used_regs[reg]
21216 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
21217 && !TARGET_SINGLE_PIC_BASE
21218 && TARGET_TOC && TARGET_MINIMAL_TOC)))
21219 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
21220 && !TARGET_SINGLE_PIC_BASE
21221 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
21222 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
21225 /* Emit function prologue as insns. */
21227 void
21228 rs6000_emit_prologue (void)
21230 rs6000_stack_t *info = rs6000_stack_info ();
21231 enum machine_mode reg_mode = Pmode;
21232 int reg_size = TARGET_32BIT ? 4 : 8;
21233 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
21234 rtx frame_reg_rtx = sp_reg_rtx;
21235 unsigned int cr_save_regno;
21236 rtx cr_save_rtx = NULL_RTX;
21237 rtx insn;
21238 int strategy;
21239 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21240 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21241 && call_used_regs[STATIC_CHAIN_REGNUM]);
21242 /* Offset to top of frame for frame_reg and sp respectively. */
21243 HOST_WIDE_INT frame_off = 0;
21244 HOST_WIDE_INT sp_off = 0;
21246 #ifdef ENABLE_CHECKING
21247 /* Track and check usage of r0, r11, r12. */
21248 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
21249 #define START_USE(R) do \
21251 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
21252 reg_inuse |= 1 << (R); \
21253 } while (0)
21254 #define END_USE(R) do \
21256 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
21257 reg_inuse &= ~(1 << (R)); \
21258 } while (0)
21259 #define NOT_INUSE(R) do \
21261 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
21262 } while (0)
21263 #else
21264 #define START_USE(R) do {} while (0)
21265 #define END_USE(R) do {} while (0)
21266 #define NOT_INUSE(R) do {} while (0)
21267 #endif
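/* For example, START_USE (11) asserts that bit 11 of reg_inuse is
   clear and then sets it; a second START_USE (11) before the matching
   END_USE (11) trips the assert, catching prologue code that hands the
   same scratch register to two different users.  With checking
   disabled the macros compile away entirely.  */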
21269 if (flag_stack_usage_info)
21270 current_function_static_stack_size = info->total_size;
21272 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
21273 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
21275 if (TARGET_FIX_AND_CONTINUE)
21277 /* gdb on darwin arranges to forward a function from the old
21278 address by modifying the first 5 instructions of the function
21279 to branch to the overriding function. This is necessary to
21280 permit function pointers that point to the old function to
21281 actually forward to the new function. */
21282 emit_insn (gen_nop ());
21283 emit_insn (gen_nop ());
21284 emit_insn (gen_nop ());
21285 emit_insn (gen_nop ());
21286 emit_insn (gen_nop ());
21289 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
21291 reg_mode = V2SImode;
21292 reg_size = 8;
21295 /* Handle world saves specially here. */
21296 if (WORLD_SAVE_P (info))
21298 int i, j, sz;
21299 rtx treg;
21300 rtvec p;
21301 rtx reg0;
21303 /* save_world expects lr in r0. */
21304 reg0 = gen_rtx_REG (Pmode, 0);
21305 if (info->lr_save_p)
21307 insn = emit_move_insn (reg0,
21308 gen_rtx_REG (Pmode, LR_REGNO));
21309 RTX_FRAME_RELATED_P (insn) = 1;
21312 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
21313 assumptions about the offsets of various bits of the stack
21314 frame. */
21315 gcc_assert (info->gp_save_offset == -220
21316 && info->fp_save_offset == -144
21317 && info->lr_save_offset == 8
21318 && info->cr_save_offset == 4
21319 && info->push_p
21320 && info->lr_save_p
21321 && (!crtl->calls_eh_return
21322 || info->ehrd_offset == -432)
21323 && info->vrsave_save_offset == -224
21324 && info->altivec_save_offset == -416);
21326 treg = gen_rtx_REG (SImode, 11);
21327 emit_move_insn (treg, GEN_INT (-info->total_size));
21329 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
21330 in R11. It also clobbers R12, so beware! */
21332 /* Preserve CR2 for save_world prologues. */
21333 sz = 5;
21334 sz += 32 - info->first_gp_reg_save;
21335 sz += 64 - info->first_fp_reg_save;
21336 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
21337 p = rtvec_alloc (sz);
21338 j = 0;
21339 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
21340 gen_rtx_REG (SImode,
21341 LR_REGNO));
21342 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
21343 gen_rtx_SYMBOL_REF (Pmode,
21344 "*save_world"));
21345 /* We do floats first so that the instruction pattern matches
21346 properly. */
21347 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21348 RTVEC_ELT (p, j++)
21349 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21350 ? DFmode : SFmode,
21351 info->first_fp_reg_save + i),
21352 frame_reg_rtx,
21353 info->fp_save_offset + frame_off + 8 * i);
21354 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
21355 RTVEC_ELT (p, j++)
21356 = gen_frame_store (gen_rtx_REG (V4SImode,
21357 info->first_altivec_reg_save + i),
21358 frame_reg_rtx,
21359 info->altivec_save_offset + frame_off + 16 * i);
21360 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21361 RTVEC_ELT (p, j++)
21362 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21363 frame_reg_rtx,
21364 info->gp_save_offset + frame_off + reg_size * i);
21366 /* CR register traditionally saved as CR2. */
21367 RTVEC_ELT (p, j++)
21368 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
21369 frame_reg_rtx, info->cr_save_offset + frame_off);
21370 /* Explain the use of R0. */
21371 if (info->lr_save_p)
21372 RTVEC_ELT (p, j++)
21373 = gen_frame_store (reg0,
21374 frame_reg_rtx, info->lr_save_offset + frame_off);
21375 /* Explain what happens to the stack pointer. */
21377 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
21378 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
21381 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21382 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21383 treg, GEN_INT (-info->total_size));
21384 sp_off = frame_off = info->total_size;
21387 strategy = info->savres_strategy;
21389 /* For V.4, update stack before we do any saving and set back pointer. */
21390 if (! WORLD_SAVE_P (info)
21391 && info->push_p
21392 && (DEFAULT_ABI == ABI_V4
21393 || crtl->calls_eh_return))
21395 bool need_r11 = (TARGET_SPE
21396 ? (!(strategy & SAVE_INLINE_GPRS)
21397 && info->spe_64bit_regs_used == 0)
21398 : (!(strategy & SAVE_INLINE_FPRS)
21399 || !(strategy & SAVE_INLINE_GPRS)
21400 || !(strategy & SAVE_INLINE_VRS)));
21401 int ptr_regno = -1;
21402 rtx ptr_reg = NULL_RTX;
21403 int ptr_off = 0;
21405 if (info->total_size < 32767)
21406 frame_off = info->total_size;
21407 else if (need_r11)
21408 ptr_regno = 11;
21409 else if (info->cr_save_p
21410 || info->lr_save_p
21411 || info->first_fp_reg_save < 64
21412 || info->first_gp_reg_save < 32
21413 || info->altivec_size != 0
21414 || info->vrsave_mask != 0
21415 || crtl->calls_eh_return)
21416 ptr_regno = 12;
21417 else
21419 /* The prologue won't be saving any regs so there is no need
21420 to set up a frame register to access any frame save area.
21421 We also won't be using frame_off anywhere below, but set
21422 the correct value anyway to protect against future
21423 changes to this function. */
21424 frame_off = info->total_size;
21426 if (ptr_regno != -1)
21428 /* Set up the frame offset to that needed by the first
21429 out-of-line save function. */
21430 START_USE (ptr_regno);
21431 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21432 frame_reg_rtx = ptr_reg;
21433 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
21434 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
21435 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
21436 ptr_off = info->gp_save_offset + info->gp_size;
21437 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
21438 ptr_off = info->altivec_save_offset + info->altivec_size;
21439 frame_off = -ptr_off;
21441 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
21442 sp_off = info->total_size;
21443 if (frame_reg_rtx != sp_reg_rtx)
21444 rs6000_emit_stack_tie (frame_reg_rtx, false);
21447 /* If we use the link register, get it into r0. */
21448 if (!WORLD_SAVE_P (info) && info->lr_save_p)
21450 rtx addr, reg, mem;
21452 reg = gen_rtx_REG (Pmode, 0);
21453 START_USE (0);
21454 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
21455 RTX_FRAME_RELATED_P (insn) = 1;
21457 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
21458 | SAVE_NOINLINE_FPRS_SAVES_LR)))
21460 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
21461 GEN_INT (info->lr_save_offset + frame_off));
21462 mem = gen_rtx_MEM (Pmode, addr);
21463 /* This MEM should not use rs6000_sr_alias_set, because of
21464 __builtin_return_address. */
21466 insn = emit_move_insn (mem, reg);
21467 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21468 NULL_RTX, NULL_RTX);
21469 END_USE (0);
21473 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
21474 r12 will be needed by the out-of-line gpr save. */
21475 cr_save_regno = (DEFAULT_ABI == ABI_AIX
21476 && !(strategy & (SAVE_INLINE_GPRS
21477 | SAVE_NOINLINE_GPRS_SAVES_LR))
21478 ? 11 : 12);
21479 if (!WORLD_SAVE_P (info)
21480 && info->cr_save_p
21481 && REGNO (frame_reg_rtx) != cr_save_regno
21482 && !(using_static_chain_p && cr_save_regno == 11))
21484 rtx set;
21486 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
21487 START_USE (cr_save_regno);
21488 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
21489 RTX_FRAME_RELATED_P (insn) = 1;
21490 /* Now, there's no way that dwarf2out_frame_debug_expr is going
21491 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
21492 But that's OK. All we have to do is specify that _one_ condition
21493 code register is saved in this stack slot. The thrower's epilogue
21494 will then restore all the call-saved registers.
21495 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
21496 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
21497 gen_rtx_REG (SImode, CR2_REGNO));
21498 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
21501 /* Do any required saving of FPRs. If only one or two need saving, do
21502 it ourselves; otherwise, call an out-of-line routine. */
21503 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
21505 int i;
21506 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21507 if (save_reg_p (info->first_fp_reg_save + i))
21508 emit_frame_save (frame_reg_rtx,
21509 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21510 ? DFmode : SFmode),
21511 info->first_fp_reg_save + i,
21512 info->fp_save_offset + frame_off + 8 * i,
21513 sp_off - frame_off);
21515 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
21517 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
21518 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
21519 unsigned ptr_regno = ptr_regno_for_savres (sel);
21520 rtx ptr_reg = frame_reg_rtx;
21522 if (REGNO (frame_reg_rtx) == ptr_regno)
21523 gcc_checking_assert (frame_off == 0);
21524 else
21526 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21527 NOT_INUSE (ptr_regno);
21528 emit_insn (gen_add3_insn (ptr_reg,
21529 frame_reg_rtx, GEN_INT (frame_off)));
21531 insn = rs6000_emit_savres_rtx (info, ptr_reg,
21532 info->fp_save_offset,
21533 info->lr_save_offset,
21534 DFmode, sel);
21535 rs6000_frame_related (insn, ptr_reg, sp_off,
21536 NULL_RTX, NULL_RTX);
21537 if (lr)
21538 END_USE (0);
21541 /* Save GPRs. This is done as a PARALLEL if we are using
21542 the store-multiple instructions. */
21543 if (!WORLD_SAVE_P (info)
21544 && TARGET_SPE_ABI
21545 && info->spe_64bit_regs_used != 0
21546 && info->first_gp_reg_save != 32)
21548 int i;
21549 rtx spe_save_area_ptr;
21550 HOST_WIDE_INT save_off;
21551 int ool_adjust = 0;
21553 /* Determine whether we can address all of the registers that need
21554 to be saved with an offset from frame_reg_rtx that fits in
21555 the small const field for SPE memory instructions. */
21556 int spe_regs_addressable
21557 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21558 + reg_size * (32 - info->first_gp_reg_save - 1))
21559 && (strategy & SAVE_INLINE_GPRS));
21561 if (spe_regs_addressable)
21563 spe_save_area_ptr = frame_reg_rtx;
21564 save_off = frame_off;
21566 else
21568 /* Make r11 point to the start of the SPE save area. We need
21569 to be careful here if r11 is holding the static chain. If
21570 it is, then temporarily save it in r0. */
21571 HOST_WIDE_INT offset;
21573 if (!(strategy & SAVE_INLINE_GPRS))
21574 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
21575 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
21576 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
21577 save_off = frame_off - offset;
21579 if (using_static_chain_p)
21581 rtx r0 = gen_rtx_REG (Pmode, 0);
21583 START_USE (0);
21584 gcc_assert (info->first_gp_reg_save > 11);
21586 emit_move_insn (r0, spe_save_area_ptr);
21588 else if (REGNO (frame_reg_rtx) != 11)
21589 START_USE (11);
21591 emit_insn (gen_addsi3 (spe_save_area_ptr,
21592 frame_reg_rtx, GEN_INT (offset)));
21593 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
21594 frame_off = -info->spe_gp_save_offset + ool_adjust;
21597 if ((strategy & SAVE_INLINE_GPRS))
21599 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21600 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21601 emit_frame_save (spe_save_area_ptr, reg_mode,
21602 info->first_gp_reg_save + i,
21603 (info->spe_gp_save_offset + save_off
21604 + reg_size * i),
21605 sp_off - save_off);
21607 else
21609 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
21610 info->spe_gp_save_offset + save_off,
21611 0, reg_mode,
21612 SAVRES_SAVE | SAVRES_GPR);
21614 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
21615 NULL_RTX, NULL_RTX);
21618 /* Move the static chain pointer back. */
21619 if (!spe_regs_addressable)
21621 if (using_static_chain_p)
21623 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
21624 END_USE (0);
21626 else if (REGNO (frame_reg_rtx) != 11)
21627 END_USE (11);
21630 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
21632 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
21633 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
21634 unsigned ptr_regno = ptr_regno_for_savres (sel);
21635 rtx ptr_reg = frame_reg_rtx;
21636 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
21637 int end_save = info->gp_save_offset + info->gp_size;
21638 int ptr_off;
21640 if (!ptr_set_up)
21641 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21643 /* Need to adjust r11 (r12) if we saved any FPRs. */
21644 if (end_save + frame_off != 0)
21646 rtx offset = GEN_INT (end_save + frame_off);
21648 if (ptr_set_up)
21649 frame_off = -end_save;
21650 else
21651 NOT_INUSE (ptr_regno);
21652 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21654 else if (!ptr_set_up)
21656 NOT_INUSE (ptr_regno);
21657 emit_move_insn (ptr_reg, frame_reg_rtx);
21659 ptr_off = -end_save;
21660 insn = rs6000_emit_savres_rtx (info, ptr_reg,
21661 info->gp_save_offset + ptr_off,
21662 info->lr_save_offset + ptr_off,
21663 reg_mode, sel);
21664 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
21665 NULL_RTX, NULL_RTX);
21666 if (lr)
21667 END_USE (0);
21669 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
21671 rtvec p;
21672 int i;
21673 p = rtvec_alloc (32 - info->first_gp_reg_save);
21674 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21675 RTVEC_ELT (p, i)
21676 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21677 frame_reg_rtx,
21678 info->gp_save_offset + frame_off + reg_size * i);
21679 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21680 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21681 NULL_RTX, NULL_RTX);
21683 else if (!WORLD_SAVE_P (info))
21685 int i;
21686 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21687 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21688 emit_frame_save (frame_reg_rtx, reg_mode,
21689 info->first_gp_reg_save + i,
21690 info->gp_save_offset + frame_off + reg_size * i,
21691 sp_off - frame_off);
21694 if (crtl->calls_eh_return)
21696 unsigned int i;
21697 rtvec p;
21699 for (i = 0; ; ++i)
21701 unsigned int regno = EH_RETURN_DATA_REGNO (i);
21702 if (regno == INVALID_REGNUM)
21703 break;
21706 p = rtvec_alloc (i);
21708 for (i = 0; ; ++i)
21710 unsigned int regno = EH_RETURN_DATA_REGNO (i);
21711 if (regno == INVALID_REGNUM)
21712 break;
21714 insn
21715 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
21716 sp_reg_rtx,
21717 info->ehrd_offset + sp_off + reg_size * (int) i);
21718 RTVEC_ELT (p, i) = insn;
21719 RTX_FRAME_RELATED_P (insn) = 1;
21722 insn = emit_insn (gen_blockage ());
21723 RTX_FRAME_RELATED_P (insn) = 1;
21724 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
21727 /* In the AIX ABI we need to make sure r2 is really saved. */
21728 if (TARGET_AIX && crtl->calls_eh_return)
21730 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
21731 rtx save_insn, join_insn, note;
21732 long toc_restore_insn;
21734 tmp_reg = gen_rtx_REG (Pmode, 11);
21735 tmp_reg_si = gen_rtx_REG (SImode, 11);
21736 if (using_static_chain_p)
21738 START_USE (0);
21739 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
21741 else
21742 START_USE (11);
21743 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
21744 /* Peek at instruction to which this function returns. If it's
21745 restoring r2, then we know we've already saved r2. We can't
21746 unconditionally save r2 because the value we have will already
21747 be updated if we arrived at this function via a plt call or
21748 toc adjusting stub. */
21749 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
21750 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
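/* For the record, these constants encode "lwz r2,20(r1)" (32-bit) and
   "ld r2,40(r1)" (64-bit), i.e. the TOC restore an AIX-style caller
   places after a call through a pointer or TOC-adjusting stub; the
   offsets 20 and 40 are the ABI's TOC save slots.  */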
21751 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
21752 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
21753 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
21754 validate_condition_mode (EQ, CCUNSmode);
21755 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
21756 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
21757 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
21758 toc_save_done = gen_label_rtx ();
21759 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
21760 gen_rtx_EQ (VOIDmode, compare_result,
21761 const0_rtx),
21762 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
21763 pc_rtx);
21764 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
21765 JUMP_LABEL (jump) = toc_save_done;
21766 LABEL_NUSES (toc_save_done) += 1;
21768 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
21769 TOC_REGNUM, frame_off + 5 * reg_size,
21770 sp_off - frame_off);
21772 emit_label (toc_save_done);
21774 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
21775 have a CFG that has different saves along different paths.
21776 Move the note to a dummy blockage insn, which describes that
21777 R2 is unconditionally saved after the label. */
21778 /* ??? An alternate representation might be a special insn pattern
21779 containing both the branch and the store. That might give the
21780 code that minimizes the number of DW_CFA_advance opcodes more
21781 freedom in placing the annotations. */
21782 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
21783 if (note)
21784 remove_note (save_insn, note);
21785 else
21786 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
21787 copy_rtx (PATTERN (save_insn)), NULL_RTX);
21788 RTX_FRAME_RELATED_P (save_insn) = 0;
21790 join_insn = emit_insn (gen_blockage ());
21791 REG_NOTES (join_insn) = note;
21792 RTX_FRAME_RELATED_P (join_insn) = 1;
21794 if (using_static_chain_p)
21796 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
21797 END_USE (0);
21799 else
21800 END_USE (11);
21803 /* Save CR if we use any that must be preserved. */
21804 if (!WORLD_SAVE_P (info) && info->cr_save_p)
21806 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
21807 GEN_INT (info->cr_save_offset + frame_off));
21808 rtx mem = gen_frame_mem (SImode, addr);
21809 /* See the large comment above about why CR2_REGNO is used. */
21810 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
21812 /* If we didn't copy cr before, do so now using r0. */
21813 if (cr_save_rtx == NULL_RTX)
21815 rtx set;
21817 START_USE (0);
21818 cr_save_rtx = gen_rtx_REG (SImode, 0);
21819 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
21820 RTX_FRAME_RELATED_P (insn) = 1;
21821 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
21822 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
21824 insn = emit_move_insn (mem, cr_save_rtx);
21825 END_USE (REGNO (cr_save_rtx));
21827 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21828 NULL_RTX, NULL_RTX);
21831 /* Update stack and set back pointer unless this is V.4,
21832 for which it was done previously. */
21833 if (!WORLD_SAVE_P (info) && info->push_p
21834 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
21836 rtx ptr_reg = NULL;
21837 int ptr_off = 0;
21839 /* If saving altivec regs we need to be able to address all save
21840 locations using a 16-bit offset. */
21841 if ((strategy & SAVE_INLINE_VRS) == 0
21842 || (info->altivec_size != 0
21843 && (info->altivec_save_offset + info->altivec_size - 16
21844 + info->total_size - frame_off) > 32767)
21845 || (info->vrsave_size != 0
21846 && (info->vrsave_save_offset
21847 + info->total_size - frame_off) > 32767))
21849 int sel = SAVRES_SAVE | SAVRES_VR;
21850 unsigned ptr_regno = ptr_regno_for_savres (sel);
21852 if (using_static_chain_p
21853 && ptr_regno == STATIC_CHAIN_REGNUM)
21854 ptr_regno = 12;
21855 if (REGNO (frame_reg_rtx) != ptr_regno)
21856 START_USE (ptr_regno);
21857 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21858 frame_reg_rtx = ptr_reg;
21859 ptr_off = info->altivec_save_offset + info->altivec_size;
21860 frame_off = -ptr_off;
21862 else if (REGNO (frame_reg_rtx) == 1)
21863 frame_off = info->total_size;
21864 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
21865 sp_off = info->total_size;
21866 if (frame_reg_rtx != sp_reg_rtx)
21867 rs6000_emit_stack_tie (frame_reg_rtx, false);
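/* The 32767 tests above reflect the d-form load/store limit: the
   displacement field is a signed 16-bit immediate, so any save slot
   farther than 32767 bytes from the frame register would be
   unaddressable, which is why we rebase onto a pointer register near
   the AltiVec save area instead.  */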
21870 /* Set frame pointer, if needed. */
21871 if (frame_pointer_needed)
21873 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
21874 sp_reg_rtx);
21875 RTX_FRAME_RELATED_P (insn) = 1;
21878 /* Save AltiVec registers if needed. Save here because the red zone does
21879 not always include AltiVec registers. */
21880 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
21881 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
21883 int end_save = info->altivec_save_offset + info->altivec_size;
21884 int ptr_off;
21885 /* Oddly, the vector save/restore functions point r0 at the end
21886 of the save area, then use r11 or r12 to load offsets for
21887 [reg+reg] addressing. */
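/* For illustration, the out-of-line vector save helpers behave
   roughly like this (a sketch, not the exact libgcc code):
     _savevr_20: li   r11,-192     # v20..v31 = 12 regs * 16 bytes
                 stvx v20,r11,r0   # [reg+reg] addressing off r0
                 li   r11,-176
                 stvx v21,r11,r0
                 ...
   which is why r0 must point at the *end* of the save area.  */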
21888 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
21889 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
21890 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
21892 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
21893 NOT_INUSE (0);
21894 if (end_save + frame_off != 0)
21896 rtx offset = GEN_INT (end_save + frame_off);
21898 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21900 else
21901 emit_move_insn (ptr_reg, frame_reg_rtx);
21903 ptr_off = -end_save;
21904 insn = rs6000_emit_savres_rtx (info, scratch_reg,
21905 info->altivec_save_offset + ptr_off,
21906 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
21907 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
21908 NULL_RTX, NULL_RTX);
21909 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
21911 /* The oddity mentioned above clobbered our frame reg. */
21912 emit_move_insn (frame_reg_rtx, ptr_reg);
21913 frame_off = ptr_off;
21916 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
21917 && info->altivec_size != 0)
21919 int i;
21921 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21922 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
21924 rtx areg, savereg, mem;
21925 int offset;
21927 offset = (info->altivec_save_offset + frame_off
21928 + 16 * (i - info->first_altivec_reg_save));
21930 savereg = gen_rtx_REG (V4SImode, i);
21932 NOT_INUSE (0);
21933 areg = gen_rtx_REG (Pmode, 0);
21934 emit_move_insn (areg, GEN_INT (offset));
21936 /* AltiVec addressing mode is [reg+reg]. */
21937 mem = gen_frame_mem (V4SImode,
21938 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
21940 insn = emit_move_insn (mem, savereg);
21942 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21943 areg, GEN_INT (offset));
21947 /* VRSAVE is a bit vector representing which AltiVec registers
21948 are used. The OS uses this to determine which vector
21949 registers to save on a context switch. We need to save
21950 VRSAVE on the stack frame, add whatever AltiVec registers we
21951 used in this function, and do the corresponding magic in the
21952 epilogue. */
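/* Example: a function touching only v20 and v21 has vrsave_mask
   0x80000000 >> 20 | 0x80000000 >> 21 == 0x00000C00; bit 0 (the
   most significant bit) corresponds to v0.  */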
21954 if (!WORLD_SAVE_P (info)
21955 && TARGET_ALTIVEC
21956 && TARGET_ALTIVEC_VRSAVE
21957 && info->vrsave_mask != 0)
21959 rtx reg, vrsave;
21960 int offset;
21961 int save_regno;
21963 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
21964 be using r12 as frame_reg_rtx and r11 as the static chain
21965 pointer for nested functions. */
21966 save_regno = 12;
21967 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
21968 save_regno = 11;
21969 else if (REGNO (frame_reg_rtx) == 12)
21971 save_regno = 11;
21972 if (using_static_chain_p)
21973 save_regno = 0;
21976 NOT_INUSE (save_regno);
21977 reg = gen_rtx_REG (SImode, save_regno);
21978 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
21979 if (TARGET_MACHO)
21980 emit_insn (gen_get_vrsave_internal (reg));
21981 else
21982 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
21984 /* Save VRSAVE. */
21985 offset = info->vrsave_save_offset + frame_off;
21986 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
21988 /* Include the registers in the mask. */
21989 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
21991 insn = emit_insn (generate_set_vrsave (reg, info, 0));
21994 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
21995 if (!TARGET_SINGLE_PIC_BASE
21996 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
21997 || (DEFAULT_ABI == ABI_V4
21998 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
21999 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
22001 /* If emit_load_toc_table will use the link register, we need to save
22002 it. We use R12 for this purpose because emit_load_toc_table
22003 can use register 0. This allows us to use a plain 'blr' to return
22004 from the procedure more often. */
22005 int save_LR_around_toc_setup = (TARGET_ELF
22006 && DEFAULT_ABI != ABI_AIX
22007 && flag_pic
22008 && ! info->lr_save_p
22009 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
22010 if (save_LR_around_toc_setup)
22012 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
22013 rtx tmp = gen_rtx_REG (Pmode, 12);
22015 insn = emit_move_insn (tmp, lr);
22016 RTX_FRAME_RELATED_P (insn) = 1;
22018 rs6000_emit_load_toc_table (TRUE);
22020 insn = emit_move_insn (lr, tmp);
22021 add_reg_note (insn, REG_CFA_RESTORE, lr);
22022 RTX_FRAME_RELATED_P (insn) = 1;
22024 else
22025 rs6000_emit_load_toc_table (TRUE);
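/* Sketch of what the save_LR_around_toc_setup path amounts to:
     mflr r12           # stash LR in r12
     <TOC setup code>   # may clobber LR via a bl
     mtlr r12           # put LR back so a plain blr still returns  */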
22028 #if TARGET_MACHO
22029 if (!TARGET_SINGLE_PIC_BASE
22030 && DEFAULT_ABI == ABI_DARWIN
22031 && flag_pic && crtl->uses_pic_offset_table)
22033 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
22034 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
22036 /* Save and restore LR locally around this call (in R0). */
22037 if (!info->lr_save_p)
22038 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
22040 emit_insn (gen_load_macho_picbase (src));
22042 emit_move_insn (gen_rtx_REG (Pmode,
22043 RS6000_PIC_OFFSET_TABLE_REGNUM),
22044 lr);
22046 if (!info->lr_save_p)
22047 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
22049 #endif
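/* Sketch of the Darwin sequence above (label name illustrative):
     mflr r0                # only if !info->lr_save_p
     bcl  20,31,"L...$pb"   # picbase load sets LR to the label
   "L...$pb":
     mflr <picreg>          # copy the picbase into the PIC offset reg
     mtlr r0                # only if !info->lr_save_p  */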
22051 /* If we need to, save the TOC register after doing the stack setup.
22052 Do not emit eh frame info for this save. The unwinder wants info,
22053 conceptually attached to instructions in this function, about
22054 register values in the caller of this function. This R2 may have
22055 already been changed from the value in the caller.
22056 We don't attempt to write accurate DWARF EH frame info for R2
22057 because code emitted by gcc for a (non-pointer) function call
22058 doesn't save and restore R2. Instead, R2 is managed out-of-line
22059 by a linker generated plt call stub when the function resides in
22060 a shared library. This behaviour is costly to describe in DWARF,
22061 both in terms of the size of DWARF info and the time taken in the
22062 unwinder to interpret it. R2 changes, apart from the
22063 calls_eh_return case earlier in this function, are handled by
22064 linux-unwind.h frob_update_context. */
22065 if (rs6000_save_toc_in_prologue_p ())
22067 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
22068 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
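/* 5 * reg_size is the ABI-defined TOC save slot: sp+20 on 32-bit and
   sp+40 on 64-bit, matching the lwz r2,20(r1) / ld r2,40(r1)
   encodings the prologue tested for above.  */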
22072 /* Write function prologue. */
22074 static void
22075 rs6000_output_function_prologue (FILE *file,
22076 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
22078 rs6000_stack_t *info = rs6000_stack_info ();
22080 if (TARGET_DEBUG_STACK)
22081 debug_stack_info (info);
22083 /* Write .extern for any function we will call to save and restore
22084 fp values. */
22085 if (info->first_fp_reg_save < 64
22086 && !TARGET_MACHO
22087 && !TARGET_ELF)
22089 char *name;
22090 int regno = info->first_fp_reg_save - 32;
22092 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
22094 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
22095 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
22096 name = rs6000_savres_routine_name (info, regno, sel);
22097 fprintf (file, "\t.extern %s\n", name);
22099 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
22101 bool lr = (info->savres_strategy
22102 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
22103 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
22104 name = rs6000_savres_routine_name (info, regno, sel);
22105 fprintf (file, "\t.extern %s\n", name);
22109 rs6000_pic_labelno++;
22112 /* Non-zero if VMX regs are restored before the frame pop, zero if
22113 we restore after the pop when possible. */
22114 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
22116 /* Restoring cr is a two-step process: loading a reg from the frame
22117 save, then moving the reg to cr. For ABI_V4 we must let the
22118 unwinder know that the stack location is no longer valid at or
22119 before the stack deallocation, but we can't emit a cfa_restore for
22120 cr at the stack deallocation like we do for other registers.
22121 The trouble is that it is possible for the move to cr to be
22122 scheduled after the stack deallocation. So say exactly where cr
22123 is located on each of the two insns. */
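/* Sketch of the two insns and their unwind notes for ABI_V4:
     lwz   rN,<cr_offset>(r1)  # load_cr_save: REG_CFA_REGISTER notes
                               # that cr now lives in rN
     addi  r1,r1,<frame_size>  # the pop may be scheduled between them
     mtcrf <mask>,rN           # restore_saved_cr: REG_CFA_RESTORE  */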
22125 static rtx
22126 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
22128 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
22129 rtx reg = gen_rtx_REG (SImode, regno);
22130 rtx insn = emit_move_insn (reg, mem);
22132 if (!exit_func && DEFAULT_ABI == ABI_V4)
22134 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
22135 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
22137 add_reg_note (insn, REG_CFA_REGISTER, set);
22138 RTX_FRAME_RELATED_P (insn) = 1;
22140 return reg;
22143 /* Reload CR from REG. */
22145 static void
22146 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
22148 int count = 0;
22149 int i;
22151 if (using_mfcr_multiple)
22153 for (i = 0; i < 8; i++)
22154 if (save_reg_p (CR0_REGNO + i))
22155 count++;
22156 gcc_assert (count);
22159 if (using_mfcr_multiple && count > 1)
22161 rtvec p;
22162 int ndx;
22164 p = rtvec_alloc (count);
22166 ndx = 0;
22167 for (i = 0; i < 8; i++)
22168 if (save_reg_p (CR0_REGNO + i))
22170 rtvec r = rtvec_alloc (2);
22171 RTVEC_ELT (r, 0) = reg;
22172 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
22173 RTVEC_ELT (p, ndx) =
22174 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
22175 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
22176 ndx++;
22178 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22179 gcc_assert (ndx == count);
22181 else
22182 for (i = 0; i < 8; i++)
22183 if (save_reg_p (CR0_REGNO + i))
22184 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
22185 reg));
22187 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
22189 rtx insn = get_last_insn ();
22190 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
22192 add_reg_note (insn, REG_CFA_RESTORE, cr);
22193 RTX_FRAME_RELATED_P (insn) = 1;
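/* Example of the mask arithmetic above: if only cr2 must be restored,
   the UNSPEC_MOVESI_TO_CR operand is 1 << (7 - 2) == 0x20, exactly
   the mtcrf field-select bit for cr2.  */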
22197 /* Like cr, the move to lr instruction can be scheduled after the
22198 stack deallocation, but unlike cr, its stack frame save is still
22199 valid. So we only need to emit the cfa_restore on the correct
22200 instruction. */
22202 static void
22203 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
22205 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
22206 rtx reg = gen_rtx_REG (Pmode, regno);
22208 emit_move_insn (reg, mem);
22211 static void
22212 restore_saved_lr (int regno, bool exit_func)
22214 rtx reg = gen_rtx_REG (Pmode, regno);
22215 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
22216 rtx insn = emit_move_insn (lr, reg);
22218 if (!exit_func && flag_shrink_wrap)
22220 add_reg_note (insn, REG_CFA_RESTORE, lr);
22221 RTX_FRAME_RELATED_P (insn) = 1;
22225 static rtx
22226 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
22228 if (info->cr_save_p)
22229 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
22230 gen_rtx_REG (SImode, CR2_REGNO),
22231 cfa_restores);
22232 if (info->lr_save_p)
22233 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
22234 gen_rtx_REG (Pmode, LR_REGNO),
22235 cfa_restores);
22236 return cfa_restores;
22239 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
22240 V.4 doesn't have any stack cushion, while the AIX ABIs have 220 or 288
22241 bytes below the stack pointer that are not clobbered by signals. */
22243 static inline bool
22244 offset_below_red_zone_p (HOST_WIDE_INT offset)
22246 return offset < (DEFAULT_ABI == ABI_V4
22247 ? 0
22248 : TARGET_32BIT ? -220 : -288);
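/* For example, on a 64-bit AIX-style ABI an offset of -288 is still
   inside the protected red zone (the function returns false), while
   -289 is outside it (returns true) and so must be reloaded before
   the stack pointer moves past it.  */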
22251 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
22253 static void
22254 emit_cfa_restores (rtx cfa_restores)
22256 rtx insn = get_last_insn ();
22257 rtx *loc = &REG_NOTES (insn);
22259 while (*loc)
22260 loc = &XEXP (*loc, 1);
22261 *loc = cfa_restores;
22262 RTX_FRAME_RELATED_P (insn) = 1;
22265 /* Emit function epilogue as insns. */
22267 void
22268 rs6000_emit_epilogue (int sibcall)
22270 rs6000_stack_t *info;
22271 int restoring_GPRs_inline;
22272 int restoring_FPRs_inline;
22273 int using_load_multiple;
22274 int using_mtcr_multiple;
22275 int use_backchain_to_restore_sp;
22276 int restore_lr;
22277 int strategy;
22278 HOST_WIDE_INT frame_off = 0;
22279 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
22280 rtx frame_reg_rtx = sp_reg_rtx;
22281 rtx cfa_restores = NULL_RTX;
22282 rtx insn;
22283 rtx cr_save_reg = NULL_RTX;
22284 enum machine_mode reg_mode = Pmode;
22285 int reg_size = TARGET_32BIT ? 4 : 8;
22286 int i;
22287 bool exit_func;
22288 unsigned ptr_regno;
22290 info = rs6000_stack_info ();
22292 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
22294 reg_mode = V2SImode;
22295 reg_size = 8;
22298 strategy = info->savres_strategy;
22299 using_load_multiple = strategy & SAVRES_MULTIPLE;
22300 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
22301 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
22302 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
22303 || rs6000_cpu == PROCESSOR_PPC603
22304 || rs6000_cpu == PROCESSOR_PPC750
22305 || optimize_size);
22306 /* Restore via the backchain when we have a large frame, since this
22307 is more efficient than an addis, addi pair. The second condition
22308 here will not trigger at the moment; we don't actually need a
22309 frame pointer for alloca, but the generic parts of the compiler
22310 give us one anyway. */
22311 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
22312 || (cfun->calls_alloca
22313 && !frame_pointer_needed));
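/* Worked example of the trade-off: popping a 40000-byte frame by
   arithmetic takes two insns,
     addis r1,r1,1         # +65536
     addi  r1,r1,-25536    # 65536 - 25536 == 40000
   whereas the backchain pop is a single load:
     ld r1,0(r1)           # lwz on 32-bit  */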
22314 restore_lr = (info->lr_save_p
22315 && (restoring_FPRs_inline
22316 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
22317 && (restoring_GPRs_inline
22318 || info->first_fp_reg_save < 64));
22320 if (WORLD_SAVE_P (info))
22322 int i, j;
22323 char rname[30];
22324 const char *alloc_rname;
22325 rtvec p;
22327 /* eh_rest_world_r10 will return to the location saved in the LR
22328 stack slot (which is not likely to be our caller).
22329 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
22330 rest_world is similar, except any R10 parameter is ignored.
22331 The exception-handling stuff that was here in 2.95 is no
22332 longer necessary. */
22334 p = rtvec_alloc (9
22336 + 32 - info->first_gp_reg_save
22337 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
22338 + 63 + 1 - info->first_fp_reg_save);
22340 strcpy (rname, ((crtl->calls_eh_return) ?
22341 "*eh_rest_world_r10" : "*rest_world"));
22342 alloc_rname = ggc_strdup (rname);
22344 j = 0;
22345 RTVEC_ELT (p, j++) = ret_rtx;
22346 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
22347 gen_rtx_REG (Pmode,
22348 LR_REGNO));
22349 RTVEC_ELT (p, j++)
22350 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
22351 /* The instruction pattern requires a clobber here;
22352 it is shared with the restVEC helper. */
22353 RTVEC_ELT (p, j++)
22354 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
22357 /* CR register traditionally saved as CR2. */
22358 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
22359 RTVEC_ELT (p, j++)
22360 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
22361 if (flag_shrink_wrap)
22363 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
22364 gen_rtx_REG (Pmode, LR_REGNO),
22365 cfa_restores);
22366 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22370 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22372 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
22373 RTVEC_ELT (p, j++)
22374 = gen_frame_load (reg,
22375 frame_reg_rtx, info->gp_save_offset + reg_size * i);
22376 if (flag_shrink_wrap)
22377 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22379 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
22381 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
22382 RTVEC_ELT (p, j++)
22383 = gen_frame_load (reg,
22384 frame_reg_rtx, info->altivec_save_offset + 16 * i);
22385 if (flag_shrink_wrap)
22386 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22388 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
22390 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
22391 ? DFmode : SFmode),
22392 info->first_fp_reg_save + i);
22393 RTVEC_ELT (p, j++)
22394 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
22395 if (flag_shrink_wrap)
22396 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22398 RTVEC_ELT (p, j++)
22399 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
22400 RTVEC_ELT (p, j++)
22401 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
22402 RTVEC_ELT (p, j++)
22403 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
22404 RTVEC_ELT (p, j++)
22405 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
22406 RTVEC_ELT (p, j++)
22407 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
22408 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
22410 if (flag_shrink_wrap)
22412 REG_NOTES (insn) = cfa_restores;
22413 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
22414 RTX_FRAME_RELATED_P (insn) = 1;
22416 return;
22419 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
22420 if (info->push_p)
22421 frame_off = info->total_size;
22423 /* Restore AltiVec registers if we must do so before adjusting the
22424 stack. */
22425 if (TARGET_ALTIVEC_ABI
22426 && info->altivec_size != 0
22427 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22428 || (DEFAULT_ABI != ABI_V4
22429 && offset_below_red_zone_p (info->altivec_save_offset))))
22431 int i;
22432 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
22434 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
22435 if (use_backchain_to_restore_sp)
22437 int frame_regno = 11;
22439 if ((strategy & REST_INLINE_VRS) == 0)
22441 /* Of r11 and r12, select the one not clobbered by an
22442 out-of-line restore function for the frame register. */
22443 frame_regno = 11 + 12 - scratch_regno;
22445 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
22446 emit_move_insn (frame_reg_rtx,
22447 gen_rtx_MEM (Pmode, sp_reg_rtx));
22448 frame_off = 0;
22450 else if (frame_pointer_needed)
22451 frame_reg_rtx = hard_frame_pointer_rtx;
22453 if ((strategy & REST_INLINE_VRS) == 0)
22455 int end_save = info->altivec_save_offset + info->altivec_size;
22456 int ptr_off;
22457 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
22458 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
22460 if (end_save + frame_off != 0)
22462 rtx offset = GEN_INT (end_save + frame_off);
22464 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
22466 else
22467 emit_move_insn (ptr_reg, frame_reg_rtx);
22469 ptr_off = -end_save;
22470 insn = rs6000_emit_savres_rtx (info, scratch_reg,
22471 info->altivec_save_offset + ptr_off,
22472 0, V4SImode, SAVRES_VR);
22474 else
22476 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
22477 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22479 rtx addr, areg, mem, reg;
22481 areg = gen_rtx_REG (Pmode, 0);
22482 emit_move_insn
22483 (areg, GEN_INT (info->altivec_save_offset
22484 + frame_off
22485 + 16 * (i - info->first_altivec_reg_save)));
22487 /* AltiVec addressing mode is [reg+reg]. */
22488 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
22489 mem = gen_frame_mem (V4SImode, addr);
22491 reg = gen_rtx_REG (V4SImode, i);
22492 emit_move_insn (reg, mem);
22496 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
22497 if (((strategy & REST_INLINE_VRS) == 0
22498 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
22499 && (flag_shrink_wrap
22500 || (offset_below_red_zone_p
22501 (info->altivec_save_offset
22502 + 16 * (i - info->first_altivec_reg_save)))))
22504 rtx reg = gen_rtx_REG (V4SImode, i);
22505 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22509 /* Restore VRSAVE if we must do so before adjusting the stack. */
22510 if (TARGET_ALTIVEC
22511 && TARGET_ALTIVEC_VRSAVE
22512 && info->vrsave_mask != 0
22513 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22514 || (DEFAULT_ABI != ABI_V4
22515 && offset_below_red_zone_p (info->vrsave_save_offset))))
22517 rtx reg;
22519 if (frame_reg_rtx == sp_reg_rtx)
22521 if (use_backchain_to_restore_sp)
22523 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22524 emit_move_insn (frame_reg_rtx,
22525 gen_rtx_MEM (Pmode, sp_reg_rtx));
22526 frame_off = 0;
22528 else if (frame_pointer_needed)
22529 frame_reg_rtx = hard_frame_pointer_rtx;
22532 reg = gen_rtx_REG (SImode, 12);
22533 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22534 info->vrsave_save_offset + frame_off));
22536 emit_insn (generate_set_vrsave (reg, info, 1));
22539 insn = NULL_RTX;
22540 /* If we have a large stack frame, restore the old stack pointer
22541 using the backchain. */
22542 if (use_backchain_to_restore_sp)
22544 if (frame_reg_rtx == sp_reg_rtx)
22546 /* Under V.4, don't reset the stack pointer until after we're done
22547 loading the saved registers. */
22548 if (DEFAULT_ABI == ABI_V4)
22549 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22551 insn = emit_move_insn (frame_reg_rtx,
22552 gen_rtx_MEM (Pmode, sp_reg_rtx));
22553 frame_off = 0;
22555 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22556 && DEFAULT_ABI == ABI_V4)
22557 /* frame_reg_rtx has been set up by the altivec restore. */
22558 ;
22559 else
22561 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
22562 frame_reg_rtx = sp_reg_rtx;
22565 /* If we have a frame pointer, we can restore the old stack pointer
22566 from it. */
22567 else if (frame_pointer_needed)
22569 frame_reg_rtx = sp_reg_rtx;
22570 if (DEFAULT_ABI == ABI_V4)
22571 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22572 /* Prevent reordering memory accesses against stack pointer restore. */
22573 else if (cfun->calls_alloca
22574 || offset_below_red_zone_p (-info->total_size))
22575 rs6000_emit_stack_tie (frame_reg_rtx, true);
22577 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
22578 GEN_INT (info->total_size)));
22579 frame_off = 0;
22581 else if (info->push_p
22582 && DEFAULT_ABI != ABI_V4
22583 && !crtl->calls_eh_return)
22585 /* Prevent reordering memory accesses against stack pointer restore. */
22586 if (cfun->calls_alloca
22587 || offset_below_red_zone_p (-info->total_size))
22588 rs6000_emit_stack_tie (frame_reg_rtx, false);
22589 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
22590 GEN_INT (info->total_size)));
22591 frame_off = 0;
22593 if (insn && frame_reg_rtx == sp_reg_rtx)
22595 if (cfa_restores)
22597 REG_NOTES (insn) = cfa_restores;
22598 cfa_restores = NULL_RTX;
22600 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
22601 RTX_FRAME_RELATED_P (insn) = 1;
22604 /* Restore AltiVec registers if we have not done so already. */
22605 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22606 && TARGET_ALTIVEC_ABI
22607 && info->altivec_size != 0
22608 && (DEFAULT_ABI == ABI_V4
22609 || !offset_below_red_zone_p (info->altivec_save_offset)))
22611 int i;
22613 if ((strategy & REST_INLINE_VRS) == 0)
22615 int end_save = info->altivec_save_offset + info->altivec_size;
22616 int ptr_off;
22617 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
22618 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
22619 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
22621 if (end_save + frame_off != 0)
22623 rtx offset = GEN_INT (end_save + frame_off);
22625 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
22627 else
22628 emit_move_insn (ptr_reg, frame_reg_rtx);
22630 ptr_off = -end_save;
22631 insn = rs6000_emit_savres_rtx (info, scratch_reg,
22632 info->altivec_save_offset + ptr_off,
22633 0, V4SImode, SAVRES_VR);
22634 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
22636 /* Frame reg was clobbered by out-of-line save. Restore it
22637 from ptr_reg, and if we are calling an out-of-line gpr or
22638 fpr restore, set up the correct pointer and offset. */
22639 unsigned newptr_regno = 1;
22640 if (!restoring_GPRs_inline)
22642 bool lr = info->gp_save_offset + info->gp_size == 0;
22643 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
22644 newptr_regno = ptr_regno_for_savres (sel);
22645 end_save = info->gp_save_offset + info->gp_size;
22647 else if (!restoring_FPRs_inline)
22649 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
22650 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
22651 newptr_regno = ptr_regno_for_savres (sel);
22652 end_save = info->gp_save_offset + info->gp_size;
22655 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
22656 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
22658 if (end_save + ptr_off != 0)
22660 rtx offset = GEN_INT (end_save + ptr_off);
22662 frame_off = -end_save;
22663 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
22665 else
22667 frame_off = ptr_off;
22668 emit_move_insn (frame_reg_rtx, ptr_reg);
22672 else
22674 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
22675 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22677 rtx addr, areg, mem, reg;
22679 areg = gen_rtx_REG (Pmode, 0);
22680 emit_move_insn
22681 (areg, GEN_INT (info->altivec_save_offset
22682 + frame_off
22683 + 16 * (i - info->first_altivec_reg_save)));
22685 /* AltiVec addressing mode is [reg+reg]. */
22686 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
22687 mem = gen_frame_mem (V4SImode, addr);
22689 reg = gen_rtx_REG (V4SImode, i);
22690 emit_move_insn (reg, mem);
22694 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
22695 if (((strategy & REST_INLINE_VRS) == 0
22696 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
22697 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
22699 rtx reg = gen_rtx_REG (V4SImode, i);
22700 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22704 /* Restore VRSAVE if we have not done so already. */
22705 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22706 && TARGET_ALTIVEC
22707 && TARGET_ALTIVEC_VRSAVE
22708 && info->vrsave_mask != 0
22709 && (DEFAULT_ABI == ABI_V4
22710 || !offset_below_red_zone_p (info->vrsave_save_offset)))
22712 rtx reg;
22714 reg = gen_rtx_REG (SImode, 12);
22715 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22716 info->vrsave_save_offset + frame_off));
22718 emit_insn (generate_set_vrsave (reg, info, 1));
22721 /* If we exit by an out-of-line restore function on ABI_V4 then that
22722 function will deallocate the stack, so we don't need to worry
22723 about the unwinder restoring cr from an invalid stack frame
22724 location. */
22725 exit_func = (!restoring_FPRs_inline
22726 || (!restoring_GPRs_inline
22727 && info->first_fp_reg_save == 64));
22729 /* Get the old lr if we saved it. If we are restoring registers
22730 out-of-line, then the out-of-line routines can do this for us. */
22731 if (restore_lr && restoring_GPRs_inline)
22732 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
22734 /* Get the old cr if we saved it. */
22735 if (info->cr_save_p)
22737 unsigned cr_save_regno = 12;
22739 if (!restoring_GPRs_inline)
22741 /* Ensure we don't use the register used by the out-of-line
22742 gpr register restore below. */
22743 bool lr = info->gp_save_offset + info->gp_size == 0;
22744 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
22745 int gpr_ptr_regno = ptr_regno_for_savres (sel);
22747 if (gpr_ptr_regno == 12)
22748 cr_save_regno = 11;
22749 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
22751 else if (REGNO (frame_reg_rtx) == 12)
22752 cr_save_regno = 11;
22754 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
22755 info->cr_save_offset + frame_off,
22756 exit_func);
22759 /* Set LR here to try to overlap restores below. */
22760 if (restore_lr && restoring_GPRs_inline)
22761 restore_saved_lr (0, exit_func);
22763 /* Load exception handler data registers, if needed. */
22764 if (crtl->calls_eh_return)
22766 unsigned int i, regno;
22768 if (TARGET_AIX)
22770 rtx reg = gen_rtx_REG (reg_mode, 2);
22771 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22772 frame_off + 5 * reg_size));
22775 for (i = 0; ; ++i)
22777 rtx mem;
22779 regno = EH_RETURN_DATA_REGNO (i);
22780 if (regno == INVALID_REGNUM)
22781 break;
22783 /* Note: possible use of r0 here to address SPE regs. */
22784 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
22785 info->ehrd_offset + frame_off
22786 + reg_size * (int) i);
22788 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
22792 /* Restore GPRs. This is done as a PARALLEL if we are using
22793 the load-multiple instructions. */
22794 if (TARGET_SPE_ABI
22795 && info->spe_64bit_regs_used
22796 && info->first_gp_reg_save != 32)
22798 /* Determine whether we can address all of the registers that need
22799 to be saved with an offset from frame_reg_rtx that fits in
22800 the small const field for SPE memory instructions. */
22801 int spe_regs_addressable
22802 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
22803 + reg_size * (32 - info->first_gp_reg_save - 1))
22804 && restoring_GPRs_inline);
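/* The restriction exists because evldd/evstdd encode their offset in
   a 5-bit field scaled by 8; assuming the usual definition of
   SPE_CONST_OFFSET_OK, only offsets 0..248 from the base register
   are directly addressable.  */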
22806 if (!spe_regs_addressable)
22808 int ool_adjust = 0;
22809 rtx old_frame_reg_rtx = frame_reg_rtx;
22810 /* Make r11 point to the start of the SPE save area. We worried about
22811 not clobbering it when we were saving registers in the prologue.
22812 There's no need to worry here because the static chain is passed
22813 anew to every function. */
22815 if (!restoring_GPRs_inline)
22816 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
22817 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22818 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
22819 GEN_INT (info->spe_gp_save_offset
22820 + frame_off
22821 - ool_adjust)));
22822 /* Keep the invariant that frame_reg_rtx + frame_off points
22823 at the top of the stack frame. */
22824 frame_off = -info->spe_gp_save_offset + ool_adjust;
22827 if (restoring_GPRs_inline)
22829 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
22831 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22832 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
22834 rtx offset, addr, mem, reg;
22836 /* We're doing all this to ensure that the immediate offset
22837 fits into the immediate field of 'evldd'. */
22838 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
22840 offset = GEN_INT (spe_offset + reg_size * i);
22841 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
22842 mem = gen_rtx_MEM (V2SImode, addr);
22843 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
22845 emit_move_insn (reg, mem);
22848 else
22849 rs6000_emit_savres_rtx (info, frame_reg_rtx,
22850 info->spe_gp_save_offset + frame_off,
22851 info->lr_save_offset + frame_off,
22852 reg_mode,
22853 SAVRES_GPR | SAVRES_LR);
22855 else if (!restoring_GPRs_inline)
22857 /* We are jumping to an out-of-line function. */
22858 rtx ptr_reg;
22859 int end_save = info->gp_save_offset + info->gp_size;
22860 bool can_use_exit = end_save == 0;
22861 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
22862 int ptr_off;
22864 /* Emit stack reset code if we need it. */
22865 ptr_regno = ptr_regno_for_savres (sel);
22866 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
22867 if (can_use_exit)
22868 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
22869 else if (end_save + frame_off != 0)
22870 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
22871 GEN_INT (end_save + frame_off)));
22872 else if (REGNO (frame_reg_rtx) != ptr_regno)
22873 emit_move_insn (ptr_reg, frame_reg_rtx);
22874 if (REGNO (frame_reg_rtx) == ptr_regno)
22875 frame_off = -end_save;
22877 if (can_use_exit && info->cr_save_p)
22878 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
22880 ptr_off = -end_save;
22881 rs6000_emit_savres_rtx (info, ptr_reg,
22882 info->gp_save_offset + ptr_off,
22883 info->lr_save_offset + ptr_off,
22884 reg_mode, sel);
22886 else if (using_load_multiple)
22888 rtvec p;
22889 p = rtvec_alloc (32 - info->first_gp_reg_save);
22890 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22891 RTVEC_ELT (p, i)
22892 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
22893 frame_reg_rtx,
22894 info->gp_save_offset + frame_off + reg_size * i);
22895 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
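/* On 32-bit targets this PARALLEL matches the load-multiple pattern,
   so with e.g. first_gp_reg_save == 25 it becomes the single insn
     lmw r25,<gp_save_offset>(r11)
   restoring r25..r31 in one go.  */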
22897 else
22899 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22900 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
22901 emit_insn (gen_frame_load
22902 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
22903 frame_reg_rtx,
22904 info->gp_save_offset + frame_off + reg_size * i));
22907 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
22909 /* If the frame pointer was used then we can't delay emitting
22910 a REG_CFA_DEF_CFA note. This must happen on the insn that
22911 restores the frame pointer, r31. We may have already emitted
22912 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
22913 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
22914 be harmless if emitted. */
22915 if (frame_pointer_needed)
22917 insn = get_last_insn ();
22918 add_reg_note (insn, REG_CFA_DEF_CFA,
22919 plus_constant (Pmode, frame_reg_rtx, frame_off));
22920 RTX_FRAME_RELATED_P (insn) = 1;
22923 /* Set up cfa_restores. We always need these when
22924 shrink-wrapping. If not shrink-wrapping then we only need
22925 the cfa_restore when the stack location is no longer valid.
22926 The cfa_restores must be emitted on or before the insn that
22927 invalidates the stack, and of course must not be emitted
22928 before the insn that actually does the restore. The latter
22929 is why it is a bad idea to emit the cfa_restores as a group
22930 on the last instruction here that actually does a restore:
22931 that insn may be reordered with respect to others doing
22932 restores. */
22933 if (flag_shrink_wrap
22934 && !restoring_GPRs_inline
22935 && info->first_fp_reg_save == 64)
22936 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
22938 for (i = info->first_gp_reg_save; i < 32; i++)
22939 if (!restoring_GPRs_inline
22940 || using_load_multiple
22941 || rs6000_reg_live_or_pic_offset_p (i))
22943 rtx reg = gen_rtx_REG (reg_mode, i);
22945 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22949 if (!restoring_GPRs_inline
22950 && info->first_fp_reg_save == 64)
22952 /* We are jumping to an out-of-line function. */
22953 if (cfa_restores)
22954 emit_cfa_restores (cfa_restores);
22955 return;
22958 if (restore_lr && !restoring_GPRs_inline)
22960 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
22961 restore_saved_lr (0, exit_func);
22964 /* Restore fpr's if we need to do it without calling a function. */
22965 if (restoring_FPRs_inline)
22966 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
22967 if (save_reg_p (info->first_fp_reg_save + i))
22969 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
22970 ? DFmode : SFmode),
22971 info->first_fp_reg_save + i);
22972 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22973 info->fp_save_offset + frame_off + 8 * i));
22974 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
22975 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22978 /* If we saved cr, restore it here, just those fields that were used. */
22979 if (info->cr_save_p)
22980 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
22982 /* If this is V.4, unwind the stack pointer after all of the loads
22983 have been done, or set up r11 if we are restoring fp out of line. */
22984 ptr_regno = 1;
22985 if (!restoring_FPRs_inline)
22987 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
22988 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
22989 ptr_regno = ptr_regno_for_savres (sel);
22992 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
22993 if (REGNO (frame_reg_rtx) == ptr_regno)
22994 frame_off = 0;
22996 if (insn && restoring_FPRs_inline)
22998 if (cfa_restores)
23000 REG_NOTES (insn) = cfa_restores;
23001 cfa_restores = NULL_RTX;
23003 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
23004 RTX_FRAME_RELATED_P (insn) = 1;
23007 if (crtl->calls_eh_return)
23009 rtx sa = EH_RETURN_STACKADJ_RTX;
23010 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
23013 if (!sibcall)
23015 rtvec p;
23016 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
23017 if (! restoring_FPRs_inline)
23019 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
23020 RTVEC_ELT (p, 0) = ret_rtx;
23022 else
23024 if (cfa_restores)
23026 /* We can't hang the cfa_restores off a simple return,
23027 since the shrink-wrap code sometimes uses an existing
23028 return. This means there might be a path from
23029 pre-prologue code to this return, and dwarf2cfi code
23030 wants the eh_frame unwinder state to be the same on
23031 all paths to any point. So we need to emit the
23032 cfa_restores before the return. For -m64 we really
23033 don't need epilogue cfa_restores at all, except for
23034 this irritating dwarf2cfi with shrink-wrap
23035 requirement; the stack red zone means eh_frame info
23036 from the prologue telling the unwinder to restore
23037 from the stack is perfectly good right to the end of
23038 the function. */
23039 emit_insn (gen_blockage ());
23040 emit_cfa_restores (cfa_restores);
23041 cfa_restores = NULL_RTX;
23043 p = rtvec_alloc (2);
23044 RTVEC_ELT (p, 0) = simple_return_rtx;
23047 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
23048 ? gen_rtx_USE (VOIDmode,
23049 gen_rtx_REG (Pmode, LR_REGNO))
23050 : gen_rtx_CLOBBER (VOIDmode,
23051 gen_rtx_REG (Pmode, LR_REGNO)));
23053 /* If we have to restore more than two FP registers, branch to the
23054 restore function. It will return to our caller. */
23055 if (! restoring_FPRs_inline)
23057 int i;
23058 rtx sym;
23060 if (flag_shrink_wrap)
23061 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
23063 sym = rs6000_savres_routine_sym (info,
23064 SAVRES_FPR | (lr ? SAVRES_LR : 0));
23065 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
23066 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
23067 gen_rtx_REG (Pmode,
23068 DEFAULT_ABI == ABI_AIX
23069 ? 1 : 11));
23070 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23072 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
23074 RTVEC_ELT (p, i + 4)
23075 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
23076 if (flag_shrink_wrap)
23077 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
23078 cfa_restores);
23082 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
23085 if (cfa_restores)
23087 if (sibcall)
23088 /* Ensure the cfa_restores are hung off an insn that won't
23089 be reordered above other restores. */
23090 emit_insn (gen_blockage ());
23092 emit_cfa_restores (cfa_restores);
23096 /* Write function epilogue. */
23098 static void
23099 rs6000_output_function_epilogue (FILE *file,
23100 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
23102 #if TARGET_MACHO
23103 macho_branch_islands ();
23104 /* Mach-O doesn't support labels at the end of objects, so if
23105 it looks like we might want one, insert a NOP. */
23107 rtx insn = get_last_insn ();
23108 rtx deleted_debug_label = NULL_RTX;
23109 while (insn
23110 && NOTE_P (insn)
23111 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
23113 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
23114 notes only, instead set their CODE_LABEL_NUMBER to -1,
23115 otherwise there would be code generation differences
23116 in between -g and -g0. */
23117 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
23118 deleted_debug_label = insn;
23119 insn = PREV_INSN (insn);
23121 if (insn
23122 && (LABEL_P (insn)
23123 || (NOTE_P (insn)
23124 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
23125 fputs ("\tnop\n", file);
23126 else if (deleted_debug_label)
23127 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
23128 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
23129 CODE_LABEL_NUMBER (insn) = -1;
23131 #endif
23133 /* Output a traceback table here. See /usr/include/sys/debug.h for info
23134 on its format.
23136 We don't output a traceback table if -finhibit-size-directive was
23137 used. The documentation for -finhibit-size-directive reads
23138 ``don't output a @code{.size} assembler directive, or anything
23139 else that would cause trouble if the function is split in the
23140 middle, and the two halves are placed at locations far apart in
23141 memory.'' The traceback table has this property, since it
23142 includes the offset from the start of the function to the
23143 traceback table itself.
23145 System V.4 PowerPC (and the embedded ABI derived from it) uses a
23146 different traceback table. */
23147 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
23148 && rs6000_traceback != traceback_none && !cfun->is_thunk)
23150 const char *fname = NULL;
23151 const char *language_string = lang_hooks.name;
23152 int fixed_parms = 0, float_parms = 0, parm_info = 0;
23153 int i;
23154 int optional_tbtab;
23155 rs6000_stack_t *info = rs6000_stack_info ();
23157 if (rs6000_traceback == traceback_full)
23158 optional_tbtab = 1;
23159 else if (rs6000_traceback == traceback_part)
23160 optional_tbtab = 0;
23161 else
23162 optional_tbtab = !optimize_size && !TARGET_ELF;
23164 if (optional_tbtab)
23166 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
23167 while (*fname == '.') /* V.4 encodes . in the name */
23168 fname++;
23170 /* Need label immediately before tbtab, so we can compute
23171 its offset from the function start. */
23172 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
23173 ASM_OUTPUT_LABEL (file, fname);
23176 /* The .tbtab pseudo-op can only be used for the first eight
23177 expressions, since it can't handle the possibly variable
23178 length fields that follow. However, if you omit the optional
23179 fields, the assembler outputs zeros for all optional fields
23180 anyway, giving each variable-length field its minimum length
23181 (as defined in sys/debug.h). Thus we cannot use the .tbtab
23182 pseudo-op at all. */
23184 /* An all-zero word flags the start of the tbtab, for debuggers
23185 that have to find it by searching forward from the entry
23186 point or from the current pc. */
23187 fputs ("\t.long 0\n", file);
23189 /* Tbtab format type. Use format type 0. */
23190 fputs ("\t.byte 0,", file);
23192 /* Language type. Unfortunately, there does not seem to be any
23193 official way to discover the language being compiled, so we
23194 use language_string.
23195 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
23196 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
23197 a number, so for now use 9. LTO and Go aren't assigned numbers
23198 either, so for now use 0. */
23199 if (! strcmp (language_string, "GNU C")
23200 || ! strcmp (language_string, "GNU GIMPLE")
23201 || ! strcmp (language_string, "GNU Go"))
23202 i = 0;
23203 else if (! strcmp (language_string, "GNU F77")
23204 || ! strcmp (language_string, "GNU Fortran"))
23205 i = 1;
23206 else if (! strcmp (language_string, "GNU Pascal"))
23207 i = 2;
23208 else if (! strcmp (language_string, "GNU Ada"))
23209 i = 3;
23210 else if (! strcmp (language_string, "GNU C++")
23211 || ! strcmp (language_string, "GNU Objective-C++"))
23212 i = 9;
23213 else if (! strcmp (language_string, "GNU Java"))
23214 i = 13;
23215 else if (! strcmp (language_string, "GNU Objective-C"))
23216 i = 14;
23217 else
23218 gcc_unreachable ();
23219 fprintf (file, "%d,", i);
23221 /* 8 single-bit fields: global linkage (not set for C extern linkage,
23222 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
23223 from start of procedure stored in tbtab, internal function, function
23224 has controlled storage, function has no toc, function uses fp,
23225 function logs/aborts fp operations. */
23226 /* Assume that fp operations are used if any fp reg must be saved. */
23227 fprintf (file, "%d,",
23228 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
23230 /* 6 bitfields: function is interrupt handler, name present in
23231 proc table, function calls alloca, on condition directives
23232 (controls stack walks, 3 bits), saves condition reg, saves
23233 link reg. */
23234 /* The `function calls alloca' bit seems to be set whenever reg 31 is
23235 set up as a frame pointer, even when there is no alloca call. */
23236 fprintf (file, "%d,",
23237 ((optional_tbtab << 6)
23238 | ((optional_tbtab & frame_pointer_needed) << 5)
23239 | (info->cr_save_p << 1)
23240 | (info->lr_save_p)));
23242 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
23243 (6 bits). */
23244 fprintf (file, "%d,",
23245 (info->push_p << 7) | (64 - info->first_fp_reg_save));
23247 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
23248 fprintf (file, "%d,", (32 - first_reg_to_save ()));
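/* A worked example of the four bytes above, for a hypothetical
   function with full traceback, f26..f31, cr and lr saved, a pushed
   frame, no frame pointer, and r26 the first saved gpr:
     (1 << 5) | (1 << 1)      -> 34   optional tbtab, fp used
     (1 << 6) | (1 << 1) | 1  -> 67   tbtab, cr saved, lr saved
     (1 << 7) | (64 - 58)     -> 134  backchain pushed, 6 fprs
     32 - 26                  -> 6    gprs saved  */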
23250 if (optional_tbtab)
23252 /* Compute the parameter info from the function decl argument
23253 list. */
23254 tree decl;
23255 int next_parm_info_bit = 31;
23257 for (decl = DECL_ARGUMENTS (current_function_decl);
23258 decl; decl = DECL_CHAIN (decl))
23260 rtx parameter = DECL_INCOMING_RTL (decl);
23261 enum machine_mode mode = GET_MODE (parameter);
23263 if (GET_CODE (parameter) == REG)
23265 if (SCALAR_FLOAT_MODE_P (mode))
23267 int bits;
23269 float_parms++;
23271 switch (mode)
23273 case SFmode:
23274 case SDmode:
23275 bits = 0x2;
23276 break;
23278 case DFmode:
23279 case DDmode:
23280 case TFmode:
23281 case TDmode:
23282 bits = 0x3;
23283 break;
23285 default:
23286 gcc_unreachable ();
23289 /* If only one bit will fit, don't or in this entry. */
23290 if (next_parm_info_bit > 0)
23291 parm_info |= (bits << (next_parm_info_bit - 1));
23292 next_parm_info_bit -= 2;
23294 else
23296 fixed_parms += ((GET_MODE_SIZE (mode)
23297 + (UNITS_PER_WORD - 1))
23298 / UNITS_PER_WORD);
23299 next_parm_info_bit -= 1;
23305 /* Number of fixed point parameters. */
23306 /* This is actually the number of words of fixed point parameters; thus
23307 an 8-byte struct counts as 2, and the maximum value is 8. */
23308 fprintf (file, "%d,", fixed_parms);
23310 /* 2 bitfields: number of floating point parameters (7 bits), parameters
23311 all on stack. */
23312 /* This is actually the number of fp registers that hold parameters;
23313 and thus the maximum value is 13. */
23314 /* Set the parameters-on-stack bit if parameters are not in their
23315 original registers, regardless of whether they are on the stack? Xlc
23316 seems to set the bit when not optimizing. */
23317 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
23319 if (! optional_tbtab)
23320 return;
23322 /* Optional fields follow. Some are variable length. */
23324 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
23325 11 double float. */
23326 /* There is an entry for each parameter in a register, in the order that
23327 they occur in the parameter list. Any intervening arguments on the
23328 stack are ignored. If the list overflows a long (max possible length
23329 34 bits) then completely leave off all elements that don't fit. */
23330 /* Only emit this long if there was at least one parameter. */
23331 if (fixed_parms || float_parms)
23332 fprintf (file, "\t.long %d\n", parm_info);
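/* Example encoding for f (double, int, float), filled from bit 31
   down: the double puts 0b11 at bits 31:30, the int occupies bit 29
   as 0, and the float puts 0b10 at bits 28:27, so parm_info is
   0xD0000000.  */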
23334 /* Offset from start of code to tb table. */
23335 fputs ("\t.long ", file);
23336 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
23337 RS6000_OUTPUT_BASENAME (file, fname);
23338 putc ('-', file);
23339 rs6000_output_function_entry (file, fname);
23340 putc ('\n', file);
23342 /* Interrupt handler mask. */
23343 /* Omit this long, since we never set the interrupt handler bit
23344 above. */
23346 /* Number of CTL (controlled storage) anchors. */
23347 /* Omit this long, since the has_ctl bit is never set above. */
23349 /* Displacement into stack of each CTL anchor. */
23350 /* Omit this list of longs, because there are no CTL anchors. */
23352 /* Length of function name. */
23353 if (*fname == '*')
23354 ++fname;
23355 fprintf (file, "\t.short %d\n", (int) strlen (fname));
23357 /* Function name. */
23358 assemble_string (fname, strlen (fname));
23360 /* Register for alloca automatic storage; this is always reg 31.
23361 Only emit this if the alloca bit was set above. */
23362 if (frame_pointer_needed)
23363 fputs ("\t.byte 31\n", file);
23365 fputs ("\t.align 2\n", file);
23369 /* A C compound statement that outputs the assembler code for a thunk
23370 function, used to implement C++ virtual function calls with
23371 multiple inheritance. The thunk acts as a wrapper around a virtual
23372 function, adjusting the implicit object parameter before handing
23373 control off to the real function.
23375 First, emit code to add the integer DELTA to the location that
23376 contains the incoming first argument. Assume that this argument
23377 contains a pointer, and is the one used to pass the `this' pointer
23378 in C++. This is the incoming argument *before* the function
23379 prologue, e.g. `%o0' on a sparc. The addition must preserve the
23380 values of all other incoming arguments.
23382 After the addition, emit code to jump to FUNCTION, which is a
23383 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
23384 not touch the return address. Hence returning from FUNCTION will
23385 return to whoever called the current `thunk'.
23387 The effect must be as if FUNCTION had been called directly with the
23388 adjusted first argument. This macro is responsible for emitting
23389 all of the code for a thunk function; output_function_prologue()
23390 and output_function_epilogue() are not invoked.
23392 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
23393 been extracted from it.) It might possibly be useful on some
23394 targets, but probably not.
23396 If you do not define this macro, the target-independent code in the
23397 C++ frontend will generate a less efficient heavyweight thunk that
23398 calls FUNCTION instead of jumping to it. The generic approach does
23399 not support varargs. */
23401 static void
23402 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
23403 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
23404 tree function)
23406 rtx this_rtx, insn, funexp;
23408 reload_completed = 1;
23409 epilogue_completed = 1;
23411 /* Mark the end of the (empty) prologue. */
23412 emit_note (NOTE_INSN_PROLOGUE_END);
23414 /* Find the "this" pointer. If the function returns a structure,
23415 the structure return pointer is in r3. */
23416 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
23417 this_rtx = gen_rtx_REG (Pmode, 4);
23418 else
23419 this_rtx = gen_rtx_REG (Pmode, 3);
23421 /* Apply the constant offset, if required. */
23422 if (delta)
23423 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
23425 /* Apply the offset from the vtable, if required. */
23426 if (vcall_offset)
23428 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
23429 rtx tmp = gen_rtx_REG (Pmode, 12);
23431 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
23432 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
23434 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
23435 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
23437 else
23439 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
23441 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
23443 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
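/* In C terms, the adjustment just emitted computes (a sketch):
     this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
   i.e. fetch the vtable pointer, read the offset stored at
   vcall_offset inside the vtable, and bump "this" by that amount.  */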
23446 /* Generate a tail call to the target function. */
23447 if (!TREE_USED (function))
23449 assemble_external (function);
23450 TREE_USED (function) = 1;
23452 funexp = XEXP (DECL_RTL (function), 0);
23453 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
23455 #if TARGET_MACHO
23456 if (MACHOPIC_INDIRECT)
23457 funexp = machopic_indirect_call_target (funexp);
23458 #endif
23460 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
23461 generate sibcall RTL explicitly. */
23462 insn = emit_call_insn (
23463 gen_rtx_PARALLEL (VOIDmode,
23464 gen_rtvec (4,
23465 gen_rtx_CALL (VOIDmode,
23466 funexp, const0_rtx),
23467 gen_rtx_USE (VOIDmode, const0_rtx),
23468 gen_rtx_USE (VOIDmode,
23469 gen_rtx_REG (SImode,
23470 LR_REGNO)),
23471 simple_return_rtx)));
23472 SIBLING_CALL_P (insn) = 1;
23473 emit_barrier ();
23475 /* Run just enough of rest_of_compilation to get the insns emitted.
23476 There's not really enough bulk here to make other passes such as
23477 instruction scheduling worthwhile. Note that use_thunk calls
23478 assemble_start_function and assemble_end_function. */
23479 insn = get_insns ();
23480 shorten_branches (insn);
23481 final_start_function (insn, file, 1);
23482 final (insn, file, 1);
23483 final_end_function ();
23485 reload_completed = 0;
23486 epilogue_completed = 0;
23489 /* A quick summary of the various types of 'constant-pool tables'
23490 under PowerPC:
23492 Target      Flags           Name             One table per
23493 AIX         (none)          AIX TOC          object file
23494 AIX         -mfull-toc      AIX TOC          object file
23495 AIX         -mminimal-toc   AIX minimal TOC  translation unit
23496 SVR4/EABI   (none)          SVR4 SDATA       object file
23497 SVR4/EABI   -fpic           SVR4 pic         object file
23498 SVR4/EABI   -fPIC           SVR4 PIC         translation unit
23499 SVR4/EABI   -mrelocatable   EABI TOC         function
23500 SVR4/EABI   -maix           AIX TOC          object file
23501 SVR4/EABI   -maix -mminimal-toc
23502                             AIX minimal TOC  translation unit
23504 Name             Reg.  Set by   entries  contains:
23505                                 made by  addrs?  fp?      sum?
23507 AIX TOC          2     crt0     as       Y       option   option
23508 AIX minimal TOC  30    prolog   gcc      Y       Y        option
23509 SVR4 SDATA       13    crt0     gcc      N       Y        N
23510 SVR4 pic         30    prolog   ld       Y       not yet  N
23511 SVR4 PIC         30    prolog   gcc      Y       option   option
23512 EABI TOC         30    prolog   gcc      Y       option   option  */
23516 /* Hash functions for the hash table. */
23518 static unsigned
23519 rs6000_hash_constant (rtx k)
23521 enum rtx_code code = GET_CODE (k);
23522 enum machine_mode mode = GET_MODE (k);
23523 unsigned result = (code << 3) ^ mode;
23524 const char *format;
23525 int flen, fidx;
23527 format = GET_RTX_FORMAT (code);
23528 flen = strlen (format);
23529 fidx = 0;
23531 switch (code)
23533 case LABEL_REF:
23534 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
23536 case CONST_DOUBLE:
23537 if (mode != VOIDmode)
23538 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
23539 flen = 2;
23540 break;
23542 case CODE_LABEL:
23543 fidx = 3;
23544 break;
23546 default:
23547 break;
23550 for (; fidx < flen; fidx++)
23551 switch (format[fidx])
23553 case 's':
23555 unsigned i, len;
23556 const char *str = XSTR (k, fidx);
23557 len = strlen (str);
23558 result = result * 613 + len;
23559 for (i = 0; i < len; i++)
23560 result = result * 613 + (unsigned) str[i];
23561 break;
23563 case 'u':
23564 case 'e':
23565 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
23566 break;
23567 case 'i':
23568 case 'n':
23569 result = result * 613 + (unsigned) XINT (k, fidx);
23570 break;
23571 case 'w':
23572 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
23573 result = result * 613 + (unsigned) XWINT (k, fidx);
23574 else
23576 size_t i;
23577 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
23578 result = result * 613 + (unsigned) (XWINT (k, fidx)
23579 >> CHAR_BIT * i);
23581 break;
23582 case '0':
23583 break;
23584 default:
23585 gcc_unreachable ();
23588 return result;
23591 static unsigned
23592 toc_hash_function (const void *hash_entry)
23594 const struct toc_hash_struct *thc =
23595 (const struct toc_hash_struct *) hash_entry;
23596 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
23599 /* Compare H1 and H2 for equivalence. */
23601 static int
23602 toc_hash_eq (const void *h1, const void *h2)
23604 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
23605 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
23607 if (((const struct toc_hash_struct *) h1)->key_mode
23608 != ((const struct toc_hash_struct *) h2)->key_mode)
23609 return 0;
23611 return rtx_equal_p (r1, r2);
23614 /* These are the names given by the C++ front-end to vtables and
23615 vtable-like objects. Ideally, this logic should not be here;
23616 instead, there should be some programmatic way of inquiring as
23617 to whether or not an object is a vtable. */
23619 #define VTABLE_NAME_P(NAME) \
23620 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
23621 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
23622 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
23623 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
23624 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
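/* For instance, under the Itanium C++ ABI the vtable for "class Base"
   is mangled "_ZTV4Base" and its typeinfo "_ZTI4Base"; both match the
   prefix tests above.  The "_vt." form covers the pre-standard g++
   mangling.  */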
23626 #ifdef NO_DOLLAR_IN_LABEL
23627 /* Return a GGC-allocated character string translating dollar signs in
23628 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
23630 const char *
23631 rs6000_xcoff_strip_dollar (const char *name)
23633 char *strip, *p;
23634 const char *q;
23635 size_t len;
23637 q = (const char *) strchr (name, '$');
23639 if (q == 0 || q == name)
23640 return name;
23642 len = strlen (name);
23643 strip = XALLOCAVEC (char, len + 1);
23644 strcpy (strip, name);
23645 p = strip + (q - name);
23646 while (p)
23648 *p = '_';
23649 p = strchr (p + 1, '$');
23652 return ggc_alloc_string (strip, len);
23654 #endif
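/* For example, rs6000_xcoff_strip_dollar ("foo$bar$baz") returns
   "foo_bar_baz"; a NAME without '$', or one whose first character is
   '$', is returned unchanged.  */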
23656 void
23657 rs6000_output_symbol_ref (FILE *file, rtx x)
23659 /* Currently C++ toc references to vtables can be emitted before it
23660 is decided whether the vtable is public or private. If this is
23661 the case, then the linker will eventually complain that there is
23662 a reference to an unknown section. Thus, for vtables only,
23663 we emit the TOC reference to reference the symbol and not the
23664 section. */
23665 const char *name = XSTR (x, 0);
23667 if (VTABLE_NAME_P (name))
23669 RS6000_OUTPUT_BASENAME (file, name);
23671 else
23672 assemble_name (file, name);
23675 /* Output a TOC entry. We derive the entry name from what is being
23676 written. */
23678 void
23679 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
23681 char buf[256];
23682 const char *name = buf;
23683 rtx base = x;
23684 HOST_WIDE_INT offset = 0;
23686 gcc_assert (!TARGET_NO_TOC);
23688 /* When the linker won't eliminate them, don't output duplicate
23689 TOC entries (this happens on AIX if there is any kind of TOC,
23690 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
23691 CODE_LABELs. */
23692 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
23694 struct toc_hash_struct *h;
23695 void * * found;
23697 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
23698 time because GGC is not initialized at that point. */
23699 if (toc_hash_table == NULL)
23700 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
23701 toc_hash_eq, NULL);
23703 h = ggc_alloc_toc_hash_struct ();
23704 h->key = x;
23705 h->key_mode = mode;
23706 h->labelno = labelno;
23708 found = htab_find_slot (toc_hash_table, h, INSERT);
23709 if (*found == NULL)
23710 *found = h;
23711 else /* This is indeed a duplicate.
23712 Set this label equal to that label. */
23714 fputs ("\t.set ", file);
23715 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
23716 fprintf (file, "%d,", labelno);
23717 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
23718 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
23719 found)->labelno));
23721 #ifdef HAVE_AS_TLS
23722 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
23723 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
23724 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
23726 fputs ("\t.set ", file);
23727 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
23728 fprintf (file, "%d,", labelno);
23729 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
23730 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
23731 found)->labelno));
23733 #endif
23734 return;
23738 /* If we're going to put a double constant in the TOC, make sure it's
23739 aligned properly when strict alignment is on. */
23740 if (GET_CODE (x) == CONST_DOUBLE
23741 && STRICT_ALIGNMENT
23742 && GET_MODE_BITSIZE (mode) >= 64
23743 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
23744 ASM_OUTPUT_ALIGN (file, 3);
23747 (*targetm.asm_out.internal_label) (file, "LC", labelno);
23749 /* Handle FP constants specially. Note that if we have a minimal
23750 TOC, things we put here aren't actually in the TOC, so we can allow
23751 FP constants. */
23752 if (GET_CODE (x) == CONST_DOUBLE &&
23753 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
23755 REAL_VALUE_TYPE rv;
23756 long k[4];
23758 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
23759 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
23760 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
23761 else
23762 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
23764 if (TARGET_64BIT)
23766 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23767 fputs (DOUBLE_INT_ASM_OP, file);
23768 else
23769 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
23770 k[0] & 0xffffffff, k[1] & 0xffffffff,
23771 k[2] & 0xffffffff, k[3] & 0xffffffff);
23772 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
23773 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
23774 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
23775 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
23776 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
23777 return;
23779 else
23781 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23782 fputs ("\t.long ", file);
23783 else
23784 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
23785 k[0] & 0xffffffff, k[1] & 0xffffffff,
23786 k[2] & 0xffffffff, k[3] & 0xffffffff);
23787 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
23788 k[0] & 0xffffffff, k[1] & 0xffffffff,
23789 k[2] & 0xffffffff, k[3] & 0xffffffff);
23790 return;
23793 else if (GET_CODE (x) == CONST_DOUBLE &&
23794 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
23796 REAL_VALUE_TYPE rv;
23797 long k[2];
23799 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
23801 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
23802 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
23803 else
23804 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
23806 if (TARGET_64BIT)
23808 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23809 fputs (DOUBLE_INT_ASM_OP, file);
23810 else
23811 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
23812 k[0] & 0xffffffff, k[1] & 0xffffffff);
23813 fprintf (file, "0x%lx%08lx\n",
23814 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
23815 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
23816 return;
23818 else
23820 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23821 fputs ("\t.long ", file);
23822 else
23823 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
23824 k[0] & 0xffffffff, k[1] & 0xffffffff);
23825 fprintf (file, "0x%lx,0x%lx\n",
23826 k[0] & 0xffffffff, k[1] & 0xffffffff);
23827 return;
23830 else if (GET_CODE (x) == CONST_DOUBLE &&
23831 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
23833 REAL_VALUE_TYPE rv;
23834 long l;
23836 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
23837 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
23838 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
23839 else
23840 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
23842 if (TARGET_64BIT)
23844 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23845 fputs (DOUBLE_INT_ASM_OP, file);
23846 else
23847 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
23848 if (WORDS_BIG_ENDIAN)
23849 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
23850 else
23851 fprintf (file, "0x%lx\n", l & 0xffffffff);
23852 return;
23854 else
23856 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23857 fputs ("\t.long ", file);
23858 else
23859 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
23860 fprintf (file, "0x%lx\n", l & 0xffffffff);
23861 return;
23864 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
23866 unsigned HOST_WIDE_INT low;
23867 HOST_WIDE_INT high;
23869 low = INTVAL (x) & 0xffffffff;
23870 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
23872 /* TOC entries are always Pmode-sized, so when big-endian
23873 smaller integer constants in the TOC need to be padded.
23874 (This is still a win over putting the constants in
23875 a separate constant pool, because then we'd have
23876 to have both a TOC entry _and_ the actual constant.)
23878 For a 32-bit target, CONST_INT values are loaded and shifted
23879 entirely within `low' and can be stored in one TOC entry. */
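/* Worked example: an SImode 0x12345678 on a 64-bit big-endian target
   has low = 0x12345678, high = 0; the shift below moves the value into
   the top half of the Pmode-sized slot, so the emitted entry reads
   0x1234567800000000.  */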
23881 /* Supporting modes wider than POINTER_SIZE on 64-bit targets would be easy, but isn't done now. */
23882 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
23884 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
23886 low |= high << 32;
23887 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
23888 high = (HOST_WIDE_INT) low >> 32;
23889 low &= 0xffffffff;
23892 if (TARGET_64BIT)
23894 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23895 fputs (DOUBLE_INT_ASM_OP, file);
23896 else
23897 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
23898 (long) high & 0xffffffff, (long) low & 0xffffffff);
23899 fprintf (file, "0x%lx%08lx\n",
23900 (long) high & 0xffffffff, (long) low & 0xffffffff);
23901 return;
23903 else
23905 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
23907 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23908 fputs ("\t.long ", file);
23909 else
23910 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
23911 (long) high & 0xffffffff, (long) low & 0xffffffff);
23912 fprintf (file, "0x%lx,0x%lx\n",
23913 (long) high & 0xffffffff, (long) low & 0xffffffff);
23915 else
23917 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23918 fputs ("\t.long ", file);
23919 else
23920 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
23921 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
23923 return;
23927 if (GET_CODE (x) == CONST)
23929 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
23930 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
23932 base = XEXP (XEXP (x, 0), 0);
23933 offset = INTVAL (XEXP (XEXP (x, 0), 1));
23936 switch (GET_CODE (base))
23938 case SYMBOL_REF:
23939 name = XSTR (base, 0);
23940 break;
23942 case LABEL_REF:
23943 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
23944 CODE_LABEL_NUMBER (XEXP (base, 0)));
23945 break;
23947 case CODE_LABEL:
23948 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
23949 break;
23951 default:
23952 gcc_unreachable ();
23955 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23956 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
23957 else
23959 fputs ("\t.tc ", file);
23960 RS6000_OUTPUT_BASENAME (file, name);
23962 if (offset < 0)
23963 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
23964 else if (offset)
23965 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
23967 /* Mark large TOC symbols on AIX with [TE] so they are mapped
23968 after other TOC symbols, reducing overflow of small TOC access
23969 to [TC] symbols. */
23970 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
23971 ? "[TE]," : "[TC],", file);
23974 /* Currently C++ toc references to vtables can be emitted before it
23975 is decided whether the vtable is public or private. If this is
23976 the case, then the linker will eventually complain that there is
23977 a TOC reference to an unknown section. Thus, for vtables only,
23978 we emit the TOC reference to reference the symbol and not the
23979 section. */
23980 if (VTABLE_NAME_P (name))
23982 RS6000_OUTPUT_BASENAME (file, name);
23983 if (offset < 0)
23984 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
23985 else if (offset > 0)
23986 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
23988 else
23989 output_addr_const (file, x);
23991 #if HAVE_AS_TLS
23992 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
23993 && SYMBOL_REF_TLS_MODEL (base) != 0)
23995 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
23996 fputs ("@le", file);
23997 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
23998 fputs ("@ie", file);
23999 /* Use global-dynamic for local-dynamic. */
24000 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
24001 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
24003 putc ('\n', file);
24004 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
24005 fputs ("\t.tc .", file);
24006 RS6000_OUTPUT_BASENAME (file, name);
24007 fputs ("[TC],", file);
24008 output_addr_const (file, x);
24009 fputs ("@m", file);
24012 #endif
24014 putc ('\n', file);
24017 /* Output an assembler pseudo-op to write an ASCII string of N characters
24018 starting at P to FILE.
24020 On the RS/6000, we have to do this using the .byte operation and
24021 write out special characters outside the quoted string.
24022 Also, the assembler is broken; very long strings are truncated,
24023 so we must artificially break them up early. */
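/* Illustrative call: output_ascii (file, "Hi\n", 3) produces

	.byte "Hi"
	.byte 10

   printable characters are batched into a quoted .byte string and
   everything else is emitted as decimal byte values.  */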
24025 void
24026 output_ascii (FILE *file, const char *p, int n)
24028 char c;
24029 int i, count_string;
24030 const char *for_string = "\t.byte \"";
24031 const char *for_decimal = "\t.byte ";
24032 const char *to_close = NULL;
24034 count_string = 0;
24035 for (i = 0; i < n; i++)
24037 c = *p++;
24038 if (c >= ' ' && c < 0177)
24040 if (for_string)
24041 fputs (for_string, file);
24042 putc (c, file);
24044 /* Write two quotes to get one. */
24045 if (c == '"')
24047 putc (c, file);
24048 ++count_string;
24051 for_string = NULL;
24052 for_decimal = "\"\n\t.byte ";
24053 to_close = "\"\n";
24054 ++count_string;
24056 if (count_string >= 512)
24058 fputs (to_close, file);
24060 for_string = "\t.byte \"";
24061 for_decimal = "\t.byte ";
24062 to_close = NULL;
24063 count_string = 0;
24066 else
24068 if (for_decimal)
24069 fputs (for_decimal, file);
24070 fprintf (file, "%d", c);
24072 for_string = "\n\t.byte \"";
24073 for_decimal = ", ";
24074 to_close = "\n";
24075 count_string = 0;
24079 /* Now close the string if we have written one. Then end the line. */
24080 if (to_close)
24081 fputs (to_close, file);
24084 /* Generate a unique section name for FILENAME for a section type
24085 represented by SECTION_DESC. Output goes into BUF.
24087 SECTION_DESC can be any string, as long as it is different for each
24088 possible section type.
24090 We name the section in the same manner as xlc. The name begins with an
24091 underscore followed by the filename (after stripping any leading directory
24092 names) with the last period replaced by the string SECTION_DESC. If
24093 FILENAME does not contain a period, SECTION_DESC is appended to the end of
24094 the name. */
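/* For example (inputs hypothetical), FILENAME "src/foo.c" with
   SECTION_DESC "ro_" yields "_fooro_": the directory prefix is
   dropped, '_' is prepended, and the trailing ".c" is replaced by the
   descriptor.  */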
24096 void
24097 rs6000_gen_section_name (char **buf, const char *filename,
24098 const char *section_desc)
24100 const char *q, *after_last_slash, *last_period = 0;
24101 char *p;
24102 int len;
24104 after_last_slash = filename;
24105 for (q = filename; *q; q++)
24107 if (*q == '/')
24108 after_last_slash = q + 1;
24109 else if (*q == '.')
24110 last_period = q;
24113 len = strlen (after_last_slash) + strlen (section_desc) + 2;
24114 *buf = (char *) xmalloc (len);
24116 p = *buf;
24117 *p++ = '_';
24119 for (q = after_last_slash; *q; q++)
24121 if (q == last_period)
24123 strcpy (p, section_desc);
24124 p += strlen (section_desc);
24125 break;
24128 else if (ISALNUM (*q))
24129 *p++ = *q;
24132 if (last_period == 0)
24133 strcpy (p, section_desc);
24134 else
24135 *p = '\0';
24138 /* Emit profile function. */
24140 void
24141 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
24143 /* Non-standard profiling for kernels, which just saves LR then calls
24144 _mcount without worrying about arg saves. The idea is to change
24145 the function prologue as little as possible as it isn't easy to
24146 account for arg save/restore code added just for _mcount. */
24147 if (TARGET_PROFILE_KERNEL)
24148 return;
24150 if (DEFAULT_ABI == ABI_AIX)
24152 #ifndef NO_PROFILE_COUNTERS
24153 # define NO_PROFILE_COUNTERS 0
24154 #endif
24155 if (NO_PROFILE_COUNTERS)
24156 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24157 LCT_NORMAL, VOIDmode, 0);
24158 else
24160 char buf[30];
24161 const char *label_name;
24162 rtx fun;
24164 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24165 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
24166 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
24168 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24169 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
24172 else if (DEFAULT_ABI == ABI_DARWIN)
24174 const char *mcount_name = RS6000_MCOUNT;
24175 int caller_addr_regno = LR_REGNO;
24177 /* Be conservative and always set this, at least for now. */
24178 crtl->uses_pic_offset_table = 1;
24180 #if TARGET_MACHO
24181 /* For PIC code, set up a stub and collect the caller's address
24182 from r0, which is where the prologue puts it. */
24183 if (MACHOPIC_INDIRECT
24184 && crtl->uses_pic_offset_table)
24185 caller_addr_regno = 0;
24186 #endif
24187 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
24188 LCT_NORMAL, VOIDmode, 1,
24189 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
24193 /* Write function profiler code. */
24195 void
24196 output_function_profiler (FILE *file, int labelno)
24198 char buf[100];
24200 switch (DEFAULT_ABI)
24202 default:
24203 gcc_unreachable ();
24205 case ABI_V4:
24206 if (!TARGET_32BIT)
24208 warning (0, "no profiling of 64-bit code for this ABI");
24209 return;
24211 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24212 fprintf (file, "\tmflr %s\n", reg_names[0]);
24213 if (NO_PROFILE_COUNTERS)
24215 asm_fprintf (file, "\tstw %s,4(%s)\n",
24216 reg_names[0], reg_names[1]);
24218 else if (TARGET_SECURE_PLT && flag_pic)
24220 if (TARGET_LINK_STACK)
24222 char name[32];
24223 get_ppc476_thunk_name (name);
24224 asm_fprintf (file, "\tbl %s\n", name);
24226 else
24227 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
24228 asm_fprintf (file, "\tstw %s,4(%s)\n",
24229 reg_names[0], reg_names[1]);
24230 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24231 asm_fprintf (file, "\taddis %s,%s,",
24232 reg_names[12], reg_names[12]);
24233 assemble_name (file, buf);
24234 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
24235 assemble_name (file, buf);
24236 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
24238 else if (flag_pic == 1)
24240 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
24241 asm_fprintf (file, "\tstw %s,4(%s)\n",
24242 reg_names[0], reg_names[1]);
24243 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24244 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
24245 assemble_name (file, buf);
24246 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
24248 else if (flag_pic > 1)
24250 asm_fprintf (file, "\tstw %s,4(%s)\n",
24251 reg_names[0], reg_names[1]);
24252 /* Now, we need to get the address of the label. */
24253 if (TARGET_LINK_STACK)
24255 char name[32];
24256 get_ppc476_thunk_name (name);
24257 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
24258 assemble_name (file, buf);
24259 fputs ("-.\n1:", file);
24260 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24261 asm_fprintf (file, "\taddi %s,%s,4\n",
24262 reg_names[11], reg_names[11]);
24264 else
24266 fputs ("\tbcl 20,31,1f\n\t.long ", file);
24267 assemble_name (file, buf);
24268 fputs ("-.\n1:", file);
24269 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24271 asm_fprintf (file, "\tlwz %s,0(%s)\n",
24272 reg_names[0], reg_names[11]);
24273 asm_fprintf (file, "\tadd %s,%s,%s\n",
24274 reg_names[0], reg_names[0], reg_names[11]);
24276 else
24278 asm_fprintf (file, "\tlis %s,", reg_names[12]);
24279 assemble_name (file, buf);
24280 fputs ("@ha\n", file);
24281 asm_fprintf (file, "\tstw %s,4(%s)\n",
24282 reg_names[0], reg_names[1]);
24283 asm_fprintf (file, "\tla %s,", reg_names[0]);
24284 assemble_name (file, buf);
24285 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
24288 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
24289 fprintf (file, "\tbl %s%s\n",
24290 RS6000_MCOUNT, flag_pic ? "@plt" : "");
24291 break;
24293 case ABI_AIX:
24294 case ABI_DARWIN:
24295 if (!TARGET_PROFILE_KERNEL)
24297 /* Don't do anything, done in output_profile_hook (). */
24299 else
24301 gcc_assert (!TARGET_32BIT);
24303 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
24304 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
24306 if (cfun->static_chain_decl != NULL)
24308 asm_fprintf (file, "\tstd %s,24(%s)\n",
24309 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24310 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24311 asm_fprintf (file, "\tld %s,24(%s)\n",
24312 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24314 else
24315 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24317 break;
24323 /* The following variable holds the last insn issued.  */
24325 static rtx last_scheduled_insn;
24327 /* The following variable helps balance the issuing of load and
24328 store instructions.  */
24330 static int load_store_pendulum;
24332 /* Power4 load update and store update instructions are cracked into a
24333 load or store and an integer insn which are executed in the same cycle.
24334 Branches have their own dispatch slot which does not count against the
24335 GCC issue rate, but it changes the program flow so there are no other
24336 instructions to issue in this cycle. */
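/* For example, a load-with-update such as "lwzu r9,4(r10)" is cracked
   into the load itself plus the integer update of r10, and so occupies
   two issue slots (cf. TYPE_LOAD_U in is_cracked_insn below).  */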
24338 static int
24339 rs6000_variable_issue_1 (rtx insn, int more)
24341 last_scheduled_insn = insn;
24342 if (GET_CODE (PATTERN (insn)) == USE
24343 || GET_CODE (PATTERN (insn)) == CLOBBER)
24345 cached_can_issue_more = more;
24346 return cached_can_issue_more;
24349 if (insn_terminates_group_p (insn, current_group))
24351 cached_can_issue_more = 0;
24352 return cached_can_issue_more;
24355 /* An unrecognized insn has no reservation; if we reach here anyway, don't charge it an issue slot. */
24356 if (recog_memoized (insn) < 0)
24357 return more;
24359 if (rs6000_sched_groups)
24361 if (is_microcoded_insn (insn))
24362 cached_can_issue_more = 0;
24363 else if (is_cracked_insn (insn))
24364 cached_can_issue_more = more > 2 ? more - 2 : 0;
24365 else
24366 cached_can_issue_more = more - 1;
24368 return cached_can_issue_more;
24371 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
24372 return 0;
24374 cached_can_issue_more = more - 1;
24375 return cached_can_issue_more;
24378 static int
24379 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
24381 int r = rs6000_variable_issue_1 (insn, more);
24382 if (verbose)
24383 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
24384 return r;
24387 /* Adjust the cost of a scheduling dependency. Return the new cost of
24388 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
24390 static int
24391 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
24393 enum attr_type attr_type;
24395 if (! recog_memoized (insn))
24396 return 0;
24398 switch (REG_NOTE_KIND (link))
24400 case REG_DEP_TRUE:
24402 /* Data dependency; DEP_INSN writes a register that INSN reads
24403 some cycles later. */
24405 /* Separate a load from a narrower, dependent store. */
24406 if (rs6000_sched_groups
24407 && GET_CODE (PATTERN (insn)) == SET
24408 && GET_CODE (PATTERN (dep_insn)) == SET
24409 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
24410 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
24411 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
24412 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
24413 return cost + 14;
24415 attr_type = get_attr_type (insn);
24417 switch (attr_type)
24419 case TYPE_JMPREG:
24420 /* Tell the first scheduling pass about the latency between
24421 a mtctr and bctr (and mtlr and br/blr). The first
24422 scheduling pass will not know about this latency since
24423 the mtctr instruction, which has the latency associated
24424 to it, will be generated by reload. */
24425 return 4;
24426 case TYPE_BRANCH:
24427 /* Leave some extra cycles between a compare and its
24428 dependent branch, to inhibit expensive mispredicts. */
24429 if ((rs6000_cpu_attr == CPU_PPC603
24430 || rs6000_cpu_attr == CPU_PPC604
24431 || rs6000_cpu_attr == CPU_PPC604E
24432 || rs6000_cpu_attr == CPU_PPC620
24433 || rs6000_cpu_attr == CPU_PPC630
24434 || rs6000_cpu_attr == CPU_PPC750
24435 || rs6000_cpu_attr == CPU_PPC7400
24436 || rs6000_cpu_attr == CPU_PPC7450
24437 || rs6000_cpu_attr == CPU_PPCE5500
24438 || rs6000_cpu_attr == CPU_PPCE6500
24439 || rs6000_cpu_attr == CPU_POWER4
24440 || rs6000_cpu_attr == CPU_POWER5
24441 || rs6000_cpu_attr == CPU_POWER7
24442 || rs6000_cpu_attr == CPU_POWER8
24443 || rs6000_cpu_attr == CPU_CELL)
24444 && recog_memoized (dep_insn)
24445 && (INSN_CODE (dep_insn) >= 0))
24447 switch (get_attr_type (dep_insn))
24449 case TYPE_CMP:
24450 case TYPE_COMPARE:
24451 case TYPE_DELAYED_COMPARE:
24452 case TYPE_IMUL_COMPARE:
24453 case TYPE_LMUL_COMPARE:
24454 case TYPE_FPCOMPARE:
24455 case TYPE_CR_LOGICAL:
24456 case TYPE_DELAYED_CR:
24457 return cost + 2;
24458 default:
24459 break;
24461 break;
24463 case TYPE_STORE:
24464 case TYPE_STORE_U:
24465 case TYPE_STORE_UX:
24466 case TYPE_FPSTORE:
24467 case TYPE_FPSTORE_U:
24468 case TYPE_FPSTORE_UX:
24469 if ((rs6000_cpu == PROCESSOR_POWER6)
24470 && recog_memoized (dep_insn)
24471 && (INSN_CODE (dep_insn) >= 0))
24474 if (GET_CODE (PATTERN (insn)) != SET)
24475 /* If this happens, we have to extend this to schedule
24476 optimally. Return default for now. */
24477 return cost;
24479 /* Adjust the cost for the case where the value written
24480 by a fixed point operation is used as the address
24481 gen value on a store. */
24482 switch (get_attr_type (dep_insn))
24484 case TYPE_LOAD:
24485 case TYPE_LOAD_U:
24486 case TYPE_LOAD_UX:
24487 case TYPE_CNTLZ:
24489 if (! store_data_bypass_p (dep_insn, insn))
24490 return 4;
24491 break;
24493 case TYPE_LOAD_EXT:
24494 case TYPE_LOAD_EXT_U:
24495 case TYPE_LOAD_EXT_UX:
24496 case TYPE_VAR_SHIFT_ROTATE:
24497 case TYPE_VAR_DELAYED_COMPARE:
24499 if (! store_data_bypass_p (dep_insn, insn))
24500 return 6;
24501 break;
24503 case TYPE_INTEGER:
24504 case TYPE_COMPARE:
24505 case TYPE_FAST_COMPARE:
24506 case TYPE_EXTS:
24507 case TYPE_SHIFT:
24508 case TYPE_INSERT_WORD:
24509 case TYPE_INSERT_DWORD:
24510 case TYPE_FPLOAD_U:
24511 case TYPE_FPLOAD_UX:
24512 case TYPE_STORE_U:
24513 case TYPE_STORE_UX:
24514 case TYPE_FPSTORE_U:
24515 case TYPE_FPSTORE_UX:
24517 if (! store_data_bypass_p (dep_insn, insn))
24518 return 3;
24519 break;
24521 case TYPE_IMUL:
24522 case TYPE_IMUL2:
24523 case TYPE_IMUL3:
24524 case TYPE_LMUL:
24525 case TYPE_IMUL_COMPARE:
24526 case TYPE_LMUL_COMPARE:
24528 if (! store_data_bypass_p (dep_insn, insn))
24529 return 17;
24530 break;
24532 case TYPE_IDIV:
24534 if (! store_data_bypass_p (dep_insn, insn))
24535 return 45;
24536 break;
24538 case TYPE_LDIV:
24540 if (! store_data_bypass_p (dep_insn, insn))
24541 return 57;
24542 break;
24544 default:
24545 break;
24548 break;
24550 case TYPE_LOAD:
24551 case TYPE_LOAD_U:
24552 case TYPE_LOAD_UX:
24553 case TYPE_LOAD_EXT:
24554 case TYPE_LOAD_EXT_U:
24555 case TYPE_LOAD_EXT_UX:
24556 if ((rs6000_cpu == PROCESSOR_POWER6)
24557 && recog_memoized (dep_insn)
24558 && (INSN_CODE (dep_insn) >= 0))
24561 /* Adjust the cost for the case where the value written
24562 by a fixed point instruction is used within the address
24563 gen portion of a subsequent load(u)(x) */
24564 switch (get_attr_type (dep_insn))
24566 case TYPE_LOAD:
24567 case TYPE_LOAD_U:
24568 case TYPE_LOAD_UX:
24569 case TYPE_CNTLZ:
24571 if (set_to_load_agen (dep_insn, insn))
24572 return 4;
24573 break;
24575 case TYPE_LOAD_EXT:
24576 case TYPE_LOAD_EXT_U:
24577 case TYPE_LOAD_EXT_UX:
24578 case TYPE_VAR_SHIFT_ROTATE:
24579 case TYPE_VAR_DELAYED_COMPARE:
24581 if (set_to_load_agen (dep_insn, insn))
24582 return 6;
24583 break;
24585 case TYPE_INTEGER:
24586 case TYPE_COMPARE:
24587 case TYPE_FAST_COMPARE:
24588 case TYPE_EXTS:
24589 case TYPE_SHIFT:
24590 case TYPE_INSERT_WORD:
24591 case TYPE_INSERT_DWORD:
24592 case TYPE_FPLOAD_U:
24593 case TYPE_FPLOAD_UX:
24594 case TYPE_STORE_U:
24595 case TYPE_STORE_UX:
24596 case TYPE_FPSTORE_U:
24597 case TYPE_FPSTORE_UX:
24599 if (set_to_load_agen (dep_insn, insn))
24600 return 3;
24601 break;
24603 case TYPE_IMUL:
24604 case TYPE_IMUL2:
24605 case TYPE_IMUL3:
24606 case TYPE_LMUL:
24607 case TYPE_IMUL_COMPARE:
24608 case TYPE_LMUL_COMPARE:
24610 if (set_to_load_agen (dep_insn, insn))
24611 return 17;
24612 break;
24614 case TYPE_IDIV:
24616 if (set_to_load_agen (dep_insn, insn))
24617 return 45;
24618 break;
24620 case TYPE_LDIV:
24622 if (set_to_load_agen (dep_insn, insn))
24623 return 57;
24624 break;
24626 default:
24627 break;
24630 break;
24632 case TYPE_FPLOAD:
24633 if ((rs6000_cpu == PROCESSOR_POWER6)
24634 && recog_memoized (dep_insn)
24635 && (INSN_CODE (dep_insn) >= 0)
24636 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
24637 return 2;
24639 default:
24640 break;
24643 /* Fall out to return default cost. */
24645 break;
24647 case REG_DEP_OUTPUT:
24648 /* Output dependency; DEP_INSN writes a register that INSN writes some
24649 cycles later. */
24650 if ((rs6000_cpu == PROCESSOR_POWER6)
24651 && recog_memoized (dep_insn)
24652 && (INSN_CODE (dep_insn) >= 0))
24654 attr_type = get_attr_type (insn);
24656 switch (attr_type)
24658 case TYPE_FP:
24659 if (get_attr_type (dep_insn) == TYPE_FP)
24660 return 1;
24661 break;
24662 case TYPE_FPLOAD:
24663 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
24664 return 2;
24665 break;
24666 default:
24667 break;
24670 case REG_DEP_ANTI:
24671 /* Anti dependency; DEP_INSN reads a register that INSN writes some
24672 cycles later. */
24673 return 0;
24675 default:
24676 gcc_unreachable ();
24679 return cost;
24682 /* Debug version of rs6000_adjust_cost. */
24684 static int
24685 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
24687 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
24689 if (ret != cost)
24691 const char *dep;
24693 switch (REG_NOTE_KIND (link))
24695 default: dep = "unknown dependency"; break;
24696 case REG_DEP_TRUE: dep = "data dependency"; break;
24697 case REG_DEP_OUTPUT: dep = "output dependency"; break;
24698 case REG_DEP_ANTI: dep = "anti dependency"; break;
24701 fprintf (stderr,
24702 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
24703 "%s, insn:\n", ret, cost, dep);
24705 debug_rtx (insn);
24708 return ret;
24711 /* Return true if INSN is microcoded on the current CPU.
24712 Return false otherwise. */
24714 static bool
24715 is_microcoded_insn (rtx insn)
24717 if (!insn || !NONDEBUG_INSN_P (insn)
24718 || GET_CODE (PATTERN (insn)) == USE
24719 || GET_CODE (PATTERN (insn)) == CLOBBER)
24720 return false;
24722 if (rs6000_cpu_attr == CPU_CELL)
24723 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
24725 if (rs6000_sched_groups
24726 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
24728 enum attr_type type = get_attr_type (insn);
24729 if (type == TYPE_LOAD_EXT_U
24730 || type == TYPE_LOAD_EXT_UX
24731 || type == TYPE_LOAD_UX
24732 || type == TYPE_STORE_UX
24733 || type == TYPE_MFCR)
24734 return true;
24737 return false;
24740 /* The function returns true if INSN is cracked into 2 instructions
24741 by the processor (and therefore occupies 2 issue slots). */
24743 static bool
24744 is_cracked_insn (rtx insn)
24746 if (!insn || !NONDEBUG_INSN_P (insn)
24747 || GET_CODE (PATTERN (insn)) == USE
24748 || GET_CODE (PATTERN (insn)) == CLOBBER)
24749 return false;
24751 if (rs6000_sched_groups
24752 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
24754 enum attr_type type = get_attr_type (insn);
24755 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
24756 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
24757 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
24758 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
24759 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
24760 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
24761 || type == TYPE_IDIV || type == TYPE_LDIV
24762 || type == TYPE_INSERT_WORD)
24763 return true;
24766 return false;
24769 /* The function returns true if INSN can be issued only from
24770 the branch slot. */
24772 static bool
24773 is_branch_slot_insn (rtx insn)
24775 if (!insn || !NONDEBUG_INSN_P (insn)
24776 || GET_CODE (PATTERN (insn)) == USE
24777 || GET_CODE (PATTERN (insn)) == CLOBBER)
24778 return false;
24780 if (rs6000_sched_groups)
24782 enum attr_type type = get_attr_type (insn);
24783 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
24784 return true;
24785 return false;
24788 return false;
24791 /* Return true if OUT_INSN sets a value that is
24792 used in the address generation computation of IN_INSN. */
24793 static bool
24794 set_to_load_agen (rtx out_insn, rtx in_insn)
24796 rtx out_set, in_set;
24798 /* For performance reasons, only handle the simple case where
24799 both loads are a single_set. */
24800 out_set = single_set (out_insn);
24801 if (out_set)
24803 in_set = single_set (in_insn);
24804 if (in_set)
24805 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
24808 return false;
24811 /* Try to determine base/offset/size parts of the given MEM.
24812 Return true if successful, false if the values could not all
24813 be determined.
24815 This function only looks for REG or REG+CONST address forms.
24816 REG+REG address form will return false. */
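/* For instance, a MEM whose address is (plus (reg r9) (const_int 16))
   and whose size is known to be 8 yields *base = r9, *offset = 16 and
   *size = 8; an indexed (reg+reg) address makes the function return
   false.  */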
24818 static bool
24819 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
24820 HOST_WIDE_INT *size)
24822 rtx addr_rtx;
24823 if (MEM_SIZE_KNOWN_P (mem))
24824 *size = MEM_SIZE (mem);
24825 else
24826 return false;
24828 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
24829 addr_rtx = XEXP (XEXP (mem, 0), 1);
24830 else
24831 addr_rtx = (XEXP (mem, 0));
24833 if (GET_CODE (addr_rtx) == REG)
24835 *base = addr_rtx;
24836 *offset = 0;
24838 else if (GET_CODE (addr_rtx) == PLUS
24839 && CONST_INT_P (XEXP (addr_rtx, 1)))
24841 *base = XEXP (addr_rtx, 0);
24842 *offset = INTVAL (XEXP (addr_rtx, 1));
24844 else
24845 return false;
24847 return true;
24850 /* Return true if the target storage location of MEM1 is adjacent
24851 to the target storage location of MEM2. */
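/* E.g. two 8-byte accesses off the same base register at offsets 16
   and 24 are adjacent (16 + 8 == 24); the test is symmetric, so the
   order of MEM1 and MEM2 does not matter.  */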
24854 static bool
24855 adjacent_mem_locations (rtx mem1, rtx mem2)
24857 rtx reg1, reg2;
24858 HOST_WIDE_INT off1, size1, off2, size2;
24860 if (get_memref_parts (mem1, &reg1, &off1, &size1)
24861 && get_memref_parts (mem2, &reg2, &off2, &size2))
24862 return ((REGNO (reg1) == REGNO (reg2))
24863 && ((off1 + size1 == off2)
24864 || (off2 + size2 == off1)));
24866 return false;
24869 /* This function returns true if it can be determined that the two MEM
24870 locations overlap by at least 1 byte based on base reg/offset/size. */
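/* E.g. an 8-byte access at offset 16 overlaps a 4-byte access at
   offset 20 off the same base register (16 <= 20 && 16 + 8 > 20),
   while one at offset 24 would not.  */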
24872 static bool
24873 mem_locations_overlap (rtx mem1, rtx mem2)
24875 rtx reg1, reg2;
24876 HOST_WIDE_INT off1, size1, off2, size2;
24878 if (get_memref_parts (mem1, &reg1, &off1, &size1)
24879 && get_memref_parts (mem2, &reg2, &off2, &size2))
24880 return ((REGNO (reg1) == REGNO (reg2))
24881 && (((off1 <= off2) && (off1 + size1 > off2))
24882 || ((off2 <= off1) && (off2 + size2 > off1))));
24884 return false;
24887 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
24888 Increase the priority to execute INSN earlier; reduce it to
24889 execute INSN later. */
24893 static int
24894 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
24896 rtx load_mem, str_mem;
24897 /* On machines (like the 750) which have asymmetric integer units,
24898 where one integer unit can do multiply and divides and the other
24899 can't, reduce the priority of multiply/divide so it is scheduled
24900 before other integer operations. */
24902 #if 0
24903 if (! INSN_P (insn))
24904 return priority;
24906 if (GET_CODE (PATTERN (insn)) == USE)
24907 return priority;
24909 switch (rs6000_cpu_attr) {
24910 case CPU_PPC750:
24911 switch (get_attr_type (insn))
24913 default:
24914 break;
24916 case TYPE_IMUL:
24917 case TYPE_IDIV:
24918 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
24919 priority, priority);
24920 if (priority >= 0 && priority < 0x01000000)
24921 priority >>= 3;
24922 break;
24925 #endif
24927 if (insn_must_be_first_in_group (insn)
24928 && reload_completed
24929 && current_sched_info->sched_max_insns_priority
24930 && rs6000_sched_restricted_insns_priority)
24933 /* Prioritize insns that can be dispatched only in the first
24934 dispatch slot. */
24935 if (rs6000_sched_restricted_insns_priority == 1)
24936 /* Attach highest priority to insn. This means that in
24937 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
24938 precede 'priority' (critical path) considerations. */
24939 return current_sched_info->sched_max_insns_priority;
24940 else if (rs6000_sched_restricted_insns_priority == 2)
24941 /* Increase priority of insn by a minimal amount. This means that in
24942 haifa-sched.c:ready_sort(), only 'priority' (critical path)
24943 considerations precede dispatch-slot restriction considerations. */
24944 return (priority + 1);
24947 if (rs6000_cpu == PROCESSOR_POWER6
24948 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
24949 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
24950 /* Attach highest priority to insn if the scheduler has just issued two
24951 stores and this instruction is a load, or two loads and this instruction
24952 is a store. Power6 wants loads and stores scheduled alternately
24953 when possible. */
24954 return current_sched_info->sched_max_insns_priority;
24956 return priority;
24959 /* Return true if the instruction is nonpipelined on the Cell. */
24960 static bool
24961 is_nonpipeline_insn (rtx insn)
24963 enum attr_type type;
24964 if (!insn || !NONDEBUG_INSN_P (insn)
24965 || GET_CODE (PATTERN (insn)) == USE
24966 || GET_CODE (PATTERN (insn)) == CLOBBER)
24967 return false;
24969 type = get_attr_type (insn);
24970 if (type == TYPE_IMUL
24971 || type == TYPE_IMUL2
24972 || type == TYPE_IMUL3
24973 || type == TYPE_LMUL
24974 || type == TYPE_IDIV
24975 || type == TYPE_LDIV
24976 || type == TYPE_SDIV
24977 || type == TYPE_DDIV
24978 || type == TYPE_SSQRT
24979 || type == TYPE_DSQRT
24980 || type == TYPE_MFCR
24981 || type == TYPE_MFCRF
24982 || type == TYPE_MFJMPR)
24984 return true;
24986 return false;
24990 /* Return how many instructions the machine can issue per cycle. */
24992 static int
24993 rs6000_issue_rate (void)
24995 /* Unless scheduling for register pressure, use an issue rate of 1
24996 for the first scheduling pass to limit degradation. */
24997 if (!reload_completed && !flag_sched_pressure)
24998 return 1;
25000 switch (rs6000_cpu_attr) {
25001 case CPU_RS64A:
25002 case CPU_PPC601: /* ? */
25003 case CPU_PPC7450:
25004 return 3;
25005 case CPU_PPC440:
25006 case CPU_PPC603:
25007 case CPU_PPC750:
25008 case CPU_PPC7400:
25009 case CPU_PPC8540:
25010 case CPU_PPC8548:
25011 case CPU_CELL:
25012 case CPU_PPCE300C2:
25013 case CPU_PPCE300C3:
25014 case CPU_PPCE500MC:
25015 case CPU_PPCE500MC64:
25016 case CPU_PPCE5500:
25017 case CPU_PPCE6500:
25018 case CPU_TITAN:
25019 return 2;
25020 case CPU_PPC476:
25021 case CPU_PPC604:
25022 case CPU_PPC604E:
25023 case CPU_PPC620:
25024 case CPU_PPC630:
25025 return 4;
25026 case CPU_POWER4:
25027 case CPU_POWER5:
25028 case CPU_POWER6:
25029 case CPU_POWER7:
25030 return 5;
25031 case CPU_POWER8:
25032 return 7;
25033 default:
25034 return 1;
25038 /* Return how many instructions to look ahead for better insn
25039 scheduling. */
25041 static int
25042 rs6000_use_sched_lookahead (void)
25044 switch (rs6000_cpu_attr)
25046 case CPU_PPC8540:
25047 case CPU_PPC8548:
25048 return 4;
25050 case CPU_CELL:
25051 return (reload_completed ? 8 : 0);
25053 default:
25054 return 0;
25058 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
25059 static int
25060 rs6000_use_sched_lookahead_guard (rtx insn)
25062 if (rs6000_cpu_attr != CPU_CELL)
25063 return 1;
25065 if (insn == NULL_RTX || !INSN_P (insn))
25066 abort ();
25068 if (!reload_completed
25069 || is_nonpipeline_insn (insn)
25070 || is_microcoded_insn (insn))
25071 return 0;
25073 return 1;
25076 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
25077 and return true. */
25079 static bool
25080 find_mem_ref (rtx pat, rtx *mem_ref)
25082 const char * fmt;
25083 int i, j;
25085 /* stack_tie does not produce any real memory traffic. */
25086 if (tie_operand (pat, VOIDmode))
25087 return false;
25089 if (GET_CODE (pat) == MEM)
25091 *mem_ref = pat;
25092 return true;
25095 /* Recursively process the pattern. */
25096 fmt = GET_RTX_FORMAT (GET_CODE (pat));
25098 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
25100 if (fmt[i] == 'e')
25102 if (find_mem_ref (XEXP (pat, i), mem_ref))
25103 return true;
25105 else if (fmt[i] == 'E')
25106 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
25108 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
25109 return true;
25113 return false;
25116 /* Determine if PAT is a PATTERN of a load insn. */
25118 static bool
25119 is_load_insn1 (rtx pat, rtx *load_mem)
25121 if (!pat)
25122 return false;
25124 if (GET_CODE (pat) == SET)
25125 return find_mem_ref (SET_SRC (pat), load_mem);
25127 if (GET_CODE (pat) == PARALLEL)
25129 int i;
25131 for (i = 0; i < XVECLEN (pat, 0); i++)
25132 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
25133 return true;
25136 return false;
25139 /* Determine if INSN loads from memory. */
25141 static bool
25142 is_load_insn (rtx insn, rtx *load_mem)
25144 if (!insn || !INSN_P (insn))
25145 return false;
25147 if (CALL_P (insn))
25148 return false;
25150 return is_load_insn1 (PATTERN (insn), load_mem);
25153 /* Determine if PAT is a PATTERN of a store insn. */
25155 static bool
25156 is_store_insn1 (rtx pat, rtx *str_mem)
25158 if (!pat)
25159 return false;
25161 if (GET_CODE (pat) == SET)
25162 return find_mem_ref (SET_DEST (pat), str_mem);
25164 if (GET_CODE (pat) == PARALLEL)
25166 int i;
25168 for (i = 0; i < XVECLEN (pat, 0); i++)
25169 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
25170 return true;
25173 return false;
25176 /* Determine if INSN stores to memory. */
25178 static bool
25179 is_store_insn (rtx insn, rtx *str_mem)
25181 if (!insn || !INSN_P (insn))
25182 return false;
25184 return is_store_insn1 (PATTERN (insn), str_mem);
25187 /* Returns whether the dependence between INSN and NEXT is considered
25188 costly by the given target. */
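/* For example (illustrative), with -msched-costly-dep=store_to_load a
   store followed by any dependent load is considered costly and the
   two are kept in separate dispatch groups; with -msched-costly-dep=2
   only dependences whose remaining latency (cost - distance) is at
   least 2 are kept apart.  */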
25190 static bool
25191 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
25193 rtx insn;
25194 rtx next;
25195 rtx load_mem, str_mem;
25197 /* If the flag is not enabled - no dependence is considered costly;
25198 allow all dependent insns in the same group.
25199 This is the most aggressive option. */
25200 if (rs6000_sched_costly_dep == no_dep_costly)
25201 return false;
25203 /* If the flag is set to 1 - a dependence is always considered costly;
25204 do not allow dependent instructions in the same group.
25205 This is the most conservative option. */
25206 if (rs6000_sched_costly_dep == all_deps_costly)
25207 return true;
25209 insn = DEP_PRO (dep);
25210 next = DEP_CON (dep);
25212 if (rs6000_sched_costly_dep == store_to_load_dep_costly
25213 && is_load_insn (next, &load_mem)
25214 && is_store_insn (insn, &str_mem))
25215 /* Prevent load after store in the same group. */
25216 return true;
25218 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
25219 && is_load_insn (next, &load_mem)
25220 && is_store_insn (insn, &str_mem)
25221 && DEP_TYPE (dep) == REG_DEP_TRUE
25222 && mem_locations_overlap(str_mem, load_mem))
25223 /* Prevent load after store in the same group if it is a true
25224 dependence. */
25225 return true;
25227 /* The flag is set to X; dependences with latency >= X are considered costly,
25228 and will not be scheduled in the same group. */
25229 if (rs6000_sched_costly_dep <= max_dep_latency
25230 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
25231 return true;
25233 return false;
25236 /* Return the next insn after INSN that is found before TAIL is reached,
25237 skipping any "non-active" insns - insns that will not actually occupy
25238 an issue slot. Return NULL_RTX if such an insn is not found. */
25240 static rtx
25241 get_next_active_insn (rtx insn, rtx tail)
25243 if (insn == NULL_RTX || insn == tail)
25244 return NULL_RTX;
25246 while (1)
25248 insn = NEXT_INSN (insn);
25249 if (insn == NULL_RTX || insn == tail)
25250 return NULL_RTX;
25252 if (CALL_P (insn)
25253 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
25254 || (NONJUMP_INSN_P (insn)
25255 && GET_CODE (PATTERN (insn)) != USE
25256 && GET_CODE (PATTERN (insn)) != CLOBBER
25257 && INSN_CODE (insn) != CODE_FOR_stack_tie))
25258 break;
25260 return insn;
25263 /* We are about to begin issuing insns for this clock cycle. */
25265 static int
25266 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
25267 rtx *ready ATTRIBUTE_UNUSED,
25268 int *pn_ready ATTRIBUTE_UNUSED,
25269 int clock_var ATTRIBUTE_UNUSED)
25271 int n_ready = *pn_ready;
25273 if (sched_verbose)
25274 fprintf (dump, "// rs6000_sched_reorder :\n");
25276 /* Reorder the ready list, if the second to last ready insn
25277 is a non-pipelined insn. */
25278 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
25280 if (is_nonpipeline_insn (ready[n_ready - 1])
25281 && (recog_memoized (ready[n_ready - 2]) > 0))
25282 /* Simply swap first two insns. */
25284 rtx tmp = ready[n_ready - 1];
25285 ready[n_ready - 1] = ready[n_ready - 2];
25286 ready[n_ready - 2] = tmp;
25290 if (rs6000_cpu == PROCESSOR_POWER6)
25291 load_store_pendulum = 0;
25293 return rs6000_issue_rate ();
25296 /* Like rs6000_sched_reorder, but called after issuing each insn. */
25298 static int
25299 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
25300 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
25302 if (sched_verbose)
25303 fprintf (dump, "// rs6000_sched_reorder2 :\n");
25305 /* For Power6, we need to handle some special cases to try and keep the
25306 store queue from overflowing and triggering expensive flushes.
25308 This code monitors how load and store instructions are being issued
25309 and skews the ready list one way or the other to increase the likelihood
25310 that a desired instruction is issued at the proper time.
25312 A couple of things are done. First, we maintain a "load_store_pendulum"
25313 to track the current state of load/store issue.
25315 - If the pendulum is at zero, then no loads or stores have been
25316 issued in the current cycle so we do nothing.
25318 - If the pendulum is 1, then a single load has been issued in this
25319 cycle and we attempt to locate another load in the ready list to
25320 issue with it.
25322 - If the pendulum is -2, then two stores have already been
25323 issued in this cycle, so we increase the priority of the first load
25324 in the ready list to increase its likelihood of being chosen first
25325 in the next cycle.
25327 - If the pendulum is -1, then a single store has been issued in this
25328 cycle and we attempt to locate another store in the ready list to
25329 issue with it, preferring a store to an adjacent memory location to
25330 facilitate store pairing in the store queue.
25332 - If the pendulum is 2, then two loads have already been
25333 issued in this cycle, so we increase the priority of the first store
25334 in the ready list to increase its likelihood of being chosen first
25335 in the next cycle.
25337 - If the pendulum < -2 or > 2, then do nothing.
25339 Note: This code covers the most common scenarios. There exist
25340 non-load/store instructions which make use of the LSU and which
25341 would need to be accounted for to strictly model the behavior
25342 of the machine. Those instructions are currently unaccounted
25343 for to help minimize compile time overhead of this code. */
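/* Illustrative trace (values hypothetical): the pendulum starts each
   cycle at 0; a store issues -> -1, so the code below hunts the ready
   list for a second store, preferring one adjacent to the first; a
   second store issues -> -2, so the first load on the ready list has
   its priority bumped to steer the next cycle back toward loads.  */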
25345 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
25347 int pos;
25348 int i;
25349 rtx tmp, load_mem, str_mem;
25351 if (is_store_insn (last_scheduled_insn, &str_mem))
25352 /* Issuing a store, swing the load_store_pendulum to the left */
25353 load_store_pendulum--;
25354 else if (is_load_insn (last_scheduled_insn, &load_mem))
25355 /* Issuing a load, swing the load_store_pendulum to the right */
25356 load_store_pendulum++;
25357 else
25358 return cached_can_issue_more;
25360 /* If the pendulum is balanced, or there is only one instruction on
25361 the ready list, then all is well, so return. */
25362 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
25363 return cached_can_issue_more;
25365 if (load_store_pendulum == 1)
25367 /* A load has been issued in this cycle. Scan the ready list
25368 for another load to issue with it */
25369 pos = *pn_ready-1;
25371 while (pos >= 0)
25373 if (is_load_insn (ready[pos], &load_mem))
25375 /* Found a load. Move it to the head of the ready list,
25376 and adjust its priority so that it is more likely to
25377 stay there */
25378 tmp = ready[pos];
25379 for (i=pos; i<*pn_ready-1; i++)
25380 ready[i] = ready[i + 1];
25381 ready[*pn_ready-1] = tmp;
25383 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25384 INSN_PRIORITY (tmp)++;
25385 break;
25387 pos--;
25390 else if (load_store_pendulum == -2)
25392 /* Two stores have been issued in this cycle. Increase the
25393 priority of the first load in the ready list to favor it for
25394 issuing in the next cycle. */
25395 pos = *pn_ready-1;
25397 while (pos >= 0)
25399 if (is_load_insn (ready[pos], &load_mem)
25400 && !sel_sched_p ()
25401 && INSN_PRIORITY_KNOWN (ready[pos]))
25403 INSN_PRIORITY (ready[pos])++;
25405 /* Adjust the pendulum to account for the fact that a load
25406 was found and increased in priority. This is to prevent
25407 increasing the priority of multiple loads. */
25408 load_store_pendulum--;
25410 break;
25412 pos--;
25415 else if (load_store_pendulum == -1)
25417 /* A store has been issued in this cycle. Scan the ready list for
25418 another store to issue with it, preferring a store to an adjacent
25419 memory location */
25420 int first_store_pos = -1;
25422 pos = *pn_ready-1;
25424 while (pos >= 0)
25426 if (is_store_insn (ready[pos], &str_mem))
25428 rtx str_mem2;
25429 /* Maintain the index of the first store found on the
25430 list */
25431 if (first_store_pos == -1)
25432 first_store_pos = pos;
25434 if (is_store_insn (last_scheduled_insn, &str_mem2)
25435 && adjacent_mem_locations (str_mem, str_mem2))
25437 /* Found an adjacent store. Move it to the head of the
25438 ready list, and adjust its priority so that it is
25439 more likely to stay there */
25440 tmp = ready[pos];
25441 for (i=pos; i<*pn_ready-1; i++)
25442 ready[i] = ready[i + 1];
25443 ready[*pn_ready-1] = tmp;
25445 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25446 INSN_PRIORITY (tmp)++;
25448 first_store_pos = -1;
25450 break;
25453 pos--;
25456 if (first_store_pos >= 0)
25458 /* An adjacent store wasn't found, but a non-adjacent store was,
25459 so move the non-adjacent store to the front of the ready
25460 list, and adjust its priority so that it is more likely to
25461 stay there. */
25462 tmp = ready[first_store_pos];
25463 for (i=first_store_pos; i<*pn_ready-1; i++)
25464 ready[i] = ready[i + 1];
25465 ready[*pn_ready-1] = tmp;
25466 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25467 INSN_PRIORITY (tmp)++;
25470 else if (load_store_pendulum == 2)
25472 /* Two loads have been issued in this cycle. Increase the priority
25473 of the first store in the ready list to favor it for issuing in
25474 the next cycle. */
25475 pos = *pn_ready-1;
25477 while (pos >= 0)
25479 if (is_store_insn (ready[pos], &str_mem)
25480 && !sel_sched_p ()
25481 && INSN_PRIORITY_KNOWN (ready[pos]))
25483 INSN_PRIORITY (ready[pos])++;
25485 /* Adjust the pendulum to account for the fact that a store
25486 was found and increased in priority. This is to prevent
25487 increasing the priority of multiple stores. */
25488 load_store_pendulum++;
25490 break;
25492 pos--;
25497 return cached_can_issue_more;
25500 /* Return whether the presence of INSN causes a dispatch group termination
25501 of group WHICH_GROUP.
25503 If WHICH_GROUP == current_group, this function will return true if INSN
25504 causes the termination of the current group (i.e, the dispatch group to
25505 which INSN belongs). This means that INSN will be the last insn in the
25506 group it belongs to.
25508 If WHICH_GROUP == previous_group, this function will return true if INSN
25509 causes the termination of the previous group (i.e, the dispatch group that
25510 precedes the group to which INSN belongs). This means that INSN will be
25511 the first insn in the group it belongs to. */
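/* For example, on Power4/5 a sync must be the first insn in its
   dispatch group (see insn_must_be_first_in_group below), so its
   presence terminates the previous group; a branch must be last (see
   insn_must_be_last_in_group), so it terminates the current one.  */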
25513 static bool
25514 insn_terminates_group_p (rtx insn, enum group_termination which_group)
25516 bool first, last;
25518 if (! insn)
25519 return false;
25521 first = insn_must_be_first_in_group (insn);
25522 last = insn_must_be_last_in_group (insn);
25524 if (first && last)
25525 return true;
25527 if (which_group == current_group)
25528 return last;
25529 else if (which_group == previous_group)
25530 return first;
25532 return false;
25536 static bool
25537 insn_must_be_first_in_group (rtx insn)
25539 enum attr_type type;
25541 if (!insn
25542 || NOTE_P (insn)
25543 || DEBUG_INSN_P (insn)
25544 || GET_CODE (PATTERN (insn)) == USE
25545 || GET_CODE (PATTERN (insn)) == CLOBBER)
25546 return false;
25548 switch (rs6000_cpu)
25550 case PROCESSOR_POWER5:
25551 if (is_cracked_insn (insn))
25552 return true;
/* Fall through. */
25553 case PROCESSOR_POWER4:
25554 if (is_microcoded_insn (insn))
25555 return true;
25557 if (!rs6000_sched_groups)
25558 return false;
25560 type = get_attr_type (insn);
25562 switch (type)
25564 case TYPE_MFCR:
25565 case TYPE_MFCRF:
25566 case TYPE_MTCR:
25567 case TYPE_DELAYED_CR:
25568 case TYPE_CR_LOGICAL:
25569 case TYPE_MTJMPR:
25570 case TYPE_MFJMPR:
25571 case TYPE_IDIV:
25572 case TYPE_LDIV:
25573 case TYPE_LOAD_L:
25574 case TYPE_STORE_C:
25575 case TYPE_ISYNC:
25576 case TYPE_SYNC:
25577 return true;
25578 default:
25579 break;
25581 break;
25582 case PROCESSOR_POWER6:
25583 type = get_attr_type (insn);
25585 switch (type)
25587 case TYPE_INSERT_DWORD:
25588 case TYPE_EXTS:
25589 case TYPE_CNTLZ:
25590 case TYPE_SHIFT:
25591 case TYPE_VAR_SHIFT_ROTATE:
25592 case TYPE_TRAP:
25593 case TYPE_IMUL:
25594 case TYPE_IMUL2:
25595 case TYPE_IMUL3:
25596 case TYPE_LMUL:
25597 case TYPE_IDIV:
25598 case TYPE_INSERT_WORD:
25599 case TYPE_DELAYED_COMPARE:
25600 case TYPE_IMUL_COMPARE:
25601 case TYPE_LMUL_COMPARE:
25602 case TYPE_FPCOMPARE:
25603 case TYPE_MFCR:
25604 case TYPE_MTCR:
25605 case TYPE_MFJMPR:
25606 case TYPE_MTJMPR:
25607 case TYPE_ISYNC:
25608 case TYPE_SYNC:
25609 case TYPE_LOAD_L:
25610 case TYPE_STORE_C:
25611 case TYPE_LOAD_U:
25612 case TYPE_LOAD_UX:
25613 case TYPE_LOAD_EXT_UX:
25614 case TYPE_STORE_U:
25615 case TYPE_STORE_UX:
25616 case TYPE_FPLOAD_U:
25617 case TYPE_FPLOAD_UX:
25618 case TYPE_FPSTORE_U:
25619 case TYPE_FPSTORE_UX:
25620 return true;
25621 default:
25622 break;
25624 break;
25625 case PROCESSOR_POWER7:
25626 type = get_attr_type (insn);
25628 switch (type)
25630 case TYPE_CR_LOGICAL:
25631 case TYPE_MFCR:
25632 case TYPE_MFCRF:
25633 case TYPE_MTCR:
25634 case TYPE_IDIV:
25635 case TYPE_LDIV:
25636 case TYPE_COMPARE:
25637 case TYPE_DELAYED_COMPARE:
25638 case TYPE_VAR_DELAYED_COMPARE:
25639 case TYPE_ISYNC:
25640 case TYPE_LOAD_L:
25641 case TYPE_STORE_C:
25642 case TYPE_LOAD_U:
25643 case TYPE_LOAD_UX:
25644 case TYPE_LOAD_EXT:
25645 case TYPE_LOAD_EXT_U:
25646 case TYPE_LOAD_EXT_UX:
25647 case TYPE_STORE_U:
25648 case TYPE_STORE_UX:
25649 case TYPE_FPLOAD_U:
25650 case TYPE_FPLOAD_UX:
25651 case TYPE_FPSTORE_U:
25652 case TYPE_FPSTORE_UX:
25653 case TYPE_MFJMPR:
25654 case TYPE_MTJMPR:
25655 return true;
25656 default:
25657 break;
25659 break;
25660 case PROCESSOR_POWER8:
25661 type = get_attr_type (insn);
25663 switch (type)
25665 case TYPE_CR_LOGICAL:
25666 case TYPE_DELAYED_CR:
25667 case TYPE_MFCR:
25668 case TYPE_MFCRF:
25669 case TYPE_MTCR:
25670 case TYPE_COMPARE:
25671 case TYPE_DELAYED_COMPARE:
25672 case TYPE_VAR_DELAYED_COMPARE:
25673 case TYPE_IMUL_COMPARE:
25674 case TYPE_LMUL_COMPARE:
25675 case TYPE_SYNC:
25676 case TYPE_ISYNC:
25677 case TYPE_LOAD_L:
25678 case TYPE_STORE_C:
25679 case TYPE_LOAD_U:
25680 case TYPE_LOAD_UX:
25681 case TYPE_LOAD_EXT:
25682 case TYPE_LOAD_EXT_U:
25683 case TYPE_LOAD_EXT_UX:
25684 case TYPE_STORE_UX:
25685 case TYPE_VECSTORE:
25686 case TYPE_MFJMPR:
25687 case TYPE_MTJMPR:
25688 return true;
25689 default:
25690 break;
25692 break;
25693 default:
25694 break;
25697 return false;
25700 static bool
25701 insn_must_be_last_in_group (rtx insn)
25703 enum attr_type type;
25705 if (!insn
25706 || NOTE_P (insn)
25707 || DEBUG_INSN_P (insn)
25708 || GET_CODE (PATTERN (insn)) == USE
25709 || GET_CODE (PATTERN (insn)) == CLOBBER)
25710 return false;
25712 switch (rs6000_cpu)
25713 case PROCESSOR_POWER4:
25714 case PROCESSOR_POWER5:
25715 if (is_microcoded_insn (insn))
25716 return true;
25718 if (is_branch_slot_insn (insn))
25719 return true;
25721 break;
25722 case PROCESSOR_POWER6:
25723 type = get_attr_type (insn);
25725 switch (type)
25727 case TYPE_EXTS:
25728 case TYPE_CNTLZ:
25729 case TYPE_SHIFT:
25730 case TYPE_VAR_SHIFT_ROTATE:
25731 case TYPE_TRAP:
25732 case TYPE_IMUL:
25733 case TYPE_IMUL2:
25734 case TYPE_IMUL3:
25735 case TYPE_LMUL:
25736 case TYPE_IDIV:
25737 case TYPE_DELAYED_COMPARE:
25738 case TYPE_IMUL_COMPARE:
25739 case TYPE_LMUL_COMPARE:
25740 case TYPE_FPCOMPARE:
25741 case TYPE_MFCR:
25742 case TYPE_MTCR:
25743 case TYPE_MFJMPR:
25744 case TYPE_MTJMPR:
25745 case TYPE_ISYNC:
25746 case TYPE_SYNC:
25747 case TYPE_LOAD_L:
25748 case TYPE_STORE_C:
25749 return true;
25750 default:
25751 break;
25753 break;
25754 case PROCESSOR_POWER7:
25755 type = get_attr_type (insn);
25757 switch (type)
25759 case TYPE_ISYNC:
25760 case TYPE_SYNC:
25761 case TYPE_LOAD_L:
25762 case TYPE_STORE_C:
25763 case TYPE_LOAD_EXT_U:
25764 case TYPE_LOAD_EXT_UX:
25765 case TYPE_STORE_UX:
25766 return true;
25767 default:
25768 break;
25770 break;
25771 case PROCESSOR_POWER8:
25772 type = get_attr_type (insn);
25774 switch (type)
25776 case TYPE_MFCR:
25777 case TYPE_MTCR:
25778 case TYPE_ISYNC:
25779 case TYPE_SYNC:
25780 case TYPE_LOAD_L:
25781 case TYPE_STORE_C:
25782 case TYPE_LOAD_EXT_U:
25783 case TYPE_LOAD_EXT_UX:
25784 case TYPE_STORE_UX:
25785 return true;
25786 default:
25787 break;
25789 break;
25790 default:
25791 break;
25794 return false;
25797 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
25798 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
25800 static bool
25801 is_costly_group (rtx *group_insns, rtx next_insn)
25803 int i;
25804 int issue_rate = rs6000_issue_rate ();
25806 for (i = 0; i < issue_rate; i++)
25808 sd_iterator_def sd_it;
25809 dep_t dep;
25810 rtx insn = group_insns[i];
25812 if (!insn)
25813 continue;
25815 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
25817 rtx next = DEP_CON (dep);
25819 if (next == next_insn
25820 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
25821 return true;
25825 return false;
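/* A standalone sketch of the scan above; not built (hence #if 0), and
   the callback parameters are hypothetical stand-ins: DEPENDS_ON plays
   the role of the FOR_EACH_DEP/DEP_CON walk and DEP_IS_COSTLY that of
   rs6000_is_costly_dependence.  */
#if 0
static bool
group_has_costly_dep_sketch (void *slots[], int n_slots, void *next_insn,
                             bool (*depends_on) (void *, void *),
                             bool (*dep_is_costly) (void *, void *))
{
  int i;

  for (i = 0; i < n_slots; i++)
    if (slots[i]
        && depends_on (slots[i], next_insn)
        && dep_is_costly (slots[i], next_insn))
      return true;

  return false;
}
#endif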
25828 /* Utility of the function redefine_groups.
25829 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
25830 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
25831 to keep it "far" (in a separate group) from GROUP_INSNS, following
25832 one of the following schemes, depending on the value of the flag
25833 -minsert-sched-nops=X:
25834 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
25835 in order to force NEXT_INSN into a separate group.
25836 (2) X < sched_finish_regroup_exact: insert exactly X nops.
25837 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
25838 insertion (has a group just ended, how many vacant issue slots remain in the
25839 last group, and how many dispatch groups were encountered so far). */
25841 static int
25842 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
25843 rtx next_insn, bool *group_end, int can_issue_more,
25844 int *group_count)
25846 rtx nop;
25847 bool force;
25848 int issue_rate = rs6000_issue_rate ();
25849 bool end = *group_end;
25850 int i;
25852 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
25853 return can_issue_more;
25855 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
25856 return can_issue_more;
25858 force = is_costly_group (group_insns, next_insn);
25859 if (!force)
25860 return can_issue_more;
25862 if (sched_verbose > 6)
25863 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
25864 *group_count, can_issue_more);
25866 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
25868 if (*group_end)
25869 can_issue_more = 0;
25871 /* Since only a branch can be issued in the last issue_slot, it is
25872 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
25873 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
25874 in this case the last nop will start a new group and the branch
25875 will be forced to the new group. */
25876 if (can_issue_more && !is_branch_slot_insn (next_insn))
25877 can_issue_more--;
25879 /* Do we have a special group ending nop? */
25880 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
25881 || rs6000_cpu_attr == CPU_POWER8)
25883 nop = gen_group_ending_nop ();
25884 emit_insn_before (nop, next_insn);
25885 can_issue_more = 0;
25887 else
25888 while (can_issue_more > 0)
25890 nop = gen_nop ();
25891 emit_insn_before (nop, next_insn);
25892 can_issue_more--;
25895 *group_end = true;
25896 return 0;
25899 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
25901 int n_nops = rs6000_sched_insert_nops;
25903 /* Nops can't be issued from the branch slot, so the effective
25904 issue_rate for nops is 'issue_rate - 1'. */
25905 if (can_issue_more == 0)
25906 can_issue_more = issue_rate;
25907 can_issue_more--;
25908 if (can_issue_more == 0)
25910 can_issue_more = issue_rate - 1;
25911 (*group_count)++;
25912 end = true;
25913 for (i = 0; i < issue_rate; i++)
25915 group_insns[i] = 0;
25919 while (n_nops > 0)
25921 nop = gen_nop ();
25922 emit_insn_before (nop, next_insn);
25923 if (can_issue_more == issue_rate - 1) /* new group begins */
25924 end = false;
25925 can_issue_more--;
25926 if (can_issue_more == 0)
25928 can_issue_more = issue_rate - 1;
25929 (*group_count)++;
25930 end = true;
25931 for (i = 0; i < issue_rate; i++)
25933 group_insns[i] = 0;
25936 n_nops--;
25939 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
25940 can_issue_more++;
25942 /* Is next_insn going to start a new group? */
25943 *group_end
25944 = (end
25945 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
25946 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
25947 || (can_issue_more < issue_rate &&
25948 insn_terminates_group_p (next_insn, previous_group)));
25949 if (*group_end && end)
25950 (*group_count)--;
25952 if (sched_verbose > 6)
25953 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
25954 *group_count, can_issue_more);
25955 return can_issue_more;
25958 return can_issue_more;
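/* Worked example for the exact scheme above, assuming a hypothetical
   4-wide dispatch window: with three vacant slots and a non-branch
   NEXT_INSN, two nops are emitted (can_issue_more - 1); only the
   branch slot stays vacant, a non-branch cannot fill it, and NEXT_INSN
   opens a new group.  If NEXT_INSN is a branch, all three nops are
   emitted and the branch lands in the group that follows.  On
   POWER6/7/8 a single group-ending nop replaces the whole series.  */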
25961 /* This function tries to synch the dispatch groups that the compiler "sees"
25962 with the dispatch groups that the processor dispatcher is expected to
25963 form in practice. It tries to achieve this synchronization by forcing the
25964 estimated processor grouping on the compiler (as opposed to the function
25965 'pad_groups' which tries to force the scheduler's grouping on the processor).
25967 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
25968 examines the (estimated) dispatch groups that will be formed by the processor
25969 dispatcher. It marks these group boundaries to reflect the estimated
25970 processor grouping, overriding the grouping that the scheduler had marked.
25971 Depending on the value of the flag '-minsert-sched-nops' this function can
25972 force certain insns into separate groups or force a certain distance between
25973 them by inserting nops, for example, if there exists a "costly dependence"
25974 between the insns.
25976 The function estimates the group boundaries that the processor will form as
25977 follows: It keeps track of how many vacant issue slots are available after
25978 each insn. A subsequent insn will start a new group if one of the following
25979 4 cases applies:
25980 - no more vacant issue slots remain in the current dispatch group.
25981 - only the last issue slot, which is the branch slot, is vacant, but the next
25982 insn is not a branch.
25983 - only the last two or fewer issue slots, including the branch slot, are vacant,
25984 which means that a cracked insn (which occupies two issue slots) can't be
25985 issued in this group.
25986 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
25987 start a new group. */
25989 static int
25990 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
25992 rtx insn, next_insn;
25993 int issue_rate;
25994 int can_issue_more;
25995 int slot, i;
25996 bool group_end;
25997 int group_count = 0;
25998 rtx *group_insns;
26000 /* Initialize. */
26001 issue_rate = rs6000_issue_rate ();
26002 group_insns = XALLOCAVEC (rtx, issue_rate);
26003 for (i = 0; i < issue_rate; i++)
26005 group_insns[i] = 0;
26007 can_issue_more = issue_rate;
26008 slot = 0;
26009 insn = get_next_active_insn (prev_head_insn, tail);
26010 group_end = false;
26012 while (insn != NULL_RTX)
26014 slot = (issue_rate - can_issue_more);
26015 group_insns[slot] = insn;
26016 can_issue_more =
26017 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26018 if (insn_terminates_group_p (insn, current_group))
26019 can_issue_more = 0;
26021 next_insn = get_next_active_insn (insn, tail);
26022 if (next_insn == NULL_RTX)
26023 return group_count + 1;
26025 /* Is next_insn going to start a new group? */
26026 group_end
26027 = (can_issue_more == 0
26028 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26029 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26030 || (can_issue_more < issue_rate &&
26031 insn_terminates_group_p (next_insn, previous_group)));
26033 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
26034 next_insn, &group_end, can_issue_more,
26035 &group_count);
26037 if (group_end)
26039 group_count++;
26040 can_issue_more = 0;
26041 for (i = 0; i < issue_rate; i++)
26043 group_insns[i] = 0;
26047 if (GET_MODE (next_insn) == TImode && can_issue_more)
26048 PUT_MODE (next_insn, VOIDmode);
26049 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
26050 PUT_MODE (next_insn, TImode);
26052 insn = next_insn;
26053 if (can_issue_more == 0)
26054 can_issue_more = issue_rate;
26055 } /* while */
26057 return group_count;
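/* The boundary test used above (and again inside force_new_group),
   restated as a standalone predicate; the boolean parameters are
   hypothetical stand-ins for is_branch_slot_insn, is_cracked_insn and
   insn_terminates_group_p (next_insn, previous_group).  Not built.  */
#if 0
static bool
starts_new_group_sketch (int can_issue_more, int issue_rate,
                         bool next_is_branch, bool next_is_cracked,
                         bool next_must_lead)
{
  return (can_issue_more == 0                         /* group is full */
          || (can_issue_more == 1 && !next_is_branch) /* only branch slot left */
          || (can_issue_more <= 2 && next_is_cracked) /* cracked insn needs 2 slots */
          || (can_issue_more < issue_rate && next_must_lead));
}
#endif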
26060 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
26061 dispatch group boundaries that the scheduler had marked. Pad with nops
26062 any dispatch groups which have vacant issue slots, in order to force the
26063 scheduler's grouping on the processor dispatcher. The function
26064 returns the number of dispatch groups found. */
26066 static int
26067 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
26069 rtx insn, next_insn;
26070 rtx nop;
26071 int issue_rate;
26072 int can_issue_more;
26073 int group_end;
26074 int group_count = 0;
26076 /* Initialize issue_rate. */
26077 issue_rate = rs6000_issue_rate ();
26078 can_issue_more = issue_rate;
26080 insn = get_next_active_insn (prev_head_insn, tail);
26081 next_insn = get_next_active_insn (insn, tail);
26083 while (insn != NULL_RTX)
26085 can_issue_more =
26086 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26088 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
26090 if (next_insn == NULL_RTX)
26091 break;
26093 if (group_end)
26095 /* If the scheduler had marked group termination at this location
26096 (between insn and next_insn), and neither insn nor next_insn will
26097 force group termination, pad the group with nops to force group
26098 termination. */
26099 if (can_issue_more
26100 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
26101 && !insn_terminates_group_p (insn, current_group)
26102 && !insn_terminates_group_p (next_insn, previous_group))
26104 if (!is_branch_slot_insn (next_insn))
26105 can_issue_more--;
26107 while (can_issue_more)
26109 nop = gen_nop ();
26110 emit_insn_before (nop, next_insn);
26111 can_issue_more--;
26115 can_issue_more = issue_rate;
26116 group_count++;
26119 insn = next_insn;
26120 next_insn = get_next_active_insn (insn, tail);
26123 return group_count;
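/* The nop count implied by the loop above, as a hedged arithmetic
   sketch (valid for can_issue_more > 0): every vacant slot is filled,
   except that a non-branch NEXT_INSN leaves the branch slot empty,
   since the group terminates without it anyway.  Not built.  */
#if 0
static int
pad_nops_sketch (int can_issue_more, bool next_is_branch)
{
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}
#endif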
26126 /* We're beginning a new block. Initialize data structures as necessary. */
26128 static void
26129 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
26130 int sched_verbose ATTRIBUTE_UNUSED,
26131 int max_ready ATTRIBUTE_UNUSED)
26133 last_scheduled_insn = NULL_RTX;
26134 load_store_pendulum = 0;
26137 /* The following function is called at the end of scheduling BB.
26138 After reload, it inserts nops to enforce insn group bundling. */
26140 static void
26141 rs6000_sched_finish (FILE *dump, int sched_verbose)
26143 int n_groups;
26145 if (sched_verbose)
26146 fprintf (dump, "=== Finishing schedule.\n");
26148 if (reload_completed && rs6000_sched_groups)
26150 /* Do not run sched_finish hook when selective scheduling is enabled. */
26151 if (sel_sched_p ())
26152 return;
26154 if (rs6000_sched_insert_nops == sched_finish_none)
26155 return;
26157 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
26158 n_groups = pad_groups (dump, sched_verbose,
26159 current_sched_info->prev_head,
26160 current_sched_info->next_tail);
26161 else
26162 n_groups = redefine_groups (dump, sched_verbose,
26163 current_sched_info->prev_head,
26164 current_sched_info->next_tail);
26166 if (sched_verbose >= 6)
26168 fprintf (dump, "ngroups = %d\n", n_groups);
26169 print_rtl (dump, current_sched_info->prev_head);
26170 fprintf (dump, "Done finish_sched\n");
26175 struct _rs6000_sched_context
26177 short cached_can_issue_more;
26178 rtx last_scheduled_insn;
26179 int load_store_pendulum;
26182 typedef struct _rs6000_sched_context rs6000_sched_context_def;
26183 typedef rs6000_sched_context_def *rs6000_sched_context_t;
26185 /* Allocate storage for a new scheduling context. */
26186 static void *
26187 rs6000_alloc_sched_context (void)
26189 return xmalloc (sizeof (rs6000_sched_context_def));
26192 /* If CLEAN_P is true, initialize _SC with clean data;
26193 otherwise initialize it from the global context. */
26194 static void
26195 rs6000_init_sched_context (void *_sc, bool clean_p)
26197 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26199 if (clean_p)
26201 sc->cached_can_issue_more = 0;
26202 sc->last_scheduled_insn = NULL_RTX;
26203 sc->load_store_pendulum = 0;
26205 else
26207 sc->cached_can_issue_more = cached_can_issue_more;
26208 sc->last_scheduled_insn = last_scheduled_insn;
26209 sc->load_store_pendulum = load_store_pendulum;
26213 /* Sets the global scheduling context to the one pointed to by _SC. */
26214 static void
26215 rs6000_set_sched_context (void *_sc)
26217 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26219 gcc_assert (sc != NULL);
26221 cached_can_issue_more = sc->cached_can_issue_more;
26222 last_scheduled_insn = sc->last_scheduled_insn;
26223 load_store_pendulum = sc->load_store_pendulum;
26226 /* Free _SC. */
26227 static void
26228 rs6000_free_sched_context (void *_sc)
26230 gcc_assert (_sc != NULL);
26232 free (_sc);
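/* A hedged sketch of how the four hooks above cooperate; the driver
   below is hypothetical, but the alloc/init/set/free ordering is the
   contract the selective scheduler relies on.  Not built.  */
#if 0
{
  void *ctx = rs6000_alloc_sched_context ();
  rs6000_init_sched_context (ctx, true); /* start from a clean state */
  /* ... scheduling runs, the global state mutates ... */
  rs6000_set_sched_context (ctx);        /* copy CTX back into the globals */
  rs6000_free_sched_context (ctx);
}
#endif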
26236 /* Length in bytes of the trampoline for entering a nested function. */
26239 rs6000_trampoline_size (void)
26241 int ret = 0;
26243 switch (DEFAULT_ABI)
26245 default:
26246 gcc_unreachable ();
26248 case ABI_AIX:
26249 ret = (TARGET_32BIT) ? 12 : 24;
26250 break;
26252 case ABI_DARWIN:
26253 case ABI_V4:
26254 ret = (TARGET_32BIT) ? 40 : 48;
26255 break;
26258 return ret;
26261 /* Emit RTL insns to initialize the variable parts of a trampoline.
26262 FNADDR is an RTX for the address of the function's pure code.
26263 CXT is an RTX for the static chain value for the function. */
26265 static void
26266 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
26268 int regsize = (TARGET_32BIT) ? 4 : 8;
26269 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
26270 rtx ctx_reg = force_reg (Pmode, cxt);
26271 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
26273 switch (DEFAULT_ABI)
26275 default:
26276 gcc_unreachable ();
26278 /* Under AIX, just build the 3 word function descriptor */
26279 case ABI_AIX:
26281 rtx fnmem, fn_reg, toc_reg;
26283 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
26284 error ("you cannot take the address of a nested function if you use "
26285 "the -mno-pointers-to-nested-functions option");
26287 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
26288 fn_reg = gen_reg_rtx (Pmode);
26289 toc_reg = gen_reg_rtx (Pmode);
26291 /* Macro to shorten the code expansions below. */
26292 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
26294 m_tramp = replace_equiv_address (m_tramp, addr);
26296 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
26297 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
26298 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
26299 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
26300 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
26302 # undef MEM_PLUS
26304 break;
26306 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
26307 case ABI_DARWIN:
26308 case ABI_V4:
26309 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
26310 LCT_NORMAL, VOIDmode, 4,
26311 addr, Pmode,
26312 GEN_INT (rs6000_trampoline_size ()), SImode,
26313 fnaddr, Pmode,
26314 ctx_reg, Pmode);
26315 break;
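/* The three words stored in the AIX case above form a function
   descriptor; a hedged C view of the layout (the struct name and field
   names are illustrative, and each field is REGSIZE bytes wide).  */
#if 0
struct aix_func_desc_sketch
{
  void *entry;        /* code address, read from FNADDR's descriptor */
  void *toc;          /* TOC pointer of the target function */
  void *static_chain; /* CXT, the nested function's static chain */
};
#endif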
26320 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
26321 identifier as an argument, so the front end shouldn't look it up. */
26323 static bool
26324 rs6000_attribute_takes_identifier_p (const_tree attr_id)
26326 return is_attribute_p ("altivec", attr_id);
26329 /* Handle the "altivec" attribute. The attribute may have
26330 arguments as follows:
26332 __attribute__((altivec(vector__)))
26333 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
26334 __attribute__((altivec(bool__))) (always followed by 'unsigned')
26336 and may appear more than once (e.g., 'vector bool char') in a
26337 given declaration. */
26339 static tree
26340 rs6000_handle_altivec_attribute (tree *node,
26341 tree name ATTRIBUTE_UNUSED,
26342 tree args,
26343 int flags ATTRIBUTE_UNUSED,
26344 bool *no_add_attrs)
26346 tree type = *node, result = NULL_TREE;
26347 enum machine_mode mode;
26348 int unsigned_p;
26349 char altivec_type
26350 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
26351 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
26352 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
26353 : '?');
26355 while (POINTER_TYPE_P (type)
26356 || TREE_CODE (type) == FUNCTION_TYPE
26357 || TREE_CODE (type) == METHOD_TYPE
26358 || TREE_CODE (type) == ARRAY_TYPE)
26359 type = TREE_TYPE (type);
26361 mode = TYPE_MODE (type);
26363 /* Check for invalid AltiVec type qualifiers. */
26364 if (type == long_double_type_node)
26365 error ("use of %<long double%> in AltiVec types is invalid");
26366 else if (type == boolean_type_node)
26367 error ("use of boolean types in AltiVec types is invalid");
26368 else if (TREE_CODE (type) == COMPLEX_TYPE)
26369 error ("use of %<complex%> in AltiVec types is invalid");
26370 else if (DECIMAL_FLOAT_MODE_P (mode))
26371 error ("use of decimal floating point types in AltiVec types is invalid");
26372 else if (!TARGET_VSX)
26374 if (type == long_unsigned_type_node || type == long_integer_type_node)
26376 if (TARGET_64BIT)
26377 error ("use of %<long%> in AltiVec types is invalid for "
26378 "64-bit code without -mvsx");
26379 else if (rs6000_warn_altivec_long)
26380 warning (0, "use of %<long%> in AltiVec types is deprecated; "
26381 "use %<int%>");
26383 else if (type == long_long_unsigned_type_node
26384 || type == long_long_integer_type_node)
26385 error ("use of %<long long%> in AltiVec types is invalid without "
26386 "-mvsx");
26387 else if (type == double_type_node)
26388 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
26391 switch (altivec_type)
26393 case 'v':
26394 unsigned_p = TYPE_UNSIGNED (type);
26395 switch (mode)
26397 case DImode:
26398 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
26399 break;
26400 case SImode:
26401 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
26402 break;
26403 case HImode:
26404 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
26405 break;
26406 case QImode:
26407 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
26408 break;
26409 case SFmode: result = V4SF_type_node; break;
26410 case DFmode: result = V2DF_type_node; break;
26411 /* If the user says 'vector int bool', we may be handed the 'bool'
26412 attribute _before_ the 'vector' attribute, and so select the
26413 proper type in the 'b' case below. */
26414 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
26415 case V2DImode: case V2DFmode:
26416 result = type;
26417 default: break;
26419 break;
26420 case 'b':
26421 switch (mode)
26423 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
26424 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
26425 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
26426 case QImode: case V16QImode: result = bool_V16QI_type_node;
26427 default: break;
26429 break;
26430 case 'p':
26431 switch (mode)
26433 case V8HImode: result = pixel_V8HI_type_node;
26434 default: break;
26436 default: break;
26439 /* Propagate qualifiers attached to the element type
26440 onto the vector type. */
26441 if (result && result != type && TYPE_QUALS (type))
26442 result = build_qualified_type (result, TYPE_QUALS (type));
26444 *no_add_attrs = true; /* No need to hang on to the attribute. */
26446 if (result)
26447 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
26449 return NULL_TREE;
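/* Illustrative uses of the handler above, matching the keyword
   expansions documented before the function:

       __attribute__((altivec(vector__))) unsigned int vui;
       __attribute__((altivec(bool__))) unsigned int vbi;

   resolve to unsigned_V4SI_type_node and bool_V4SI_type_node; 'vector
   pixel' combines the vector__ and pixel__ forms on 'unsigned short'
   to arrive at pixel_V8HI_type_node.  */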
26452 /* AltiVec defines four built-in scalar types that serve as vector
26453 elements; we must teach the compiler how to mangle them. */
26455 static const char *
26456 rs6000_mangle_type (const_tree type)
26458 type = TYPE_MAIN_VARIANT (type);
26460 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
26461 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
26462 return NULL;
26464 if (type == bool_char_type_node) return "U6__boolc";
26465 if (type == bool_short_type_node) return "U6__bools";
26466 if (type == pixel_type_node) return "u7__pixel";
26467 if (type == bool_int_type_node) return "U6__booli";
26468 if (type == bool_long_type_node) return "U6__booll";
26470 /* Mangle IBM extended float long double as `g' (__float128) on
26471 powerpc*-linux where long-double-64 previously was the default. */
26472 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
26473 && TARGET_ELF
26474 && TARGET_LONG_DOUBLE_128
26475 && !TARGET_IEEEQUAD)
26476 return "g";
26478 /* For all other types, use normal C++ mangling. */
26479 return NULL;
26482 /* Handle a "longcall" or "shortcall" attribute; arguments as in
26483 struct attribute_spec.handler. */
26485 static tree
26486 rs6000_handle_longcall_attribute (tree *node, tree name,
26487 tree args ATTRIBUTE_UNUSED,
26488 int flags ATTRIBUTE_UNUSED,
26489 bool *no_add_attrs)
26491 if (TREE_CODE (*node) != FUNCTION_TYPE
26492 && TREE_CODE (*node) != FIELD_DECL
26493 && TREE_CODE (*node) != TYPE_DECL)
26495 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26496 name);
26497 *no_add_attrs = true;
26500 return NULL_TREE;
26503 /* Set longcall attributes on all functions declared when
26504 rs6000_default_long_calls is true. */
26505 static void
26506 rs6000_set_default_type_attributes (tree type)
26508 if (rs6000_default_long_calls
26509 && (TREE_CODE (type) == FUNCTION_TYPE
26510 || TREE_CODE (type) == METHOD_TYPE))
26511 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
26512 NULL_TREE,
26513 TYPE_ATTRIBUTES (type));
26515 #if TARGET_MACHO
26516 darwin_set_default_type_attributes (type);
26517 #endif
26520 /* Return a reference suitable for calling a function with the
26521 longcall attribute. */
26524 rs6000_longcall_ref (rtx call_ref)
26526 const char *call_name;
26527 tree node;
26529 if (GET_CODE (call_ref) != SYMBOL_REF)
26530 return call_ref;
26532 /* System V adds '.' to the internal name, so skip them. */
26533 call_name = XSTR (call_ref, 0);
26534 if (*call_name == '.')
26536 while (*call_name == '.')
26537 call_name++;
26539 node = get_identifier (call_name);
26540 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
26543 return force_reg (Pmode, call_ref);
26546 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
26547 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
26548 #endif
26550 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26551 struct attribute_spec.handler. */
26552 static tree
26553 rs6000_handle_struct_attribute (tree *node, tree name,
26554 tree args ATTRIBUTE_UNUSED,
26555 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26557 tree *type = NULL;
26558 if (DECL_P (*node))
26560 if (TREE_CODE (*node) == TYPE_DECL)
26561 type = &TREE_TYPE (*node);
26563 else
26564 type = node;
26566 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26567 || TREE_CODE (*type) == UNION_TYPE)))
26569 warning (OPT_Wattributes, "%qE attribute ignored", name);
26570 *no_add_attrs = true;
26573 else if ((is_attribute_p ("ms_struct", name)
26574 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26575 || ((is_attribute_p ("gcc_struct", name)
26576 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26578 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26579 name);
26580 *no_add_attrs = true;
26583 return NULL_TREE;
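/* Illustrative usage for the handler above: the first declaration is
   accepted, while the second is expected to draw the "incompatible
   attribute ignored" warning, since both layouts end up requested on
   the same type.  Not built.  */
#if 0
struct __attribute__ ((ms_struct)) ok_struct { int a : 3; char b; };
struct __attribute__ ((ms_struct, gcc_struct)) bad_struct { int a : 3; char b; };
#endif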
26586 static bool
26587 rs6000_ms_bitfield_layout_p (const_tree record_type)
26589 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
26590 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26591 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26594 #ifdef USING_ELFOS_H
26596 /* A get_unnamed_section callback, used for switching to toc_section. */
26598 static void
26599 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
26601 if (DEFAULT_ABI == ABI_AIX
26602 && TARGET_MINIMAL_TOC
26603 && !TARGET_RELOCATABLE)
26605 if (!toc_initialized)
26607 toc_initialized = 1;
26608 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
26609 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
26610 fprintf (asm_out_file, "\t.tc ");
26611 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
26612 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
26613 fprintf (asm_out_file, "\n");
26615 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
26616 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
26617 fprintf (asm_out_file, " = .+32768\n");
26619 else
26620 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
26622 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
26623 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
26624 else
26626 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
26627 if (!toc_initialized)
26629 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
26630 fprintf (asm_out_file, " = .+32768\n");
26631 toc_initialized = 1;
26636 /* Implement TARGET_ASM_INIT_SECTIONS. */
26638 static void
26639 rs6000_elf_asm_init_sections (void)
26641 toc_section
26642 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
26644 sdata2_section
26645 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
26646 SDATA2_SECTION_ASM_OP);
26649 /* Implement TARGET_SELECT_RTX_SECTION. */
26651 static section *
26652 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
26653 unsigned HOST_WIDE_INT align)
26655 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
26656 return toc_section;
26657 else
26658 return default_elf_select_rtx_section (mode, x, align);
26661 /* For a SYMBOL_REF, set generic flags and then perform some
26662 target-specific processing.
26664 When the AIX ABI is requested on a non-AIX system, replace the
26665 function name with the real name (with a leading .) rather than the
26666 function descriptor name. This saves a lot of overriding code to
26667 read the prefixes. */
26669 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
26670 static void
26671 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
26673 default_encode_section_info (decl, rtl, first);
26675 if (first
26676 && TREE_CODE (decl) == FUNCTION_DECL
26677 && !TARGET_AIX
26678 && DEFAULT_ABI == ABI_AIX)
26680 rtx sym_ref = XEXP (rtl, 0);
26681 size_t len = strlen (XSTR (sym_ref, 0));
26682 char *str = XALLOCAVEC (char, len + 2);
26683 str[0] = '.';
26684 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
26685 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
26689 static inline bool
26690 compare_section_name (const char *section, const char *templ)
26692 int len;
26694 len = strlen (templ);
26695 return (strncmp (section, templ, len) == 0
26696 && (section[len] == 0 || section[len] == '.'));
26699 bool
26700 rs6000_elf_in_small_data_p (const_tree decl)
26702 if (rs6000_sdata == SDATA_NONE)
26703 return false;
26705 /* We want to merge strings, so we never consider them small data. */
26706 if (TREE_CODE (decl) == STRING_CST)
26707 return false;
26709 /* Functions are never in the small data area. */
26710 if (TREE_CODE (decl) == FUNCTION_DECL)
26711 return false;
26713 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
26715 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
26716 if (compare_section_name (section, ".sdata")
26717 || compare_section_name (section, ".sdata2")
26718 || compare_section_name (section, ".gnu.linkonce.s")
26719 || compare_section_name (section, ".sbss")
26720 || compare_section_name (section, ".sbss2")
26721 || compare_section_name (section, ".gnu.linkonce.sb")
26722 || strcmp (section, ".PPC.EMB.sdata0") == 0
26723 || strcmp (section, ".PPC.EMB.sbss0") == 0)
26724 return true;
26726 else
26728 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
26730 if (size > 0
26731 && size <= g_switch_value
26732 /* If it's not public, and we're not going to reference it there,
26733 there's no need to put it in the small data section. */
26734 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
26735 return true;
26738 return false;
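/* For example, with -G 8 in effect (g_switch_value == 8), both
   declarations below are treated as small data: the first by the size
   check, the second by its explicit section, which bypasses the size
   check entirely.  Not built.  */
#if 0
int small_var;                                           /* 4 <= 8 */
char big_var[4096] __attribute__ ((section (".sdata"))); /* named section */
#endif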
26741 #endif /* USING_ELFOS_H */
26743 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
26745 static bool
26746 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
26748 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
26751 /* Do not place thread-local symbol refs in the object blocks. */
26753 static bool
26754 rs6000_use_blocks_for_decl_p (const_tree decl)
26756 return !DECL_THREAD_LOCAL_P (decl);
26759 /* Return a REG that occurs in ADDR with coefficient 1.
26760 ADDR can be effectively incremented by incrementing REG.
26762 r0 is special and we must not select it as an address
26763 register by this routine since our caller will try to
26764 increment the returned register via an "la" instruction. */
26767 find_addr_reg (rtx addr)
26769 while (GET_CODE (addr) == PLUS)
26771 if (GET_CODE (XEXP (addr, 0)) == REG
26772 && REGNO (XEXP (addr, 0)) != 0)
26773 addr = XEXP (addr, 0);
26774 else if (GET_CODE (XEXP (addr, 1)) == REG
26775 && REGNO (XEXP (addr, 1)) != 0)
26776 addr = XEXP (addr, 1);
26777 else if (CONSTANT_P (XEXP (addr, 0)))
26778 addr = XEXP (addr, 1);
26779 else if (CONSTANT_P (XEXP (addr, 1)))
26780 addr = XEXP (addr, 0);
26781 else
26782 gcc_unreachable ();
26784 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
26785 return addr;
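/* For example, given ADDR of the form (plus (reg 9) (const_int 8)),
   the loop above discards the constant and returns (reg 9); given
   (plus (reg 0) (reg 9)) it also returns (reg 9), since r0 reads as
   zero in the base position of "la"/addi and must never be chosen.  */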
26788 void
26789 rs6000_fatal_bad_address (rtx op)
26791 fatal_insn ("bad address", op);
26794 #if TARGET_MACHO
26796 typedef struct branch_island_d {
26797 tree function_name;
26798 tree label_name;
26799 int line_number;
26800 } branch_island;
26803 static vec<branch_island, va_gc> *branch_islands;
26805 /* Remember to generate a branch island for far calls to the given
26806 function. */
26808 static void
26809 add_compiler_branch_island (tree label_name, tree function_name,
26810 int line_number)
26812 branch_island bi = {function_name, label_name, line_number};
26813 vec_safe_push (branch_islands, bi);
26816 /* Generate far-jump branch islands for everything recorded in
26817 branch_islands. Invoked immediately after the last instruction of
26818 the epilogue has been emitted; the branch islands must be appended
26819 to, and contiguous with, the function body. Mach-O stubs are
26820 generated in machopic_output_stub(). */
26822 static void
26823 macho_branch_islands (void)
26825 char tmp_buf[512];
26827 while (!vec_safe_is_empty (branch_islands))
26829 branch_island *bi = &branch_islands->last ();
26830 const char *label = IDENTIFIER_POINTER (bi->label_name);
26831 const char *name = IDENTIFIER_POINTER (bi->function_name);
26832 char name_buf[512];
26833 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
26834 if (name[0] == '*' || name[0] == '&')
26835 strcpy (name_buf, name+1);
26836 else
26838 name_buf[0] = '_';
26839 strcpy (name_buf+1, name);
26841 strcpy (tmp_buf, "\n");
26842 strcat (tmp_buf, label);
26843 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
26844 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
26845 dbxout_stabd (N_SLINE, bi->line_number);
26846 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
26847 if (flag_pic)
26849 if (TARGET_LINK_STACK)
26851 char name[32];
26852 get_ppc476_thunk_name (name);
26853 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
26854 strcat (tmp_buf, name);
26855 strcat (tmp_buf, "\n");
26856 strcat (tmp_buf, label);
26857 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
26859 else
26861 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
26862 strcat (tmp_buf, label);
26863 strcat (tmp_buf, "_pic\n");
26864 strcat (tmp_buf, label);
26865 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
26868 strcat (tmp_buf, "\taddis r11,r11,ha16(");
26869 strcat (tmp_buf, name_buf);
26870 strcat (tmp_buf, " - ");
26871 strcat (tmp_buf, label);
26872 strcat (tmp_buf, "_pic)\n");
26874 strcat (tmp_buf, "\tmtlr r0\n");
26876 strcat (tmp_buf, "\taddi r12,r11,lo16(");
26877 strcat (tmp_buf, name_buf);
26878 strcat (tmp_buf, " - ");
26879 strcat (tmp_buf, label);
26880 strcat (tmp_buf, "_pic)\n");
26882 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
26884 else
26886 strcat (tmp_buf, ":\nlis r12,hi16(");
26887 strcat (tmp_buf, name_buf);
26888 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
26889 strcat (tmp_buf, name_buf);
26890 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
26892 output_asm_insn (tmp_buf, 0);
26893 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
26894 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
26895 dbxout_stabd (N_SLINE, bi->line_number);
26896 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
26897 branch_islands->pop ();
26901 /* NO_PREVIOUS_DEF checks whether the function name is already in the
26902 branch island list. */
26904 static int
26905 no_previous_def (tree function_name)
26907 branch_island *bi;
26908 unsigned ix;
26910 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
26911 if (function_name == bi->function_name)
26912 return 0;
26913 return 1;
26916 /* GET_PREV_LABEL gets the label name from the previous definition of
26917 the function. */
26919 static tree
26920 get_prev_label (tree function_name)
26922 branch_island *bi;
26923 unsigned ix;
26925 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
26926 if (function_name == bi->function_name)
26927 return bi->label_name;
26928 return NULL_TREE;
26931 /* INSN is either a function call or a millicode call. It may have an
26932 unconditional jump in its delay slot.
26934 CALL_DEST is the routine we are calling. */
26936 char *
26937 output_call (rtx insn, rtx *operands, int dest_operand_number,
26938 int cookie_operand_number)
26940 static char buf[256];
26941 if (darwin_emit_branch_islands
26942 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
26943 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
26945 tree labelname;
26946 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
26948 if (no_previous_def (funname))
26950 rtx label_rtx = gen_label_rtx ();
26951 char *label_buf, temp_buf[256];
26952 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
26953 CODE_LABEL_NUMBER (label_rtx));
26954 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
26955 labelname = get_identifier (label_buf);
26956 add_compiler_branch_island (labelname, funname, insn_line (insn));
26958 else
26959 labelname = get_prev_label (funname);
26961 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
26962 instruction will reach 'foo', otherwise link as 'bl L42'".
26963 "L42" should be a 'branch island', that will do a far jump to
26964 'foo'. Branch islands are generated in
26965 macho_branch_islands(). */
26966 sprintf (buf, "jbsr %%z%d,%.246s",
26967 dest_operand_number, IDENTIFIER_POINTER (labelname));
26969 else
26970 sprintf (buf, "bl %%z%d", dest_operand_number);
26971 return buf;
26974 /* Generate PIC and indirect symbol stubs. */
26976 void
26977 machopic_output_stub (FILE *file, const char *symb, const char *stub)
26979 unsigned int length;
26980 char *symbol_name, *lazy_ptr_name;
26981 char *local_label_0;
26982 static int label = 0;
26984 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
26985 symb = (*targetm.strip_name_encoding) (symb);
26988 length = strlen (symb);
26989 symbol_name = XALLOCAVEC (char, length + 32);
26990 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
26992 lazy_ptr_name = XALLOCAVEC (char, length + 32);
26993 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
26995 if (flag_pic == 2)
26996 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
26997 else
26998 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
27000 if (flag_pic == 2)
27002 fprintf (file, "\t.align 5\n");
27004 fprintf (file, "%s:\n", stub);
27005 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27007 label++;
27008 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
27009 sprintf (local_label_0, "\"L%011d$spb\"", label);
27011 fprintf (file, "\tmflr r0\n");
27012 if (TARGET_LINK_STACK)
27014 char name[32];
27015 get_ppc476_thunk_name (name);
27016 fprintf (file, "\tbl %s\n", name);
27017 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27019 else
27021 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
27022 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27024 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
27025 lazy_ptr_name, local_label_0);
27026 fprintf (file, "\tmtlr r0\n");
27027 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
27028 (TARGET_64BIT ? "ldu" : "lwzu"),
27029 lazy_ptr_name, local_label_0);
27030 fprintf (file, "\tmtctr r12\n");
27031 fprintf (file, "\tbctr\n");
27033 else
27035 fprintf (file, "\t.align 4\n");
27037 fprintf (file, "%s:\n", stub);
27038 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27040 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
27041 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
27042 (TARGET_64BIT ? "ldu" : "lwzu"),
27043 lazy_ptr_name);
27044 fprintf (file, "\tmtctr r12\n");
27045 fprintf (file, "\tbctr\n");
27048 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
27049 fprintf (file, "%s:\n", lazy_ptr_name);
27050 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27051 fprintf (file, "%sdyld_stub_binding_helper\n",
27052 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
27055 /* Legitimize PIC addresses. If the address is already
27056 position-independent, we return ORIG. Newly generated
27057 position-independent addresses go into a reg. This is REG if
27058 nonzero; otherwise we allocate register(s) as necessary. */
27060 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
27063 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
27064 rtx reg)
27066 rtx base, offset;
27068 if (reg == NULL && ! reload_in_progress && ! reload_completed)
27069 reg = gen_reg_rtx (Pmode);
27071 if (GET_CODE (orig) == CONST)
27073 rtx reg_temp;
27075 if (GET_CODE (XEXP (orig, 0)) == PLUS
27076 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
27077 return orig;
27079 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
27081 /* Use a different reg for the intermediate value, as
27082 it will be marked UNCHANGING. */
27083 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
27084 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
27085 Pmode, reg_temp);
27086 offset =
27087 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
27088 Pmode, reg);
27090 if (GET_CODE (offset) == CONST_INT)
27092 if (SMALL_INT (offset))
27093 return plus_constant (Pmode, base, INTVAL (offset));
27094 else if (! reload_in_progress && ! reload_completed)
27095 offset = force_reg (Pmode, offset);
27096 else
27098 rtx mem = force_const_mem (Pmode, orig);
27099 return machopic_legitimize_pic_address (mem, Pmode, reg);
27102 return gen_rtx_PLUS (Pmode, base, offset);
27105 /* Fall back on generic machopic code. */
27106 return machopic_legitimize_pic_address (orig, mode, reg);
27109 /* Output a .machine directive for the Darwin assembler, and call
27110 the generic start_file routine. */
27112 static void
27113 rs6000_darwin_file_start (void)
27115 static const struct
27117 const char *arg;
27118 const char *name;
27119 HOST_WIDE_INT if_set;
27120 } mapping[] = {
27121 { "ppc64", "ppc64", MASK_64BIT },
27122 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
27123 { "power4", "ppc970", 0 },
27124 { "G5", "ppc970", 0 },
27125 { "7450", "ppc7450", 0 },
27126 { "7400", "ppc7400", MASK_ALTIVEC },
27127 { "G4", "ppc7400", 0 },
27128 { "750", "ppc750", 0 },
27129 { "740", "ppc750", 0 },
27130 { "G3", "ppc750", 0 },
27131 { "604e", "ppc604e", 0 },
27132 { "604", "ppc604", 0 },
27133 { "603e", "ppc603", 0 },
27134 { "603", "ppc603", 0 },
27135 { "601", "ppc601", 0 },
27136 { NULL, "ppc", 0 } };
27137 const char *cpu_id = "";
27138 size_t i;
27140 rs6000_file_start ();
27141 darwin_file_start ();
27143 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
27145 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
27146 cpu_id = rs6000_default_cpu;
27148 if (global_options_set.x_rs6000_cpu_index)
27149 cpu_id = processor_target_table[rs6000_cpu_index].name;
27151 /* Look through the mapping array. Pick the first name that either
27152 matches the argument, has a bit set in IF_SET that is also set
27153 in the target flags, or has a NULL name. */
27155 i = 0;
27156 while (mapping[i].arg != NULL
27157 && strcmp (mapping[i].arg, cpu_id) != 0
27158 && (mapping[i].if_set & rs6000_isa_flags) == 0)
27159 i++;
27161 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
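/* Worked example of the scan above: with cpu_id "G5" the loop stops on
   a row naming "ppc970" (the "G5" row, or an earlier row whose IF_SET
   bits are already enabled) and emits "\t.machine ppc970"; an id
   matching nothing falls through to the { NULL, "ppc", 0 } row.  */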
27164 #endif /* TARGET_MACHO */
27166 #if TARGET_ELF
27167 static int
27168 rs6000_elf_reloc_rw_mask (void)
27170 if (flag_pic)
27171 return 3;
27172 else if (DEFAULT_ABI == ABI_AIX)
27173 return 2;
27174 else
27175 return 0;
27178 /* Record an element in the table of global constructors. SYMBOL is
27179 a SYMBOL_REF of the function to be called; PRIORITY is a number
27180 between 0 and MAX_INIT_PRIORITY.
27182 This differs from default_named_section_asm_out_constructor in
27183 that we have special handling for -mrelocatable. */
27185 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
27186 static void
27187 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
27189 const char *section = ".ctors";
27190 char buf[16];
27192 if (priority != DEFAULT_INIT_PRIORITY)
27194 sprintf (buf, ".ctors.%.5u",
27195 /* Invert the numbering so the linker puts us in the proper
27196 order; constructors are run from right to left, and the
27197 linker sorts in increasing order. */
27198 MAX_INIT_PRIORITY - priority);
27199 section = buf;
27202 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27203 assemble_align (POINTER_SIZE);
27205 if (TARGET_RELOCATABLE)
27207 fputs ("\t.long (", asm_out_file);
27208 output_addr_const (asm_out_file, symbol);
27209 fputs (")@fixup\n", asm_out_file);
27211 else
27212 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
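/* Worked example of the priority inversion above: with
   MAX_INIT_PRIORITY == 65535, a constructor of priority 100 is placed
   in section ".ctors.65435", compensating for .ctors entries being
   executed from last to first.  */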
27215 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
27216 static void
27217 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
27219 const char *section = ".dtors";
27220 char buf[16];
27222 if (priority != DEFAULT_INIT_PRIORITY)
27224 sprintf (buf, ".dtors.%.5u",
27225 /* Invert the numbering so the linker puts us in the proper
27226 order; destructors are run from left to right, and the
27227 linker sorts in increasing order. */
27228 MAX_INIT_PRIORITY - priority);
27229 section = buf;
27232 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27233 assemble_align (POINTER_SIZE);
27235 if (TARGET_RELOCATABLE)
27237 fputs ("\t.long (", asm_out_file);
27238 output_addr_const (asm_out_file, symbol);
27239 fputs (")@fixup\n", asm_out_file);
27241 else
27242 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
27245 void
27246 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
27248 if (TARGET_64BIT)
27250 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
27251 ASM_OUTPUT_LABEL (file, name);
27252 fputs (DOUBLE_INT_ASM_OP, file);
27253 rs6000_output_function_entry (file, name);
27254 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
27255 if (DOT_SYMBOLS)
27257 fputs ("\t.size\t", file);
27258 assemble_name (file, name);
27259 fputs (",24\n\t.type\t.", file);
27260 assemble_name (file, name);
27261 fputs (",@function\n", file);
27262 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
27264 fputs ("\t.globl\t.", file);
27265 assemble_name (file, name);
27266 putc ('\n', file);
27269 else
27270 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27271 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27272 rs6000_output_function_entry (file, name);
27273 fputs (":\n", file);
27274 return;
27277 if (TARGET_RELOCATABLE
27278 && !TARGET_SECURE_PLT
27279 && (get_pool_size () != 0 || crtl->profile)
27280 && uses_TOC ())
27282 char buf[256];
27284 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27286 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
27287 fprintf (file, "\t.long ");
27288 assemble_name (file, buf);
27289 putc ('-', file);
27290 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27291 assemble_name (file, buf);
27292 putc ('\n', file);
27295 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27296 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27298 if (DEFAULT_ABI == ABI_AIX)
27300 const char *desc_name, *orig_name;
27302 orig_name = (*targetm.strip_name_encoding) (name);
27303 desc_name = orig_name;
27304 while (*desc_name == '.')
27305 desc_name++;
27307 if (TREE_PUBLIC (decl))
27308 fprintf (file, "\t.globl %s\n", desc_name);
27310 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27311 fprintf (file, "%s:\n", desc_name);
27312 fprintf (file, "\t.long %s\n", orig_name);
27313 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
27314 if (DEFAULT_ABI == ABI_AIX)
27315 fputs ("\t.long 0\n", file);
27316 fprintf (file, "\t.previous\n");
27318 ASM_OUTPUT_LABEL (file, name);
27321 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
27322 static void
27323 rs6000_elf_file_end (void)
27325 #ifdef HAVE_AS_GNU_ATTRIBUTE
27326 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
27328 if (rs6000_passes_float)
27329 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
27330 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
27331 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
27332 : 2));
27333 if (rs6000_passes_vector)
27334 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
27335 (TARGET_ALTIVEC_ABI ? 2
27336 : TARGET_SPE_ABI ? 3
27337 : 1));
27338 if (rs6000_returns_struct)
27339 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
27340 aix_struct_return ? 2 : 1);
27342 #endif
27343 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
27344 if (TARGET_32BIT)
27345 file_end_indicate_exec_stack ();
27346 #endif
27348 #endif
27350 #if TARGET_XCOFF
27351 static void
27352 rs6000_xcoff_asm_output_anchor (rtx symbol)
27354 char buffer[100];
27356 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
27357 SYMBOL_REF_BLOCK_OFFSET (symbol));
27358 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
27361 static void
27362 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
27364 fputs (GLOBAL_ASM_OP, stream);
27365 RS6000_OUTPUT_BASENAME (stream, name);
27366 putc ('\n', stream);
27369 /* A get_unnamed_decl callback, used for read-only sections. PTR
27370 points to the section string variable. */
27372 static void
27373 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
27375 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
27376 *(const char *const *) directive,
27377 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
27380 /* Likewise for read-write sections. */
27382 static void
27383 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
27385 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
27386 *(const char *const *) directive,
27387 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
27390 static void
27391 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
27393 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
27394 *(const char *const *) directive,
27395 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
27398 /* A get_unnamed_section callback, used for switching to toc_section. */
27400 static void
27401 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
27403 if (TARGET_MINIMAL_TOC)
27405 /* toc_section is always selected at least once from
27406 rs6000_xcoff_file_start, so this is guaranteed to be
27407 defined exactly once in each file. */
27408 if (!toc_initialized)
27410 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
27411 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
27412 toc_initialized = 1;
27414 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
27415 (TARGET_32BIT ? "" : ",3"));
27417 else
27418 fputs ("\t.toc\n", asm_out_file);
27421 /* Implement TARGET_ASM_INIT_SECTIONS. */
27423 static void
27424 rs6000_xcoff_asm_init_sections (void)
27426 read_only_data_section
27427 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
27428 &xcoff_read_only_section_name);
27430 private_data_section
27431 = get_unnamed_section (SECTION_WRITE,
27432 rs6000_xcoff_output_readwrite_section_asm_op,
27433 &xcoff_private_data_section_name);
27435 tls_data_section
27436 = get_unnamed_section (SECTION_TLS,
27437 rs6000_xcoff_output_tls_section_asm_op,
27438 &xcoff_tls_data_section_name);
27440 tls_private_data_section
27441 = get_unnamed_section (SECTION_TLS,
27442 rs6000_xcoff_output_tls_section_asm_op,
27443 &xcoff_private_data_section_name);
27445 read_only_private_data_section
27446 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
27447 &xcoff_private_data_section_name);
27449 toc_section
27450 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
27452 readonly_data_section = read_only_data_section;
27453 exception_section = data_section;
27456 static int
27457 rs6000_xcoff_reloc_rw_mask (void)
27459 return 3;
27462 static void
27463 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
27464 tree decl ATTRIBUTE_UNUSED)
27466 int smclass;
27467 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
27469 if (flags & SECTION_CODE)
27470 smclass = 0;
27471 else if (flags & SECTION_TLS)
27472 smclass = 3;
27473 else if (flags & SECTION_WRITE)
27474 smclass = 2;
27475 else
27476 smclass = 1;
27478 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
27479 (flags & SECTION_CODE) ? "." : "",
27480 name, suffix[smclass], flags & SECTION_ENTSIZE);
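/* For example, a code section named "foo" with no extra flag bits is
   emitted as "\t.csect .foo[PR],N" and a writable data section "bar"
   as "\t.csect bar[RW],N", where N is the log2 alignment stashed in
   the SECTION_ENTSIZE bits by rs6000_xcoff_section_type_flags below.  */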
27483 static section *
27484 rs6000_xcoff_select_section (tree decl, int reloc,
27485 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
27487 if (decl_readonly_section (decl, reloc))
27489 if (TREE_PUBLIC (decl))
27490 return read_only_data_section;
27491 else
27492 return read_only_private_data_section;
27494 else
27496 #if HAVE_AS_TLS
27497 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
27499 if (TREE_PUBLIC (decl))
27500 return tls_data_section;
27501 else if (bss_initializer_p (decl))
27503 /* Convert to COMMON to emit in BSS. */
27504 DECL_COMMON (decl) = 1;
27505 return tls_comm_section;
27507 else
27508 return tls_private_data_section;
27510 else
27511 #endif
27512 if (TREE_PUBLIC (decl))
27513 return data_section;
27514 else
27515 return private_data_section;
27519 static void
27520 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
27522 const char *name;
27524 /* Use select_section for private and uninitialized data. */
27525 if (!TREE_PUBLIC (decl)
27526 || DECL_COMMON (decl)
27527 || DECL_INITIAL (decl) == NULL_TREE
27528 || DECL_INITIAL (decl) == error_mark_node
27529 || (flag_zero_initialized_in_bss
27530 && initializer_zerop (DECL_INITIAL (decl))))
27531 return;
27533 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
27534 name = (*targetm.strip_name_encoding) (name);
27535 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
27538 /* Select section for constant in constant pool.
27540 On RS/6000, all constants are in the private read-only data area.
27541 However, if this is being placed in the TOC it must be output as a
27542 toc entry. */
27544 static section *
27545 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
27546 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
27548 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
27549 return toc_section;
27550 else
27551 return read_only_private_data_section;
27554 /* Remove any trailing [DS] or the like from the symbol name. */
27556 static const char *
27557 rs6000_xcoff_strip_name_encoding (const char *name)
27559 size_t len;
27560 if (*name == '*')
27561 name++;
27562 len = strlen (name);
27563 if (name[len - 1] == ']')
27564 return ggc_alloc_string (name, len - 4);
27565 else
27566 return name;
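/* For example, "*foo[DS]" loses both the leading '*' and the trailing
   "[DS]", yielding "foo"; the length arithmetic relies on the fixed
   four-character "[XX]" suffix that XCOFF mapping classes use.  */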
27569 /* Section attributes. AIX is always PIC. */
27571 static unsigned int
27572 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
27574 unsigned int align;
27575 unsigned int flags = default_section_type_flags (decl, name, reloc);
27577 /* Align to at least UNIT size. */
27578 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
27579 align = MIN_UNITS_PER_WORD;
27580 else
27581 /* Increase alignment of large objects if not already stricter. */
27582 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
27583 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
27584 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
27586 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
27589 /* Output at beginning of assembler file.
27591 Initialize the section names for the RS/6000 at this point.
27593 Specify filename, including full path, to assembler.
27595 We want to go into the TOC section so at least one .toc will be emitted.
27596 Also, in order to output proper .bs/.es pairs, we need at least one static
27597 [RW] section emitted.
27599 Finally, declare mcount when profiling to make the assembler happy. */
27601 static void
27602 rs6000_xcoff_file_start (void)
27604 rs6000_gen_section_name (&xcoff_bss_section_name,
27605 main_input_filename, ".bss_");
27606 rs6000_gen_section_name (&xcoff_private_data_section_name,
27607 main_input_filename, ".rw_");
27608 rs6000_gen_section_name (&xcoff_read_only_section_name,
27609 main_input_filename, ".ro_");
27610 rs6000_gen_section_name (&xcoff_tls_data_section_name,
27611 main_input_filename, ".tls_");
27612 rs6000_gen_section_name (&xcoff_tbss_section_name,
27613 main_input_filename, ".tbss_[UL]");
27615 fputs ("\t.file\t", asm_out_file);
27616 output_quoted_string (asm_out_file, main_input_filename);
27617 fputc ('\n', asm_out_file);
27618 if (write_symbols != NO_DEBUG)
27619 switch_to_section (private_data_section);
27620 switch_to_section (text_section);
27621 if (profile_flag)
27622 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
27623 rs6000_file_start ();
27626 /* Output at end of assembler file.
27627 On the RS/6000, referencing data should automatically pull in text. */
27629 static void
27630 rs6000_xcoff_file_end (void)
27632 switch_to_section (text_section);
27633 fputs ("_section_.text:\n", asm_out_file);
27634 switch_to_section (data_section);
27635 fputs (TARGET_32BIT
27636 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
27637 asm_out_file);
27640 #ifdef HAVE_AS_TLS
27641 static void
27642 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
27644 rtx symbol;
27645 int flags;
27647 default_encode_section_info (decl, rtl, first);
27649 /* Careful not to prod global register variables. */
27650 if (!MEM_P (rtl))
27651 return;
27652 symbol = XEXP (rtl, 0);
27653 if (GET_CODE (symbol) != SYMBOL_REF)
27654 return;
27656 flags = SYMBOL_REF_FLAGS (symbol);
27658 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
27659 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
27661 SYMBOL_REF_FLAGS (symbol) = flags;
27663 #endif /* HAVE_AS_TLS */
27664 #endif /* TARGET_XCOFF */
27666 /* Compute a (partial) cost for rtx X. Return true if the complete
27667 cost has been computed, and false if subexpressions should be
27668 scanned. In either case, *TOTAL contains the cost result. */
27670 static bool
27671 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
27672 int *total, bool speed)
27674 enum machine_mode mode = GET_MODE (x);
27676 switch (code)
27678 /* On the RS/6000, if it is valid in the insn, it is free. */
27679 case CONST_INT:
27680 if (((outer_code == SET
27681 || outer_code == PLUS
27682 || outer_code == MINUS)
27683 && (satisfies_constraint_I (x)
27684 || satisfies_constraint_L (x)))
27685 || (outer_code == AND
27686 && (satisfies_constraint_K (x)
27687 || (mode == SImode
27688 ? satisfies_constraint_L (x)
27689 : satisfies_constraint_J (x))
27690 || mask_operand (x, mode)
27691 || (mode == DImode
27692 && mask64_operand (x, DImode))))
27693 || ((outer_code == IOR || outer_code == XOR)
27694 && (satisfies_constraint_K (x)
27695 || (mode == SImode
27696 ? satisfies_constraint_L (x)
27697 : satisfies_constraint_J (x))))
27698 || outer_code == ASHIFT
27699 || outer_code == ASHIFTRT
27700 || outer_code == LSHIFTRT
27701 || outer_code == ROTATE
27702 || outer_code == ROTATERT
27703 || outer_code == ZERO_EXTRACT
27704 || (outer_code == MULT
27705 && satisfies_constraint_I (x))
27706 || ((outer_code == DIV || outer_code == UDIV
27707 || outer_code == MOD || outer_code == UMOD)
27708 && exact_log2 (INTVAL (x)) >= 0)
27709 || (outer_code == COMPARE
27710 && (satisfies_constraint_I (x)
27711 || satisfies_constraint_K (x)))
27712 || ((outer_code == EQ || outer_code == NE)
27713 && (satisfies_constraint_I (x)
27714 || satisfies_constraint_K (x)
27715 || (mode == SImode
27716 ? satisfies_constraint_L (x)
27717 : satisfies_constraint_J (x))))
27718 || (outer_code == GTU
27719 && satisfies_constraint_I (x))
27720 || (outer_code == LTU
27721 && satisfies_constraint_P (x)))
27723 *total = 0;
27724 return true;
27726 else if ((outer_code == PLUS
27727 && reg_or_add_cint_operand (x, VOIDmode))
27728 || (outer_code == MINUS
27729 && reg_or_sub_cint_operand (x, VOIDmode))
27730 || ((outer_code == SET
27731 || outer_code == IOR
27732 || outer_code == XOR)
27733 && (INTVAL (x)
27734 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
27736 *total = COSTS_N_INSNS (1);
27737 return true;
27739 /* FALLTHRU */
27741 case CONST_DOUBLE:
27742 case CONST:
27743 case HIGH:
27744 case SYMBOL_REF:
27745 case MEM:
27746 /* When optimizing for size, MEM should be slightly more expensive
27747 than generating the address, e.g., (plus (reg) (const)).
27748 L1 cache latency is about two instructions. */
27749 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
27750 return true;
27752 case LABEL_REF:
27753 *total = 0;
27754 return true;
27756 case PLUS:
27757 case MINUS:
27758 if (FLOAT_MODE_P (mode))
27759 *total = rs6000_cost->fp;
27760 else
27761 *total = COSTS_N_INSNS (1);
27762 return false;
27764 case MULT:
27765 if (GET_CODE (XEXP (x, 1)) == CONST_INT
27766 && satisfies_constraint_I (XEXP (x, 1)))
27768 if (INTVAL (XEXP (x, 1)) >= -256
27769 && INTVAL (XEXP (x, 1)) <= 255)
27770 *total = rs6000_cost->mulsi_const9;
27771 else
27772 *total = rs6000_cost->mulsi_const;
27774 else if (mode == SFmode)
27775 *total = rs6000_cost->fp;
27776 else if (FLOAT_MODE_P (mode))
27777 *total = rs6000_cost->dmul;
27778 else if (mode == DImode)
27779 *total = rs6000_cost->muldi;
27780 else
27781 *total = rs6000_cost->mulsi;
27782 return false;
27784 case FMA:
27785 if (mode == SFmode)
27786 *total = rs6000_cost->fp;
27787 else
27788 *total = rs6000_cost->dmul;
27789 break;
27791 case DIV:
27792 case MOD:
27793 if (FLOAT_MODE_P (mode))
27795 *total = mode == DFmode ? rs6000_cost->ddiv
27796 : rs6000_cost->sdiv;
27797 return false;
27799 /* FALLTHRU */
27801 case UDIV:
27802 case UMOD:
27803 if (GET_CODE (XEXP (x, 1)) == CONST_INT
27804 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
27806 if (code == DIV || code == MOD)
27807 /* Shift, addze */
27808 *total = COSTS_N_INSNS (2);
27809 else
27810 /* Shift */
27811 *total = COSTS_N_INSNS (1);
27813 else
27815 if (GET_MODE (XEXP (x, 1)) == DImode)
27816 *total = rs6000_cost->divdi;
27817 else
27818 *total = rs6000_cost->divsi;
27820 /* Add in shift and subtract for MOD. */
27821 if (code == MOD || code == UMOD)
27822 *total += COSTS_N_INSNS (2);
27823 return false;
27825 case CTZ:
27826 case FFS:
27827 *total = COSTS_N_INSNS (4);
27828 return false;
27830 case POPCOUNT:
27831 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
27832 return false;
27834 case PARITY:
27835 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
27836 return false;
27838 case NOT:
27839 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
27841 *total = 0;
27842 return false;
27844 /* FALLTHRU */
27846 case AND:
27847 case CLZ:
27848 case IOR:
27849 case XOR:
27850 case ZERO_EXTRACT:
27851 *total = COSTS_N_INSNS (1);
27852 return false;
27854 case ASHIFT:
27855 case ASHIFTRT:
27856 case LSHIFTRT:
27857 case ROTATE:
27858 case ROTATERT:
27859 /* Handle mul_highpart. */
27860 if (outer_code == TRUNCATE
27861 && GET_CODE (XEXP (x, 0)) == MULT)
27863 if (mode == DImode)
27864 *total = rs6000_cost->muldi;
27865 else
27866 *total = rs6000_cost->mulsi;
27867 return true;
27869 else if (outer_code == AND)
27870 *total = 0;
27871 else
27872 *total = COSTS_N_INSNS (1);
27873 return false;
27875 case SIGN_EXTEND:
27876 case ZERO_EXTEND:
27877 if (GET_CODE (XEXP (x, 0)) == MEM)
27878 *total = 0;
27879 else
27880 *total = COSTS_N_INSNS (1);
27881 return false;
27883 case COMPARE:
27884 case NEG:
27885 case ABS:
27886 if (!FLOAT_MODE_P (mode))
27888 *total = COSTS_N_INSNS (1);
27889 return false;
27891 /* FALLTHRU */
27893 case FLOAT:
27894 case UNSIGNED_FLOAT:
27895 case FIX:
27896 case UNSIGNED_FIX:
27897 case FLOAT_TRUNCATE:
27898 *total = rs6000_cost->fp;
27899 return false;
27901 case FLOAT_EXTEND:
27902 if (mode == DFmode)
27903 *total = 0;
27904 else
27905 *total = rs6000_cost->fp;
27906 return false;
27908 case UNSPEC:
27909 switch (XINT (x, 1))
27911 case UNSPEC_FRSP:
27912 *total = rs6000_cost->fp;
27913 return true;
27915 default:
27916 break;
27918 break;
27920 case CALL:
27921 case IF_THEN_ELSE:
27922 if (!speed)
27924 *total = COSTS_N_INSNS (1);
27925 return true;
27927 else if (FLOAT_MODE_P (mode)
27928 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
27930 *total = rs6000_cost->fp;
27931 return false;
27933 break;
27935 case EQ:
27936 case GTU:
27937 case LTU:
27938 /* Carry bit requires mode == Pmode.
27939 NEG or PLUS already counted so only add one. */
27940 if (mode == Pmode
27941 && (outer_code == NEG || outer_code == PLUS))
27943 *total = COSTS_N_INSNS (1);
27944 return true;
27946 if (outer_code == SET)
27948 if (XEXP (x, 1) == const0_rtx)
27950 if (TARGET_ISEL && !TARGET_MFCRF)
27951 *total = COSTS_N_INSNS (8);
27952 else
27953 *total = COSTS_N_INSNS (2);
27954 return true;
27956 else if (mode == Pmode)
27958 *total = COSTS_N_INSNS (3);
27959 return false;
27962 /* FALLTHRU */
27964 case GT:
27965 case LT:
27966 case UNORDERED:
27967 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
27969 if (TARGET_ISEL && !TARGET_MFCRF)
27970 *total = COSTS_N_INSNS (8);
27971 else
27972 *total = COSTS_N_INSNS (2);
27973 return true;
27975 /* CC COMPARE. */
27976 if (outer_code == COMPARE)
27978 *total = 0;
27979 return true;
27981 break;
27983 default:
27984 break;
27987 return false;
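/* Worked example (illustrative): when costing the constant in

       (plus:SI (reg:SI 3) (const_int 100))

   the CONST_INT case above sees outer_code == PLUS, and 100 satisfies
   constraint "I" (a signed 16-bit immediate), so the constant folds
   into a single addi and *total is 0.  A constant that fails "I" and
   "L" but still passes reg_or_add_cint_operand instead costs
   COSTS_N_INSNS (1) for the extra instruction needed to build it.  */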
27990 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
27992 static bool
27993 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
27994 bool speed)
27996 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
27998 fprintf (stderr,
27999 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
28000 "opno = %d, total = %d, speed = %s, x:\n",
28001 ret ? "complete" : "scan inner",
28002 GET_RTX_NAME (code),
28003 GET_RTX_NAME (outer_code),
28004 opno,
28005 *total,
28006 speed ? "true" : "false");
28008 debug_rtx (x);
28010 return ret;
28013 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
28015 static int
28016 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
28017 addr_space_t as, bool speed)
28019 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
28021 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
28022 ret, speed ? "true" : "false");
28023 debug_rtx (x);
28025 return ret;
28029 /* A C expression returning the cost of moving data from a register of class
28030 CLASS1 to one of CLASS2. */
28032 static int
28033 rs6000_register_move_cost (enum machine_mode mode,
28034 reg_class_t from, reg_class_t to)
28036 int ret;
28038 if (TARGET_DEBUG_COST)
28039 dbg_cost_ctrl++;
28041 /* Moves from/to GENERAL_REGS. */
28042 if (reg_classes_intersect_p (to, GENERAL_REGS)
28043 || reg_classes_intersect_p (from, GENERAL_REGS))
28045 reg_class_t rclass = from;
28047 if (! reg_classes_intersect_p (to, GENERAL_REGS))
28048 rclass = to;
28050 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
28051 ret = (rs6000_memory_move_cost (mode, rclass, false)
28052 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
28054 /* It's more expensive to move CR_REGS than CR0_REGS because of the
28055 shift. */
28056 else if (rclass == CR_REGS)
28057 ret = 4;
28059 /* For those processors that have slow LR/CTR moves, make them more
28060 expensive than memory in order to bias spills to memory.  */
28061 else if ((rs6000_cpu == PROCESSOR_POWER6
28062 || rs6000_cpu == PROCESSOR_POWER7
28063 || rs6000_cpu == PROCESSOR_POWER8)
28064 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
28065 ret = 6 * hard_regno_nregs[0][mode];
28067 else
28068 /* A move will cost one instruction per GPR moved. */
28069 ret = 2 * hard_regno_nregs[0][mode];
28072 /* If we have VSX, we can easily move between FPR or Altivec registers. */
28073 else if (VECTOR_MEM_VSX_P (mode)
28074 && reg_classes_intersect_p (to, VSX_REGS)
28075 && reg_classes_intersect_p (from, VSX_REGS))
28076 ret = 2 * hard_regno_nregs[32][mode];
28078 /* Moving between two similar registers is just one instruction. */
28079 else if (reg_classes_intersect_p (to, from))
28080 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
28082 /* Everything else has to go through GENERAL_REGS. */
28083 else
28084 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
28085 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
28087 if (TARGET_DEBUG_COST)
28089 if (dbg_cost_ctrl == 1)
28090 fprintf (stderr,
28091 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
28092 ret, GET_MODE_NAME (mode), reg_class_names[from],
28093 reg_class_names[to]);
28094 dbg_cost_ctrl--;
28097 return ret;
28100 /* A C expression returning the cost of moving data of MODE from a register to
28101 or from memory. */
28103 static int
28104 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
28105 bool in ATTRIBUTE_UNUSED)
28107 int ret;
28109 if (TARGET_DEBUG_COST)
28110 dbg_cost_ctrl++;
28112 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
28113 ret = 4 * hard_regno_nregs[0][mode];
28114 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
28115 || reg_classes_intersect_p (rclass, VSX_REGS)))
28116 ret = 4 * hard_regno_nregs[32][mode];
28117 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
28118 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
28119 else
28120 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
28122 if (TARGET_DEBUG_COST)
28124 if (dbg_cost_ctrl == 1)
28125 fprintf (stderr,
28126 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
28127 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
28128 dbg_cost_ctrl--;
28131 return ret;
28134 /* Returns a code for a target-specific builtin that implements
28135 reciprocal of the function, or NULL_TREE if not available. */
28137 static tree
28138 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
28139 bool sqrt ATTRIBUTE_UNUSED)
28141 if (optimize_insn_for_size_p ())
28142 return NULL_TREE;
28144 if (md_fn)
28145 switch (fn)
28147 case VSX_BUILTIN_XVSQRTDP:
28148 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
28149 return NULL_TREE;
28151 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
28153 case VSX_BUILTIN_XVSQRTSP:
28154 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
28155 return NULL_TREE;
28157 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
28159 default:
28160 return NULL_TREE;
28163 else
28164 switch (fn)
28166 case BUILT_IN_SQRT:
28167 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
28168 return NULL_TREE;
28170 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
28172 case BUILT_IN_SQRTF:
28173 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
28174 return NULL_TREE;
28176 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
28178 default:
28179 return NULL_TREE;
28183 /* Load up a constant. If the mode is a vector mode, splat the value across
28184 all of the vector elements. */
28186 static rtx
28187 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
28189 rtx reg;
28191 if (mode == SFmode || mode == DFmode)
28193 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
28194 reg = force_reg (mode, d);
28196 else if (mode == V4SFmode)
28198 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
28199 rtvec v = gen_rtvec (4, d, d, d, d);
28200 reg = gen_reg_rtx (mode);
28201 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
28203 else if (mode == V2DFmode)
28205 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
28206 rtvec v = gen_rtvec (2, d, d);
28207 reg = gen_reg_rtx (mode);
28208 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
28210 else
28211 gcc_unreachable ();
28213 return reg;
28216 /* Generate an FMA instruction. */
28218 static void
28219 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
28221 enum machine_mode mode = GET_MODE (target);
28222 rtx dst;
28224 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
28225 gcc_assert (dst != NULL);
28227 if (dst != target)
28228 emit_move_insn (target, dst);
28231 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
28233 static void
28234 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
28236 enum machine_mode mode = GET_MODE (target);
28237 rtx dst;
28239 /* Altivec does not support fms directly;
28240 generate in terms of fma in that case. */
28241 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
28242 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
28243 else
28245 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
28246 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
28248 gcc_assert (dst != NULL);
28250 if (dst != target)
28251 emit_move_insn (target, dst);
28254 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
28256 static void
28257 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
28259 enum machine_mode mode = GET_MODE (dst);
28260 rtx r;
28262 /* This is a tad more complicated, since the fnma_optab is for
28263 a different expression: fma(-m1, m2, a), which is the same
28264 thing except in the case of signed zeros.
28266 Fortunately we know that if FMA is supported that FNMSUB is
28267 also supported in the ISA. Just expand it directly. */
28269 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
28271 r = gen_rtx_NEG (mode, a);
28272 r = gen_rtx_FMA (mode, m1, m2, r);
28273 r = gen_rtx_NEG (mode, r);
28274 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
28277 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
28278 add a reg_note saying that this was a division. Support both scalar and
28279 vector divide. Assumes no trapping math and finite arguments. */
28281 void
28282 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
28284 enum machine_mode mode = GET_MODE (dst);
28285 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
28286 int i;
28288 /* Low precision estimates guarantee 5 bits of accuracy. High
28289 precision estimates guarantee 14 bits of accuracy. SFmode
28290 requires 23 bits of accuracy. DFmode requires 52 bits of
28291 accuracy. Each pass at least doubles the accuracy, leading
28292 to the following. */
28293 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
28294 if (mode == DFmode || mode == V2DFmode)
28295 passes++;
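/* Concretely: with the low-precision (5-bit) estimate, three passes give
   5 -> 10 -> 20 -> 40 bits, covering SFmode's 23; the extra pass for
   DFmode/V2DFmode gives 80 bits, covering 52.  With -mrecip-precision
   (14-bit estimates) the counts drop to one pass (28 bits) and two
   passes (56 bits) respectively.  */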
28297 enum insn_code code = optab_handler (smul_optab, mode);
28298 insn_gen_fn gen_mul = GEN_FCN (code);
28300 gcc_assert (code != CODE_FOR_nothing);
28302 one = rs6000_load_constant_and_splat (mode, dconst1);
28304 /* x0 = 1./d estimate */
28305 x0 = gen_reg_rtx (mode);
28306 emit_insn (gen_rtx_SET (VOIDmode, x0,
28307 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
28308 UNSPEC_FRES)));
28310 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
28311 if (passes > 1) {
28313 /* e0 = 1. - d * x0 */
28314 e0 = gen_reg_rtx (mode);
28315 rs6000_emit_nmsub (e0, d, x0, one);
28317 /* x1 = x0 + e0 * x0 */
28318 x1 = gen_reg_rtx (mode);
28319 rs6000_emit_madd (x1, e0, x0, x0);
28321 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
28322 ++i, xprev = xnext, eprev = enext) {
28324 /* enext = eprev * eprev */
28325 enext = gen_reg_rtx (mode);
28326 emit_insn (gen_mul (enext, eprev, eprev));
28328 /* xnext = xprev + enext * xprev */
28329 xnext = gen_reg_rtx (mode);
28330 rs6000_emit_madd (xnext, enext, xprev, xprev);
28333 } else
28334 xprev = x0;
28336 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
28338 /* u = n * xprev */
28339 u = gen_reg_rtx (mode);
28340 emit_insn (gen_mul (u, n, xprev));
28342 /* v = n - (d * u) */
28343 v = gen_reg_rtx (mode);
28344 rs6000_emit_nmsub (v, d, u, n);
28346 /* dst = (v * xprev) + u */
28347 rs6000_emit_madd (dst, v, xprev, u);
28349 if (note_p)
28350 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
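/* To see why the tail of rs6000_emit_swdiv above is a Newton-Raphson
   step, expand it:

       dst = v*x + u = (n - d*(n*x))*x + n*x = n*x*(2 - d*x)

   and if x = (1 - e)/d for relative error e, then
   dst = (n/d)*(1 - e)*(1 + e) = (n/d)*(1 - e*e), so each application
   squares the error, doubling the number of accurate bits.  */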
28353 /* Newton-Raphson approximation of single/double-precision floating point
28354 rsqrt. Assumes no trapping math and finite arguments. */
28356 void
28357 rs6000_emit_swrsqrt (rtx dst, rtx src)
28359 enum machine_mode mode = GET_MODE (src);
28360 rtx x0 = gen_reg_rtx (mode);
28361 rtx y = gen_reg_rtx (mode);
28363 /* Low precision estimates guarantee 5 bits of accuracy. High
28364 precision estimates guarantee 14 bits of accuracy. SFmode
28365 requires 23 bits of accuracy. DFmode requires 52 bits of
28366 accuracy. Each pass at least doubles the accuracy, leading
28367 to the following. */
28368 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
28369 if (mode == DFmode || mode == V2DFmode)
28370 passes++;
28372 REAL_VALUE_TYPE dconst3_2;
28373 int i;
28374 rtx halfthree;
28375 enum insn_code code = optab_handler (smul_optab, mode);
28376 insn_gen_fn gen_mul = GEN_FCN (code);
28378 gcc_assert (code != CODE_FOR_nothing);
28380 /* Load up the constant 1.5 either as a scalar, or as a vector. */
28381 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
28382 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
28384 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
28386 /* x0 = rsqrt estimate */
28387 emit_insn (gen_rtx_SET (VOIDmode, x0,
28388 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
28389 UNSPEC_RSQRT)));
28391 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
28392 rs6000_emit_msub (y, src, halfthree, src);
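/* The loop below is the standard Newton-Raphson step for 1/sqrt(s),

       x' = x*(3 - s*x*x)/2 = x*(1.5 - (s/2)*x*x),

   with y = s/2 precomputed once.  The msub above produces y because
   1.5*src - src = 0.5*src, which avoids loading a separate 0.5
   constant alongside the 1.5 already needed in the loop.  */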
28394 for (i = 0; i < passes; i++)
28396 rtx x1 = gen_reg_rtx (mode);
28397 rtx u = gen_reg_rtx (mode);
28398 rtx v = gen_reg_rtx (mode);
28400 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
28401 emit_insn (gen_mul (u, x0, x0));
28402 rs6000_emit_nmsub (v, y, u, halfthree);
28403 emit_insn (gen_mul (x1, x0, v));
28404 x0 = x1;
28407 emit_move_insn (dst, x0);
28408 return;
28411 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
28412 (Power7) targets. DST is the target, and SRC is the argument operand. */
28414 void
28415 rs6000_emit_popcount (rtx dst, rtx src)
28417 enum machine_mode mode = GET_MODE (dst);
28418 rtx tmp1, tmp2;
28420 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
28421 if (TARGET_POPCNTD)
28423 if (mode == SImode)
28424 emit_insn (gen_popcntdsi2 (dst, src));
28425 else
28426 emit_insn (gen_popcntddi2 (dst, src));
28427 return;
28430 tmp1 = gen_reg_rtx (mode);
28432 if (mode == SImode)
28434 emit_insn (gen_popcntbsi2 (tmp1, src));
28435 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
28436 NULL_RTX, 0);
28437 tmp2 = force_reg (SImode, tmp2);
28438 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
28440 else
28442 emit_insn (gen_popcntbdi2 (tmp1, src));
28443 tmp2 = expand_mult (DImode, tmp1,
28444 GEN_INT ((HOST_WIDE_INT)
28445 0x01010101 << 32 | 0x01010101),
28446 NULL_RTX, 0);
28447 tmp2 = force_reg (DImode, tmp2);
28448 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
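/* A sketch of the multiply trick above (SImode case): popcntb leaves a
   per-byte population count b3:b2:b1:b0, each at most 8.  Multiplying
   by 0x01010101 accumulates

       t + (t << 8) + (t << 16) + (t << 24),

   whose top byte is b0+b1+b2+b3 with no carries (the sum is at most
   32), so shifting right by 24 yields the full count.  The DImode path
   is identical with the constant 0x0101010101010101 and a 56-bit
   shift.  */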
28453 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
28454 target, and SRC is the argument operand. */
28456 void
28457 rs6000_emit_parity (rtx dst, rtx src)
28459 enum machine_mode mode = GET_MODE (dst);
28460 rtx tmp;
28462 tmp = gen_reg_rtx (mode);
28464 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
28465 if (TARGET_CMPB)
28467 if (mode == SImode)
28469 emit_insn (gen_popcntbsi2 (tmp, src));
28470 emit_insn (gen_paritysi2_cmpb (dst, tmp));
28472 else
28474 emit_insn (gen_popcntbdi2 (tmp, src));
28475 emit_insn (gen_paritydi2_cmpb (dst, tmp));
28477 return;
28480 if (mode == SImode)
28482 /* Is mult+shift >= shift+xor+shift+xor? */
28483 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
28485 rtx tmp1, tmp2, tmp3, tmp4;
28487 tmp1 = gen_reg_rtx (SImode);
28488 emit_insn (gen_popcntbsi2 (tmp1, src));
28490 tmp2 = gen_reg_rtx (SImode);
28491 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
28492 tmp3 = gen_reg_rtx (SImode);
28493 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
28495 tmp4 = gen_reg_rtx (SImode);
28496 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
28497 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
28499 else
28500 rs6000_emit_popcount (tmp, src);
28501 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
28503 else
28505 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
28506 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
28508 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
28510 tmp1 = gen_reg_rtx (DImode);
28511 emit_insn (gen_popcntbdi2 (tmp1, src));
28513 tmp2 = gen_reg_rtx (DImode);
28514 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
28515 tmp3 = gen_reg_rtx (DImode);
28516 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
28518 tmp4 = gen_reg_rtx (DImode);
28519 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
28520 tmp5 = gen_reg_rtx (DImode);
28521 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
28523 tmp6 = gen_reg_rtx (DImode);
28524 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
28525 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
28527 else
28528 rs6000_emit_popcount (tmp, src);
28529 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
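/* The xor fold above works because parity is addition mod 2: after
   popcntb each byte holds its own bit count, and

       (b0 + b1 + ...) & 1 == (b0 ^ b1 ^ ...) & 1.

   Halving the word with shift+xor (by 16 then 8 for SImode; by 32, 16,
   then 8 for DImode) therefore leaves the combined count's parity in
   the low byte, and the final AND with 1 extracts it.  */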
28533 /* Expand an Altivec constant permutation for little endian mode.
28534 There are two issues: First, the two input operands must be
28535 swapped so that together they form a double-wide array in LE
28536 order. Second, the vperm instruction has surprising behavior
28537 in LE mode: it interprets the elements of the source vectors
28538 in BE mode ("left to right") and interprets the elements of
28539 the destination vector in LE mode ("right to left"). To
28540 correct for this, we must subtract each element of the permute
28541 control vector from 31.
28543 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
28544 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
28545 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
28546 serve as the permute control vector. Then, in BE mode,
28548 vperm 9,10,11,12
28550 places the desired result in vr9. However, in LE mode the
28551 vector contents will be
28553 vr10 = 00000003 00000002 00000001 00000000
28554 vr11 = 00000007 00000006 00000005 00000004
28556 The result of the vperm using the same permute control vector is
28558 vr9 = 05000000 07000000 01000000 03000000
28560 That is, the leftmost 4 bytes of vr10 are interpreted as the
28561 source for the rightmost 4 bytes of vr9, and so on.
28563 If we change the permute control vector to
28565 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
28567 and issue
28569 vperm 9,11,10,12
28571 we get the desired
28573 vr9 = 00000006 00000004 00000002 00000000. */
28575 void
28576 altivec_expand_vec_perm_const_le (rtx operands[4])
28578 unsigned int i;
28579 rtx perm[16];
28580 rtx constv, unspec;
28581 rtx target = operands[0];
28582 rtx op0 = operands[1];
28583 rtx op1 = operands[2];
28584 rtx sel = operands[3];
28586 /* Unpack and adjust the constant selector. */
28587 for (i = 0; i < 16; ++i)
28589 rtx e = XVECEXP (sel, 0, i);
28590 unsigned int elt = 31 - (INTVAL (e) & 31);
28591 perm[i] = GEN_INT (elt);
28594 /* Expand to a permute, swapping the inputs and using the
28595 adjusted selector. */
28596 if (!REG_P (op0))
28597 op0 = force_reg (V16QImode, op0);
28598 if (!REG_P (op1))
28599 op1 = force_reg (V16QImode, op1);
28601 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
28602 constv = force_reg (V16QImode, constv);
28603 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
28604 UNSPEC_VPERM);
28605 if (!REG_P (target))
28607 rtx tmp = gen_reg_rtx (V16QImode);
28608 emit_move_insn (tmp, unspec);
28609 unspec = tmp;
28612 emit_move_insn (target, unspec);
28615 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
28616 permute control vector. But here it's not a constant, so we must
28617 generate a vector splat/subtract to do the adjustment. */
28619 void
28620 altivec_expand_vec_perm_le (rtx operands[4])
28622 rtx splat, unspec;
28623 rtx target = operands[0];
28624 rtx op0 = operands[1];
28625 rtx op1 = operands[2];
28626 rtx sel = operands[3];
28627 rtx tmp = target;
28629 /* Get everything in regs so the pattern matches. */
28630 if (!REG_P (op0))
28631 op0 = force_reg (V16QImode, op0);
28632 if (!REG_P (op1))
28633 op1 = force_reg (V16QImode, op1);
28634 if (!REG_P (sel))
28635 sel = force_reg (V16QImode, sel);
28636 if (!REG_P (target))
28637 tmp = gen_reg_rtx (V16QImode);
28639 /* SEL = splat(31) - SEL. */
28640 /* We want to subtract from 31, but we can't vspltisb 31 since
28641 it's out of range. -1 works as well because only the low-order
28642 five bits of the permute control vector elements are used. */
28643 splat = gen_rtx_VEC_DUPLICATE (V16QImode,
28644 gen_rtx_CONST_INT (QImode, -1));
28645 emit_move_insn (tmp, splat);
28646 sel = gen_rtx_MINUS (V16QImode, tmp, sel);
28647 emit_move_insn (tmp, sel);
28649 /* Permute with operands reversed and adjusted selector. */
28650 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, tmp),
28651 UNSPEC_VPERM);
28653 /* Copy into target, possibly by way of a register. */
28654 if (!REG_P (target))
28656 emit_move_insn (tmp, unspec);
28657 unspec = tmp;
28660 emit_move_insn (target, unspec);
28663 /* Expand an Altivec constant permutation. Return true if we match
28664 an efficient implementation; false to fall back to VPERM. */
28666 bool
28667 altivec_expand_vec_perm_const (rtx operands[4])
28669 struct altivec_perm_insn {
28670 HOST_WIDE_INT mask;
28671 enum insn_code impl;
28672 unsigned char perm[16];
28674 static const struct altivec_perm_insn patterns[] = {
28675 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum,
28676 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
28677 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum,
28678 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
28679 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrghb,
28680 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
28681 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrghh,
28682 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
28683 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrghw,
28684 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
28685 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrglb,
28686 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
28687 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrglh,
28688 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
28689 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrglw,
28690 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
28691 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
28692 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
28693 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
28694 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
28697 unsigned int i, j, elt, which;
28698 unsigned char perm[16];
28699 rtx target, op0, op1, sel, x;
28700 bool one_vec;
28702 target = operands[0];
28703 op0 = operands[1];
28704 op1 = operands[2];
28705 sel = operands[3];
28707 /* Unpack the constant selector. */
28708 for (i = which = 0; i < 16; ++i)
28710 rtx e = XVECEXP (sel, 0, i);
28711 elt = INTVAL (e) & 31;
28712 which |= (elt < 16 ? 1 : 2);
28713 perm[i] = elt;
28716 /* Simplify the constant selector based on operands. */
28717 switch (which)
28719 default:
28720 gcc_unreachable ();
28722 case 3:
28723 one_vec = false;
28724 if (!rtx_equal_p (op0, op1))
28725 break;
28726 /* FALLTHRU */
28728 case 2:
28729 for (i = 0; i < 16; ++i)
28730 perm[i] &= 15;
28731 op0 = op1;
28732 one_vec = true;
28733 break;
28735 case 1:
28736 op1 = op0;
28737 one_vec = true;
28738 break;
28741 /* Look for splat patterns. */
28742 if (one_vec)
28744 elt = perm[0];
28746 for (i = 0; i < 16; ++i)
28747 if (perm[i] != elt)
28748 break;
28749 if (i == 16)
28751 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
28752 return true;
28755 if (elt % 2 == 0)
28757 for (i = 0; i < 16; i += 2)
28758 if (perm[i] != elt || perm[i + 1] != elt + 1)
28759 break;
28760 if (i == 16)
28762 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
28763 x = gen_reg_rtx (V8HImode);
28764 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
28765 GEN_INT (field)));
28766 emit_move_insn (target, gen_lowpart (V16QImode, x));
28767 return true;
28771 if (elt % 4 == 0)
28773 for (i = 0; i < 16; i += 4)
28774 if (perm[i] != elt
28775 || perm[i + 1] != elt + 1
28776 || perm[i + 2] != elt + 2
28777 || perm[i + 3] != elt + 3)
28778 break;
28779 if (i == 16)
28781 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
28782 x = gen_reg_rtx (V4SImode);
28783 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
28784 GEN_INT (field)));
28785 emit_move_insn (target, gen_lowpart (V16QImode, x));
28786 return true;
28791 /* Look for merge and pack patterns. */
28792 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
28794 bool swapped;
28796 if ((patterns[j].mask & rs6000_isa_flags) == 0)
28797 continue;
28799 elt = patterns[j].perm[0];
28800 if (perm[0] == elt)
28801 swapped = false;
28802 else if (perm[0] == elt + 16)
28803 swapped = true;
28804 else
28805 continue;
28806 for (i = 1; i < 16; ++i)
28808 elt = patterns[j].perm[i];
28809 if (swapped)
28810 elt = (elt >= 16 ? elt - 16 : elt + 16);
28811 else if (one_vec && elt >= 16)
28812 elt -= 16;
28813 if (perm[i] != elt)
28814 break;
28816 if (i == 16)
28818 enum insn_code icode = patterns[j].impl;
28819 enum machine_mode omode = insn_data[icode].operand[0].mode;
28820 enum machine_mode imode = insn_data[icode].operand[1].mode;
28822 /* For little-endian, the two input operands must be swapped
28823 (or swapped back) to ensure proper right-to-left numbering
28824 from 0 to 2N-1. */
28825 if (swapped ^ !BYTES_BIG_ENDIAN)
28826 x = op0, op0 = op1, op1 = x;
28827 if (imode != V16QImode)
28829 op0 = gen_lowpart (imode, op0);
28830 op1 = gen_lowpart (imode, op1);
28832 if (omode == V16QImode)
28833 x = target;
28834 else
28835 x = gen_reg_rtx (omode);
28836 emit_insn (GEN_FCN (icode) (x, op0, op1));
28837 if (omode != V16QImode)
28838 emit_move_insn (target, gen_lowpart (V16QImode, x));
28839 return true;
28843 if (!BYTES_BIG_ENDIAN)
28845 altivec_expand_vec_perm_const_le (operands);
28846 return true;
28849 return false;
28852 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
28853 Return true if we match an efficient implementation. */
28855 static bool
28856 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
28857 unsigned char perm0, unsigned char perm1)
28859 rtx x;
28861 /* If both selectors come from the same operand, fold to single op. */
28862 if ((perm0 & 2) == (perm1 & 2))
28864 if (perm0 & 2)
28865 op0 = op1;
28866 else
28867 op1 = op0;
28869 /* If both operands are equal, fold to simpler permutation. */
28870 if (rtx_equal_p (op0, op1))
28872 perm0 = perm0 & 1;
28873 perm1 = (perm1 & 1) + 2;
28875 /* If the first selector comes from the second operand, swap. */
28876 else if (perm0 & 2)
28878 if (perm1 & 2)
28879 return false;
28880 perm0 -= 2;
28881 perm1 += 2;
28882 x = op0, op0 = op1, op1 = x;
28884 /* If the second selector does not come from the second operand, fail. */
28885 else if ((perm1 & 2) == 0)
28886 return false;
28888 /* Success! */
28889 if (target != NULL)
28891 enum machine_mode vmode, dmode;
28892 rtvec v;
28894 vmode = GET_MODE (target);
28895 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
28896 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
28898 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
28899 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
28900 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
28901 emit_insn (gen_rtx_SET (VOIDmode, target, x));
28903 return true;
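/* Worked example (illustrative): perm0 = 2, perm1 = 1 selects element 0
   of OP1 and element 1 of OP0.  The two selectors come from different
   operands and perm0 references the second one, so the swap branch
   rewrites this to perm0 = 0, perm1 = 3 with OP0 and OP1 exchanged --
   the same permutation in canonical form, which the vec_select built
   above can express directly.  */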
28906 bool
28907 rs6000_expand_vec_perm_const (rtx operands[4])
28909 rtx target, op0, op1, sel;
28910 unsigned char perm0, perm1;
28912 target = operands[0];
28913 op0 = operands[1];
28914 op1 = operands[2];
28915 sel = operands[3];
28917 /* Unpack the constant selector. */
28918 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
28919 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
28921 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
28924 /* Test whether a constant permutation is supported. */
28926 static bool
28927 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
28928 const unsigned char *sel)
28930 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
28931 if (TARGET_ALTIVEC)
28932 return true;
28934 /* Check for ps_merge* or evmerge* insns. */
28935 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
28936 || (TARGET_SPE && vmode == V2SImode))
28938 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
28939 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
28940 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
28943 return false;
28946 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
28948 static void
28949 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
28950 enum machine_mode vmode, unsigned nelt, rtx perm[])
28952 enum machine_mode imode;
28953 rtx x;
28955 imode = vmode;
28956 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
28958 imode = GET_MODE_INNER (vmode);
28959 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
28960 imode = mode_for_vector (imode, nelt);
28963 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
28964 x = expand_vec_perm (vmode, op0, op1, x, target);
28965 if (x != target)
28966 emit_move_insn (target, x);
28969 /* Expand an extract even operation. */
28971 void
28972 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
28974 enum machine_mode vmode = GET_MODE (target);
28975 unsigned i, nelt = GET_MODE_NUNITS (vmode);
28976 rtx perm[16];
28978 for (i = 0; i < nelt; i++)
28979 perm[i] = GEN_INT (i * 2);
28981 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
28984 /* Expand a vector interleave operation. */
28986 void
28987 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
28989 enum machine_mode vmode = GET_MODE (target);
28990 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
28991 rtx perm[16];
28993 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
28994 for (i = 0; i < nelt / 2; i++)
28996 perm[i * 2] = GEN_INT (i + high);
28997 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
29000 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
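/* For example, with V4SImode (nelt = 4) on a big-endian target, HIGHP
   yields high = 0 and perm = {0, 4, 1, 5}, i.e. the first two elements
   of each input interleaved; on little-endian the same HIGHP request
   becomes perm = {2, 6, 3, 7}, matching the reversed element
   numbering.  */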
29003 /* Return an RTX representing where to find the function value of a
29004 function returning MODE. */
29005 static rtx
29006 rs6000_complex_function_value (enum machine_mode mode)
29008 unsigned int regno;
29009 rtx r1, r2;
29010 enum machine_mode inner = GET_MODE_INNER (mode);
29011 unsigned int inner_bytes = GET_MODE_SIZE (inner);
29013 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
29014 regno = FP_ARG_RETURN;
29015 else
29017 regno = GP_ARG_RETURN;
29019 /* 32-bit is OK since it'll go in r3/r4. */
29020 if (TARGET_32BIT && inner_bytes >= 4)
29021 return gen_rtx_REG (mode, regno);
29024 if (inner_bytes >= 8)
29025 return gen_rtx_REG (mode, regno);
29027 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
29028 const0_rtx);
29029 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
29030 GEN_INT (inner_bytes));
29031 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
29034 /* Target hook for TARGET_FUNCTION_VALUE.
29036 On the SPE, both FPs and vectors are returned in r3.
29038 On RS/6000 an integer value is in r3 and a floating-point value is in
29039 fp1, unless -msoft-float. */
29041 static rtx
29042 rs6000_function_value (const_tree valtype,
29043 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
29044 bool outgoing ATTRIBUTE_UNUSED)
29046 enum machine_mode mode;
29047 unsigned int regno;
29049 /* Special handling for structs in darwin64. */
29050 if (TARGET_MACHO
29051 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
29053 CUMULATIVE_ARGS valcum;
29054 rtx valret;
29056 valcum.words = 0;
29057 valcum.fregno = FP_ARG_MIN_REG;
29058 valcum.vregno = ALTIVEC_ARG_MIN_REG;
29059 /* Do a trial code generation as if this were going to be passed as
29060 an argument; if any part goes in memory, we return NULL. */
29061 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
29062 if (valret)
29063 return valret;
29064 /* Otherwise fall through to standard ABI rules. */
29067 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
29069 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
29070 return gen_rtx_PARALLEL (DImode,
29071 gen_rtvec (2,
29072 gen_rtx_EXPR_LIST (VOIDmode,
29073 gen_rtx_REG (SImode, GP_ARG_RETURN),
29074 const0_rtx),
29075 gen_rtx_EXPR_LIST (VOIDmode,
29076 gen_rtx_REG (SImode,
29077 GP_ARG_RETURN + 1),
29078 GEN_INT (4))));
29080 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
29082 return gen_rtx_PARALLEL (DCmode,
29083 gen_rtvec (4,
29084 gen_rtx_EXPR_LIST (VOIDmode,
29085 gen_rtx_REG (SImode, GP_ARG_RETURN),
29086 const0_rtx),
29087 gen_rtx_EXPR_LIST (VOIDmode,
29088 gen_rtx_REG (SImode,
29089 GP_ARG_RETURN + 1),
29090 GEN_INT (4)),
29091 gen_rtx_EXPR_LIST (VOIDmode,
29092 gen_rtx_REG (SImode,
29093 GP_ARG_RETURN + 2),
29094 GEN_INT (8)),
29095 gen_rtx_EXPR_LIST (VOIDmode,
29096 gen_rtx_REG (SImode,
29097 GP_ARG_RETURN + 3),
29098 GEN_INT (12))));
29101 mode = TYPE_MODE (valtype);
29102 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
29103 || POINTER_TYPE_P (valtype))
29104 mode = TARGET_32BIT ? SImode : DImode;
29106 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
29107 /* _Decimal128 must use an even/odd register pair. */
29108 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
29109 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
29110 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
29111 regno = FP_ARG_RETURN;
29112 else if (TREE_CODE (valtype) == COMPLEX_TYPE
29113 && targetm.calls.split_complex_arg)
29114 return rs6000_complex_function_value (mode);
29115 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
29116 return register is used in both cases, and we won't see V2DImode/V2DFmode
29117 for pure altivec, combine the two cases. */
29118 else if (TREE_CODE (valtype) == VECTOR_TYPE
29119 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
29120 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
29121 regno = ALTIVEC_ARG_RETURN;
29122 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
29123 && (mode == DFmode || mode == DCmode
29124 || mode == TFmode || mode == TCmode))
29125 return spe_build_register_parallel (mode, GP_ARG_RETURN);
29126 else
29127 regno = GP_ARG_RETURN;
29129 return gen_rtx_REG (mode, regno);
29132 /* Define how to find the value returned by a library function
29133 assuming the value has mode MODE. */
29134 rtx
29135 rs6000_libcall_value (enum machine_mode mode)
29137 unsigned int regno;
29139 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
29141 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
29142 return gen_rtx_PARALLEL (DImode,
29143 gen_rtvec (2,
29144 gen_rtx_EXPR_LIST (VOIDmode,
29145 gen_rtx_REG (SImode, GP_ARG_RETURN),
29146 const0_rtx),
29147 gen_rtx_EXPR_LIST (VOIDmode,
29148 gen_rtx_REG (SImode,
29149 GP_ARG_RETURN + 1),
29150 GEN_INT (4))));
29153 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
29154 /* _Decimal128 must use an even/odd register pair. */
29155 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
29156 else if (SCALAR_FLOAT_MODE_P (mode)
29157 && TARGET_HARD_FLOAT && TARGET_FPRS
29158 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
29159 regno = FP_ARG_RETURN;
29160 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
29161 return register is used in both cases, and we won't see V2DImode/V2DFmode
29162 for pure altivec, combine the two cases. */
29163 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
29164 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
29165 regno = ALTIVEC_ARG_RETURN;
29166 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
29167 return rs6000_complex_function_value (mode);
29168 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
29169 && (mode == DFmode || mode == DCmode
29170 || mode == TFmode || mode == TCmode))
29171 return spe_build_register_parallel (mode, GP_ARG_RETURN);
29172 else
29173 regno = GP_ARG_RETURN;
29175 return gen_rtx_REG (mode, regno);
29179 /* Given FROM and TO register numbers, say whether this elimination is allowed.
29180 Frame pointer elimination is automatically handled.
29182 For the RS/6000, if frame pointer elimination is being done, we would like
29183 to convert ap into fp, not sp.
29185 We need r30 if -mminimal-toc was specified, and there are constant pool
29186 references. */
29188 static bool
29189 rs6000_can_eliminate (const int from, const int to)
29191 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
29192 ? ! frame_pointer_needed
29193 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
29194 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
29195 : true);
29198 /* Define the offset between two registers, FROM to be eliminated and its
29199 replacement TO, at the start of a routine. */
29200 HOST_WIDE_INT
29201 rs6000_initial_elimination_offset (int from, int to)
29203 rs6000_stack_t *info = rs6000_stack_info ();
29204 HOST_WIDE_INT offset;
29206 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
29207 offset = info->push_p ? 0 : -info->total_size;
29208 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
29210 offset = info->push_p ? 0 : -info->total_size;
29211 if (FRAME_GROWS_DOWNWARD)
29212 offset += info->fixed_size + info->vars_size + info->parm_size;
29214 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
29215 offset = FRAME_GROWS_DOWNWARD
29216 ? info->fixed_size + info->vars_size + info->parm_size
29217 : 0;
29218 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
29219 offset = info->total_size;
29220 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
29221 offset = info->push_p ? info->total_size : 0;
29222 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
29223 offset = 0;
29224 else
29225 gcc_unreachable ();
29227 return offset;
29230 static rtx
29231 rs6000_dwarf_register_span (rtx reg)
29233 rtx parts[8];
29234 int i, words;
29235 unsigned regno = REGNO (reg);
29236 enum machine_mode mode = GET_MODE (reg);
29238 if (TARGET_SPE
29239 && regno < 32
29240 && (SPE_VECTOR_MODE (GET_MODE (reg))
29241 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
29242 && mode != SFmode && mode != SDmode && mode != SCmode)))
29243 ;
29244 else
29245 return NULL_RTX;
29247 regno = REGNO (reg);
29249 /* The duality of the SPE register size wreaks all kinds of havoc.
29250 This is a way of distinguishing r0 in 32-bits from r0 in
29251 64-bits. */
29252 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
29253 gcc_assert (words <= 4);
29254 for (i = 0; i < words; i++, regno++)
29256 if (BYTES_BIG_ENDIAN)
29258 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
29259 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
29261 else
29263 parts[2 * i] = gen_rtx_REG (SImode, regno);
29264 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
29268 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
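/* For instance (illustrative), an 8-byte SPE vector held in r5 has
   words = 1 and is described to DWARF as the pair {r1205, r5} on a
   big-endian target: the invented register number 1205 stands for the
   upper 32 bits, which the ordinary r5 unwind column cannot
   express.  */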
29271 /* Fill in sizes for SPE register high parts in table used by unwinder. */
29273 static void
29274 rs6000_init_dwarf_reg_sizes_extra (tree address)
29276 if (TARGET_SPE)
29278 int i;
29279 enum machine_mode mode = TYPE_MODE (char_type_node);
29280 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
29281 rtx mem = gen_rtx_MEM (BLKmode, addr);
29282 rtx value = gen_int_mode (4, mode);
29284 for (i = 1201; i < 1232; i++)
29286 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
29287 HOST_WIDE_INT offset
29288 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
29290 emit_move_insn (adjust_address (mem, mode, offset), value);
29294 if (TARGET_MACHO && ! TARGET_ALTIVEC)
29296 int i;
29297 enum machine_mode mode = TYPE_MODE (char_type_node);
29298 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
29299 rtx mem = gen_rtx_MEM (BLKmode, addr);
29300 rtx value = gen_int_mode (16, mode);
29302 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
29303 The unwinder still needs to know the size of Altivec registers. */
29305 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
29307 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
29308 HOST_WIDE_INT offset
29309 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
29311 emit_move_insn (adjust_address (mem, mode, offset), value);
29316 /* Map internal gcc register numbers to DWARF2 register numbers. */
29318 unsigned int
29319 rs6000_dbx_register_number (unsigned int regno)
29321 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
29322 return regno;
29323 if (regno == LR_REGNO)
29324 return 108;
29325 if (regno == CTR_REGNO)
29326 return 109;
29327 if (CR_REGNO_P (regno))
29328 return regno - CR0_REGNO + 86;
29329 if (regno == CA_REGNO)
29330 return 101; /* XER */
29331 if (ALTIVEC_REGNO_P (regno))
29332 return regno - FIRST_ALTIVEC_REGNO + 1124;
29333 if (regno == VRSAVE_REGNO)
29334 return 356;
29335 if (regno == VSCR_REGNO)
29336 return 67;
29337 if (regno == SPE_ACC_REGNO)
29338 return 99;
29339 if (regno == SPEFSCR_REGNO)
29340 return 612;
29341 /* SPE high reg number. We get these values of regno from
29342 rs6000_dwarf_register_span. */
29343 gcc_assert (regno >= 1200 && regno < 1232);
29344 return regno;
29347 /* Target hook eh_return_filter_mode. */
29348 static enum machine_mode
29349 rs6000_eh_return_filter_mode (void)
29351 return TARGET_32BIT ? SImode : word_mode;
29354 /* Target hook for scalar_mode_supported_p. */
29355 static bool
29356 rs6000_scalar_mode_supported_p (enum machine_mode mode)
29358 if (DECIMAL_FLOAT_MODE_P (mode))
29359 return default_decimal_float_supported_p ();
29360 else
29361 return default_scalar_mode_supported_p (mode);
29364 /* Target hook for vector_mode_supported_p. */
29365 static bool
29366 rs6000_vector_mode_supported_p (enum machine_mode mode)
29369 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
29370 return true;
29372 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
29373 return true;
29375 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
29376 return true;
29378 else
29379 return false;
29382 /* Target hook for invalid_arg_for_unprototyped_fn. */
29383 static const char *
29384 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
29386 return (!rs6000_darwin64_abi
29387 && typelist == 0
29388 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
29389 && (funcdecl == NULL_TREE
29390 || (TREE_CODE (funcdecl) == FUNCTION_DECL
29391 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
29392 ? N_("AltiVec argument passed to unprototyped function")
29393 : NULL;
29396 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
29397 setup by using the hidden function __stack_chk_fail_local instead of
29398 calling __stack_chk_fail directly. Otherwise it is better to call
29399 __stack_chk_fail directly. */
29401 static tree ATTRIBUTE_UNUSED
29402 rs6000_stack_protect_fail (void)
29404 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
29405 ? default_hidden_stack_protect_fail ()
29406 : default_external_stack_protect_fail ();
29409 void
29410 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
29411 int num_operands ATTRIBUTE_UNUSED)
29413 if (rs6000_warn_cell_microcode)
29415 const char *temp;
29416 int insn_code_number = recog_memoized (insn);
29417 location_t location = INSN_LOCATION (insn);
29419 /* Punt on insns we cannot recognize. */
29420 if (insn_code_number < 0)
29421 return;
29423 temp = get_insn_template (insn_code_number, insn);
29425 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
29426 warning_at (location, OPT_mwarn_cell_microcode,
29427 "emitting microcode insn %s\t[%s] #%d",
29428 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
29429 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
29430 warning_at (location, OPT_mwarn_cell_microcode,
29431 "emitting conditional microcode insn %s\t[%s] #%d",
29432 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
29436 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
29438 #if TARGET_ELF
29439 static unsigned HOST_WIDE_INT
29440 rs6000_asan_shadow_offset (void)
29442 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
29444 #endif
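/* This offset feeds the usual ASan mapping shadow = (addr >> 3) + offset.
   A sketch of the address arithmetic (hypothetical address, 64-bit
   target):

       addr   = 0x1000
       shadow = (0x1000 >> 3) + ((unsigned HOST_WIDE_INT) 1 << 41)
              = 0x20000000200  */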
29446 /* Mask options that we want to support inside of attribute((target)) and
29447 #pragma GCC target operations. Note, we do not include things like
29448 64/32-bit, endianness, hard/soft floating point, etc. that would have
29449 different calling sequences. */
29451 struct rs6000_opt_mask {
29452 const char *name; /* option name */
29453 HOST_WIDE_INT mask; /* mask to set */
29454 bool invert; /* invert sense of mask */
29455 bool valid_target; /* option is a target option */
29458 static struct rs6000_opt_mask const rs6000_opt_masks[] =
29460 { "altivec", OPTION_MASK_ALTIVEC, false, true },
29461 { "cmpb", OPTION_MASK_CMPB, false, true },
29462 { "crypto", OPTION_MASK_CRYPTO, false, true },
29463 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
29464 { "dlmzb", OPTION_MASK_DLMZB, false, true },
29465 { "fprnd", OPTION_MASK_FPRND, false, true },
29466 { "hard-dfp", OPTION_MASK_DFP, false, true },
29467 { "htm", OPTION_MASK_HTM, false, true },
29468 { "isel", OPTION_MASK_ISEL, false, true },
29469 { "mfcrf", OPTION_MASK_MFCRF, false, true },
29470 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
29471 { "mulhw", OPTION_MASK_MULHW, false, true },
29472 { "multiple", OPTION_MASK_MULTIPLE, false, true },
29473 { "popcntb", OPTION_MASK_POPCNTB, false, true },
29474 { "popcntd", OPTION_MASK_POPCNTD, false, true },
29475 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
29476 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
29477 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
29478 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
29479 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
29480 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
29481 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
29482 { "string", OPTION_MASK_STRING, false, true },
29483 { "update", OPTION_MASK_NO_UPDATE, true , true },
29484 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, false },
29485 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, false },
29486 { "vsx", OPTION_MASK_VSX, false, true },
29487 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
29488 #ifdef OPTION_MASK_64BIT
29489 #if TARGET_AIX_OS
29490 { "aix64", OPTION_MASK_64BIT, false, false },
29491 { "aix32", OPTION_MASK_64BIT, true, false },
29492 #else
29493 { "64", OPTION_MASK_64BIT, false, false },
29494 { "32", OPTION_MASK_64BIT, true, false },
29495 #endif
29496 #endif
29497 #ifdef OPTION_MASK_EABI
29498 { "eabi", OPTION_MASK_EABI, false, false },
29499 #endif
29500 #ifdef OPTION_MASK_LITTLE_ENDIAN
29501 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
29502 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
29503 #endif
29504 #ifdef OPTION_MASK_RELOCATABLE
29505 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
29506 #endif
29507 #ifdef OPTION_MASK_STRICT_ALIGN
29508 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
29509 #endif
29510 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
29511 { "string", OPTION_MASK_STRING, false, false },
29514 /* Builtin mask mapping for printing the flags. */
29515 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
29517 { "altivec", RS6000_BTM_ALTIVEC, false, false },
29518 { "vsx", RS6000_BTM_VSX, false, false },
29519 { "spe", RS6000_BTM_SPE, false, false },
29520 { "paired", RS6000_BTM_PAIRED, false, false },
29521 { "fre", RS6000_BTM_FRE, false, false },
29522 { "fres", RS6000_BTM_FRES, false, false },
29523 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
29524 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
29525 { "popcntd", RS6000_BTM_POPCNTD, false, false },
29526 { "cell", RS6000_BTM_CELL, false, false },
29527 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
29528 { "crypto", RS6000_BTM_CRYPTO, false, false },
29529 { "htm", RS6000_BTM_HTM, false, false },
29532 /* Option variables that we want to support inside attribute((target)) and
29533 #pragma GCC target operations. */
29535 struct rs6000_opt_var {
29536 const char *name; /* option name */
29537 size_t global_offset; /* offset of the option in global_options. */
29538 size_t target_offset; /* offset of the option in target options. */
29541 static struct rs6000_opt_var const rs6000_opt_vars[] =
29543 { "friz",
29544 offsetof (struct gcc_options, x_TARGET_FRIZ),
29545 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
29546 { "avoid-indexed-addresses",
29547 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
29548 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
29549 { "paired",
29550 offsetof (struct gcc_options, x_rs6000_paired_float),
29551 offsetof (struct cl_target_option, x_rs6000_paired_float), },
29552 { "longcall",
29553 offsetof (struct gcc_options, x_rs6000_default_long_calls),
29554 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
29557 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
29558 parsing. Return true if there were no errors. */
29560 static bool
29561 rs6000_inner_target_options (tree args, bool attr_p)
29563 bool ret = true;
29565 if (args == NULL_TREE)
29568 else if (TREE_CODE (args) == STRING_CST)
29570 char *p = ASTRDUP (TREE_STRING_POINTER (args));
29571 char *q;
29573 while ((q = strtok (p, ",")) != NULL)
29575 bool error_p = false;
29576 bool not_valid_p = false;
29577 const char *cpu_opt = NULL;
29579 p = NULL;
29580 if (strncmp (q, "cpu=", 4) == 0)
29582 int cpu_index = rs6000_cpu_name_lookup (q+4);
29583 if (cpu_index >= 0)
29584 rs6000_cpu_index = cpu_index;
29585 else
29587 error_p = true;
29588 cpu_opt = q+4;
29591 else if (strncmp (q, "tune=", 5) == 0)
29593 int tune_index = rs6000_cpu_name_lookup (q+5);
29594 if (tune_index >= 0)
29595 rs6000_tune_index = tune_index;
29596 else
29598 error_p = true;
29599 cpu_opt = q+5;
29602 else
29604 size_t i;
29605 bool invert = false;
29606 char *r = q;
29608 error_p = true;
29609 if (strncmp (r, "no-", 3) == 0)
29611 invert = true;
29612 r += 3;
29615 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
29616 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
29618 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
29620 if (!rs6000_opt_masks[i].valid_target)
29621 not_valid_p = true;
29622 else
29624 error_p = false;
29625 rs6000_isa_flags_explicit |= mask;
29627                   /* VSX needs altivec, so -mvsx automatically sets
29628                      altivec.  */
29629 if (mask == OPTION_MASK_VSX && !invert)
29630 mask |= OPTION_MASK_ALTIVEC;
29632 if (rs6000_opt_masks[i].invert)
29633 invert = !invert;
29635 if (invert)
29636 rs6000_isa_flags &= ~mask;
29637 else
29638 rs6000_isa_flags |= mask;
29640 break;
29643 if (error_p && !not_valid_p)
29645 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
29646 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
29648 size_t j = rs6000_opt_vars[i].global_offset;
29649 *((int *) ((char *)&global_options + j)) = !invert;
29650 error_p = false;
29651 break;
29656 if (error_p)
29658 const char *eprefix, *esuffix;
29660 ret = false;
29661 if (attr_p)
29663 eprefix = "__attribute__((__target__(";
29664 esuffix = ")))";
29666 else
29668 eprefix = "#pragma GCC target ";
29669 esuffix = "";
29672 if (cpu_opt)
29673 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
29674 q, esuffix);
29675 else if (not_valid_p)
29676 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
29677 else
29678 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
29683 else if (TREE_CODE (args) == TREE_LIST)
29687 tree value = TREE_VALUE (args);
29688 if (value)
29690 bool ret2 = rs6000_inner_target_options (value, attr_p);
29691 if (!ret2)
29692 ret = false;
29694 args = TREE_CHAIN (args);
29696 while (args != NULL_TREE);
29699 else
29700 gcc_unreachable ();
29702 return ret;
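/* Examples of strings the parser above accepts (illustrative):
   "cpu=power7", "tune=power8", "vsx" (which also enables altivec),
   "no-string", or a variable name from rs6000_opt_vars such as "friz".  */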
29705 /* Print out the target options as a list for -mdebug=target. */
29707 static void
29708 rs6000_debug_target_options (tree args, const char *prefix)
29710 if (args == NULL_TREE)
29711 fprintf (stderr, "%s<NULL>", prefix);
29713 else if (TREE_CODE (args) == STRING_CST)
29715 char *p = ASTRDUP (TREE_STRING_POINTER (args));
29716 char *q;
29718 while ((q = strtok (p, ",")) != NULL)
29720 p = NULL;
29721 fprintf (stderr, "%s\"%s\"", prefix, q);
29722 prefix = ", ";
29726 else if (TREE_CODE (args) == TREE_LIST)
29730 tree value = TREE_VALUE (args);
29731 if (value)
29733 rs6000_debug_target_options (value, prefix);
29734 prefix = ", ";
29736 args = TREE_CHAIN (args);
29738 while (args != NULL_TREE);
29741 else
29742 gcc_unreachable ();
29744 return;
29748 /* Hook to validate attribute((target("..."))). */
29750 static bool
29751 rs6000_valid_attribute_p (tree fndecl,
29752 tree ARG_UNUSED (name),
29753 tree args,
29754 int flags)
29756 struct cl_target_option cur_target;
29757 bool ret;
29758 tree old_optimize = build_optimization_node (&global_options);
29759 tree new_target, new_optimize;
29760 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
29762 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
29764 if (TARGET_DEBUG_TARGET)
29766 tree tname = DECL_NAME (fndecl);
29767 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
29768 if (tname)
29769 fprintf (stderr, "function: %.*s\n",
29770 (int) IDENTIFIER_LENGTH (tname),
29771 IDENTIFIER_POINTER (tname));
29772 else
29773 fprintf (stderr, "function: unknown\n");
29775 fprintf (stderr, "args:");
29776 rs6000_debug_target_options (args, " ");
29777 fprintf (stderr, "\n");
29779 if (flags)
29780 fprintf (stderr, "flags: 0x%x\n", flags);
29782 fprintf (stderr, "--------------------\n");
29785 old_optimize = build_optimization_node (&global_options);
29786 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
29788 /* If the function changed the optimization levels as well as setting target
29789 options, start with the optimizations specified. */
29790 if (func_optimize && func_optimize != old_optimize)
29791 cl_optimization_restore (&global_options,
29792 TREE_OPTIMIZATION (func_optimize));
29794 /* The target attributes may also change some optimization flags, so update
29795 the optimization options if necessary. */
29796 cl_target_option_save (&cur_target, &global_options);
29797 rs6000_cpu_index = rs6000_tune_index = -1;
29798 ret = rs6000_inner_target_options (args, true);
29800 /* Set up any additional state. */
29801 if (ret)
29803 ret = rs6000_option_override_internal (false);
29804 new_target = build_target_option_node (&global_options);
29806 else
29807 new_target = NULL;
29809 new_optimize = build_optimization_node (&global_options);
29811 if (!new_target)
29812 ret = false;
29814 else if (fndecl)
29816 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
29818 if (old_optimize != new_optimize)
29819 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
29822 cl_target_option_restore (&global_options, &cur_target);
29824 if (old_optimize != new_optimize)
29825 cl_optimization_restore (&global_options,
29826 TREE_OPTIMIZATION (old_optimize));
29828 return ret;
29832 /* Hook to validate the current #pragma GCC target and set the state, and
29833 update the macros based on what was changed. If ARGS is NULL, then
29834 POP_TARGET is used to reset the options. */
29836 bool
29837 rs6000_pragma_target_parse (tree args, tree pop_target)
29839 tree prev_tree = build_target_option_node (&global_options);
29840 tree cur_tree;
29841 struct cl_target_option *prev_opt, *cur_opt;
29842 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
29843 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
29845 if (TARGET_DEBUG_TARGET)
29847 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
29848 fprintf (stderr, "args:");
29849 rs6000_debug_target_options (args, " ");
29850 fprintf (stderr, "\n");
29852 if (pop_target)
29854 fprintf (stderr, "pop_target:\n");
29855 debug_tree (pop_target);
29857 else
29858 fprintf (stderr, "pop_target: <NULL>\n");
29860 fprintf (stderr, "--------------------\n");
29863 if (! args)
29865 cur_tree = ((pop_target)
29866 ? pop_target
29867 : target_option_default_node);
29868 cl_target_option_restore (&global_options,
29869 TREE_TARGET_OPTION (cur_tree));
29871 else
29873 rs6000_cpu_index = rs6000_tune_index = -1;
29874 if (!rs6000_inner_target_options (args, false)
29875 || !rs6000_option_override_internal (false)
29876 || (cur_tree = build_target_option_node (&global_options))
29877 == NULL_TREE)
29879 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
29880 fprintf (stderr, "invalid pragma\n");
29882 return false;
29886 target_option_current_node = cur_tree;
29888 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
29889 change the macros that are defined. */
29890 if (rs6000_target_modify_macros_ptr)
29892 prev_opt = TREE_TARGET_OPTION (prev_tree);
29893 prev_bumask = prev_opt->x_rs6000_builtin_mask;
29894 prev_flags = prev_opt->x_rs6000_isa_flags;
29896 cur_opt = TREE_TARGET_OPTION (cur_tree);
29897 cur_flags = cur_opt->x_rs6000_isa_flags;
29898 cur_bumask = cur_opt->x_rs6000_builtin_mask;
29900 diff_bumask = (prev_bumask ^ cur_bumask);
29901 diff_flags = (prev_flags ^ cur_flags);
29903 if ((diff_flags != 0) || (diff_bumask != 0))
29905 /* Delete old macros. */
29906 rs6000_target_modify_macros_ptr (false,
29907 prev_flags & diff_flags,
29908 prev_bumask & diff_bumask);
29910 /* Define new macros. */
29911 rs6000_target_modify_macros_ptr (true,
29912 cur_flags & diff_flags,
29913 cur_bumask & diff_bumask);
29917 return true;
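/* Sketch of the pragma flow this hook handles (hypothetical user code):

       #pragma GCC push_options
       #pragma GCC target ("vsx")
       ...                            (__VSX__ is defined in here)
       #pragma GCC pop_options

   On the pop, ARGS is NULL and POP_TARGET supplies the options to restore;
   the diffing above then undefines and redefines only the macros whose
   underlying flags actually changed.  */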
29921 /* Remember the last target of rs6000_set_current_function. */
29922 static GTY(()) tree rs6000_previous_fndecl;
29924 /* Establish appropriate back-end context for processing the function
29925 FNDECL. The argument might be NULL to indicate processing at top
29926 level, outside of any function scope. */
29927 static void
29928 rs6000_set_current_function (tree fndecl)
29930 tree old_tree = (rs6000_previous_fndecl
29931 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
29932 : NULL_TREE);
29934 tree new_tree = (fndecl
29935 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
29936 : NULL_TREE);
29938 if (TARGET_DEBUG_TARGET)
29940 bool print_final = false;
29941 fprintf (stderr, "\n==================== rs6000_set_current_function");
29943 if (fndecl)
29944 fprintf (stderr, ", fndecl %s (%p)",
29945 (DECL_NAME (fndecl)
29946 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
29947 : "<unknown>"), (void *)fndecl);
29949 if (rs6000_previous_fndecl)
29950 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
29952 fprintf (stderr, "\n");
29953 if (new_tree)
29955 fprintf (stderr, "\nnew fndecl target specific options:\n");
29956 debug_tree (new_tree);
29957 print_final = true;
29960 if (old_tree)
29962 fprintf (stderr, "\nold fndecl target specific options:\n");
29963 debug_tree (old_tree);
29964 print_final = true;
29967 if (print_final)
29968 fprintf (stderr, "--------------------\n");
29971 /* Only change the context if the function changes. This hook is called
29972 several times in the course of compiling a function, and we don't want to
29973 slow things down too much or call target_reinit when it isn't safe. */
29974 if (fndecl && fndecl != rs6000_previous_fndecl)
29976 rs6000_previous_fndecl = fndecl;
29977 if (old_tree == new_tree)
29980 else if (new_tree)
29982 cl_target_option_restore (&global_options,
29983 TREE_TARGET_OPTION (new_tree));
29984 target_reinit ();
29987 else if (old_tree)
29989 struct cl_target_option *def
29990 = TREE_TARGET_OPTION (target_option_current_node);
29992 cl_target_option_restore (&global_options, def);
29993 target_reinit ();
29999 /* Save the current options */
30001 static void
30002 rs6000_function_specific_save (struct cl_target_option *ptr,
30003 struct gcc_options *opts)
30005 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
30006 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
30009 /* Restore the current options */
30011 static void
30012 rs6000_function_specific_restore (struct gcc_options *opts,
30013 struct cl_target_option *ptr)
30016 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
30017 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
30018 (void) rs6000_option_override_internal (false);
30021 /* Print the current options */
30023 static void
30024 rs6000_function_specific_print (FILE *file, int indent,
30025 struct cl_target_option *ptr)
30027 rs6000_print_isa_options (file, indent, "Isa options set",
30028 ptr->x_rs6000_isa_flags);
30030 rs6000_print_isa_options (file, indent, "Isa options explicit",
30031 ptr->x_rs6000_isa_flags_explicit);
30034 /* Helper function to print the current isa or misc options on a line. */
30036 static void
30037 rs6000_print_options_internal (FILE *file,
30038 int indent,
30039 const char *string,
30040 HOST_WIDE_INT flags,
30041 const char *prefix,
30042 const struct rs6000_opt_mask *opts,
30043 size_t num_elements)
30045 size_t i;
30046 size_t start_column = 0;
30047 size_t cur_column;
30048 size_t max_column = 76;
30049 const char *comma = "";
30050 const char *nl = "\n";
30052 if (indent)
30053 start_column += fprintf (file, "%*s", indent, "");
30055 if (!flags)
30057       fprintf (file, DEBUG_FMT_S, string, "<none>");
30058 return;
30061   start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
30063 /* Print the various mask options. */
30064 cur_column = start_column;
30065 for (i = 0; i < num_elements; i++)
30067 if ((flags & opts[i].mask) != 0)
30069         const char *no_str = opts[i].invert ? "no-" : "";
30070 size_t len = (strlen (comma)
30071 + strlen (prefix)
30072 + strlen (no_str)
30073                       + strlen (opts[i].name));
30075 cur_column += len;
30076 if (cur_column > max_column)
30078             fprintf (file, ", \\\n%*s", (int)start_column, "");
30079 cur_column = start_column + len;
30080 comma = "";
30081 nl = "\n\n";
30084 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
30085                  opts[i].name);
30086 flags &= ~ opts[i].mask;
30087 comma = ", ";
30091 fputs (nl, file);
30094 /* Helper function to print the current isa options on a line. */
30096 static void
30097 rs6000_print_isa_options (FILE *file, int indent, const char *string,
30098 HOST_WIDE_INT flags)
30100 rs6000_print_options_internal (file, indent, string, flags, "-m",
30101 &rs6000_opt_masks[0],
30102 ARRAY_SIZE (rs6000_opt_masks));
30105 static void
30106 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
30107 HOST_WIDE_INT flags)
30109 rs6000_print_options_internal (file, indent, string, flags, "",
30110 &rs6000_builtin_mask_names[0],
30111 ARRAY_SIZE (rs6000_builtin_mask_names));
30115 /* Hook to determine if one function can safely inline another. */
30117 static bool
30118 rs6000_can_inline_p (tree caller, tree callee)
30120 bool ret = false;
30121 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
30122 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
30124 /* If callee has no option attributes, then it is ok to inline. */
30125 if (!callee_tree)
30126 ret = true;
30128 /* If caller has no option attributes, but callee does then it is not ok to
30129 inline. */
30130 else if (!caller_tree)
30131 ret = false;
30133 else
30135 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
30136 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
30138       /* Callee's options should be a subset of the caller's, i.e. a vsx function
30139 can inline an altivec function but a non-vsx function can't inline a
30140 vsx function. */
30141 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
30142 == callee_opts->x_rs6000_isa_flags)
30143 ret = true;
30146 if (TARGET_DEBUG_TARGET)
30147     fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
30148 (DECL_NAME (caller)
30149 ? IDENTIFIER_POINTER (DECL_NAME (caller))
30150 : "<unknown>"),
30151 (DECL_NAME (callee)
30152 ? IDENTIFIER_POINTER (DECL_NAME (callee))
30153 : "<unknown>"),
30154 (ret ? "can" : "cannot"));
30156 return ret;
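/* Illustration of the subset rule (hypothetical user code):

       __attribute__((__target__("altivec"))) static int vec_part (void);
       __attribute__((__target__("vsx")))     static int caller (void);

   caller may inline vec_part, because -mvsx also enables altivec and so
   the callee's ISA flags are a subset of the caller's; a function built
   without vsx could not inline caller.  */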
30159 /* Allocate a stack temp and fix up the address so it meets the particular
30160    memory requirements (either offsettable or REG+REG addressing).  */
30163 rs6000_allocate_stack_temp (enum machine_mode mode,
30164 bool offsettable_p,
30165 bool reg_reg_p)
30167 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
30168 rtx addr = XEXP (stack, 0);
30169 int strict_p = (reload_in_progress || reload_completed);
30171 if (!legitimate_indirect_address_p (addr, strict_p))
30173 if (offsettable_p
30174 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
30175 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
30177 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
30178 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
30181 return stack;
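/* Typical use (illustrative): obtain a DFmode stack slot whose address is
   valid for reg+reg instructions such as STFIWX:

       rtx slot = rs6000_allocate_stack_temp (DFmode, false, true);  */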
30184 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
30185 to such a form to deal with memory reference instructions like STFIWX that
30186 only take reg+reg addressing. */
30189 rs6000_address_for_fpconvert (rtx x)
30191 int strict_p = (reload_in_progress || reload_completed);
30192 rtx addr;
30194 gcc_assert (MEM_P (x));
30195 addr = XEXP (x, 0);
30196 if (! legitimate_indirect_address_p (addr, strict_p)
30197 && ! legitimate_indexed_address_p (addr, strict_p))
30199 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
30201 rtx reg = XEXP (addr, 0);
30202 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
30203 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
30204 gcc_assert (REG_P (reg));
30205 emit_insn (gen_add3_insn (reg, reg, size_rtx));
30206 addr = reg;
30208 else if (GET_CODE (addr) == PRE_MODIFY)
30210 rtx reg = XEXP (addr, 0);
30211 rtx expr = XEXP (addr, 1);
30212 gcc_assert (REG_P (reg));
30213 gcc_assert (GET_CODE (expr) == PLUS);
30214 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
30215 addr = reg;
30218 x = replace_equiv_address (x, copy_addr_to_reg (addr));
30221 return x;
30224 /* Given a memory reference, if it is not in the form for altivec memory
30225 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
30226 convert to the altivec format. */
30229 rs6000_address_for_altivec (rtx x)
30231 gcc_assert (MEM_P (x));
30232 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
30234 rtx addr = XEXP (x, 0);
30235 int strict_p = (reload_in_progress || reload_completed);
30237 if (!legitimate_indexed_address_p (addr, strict_p)
30238 && !legitimate_indirect_address_p (addr, strict_p))
30239 addr = copy_to_mode_reg (Pmode, addr);
30241 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
30242 x = change_address (x, GET_MODE (x), addr);
30245 return x;
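/* Resulting address shape (illustrative RTL, assuming 32-bit Pmode and a
   hypothetical register number):

       (mem:V4SI (and:SI (reg:SI 9) (const_int -16)))

   i.e. the low four bits are masked off, mirroring what the AltiVec
   load/store instructions themselves do to the effective address.  */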
30248 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
30250 On the RS/6000, all integer constants are acceptable, most won't be valid
30251 for particular insns, though. Only easy FP constants are acceptable. */
30253 static bool
30254 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
30256 if (TARGET_ELF && rs6000_tls_referenced_p (x))
30257 return false;
30259 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
30260 || GET_MODE (x) == VOIDmode
30261 || (TARGET_POWERPC64 && mode == DImode)
30262 || easy_fp_constant (x, mode)
30263 || easy_vector_constant (x, mode));
30267 /* A function pointer under AIX is a pointer to a data area whose first word
30268 contains the actual address of the function, whose second word contains a
30269 pointer to its TOC, and whose third word contains a value to place in the
30270 static chain register (r11). Note that if we load the static chain, our
30271 "trampoline" need not have any executable code. */
30273 void
30274 rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
30276 rtx func_addr;
30277 rtx toc_reg;
30278 rtx sc_reg;
30279 rtx stack_ptr;
30280 rtx stack_toc_offset;
30281 rtx stack_toc_mem;
30282 rtx func_toc_offset;
30283 rtx func_toc_mem;
30284 rtx func_sc_offset;
30285 rtx func_sc_mem;
30286 rtx insn;
30287 rtx (*call_func) (rtx, rtx, rtx, rtx);
30288 rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);
30290 stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
30291 toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
30293 /* Load up address of the actual function. */
30294 func_desc = force_reg (Pmode, func_desc);
30295 func_addr = gen_reg_rtx (Pmode);
30296 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
30298 if (TARGET_32BIT)
30301 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
30302 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
30303 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
30304 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
30306 call_func = gen_call_indirect_aix32bit;
30307 call_value_func = gen_call_value_indirect_aix32bit;
30309 else
30311 call_func = gen_call_indirect_aix32bit_nor11;
30312 call_value_func = gen_call_value_indirect_aix32bit_nor11;
30315 else
30317 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
30318 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
30319 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
30320 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
30322 call_func = gen_call_indirect_aix64bit;
30323 call_value_func = gen_call_value_indirect_aix64bit;
30325 else
30327 call_func = gen_call_indirect_aix64bit_nor11;
30328 call_value_func = gen_call_value_indirect_aix64bit_nor11;
30332 /* Reserved spot to store the TOC. */
30333 stack_toc_mem = gen_frame_mem (Pmode,
30334 gen_rtx_PLUS (Pmode,
30335 stack_ptr,
30336 stack_toc_offset));
30338 gcc_assert (cfun);
30339 gcc_assert (cfun->machine);
30341 /* Can we optimize saving the TOC in the prologue or do we need to do it at
30342 every call? */
30343 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
30344 cfun->machine->save_toc_in_prologue = true;
30346 else
30348 MEM_VOLATILE_P (stack_toc_mem) = 1;
30349 emit_move_insn (stack_toc_mem, toc_reg);
30352 /* Calculate the address to load the TOC of the called function. We don't
30353 actually load this until the split after reload. */
30354 func_toc_mem = gen_rtx_MEM (Pmode,
30355 gen_rtx_PLUS (Pmode,
30356 func_desc,
30357 func_toc_offset));
30359 /* If we have a static chain, load it up. */
30360 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
30362 func_sc_mem = gen_rtx_MEM (Pmode,
30363 gen_rtx_PLUS (Pmode,
30364 func_desc,
30365 func_sc_offset));
30367 sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
30368 emit_move_insn (sc_reg, func_sc_mem);
30371 /* Create the call. */
30372 if (value)
30373 insn = call_value_func (value, func_addr, flag, func_toc_mem,
30374 stack_toc_mem);
30375 else
30376 insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);
30378 emit_call_insn (insn);
30381 /* Return whether we need to always update the saved TOC pointer when we update
30382 the stack pointer. */
30384 static bool
30385 rs6000_save_toc_in_prologue_p (void)
30387 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
30390 #ifdef HAVE_GAS_HIDDEN
30391 # define USE_HIDDEN_LINKONCE 1
30392 #else
30393 # define USE_HIDDEN_LINKONCE 0
30394 #endif
30396 /* Fills in the label name that should be used for a 476 link stack thunk. */
30398 void
30399 get_ppc476_thunk_name (char name[32])
30401 gcc_assert (TARGET_LINK_STACK);
30403 if (USE_HIDDEN_LINKONCE)
30404 sprintf (name, "__ppc476.get_thunk");
30405 else
30406 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
30409 /* This function emits the simple thunk routine that is used to preserve
30410 the link stack on the 476 cpu. */
30412 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
30413 static void
30414 rs6000_code_end (void)
30416 char name[32];
30417 tree decl;
30419 if (!TARGET_LINK_STACK)
30420 return;
30422 get_ppc476_thunk_name (name);
30424 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
30425 build_function_type_list (void_type_node, NULL_TREE));
30426 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
30427 NULL_TREE, void_type_node);
30428 TREE_PUBLIC (decl) = 1;
30429 TREE_STATIC (decl) = 1;
30431 #if RS6000_WEAK
30432 if (USE_HIDDEN_LINKONCE)
30434 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
30435 targetm.asm_out.unique_section (decl, 0);
30436 switch_to_section (get_named_section (decl, NULL, 0));
30437 DECL_WEAK (decl) = 1;
30438 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
30439 targetm.asm_out.globalize_label (asm_out_file, name);
30440 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
30441 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
30443 else
30444 #endif
30446 switch_to_section (text_section);
30447 ASM_OUTPUT_LABEL (asm_out_file, name);
30450 DECL_INITIAL (decl) = make_node (BLOCK);
30451 current_function_decl = decl;
30452 init_function_start (decl);
30453 first_function_block_is_cold = false;
30454 /* Make sure unwind info is emitted for the thunk if needed. */
30455 final_start_function (emit_barrier (), asm_out_file, 1);
30457 fputs ("\tblr\n", asm_out_file);
30459 final_end_function ();
30460 init_insn_lengths ();
30461 free_after_compilation (cfun);
30462 set_cfun (NULL);
30463 current_function_decl = NULL;
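/* The thunk emitted above is just a labelled return; assuming
   USE_HIDDEN_LINKONCE, the assembly output boils down to:

       __ppc476.get_thunk:
               blr
*/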
30466 /* Add r30 to hard reg set if the prologue sets it up and it is not
30467 pic_offset_table_rtx. */
30469 static void
30470 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
30472 if (!TARGET_SINGLE_PIC_BASE
30473 && TARGET_TOC
30474 && TARGET_MINIMAL_TOC
30475 && get_pool_size () != 0)
30476 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
30480 /* Helper function for rs6000_split_logical to emit a logical instruction after
30481    splitting the operation into single GPR registers.
30483 DEST is the destination register.
30484 OP1 and OP2 are the input source registers.
30485 CODE is the base operation (AND, IOR, XOR, NOT).
30486 MODE is the machine mode.
30487 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
30488 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
30489 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
30490 CLOBBER_REG is either NULL or a scratch register of type CC to allow
30491 formation of the AND instructions. */
30493 static void
30494 rs6000_split_logical_inner (rtx dest,
30495 rtx op1,
30496 rtx op2,
30497 enum rtx_code code,
30498 enum machine_mode mode,
30499 bool complement_final_p,
30500 bool complement_op1_p,
30501 bool complement_op2_p,
30502 rtx clobber_reg)
30504 rtx bool_rtx;
30505 rtx set_rtx;
30507 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
30508 if (op2 && GET_CODE (op2) == CONST_INT
30509 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
30510 && !complement_final_p && !complement_op1_p && !complement_op2_p)
30512 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
30513 HOST_WIDE_INT value = INTVAL (op2) & mask;
30515 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
30516 if (code == AND)
30518 if (value == 0)
30520 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
30521 return;
30524 else if (value == mask)
30526 if (!rtx_equal_p (dest, op1))
30527 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
30528 return;
30532 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
30533      into separate ORI/ORIS or XORI/XORIS instructions.  */
30534 else if (code == IOR || code == XOR)
30536 if (value == 0)
30538 if (!rtx_equal_p (dest, op1))
30539 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
30540 return;
30545 if (complement_op1_p)
30546 op1 = gen_rtx_NOT (mode, op1);
30548 if (complement_op2_p)
30549 op2 = gen_rtx_NOT (mode, op2);
30551 bool_rtx = ((code == NOT)
30552 ? gen_rtx_NOT (mode, op1)
30553 : gen_rtx_fmt_ee (code, mode, op1, op2));
30555 if (complement_final_p)
30556 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
30558 set_rtx = gen_rtx_SET (VOIDmode, dest, bool_rtx);
30560 /* Is this AND with an explicit clobber? */
30561 if (clobber_reg)
30563 rtx clobber = gen_rtx_CLOBBER (VOIDmode, clobber_reg);
30564 set_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set_rtx, clobber));
30567 emit_insn (set_rtx);
30568 return;
30571 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
30572 operations are split immediately during RTL generation to allow for more
30573 optimizations of the AND/IOR/XOR.
30575 OPERANDS is an array containing the destination and two input operands.
30576 CODE is the base operation (AND, IOR, XOR, NOT).
30577 MODE is the machine mode.
30578 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
30579 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
30580 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
30581 CLOBBER_REG is either NULL or a scratch register of type CC to allow
30582 formation of the AND instructions. */
30584 static void
30585 rs6000_split_logical_di (rtx operands[3],
30586 enum rtx_code code,
30587 bool complement_final_p,
30588 bool complement_op1_p,
30589 bool complement_op2_p,
30590 rtx clobber_reg)
30592 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
30593 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
30594 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
30595 enum hi_lo { hi = 0, lo = 1 };
30596 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
30597 size_t i;
30599 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
30600 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
30601 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
30602 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
30604 if (code == NOT)
30605 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
30606 else
30608 if (GET_CODE (operands[2]) != CONST_INT)
30610 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
30611 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
30613 else
30615 HOST_WIDE_INT value = INTVAL (operands[2]);
30616 HOST_WIDE_INT value_hi_lo[2];
30618 gcc_assert (!complement_final_p);
30619 gcc_assert (!complement_op1_p);
30620 gcc_assert (!complement_op2_p);
30622 value_hi_lo[hi] = value >> 32;
30623 value_hi_lo[lo] = value & lower_32bits;
30625 for (i = 0; i < 2; i++)
30627 HOST_WIDE_INT sub_value = value_hi_lo[i];
30629 if (sub_value & sign_bit)
30630 sub_value |= upper_32bits;
30632 op2_hi_lo[i] = GEN_INT (sub_value);
30634 /* If this is an AND instruction, check to see if we need to load
30635 the value in a register. */
30636 if (code == AND && sub_value != -1 && sub_value != 0
30637 && !and_operand (op2_hi_lo[i], SImode))
30638 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
30643 for (i = 0; i < 2; i++)
30645 /* Split large IOR/XOR operations. */
30646 if ((code == IOR || code == XOR)
30647 && GET_CODE (op2_hi_lo[i]) == CONST_INT
30648 && !complement_final_p
30649 && !complement_op1_p
30650 && !complement_op2_p
30651 && clobber_reg == NULL_RTX
30652 && !logical_const_operand (op2_hi_lo[i], SImode))
30654 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
30655 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
30656 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
30657 rtx tmp = gen_reg_rtx (SImode);
30659 /* Make sure the constant is sign extended. */
30660 if ((hi_16bits & sign_bit) != 0)
30661 hi_16bits |= upper_32bits;
30663 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
30664 code, SImode, false, false, false,
30665 NULL_RTX);
30667 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
30668 code, SImode, false, false, false,
30669 NULL_RTX);
30671 else
30672 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
30673 code, SImode, complement_final_p,
30674 complement_op1_p, complement_op2_p,
30675 clobber_reg);
30678 return;
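/* Example of the splitting above (illustrative RTL; register numbers are
   hypothetical, high/low halves shown for a big-endian 32-bit target):

       (set (reg:DI 30) (and:DI (reg:DI 30) (const_int 0xffffffff)))

   becomes at most one SImode insn per half once the constant
   optimizations in rs6000_split_logical_inner kick in:

       (set (reg:SI 30) (const_int 0))     .. high half ANDed with 0
       .. low half ANDed with -1: no insn, since dest equals source.  */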
30681 /* Split the insns that make up boolean operations operating on multiple GPR
30682 registers. The boolean MD patterns ensure that the inputs either are
30683 exactly the same as the output registers, or there is no overlap.
30685 OPERANDS is an array containing the destination and two input operands.
30686 CODE is the base operation (AND, IOR, XOR, NOT).
30687 MODE is the machine mode.
30688 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
30689 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
30690 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
30691 CLOBBER_REG is either NULL or a scratch register of type CC to allow
30692 formation of the AND instructions. */
30694 void
30695 rs6000_split_logical (rtx operands[3],
30696 enum rtx_code code,
30697 bool complement_final_p,
30698 bool complement_op1_p,
30699 bool complement_op2_p,
30700 rtx clobber_reg)
30702 enum machine_mode mode = GET_MODE (operands[0]);
30703 enum machine_mode sub_mode;
30704 rtx op0, op1, op2;
30705 int sub_size, regno0, regno1, nregs, i;
30707 /* If this is DImode, use the specialized version that can run before
30708 register allocation. */
30709 if (mode == DImode && !TARGET_POWERPC64)
30711 rs6000_split_logical_di (operands, code, complement_final_p,
30712 complement_op1_p, complement_op2_p,
30713 clobber_reg);
30714 return;
30717 op0 = operands[0];
30718 op1 = operands[1];
30719 op2 = (code == NOT) ? NULL_RTX : operands[2];
30720 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
30721 sub_size = GET_MODE_SIZE (sub_mode);
30722 regno0 = REGNO (op0);
30723 regno1 = REGNO (op1);
30725 gcc_assert (reload_completed);
30726 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
30727 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
30729 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
30730 gcc_assert (nregs > 1);
30732 if (op2 && REG_P (op2))
30733 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
30735 for (i = 0; i < nregs; i++)
30737 int offset = i * sub_size;
30738 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
30739 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
30740 rtx sub_op2 = ((code == NOT)
30741 ? NULL_RTX
30742 : simplify_subreg (sub_mode, op2, mode, offset));
30744 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
30745 complement_final_p, complement_op1_p,
30746 complement_op2_p, clobber_reg);
30749 return;
30753 /* Return true if the peephole2 can combine an addis instruction with a load
30754    that uses a D-form offset, so that the pair can be fused together on a
30755    power8.
30757 The operands are:
30758 operands[0] register set with addis
30759 operands[1] value set via addis
30760 operands[2] target register being loaded
30761 operands[3] D-form memory reference using operands[0].
30763 In addition, we are passed a boolean that is true if this is a peephole2,
30764    in which case we can check whether the addis_reg is dead after the insn
30765    and can be replaced by the target register.  */
30767 bool
30768 fusion_gpr_load_p (rtx *operands, bool peep2_p)
30770 rtx addis_reg = operands[0];
30771 rtx addis_value = operands[1];
30772 rtx target = operands[2];
30773 rtx mem = operands[3];
30774 rtx addr;
30775 rtx base_reg;
30777 /* Validate arguments. */
30778 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
30779 return false;
30781 if (!base_reg_operand (target, GET_MODE (target)))
30782 return false;
30784 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
30785 return false;
30787 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
30788 return false;
30790 /* Allow sign/zero extension. */
30791 if (GET_CODE (mem) == ZERO_EXTEND
30792 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
30793 mem = XEXP (mem, 0);
30795 if (!MEM_P (mem))
30796 return false;
30798 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
30799 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
30800 return false;
30802 /* Validate that the register used to load the high value is either the
30803 register being loaded, or we can safely replace its use in a peephole2.
30805 If this is a peephole2, we assume that there are 2 instructions in the
30806 peephole (addis and load), so we want to check if the target register was
30807 not used in the memory address and the register to hold the addis result
30808 is dead after the peephole. */
30809 if (REGNO (addis_reg) != REGNO (target))
30811 if (!peep2_p)
30812 return false;
30814 if (reg_mentioned_p (target, mem))
30815 return false;
30817 if (!peep2_reg_dead_p (2, addis_reg))
30818 return false;
30821 base_reg = XEXP (addr, 0);
30822 return REGNO (addis_reg) == REGNO (base_reg);
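/* Shape of an insn pair this predicate accepts (illustrative pseudo-RTL;
   register numbers and the offset are hypothetical):

       operands[0] = (reg:DI 9)                              .. addis result
       operands[1] = (plus:DI (reg:DI 3) (const_int 65536))  .. high part
       operands[2] = (reg:DI 10)                             .. load target
       operands[3] = (mem:DI (plus:DI (reg:DI 9)
                                      (const_int 8)))        .. D-form load */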
30825 /* During the peephole2 pass, adjust and expand the insns for a load fusion
30826 sequence. We adjust the addis register to use the target register. If the
30827    load sign extends, we emit a zero-extending load followed by an
30828    explicit sign extension afterward, since the fusion only covers
30829    zero-extending loads.
30831 The operands are:
30832 operands[0] register set with addis (to be replaced with target)
30833 operands[1] value set via addis
30834 operands[2] target register being loaded
30835 operands[3] D-form memory reference using operands[0]. */
30837 void
30838 expand_fusion_gpr_load (rtx *operands)
30840 rtx addis_value = operands[1];
30841 rtx target = operands[2];
30842 rtx orig_mem = operands[3];
30843 rtx new_addr, new_mem, orig_addr, offset;
30844 enum rtx_code plus_or_lo_sum;
30845 enum machine_mode target_mode = GET_MODE (target);
30846 enum machine_mode extend_mode = target_mode;
30847 enum machine_mode ptr_mode = Pmode;
30848 enum rtx_code extend = UNKNOWN;
30849 rtx addis_reg = ((ptr_mode == target_mode)
30850 ? target
30851 : simplify_subreg (ptr_mode, target, target_mode, 0));
30853 if (GET_CODE (orig_mem) == ZERO_EXTEND
30854 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
30856 extend = GET_CODE (orig_mem);
30857 orig_mem = XEXP (orig_mem, 0);
30858 target_mode = GET_MODE (orig_mem);
30861 gcc_assert (MEM_P (orig_mem));
30863 orig_addr = XEXP (orig_mem, 0);
30864 plus_or_lo_sum = GET_CODE (orig_addr);
30865 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
30867 offset = XEXP (orig_addr, 1);
30868 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_reg, offset);
30869 new_mem = change_address (orig_mem, target_mode, new_addr);
30871 if (extend != UNKNOWN)
30872 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
30874 emit_insn (gen_rtx_SET (VOIDmode, addis_reg, addis_value));
30875 emit_insn (gen_rtx_SET (VOIDmode, target, new_mem));
30877 if (extend == SIGN_EXTEND)
30879 int sub_off = ((BYTES_BIG_ENDIAN)
30880 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
30881 : 0);
30882 rtx sign_reg
30883 = simplify_subreg (target_mode, target, extend_mode, sub_off);
30885 emit_insn (gen_rtx_SET (VOIDmode, target,
30886 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
30889 return;
30892 /* Return a string to fuse an addis instruction with a gpr load into the same
30893    register that the addis instruction set.  The code is complicated,
30894 so we call output_asm_insn directly, and just return "".
30896 The operands are:
30897 operands[0] register set with addis (must be same reg as target).
30898 operands[1] value set via addis
30899 operands[2] target register being loaded
30900 operands[3] D-form memory reference using operands[0]. */
30902 const char *
30903 emit_fusion_gpr_load (rtx *operands)
30905 rtx addis_reg = operands[0];
30906 rtx addis_value = operands[1];
30907 rtx target = operands[2];
30908 rtx mem = operands[3];
30909 rtx fuse_ops[10];
30910 rtx addr;
30911 rtx load_offset;
30912 const char *addis_str = NULL;
30913 const char *load_str = NULL;
30914 const char *extend_insn = NULL;
30915 const char *mode_name = NULL;
30916 char insn_template[80];
30917 enum machine_mode mode;
30918 const char *comment_str = ASM_COMMENT_START;
30919 bool sign_p = false;
30921 gcc_assert (REG_P (addis_reg) && REG_P (target));
30922 gcc_assert (REGNO (addis_reg) == REGNO (target));
30924 if (*comment_str == ' ')
30925 comment_str++;
30927 /* Allow sign/zero extension. */
30928 if (GET_CODE (mem) == ZERO_EXTEND)
30929 mem = XEXP (mem, 0);
30931 else if (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN)
30933 sign_p = true;
30934 mem = XEXP (mem, 0);
30937 gcc_assert (MEM_P (mem));
30938 addr = XEXP (mem, 0);
30939 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
30940 gcc_unreachable ();
30942 load_offset = XEXP (addr, 1);
30944 /* Now emit the load instruction to the same register. */
30945 mode = GET_MODE (mem);
30946 switch (mode)
30948 case QImode:
30949 mode_name = "char";
30950 load_str = "lbz";
30951 extend_insn = "extsb %0,%0";
30952 break;
30954 case HImode:
30955 mode_name = "short";
30956 load_str = "lhz";
30957 extend_insn = "extsh %0,%0";
30958 break;
30960 case SImode:
30961 mode_name = "int";
30962 load_str = "lwz";
30963 extend_insn = "extsw %0,%0";
30964 break;
30966 case DImode:
30967 if (TARGET_POWERPC64)
30969 mode_name = "long";
30970 load_str = "ld";
30972 else
30973 gcc_unreachable ();
30974 break;
30976 default:
30977 gcc_unreachable ();
30980 /* Emit the addis instruction. */
30981 fuse_ops[0] = target;
30982 if (satisfies_constraint_L (addis_value))
30984 fuse_ops[1] = addis_value;
30985 addis_str = "lis %0,%v1";
30988 else if (GET_CODE (addis_value) == PLUS)
30990 rtx op0 = XEXP (addis_value, 0);
30991 rtx op1 = XEXP (addis_value, 1);
30993 if (REG_P (op0) && CONST_INT_P (op1)
30994 && satisfies_constraint_L (op1))
30996 fuse_ops[1] = op0;
30997 fuse_ops[2] = op1;
30998 addis_str = "addis %0,%1,%v2";
31002 else if (GET_CODE (addis_value) == HIGH)
31004 rtx value = XEXP (addis_value, 0);
31005 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
31007 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
31008 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
31009 if (TARGET_ELF)
31010 addis_str = "addis %0,%2,%1@toc@ha";
31012 else if (TARGET_XCOFF)
31013 addis_str = "addis %0,%1@u(%2)";
31015 else
31016 gcc_unreachable ();
31019 else if (GET_CODE (value) == PLUS)
31021 rtx op0 = XEXP (value, 0);
31022 rtx op1 = XEXP (value, 1);
31024 if (GET_CODE (op0) == UNSPEC
31025 && XINT (op0, 1) == UNSPEC_TOCREL
31026 && CONST_INT_P (op1))
31028 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
31029 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
31030 fuse_ops[3] = op1;
31031 if (TARGET_ELF)
31032 addis_str = "addis %0,%2,%1+%3@toc@ha";
31034 else if (TARGET_XCOFF)
31035 addis_str = "addis %0,%1+%3@u(%2)";
31037 else
31038 gcc_unreachable ();
31042 else if (satisfies_constraint_L (value))
31044 fuse_ops[1] = value;
31045 addis_str = "lis %0,%v1";
31048 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
31050 fuse_ops[1] = value;
31051 addis_str = "lis %0,%1@ha";
31055 if (!addis_str)
31056 fatal_insn ("Could not generate addis value for fusion", addis_value);
31058 sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
31059 comment_str, mode_name);
31060 output_asm_insn (insn_template, fuse_ops);
31062 /* Emit the D-form load instruction. */
31063 if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
31065 sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
31066 fuse_ops[1] = load_offset;
31067 output_asm_insn (insn_template, fuse_ops);
31070 else if (GET_CODE (load_offset) == UNSPEC
31071 && XINT (load_offset, 1) == UNSPEC_TOCREL)
31073 if (TARGET_ELF)
31074 sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);
31076 else if (TARGET_XCOFF)
31077 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
31079 else
31080 gcc_unreachable ();
31082 fuse_ops[1] = XVECEXP (load_offset, 0, 0);
31083 output_asm_insn (insn_template, fuse_ops);
31086 else if (GET_CODE (load_offset) == PLUS
31087 && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
31088 && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
31089 && CONST_INT_P (XEXP (load_offset, 1)))
31091 rtx tocrel_unspec = XEXP (load_offset, 0);
31092 if (TARGET_ELF)
31093 sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);
31095 else if (TARGET_XCOFF)
31096 sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);
31098 else
31099 gcc_unreachable ();
31101 fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
31102 fuse_ops[2] = XEXP (load_offset, 1);
31103 output_asm_insn (insn_template, fuse_ops);
31106 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
31108 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
31110 fuse_ops[1] = load_offset;
31111 output_asm_insn (insn_template, fuse_ops);
31114 else
31115 fatal_insn ("Unable to generate load offset for fusion", load_offset);
31117 /* Handle sign extension. The peephole2 pass generates this as a separate
31118 insn, but we handle it just in case it got reattached. */
31119 if (sign_p)
31121 gcc_assert (extend_insn != NULL);
31122 output_asm_insn (extend_insn, fuse_ops);
31125 return "";
31129 struct gcc_target targetm = TARGET_INITIALIZER;
31131 #include "gt-rs6000.h"