/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2013 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which is called
   so we can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
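
/* A worked example of how the masks above combine (the values follow
   directly from the rs6000_recip_mask enum): -mrecip=divf selects
   RECIP_SF_DIV | RECIP_V4SF_DIV = 0x001 | 0x004 = 0x005, while
   RECIP_LOW_PRECISION is RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT)
   = 0x0ff & ~0x0a0 = 0x05f, i.e. every estimate except the two
   double-precision reciprocal sqrt forms.  */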
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Direct moves to/from vsx/gpr registers that need an additional register to
   do the move.  */
static enum insn_code reload_fpr_gpr[NUM_MACHINE_MODES];
static enum insn_code reload_gpr_vsx[NUM_MACHINE_MODES];
static enum insn_code reload_vsx_gpr[NUM_MACHINE_MODES];
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add).  */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
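
/* To illustrate the X-macro pattern above: a hypothetical entry such as

     RS6000_BUILTIN_2 (FOO, "foo", MASK_BITS, ATTR_BITS, CODE_foo)

   in rs6000-builtin.def would expand, under the definitions above, to the
   initializer

     { "foo", CODE_foo, MASK_BITS, ATTR_BITS },

   so the .def file populates rs6000_builtin_info with one row per builtin.
   (FOO, MASK_BITS, etc. are made-up names for illustration; see
   rs6000-builtin.def for the real entries.)  */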
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);


static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "vrsave", "vscr",
     /* SPE registers.  */
     "spe_acc", "spefscr",
     /* Soft frame pointer.  */
     "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
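
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) evaluates to
   0x80000000 (%v0 in the most significant bit), and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) to 0x20000000, matching
   the VRSAVE layout described above.  */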
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
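
/* For instance, assuming rs6000-cpus.def contributes a "power7" row,
   rs6000_cpu_name_lookup ("power7") returns that row's index in
   processor_target_table, while rs6000_cpu_name_lookup ("not-a-cpu")
   and rs6000_cpu_name_lookup (NULL) both return -1.  */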
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* TF/TD modes are special in that they always take 2 registers.  */
  if (FP_REGNO_P (regno))
    reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
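
/* A worked example of the rounding division above, assuming a 32-bit
   target where UNITS_PER_WORD is 4 and UNITS_PER_FP_WORD is 8: an
   8-byte DFmode value in a GPR needs (8 + 4 - 1) / 4 = 2 registers,
   while the same value in an FPR needs (8 + 8 - 1) / 8 = 1.  */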
1602 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1603 MODE. */
1604 static int
1605 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1607 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1609   /* PTImode can only go in GPRs.  Quad word memory operations require
1610      even/odd register pairs, and we use PTImode where we need to deal with
1611      quad word memory operations.  Don't allow quad words in the argument
1612      or frame pointer registers, just registers 0..31.  */
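  /* For example, PTImode starting at an even register such as r10 is
     accepted below, while an odd start such as r11 fails the
     (regno & 1) == 0 check.  */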
1613 if (mode == PTImode)
1614 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1615 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1616 && ((regno & 1) == 0));
1618   /* VSX registers that overlap the FPR registers are larger than on
1619      non-VSX implementations.  Don't allow an item to be split between a
1620      FP register and an Altivec register.  */
1621 if (VECTOR_MEM_VSX_P (mode))
1623 if (FP_REGNO_P (regno))
1624 return FP_REGNO_P (last_regno);
1626 if (ALTIVEC_REGNO_P (regno))
1627 return ALTIVEC_REGNO_P (last_regno);
1630 /* Allow TImode in all VSX registers if the user asked for it. */
1631 if (mode == TImode && TARGET_VSX_TIMODE && VSX_REGNO_P (regno))
1632 return 1;
1634 /* The GPRs can hold any mode, but values bigger than one register
1635 cannot go past R31. */
1636 if (INT_REGNO_P (regno))
1637 return INT_REGNO_P (last_regno);
1639 /* The float registers (except for VSX vector modes) can only hold floating
1640 modes and DImode. */
1641 if (FP_REGNO_P (regno))
1643 if (SCALAR_FLOAT_MODE_P (mode)
1644 && (mode != TDmode || (regno % 2) == 0)
1645 && FP_REGNO_P (last_regno))
1646 return 1;
1648 if (GET_MODE_CLASS (mode) == MODE_INT
1649 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1650 return 1;
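      /* Allowing integer modes of FP-word size lets 64-bit integers live in
	 FPRs, e.g. for the results of fctidz/fcfid-style conversions (an
	 illustrative example, not an exhaustive list).  */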
1652 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1653 && PAIRED_VECTOR_MODE (mode))
1654 return 1;
1656 return 0;
1659 /* The CR register can only hold CC modes. */
1660 if (CR_REGNO_P (regno))
1661 return GET_MODE_CLASS (mode) == MODE_CC;
1663 if (CA_REGNO_P (regno))
1664 return mode == BImode;
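  /* The CA register is the XER carry bit, so a single-bit BImode value is
     the only thing it can hold.  */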
1666   /* AltiVec only in AltiVec registers.  */
1667 if (ALTIVEC_REGNO_P (regno))
1668 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1670 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1671 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1672 return 1;
1674   /* We cannot put non-VSX TImode or PTImode anywhere except in the general
1675      registers, and the value must fit within the register set.  */
1677 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1680 /* Print interesting facts about registers. */
1681 static void
1682 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1684 int r, m;
1686 for (r = first_regno; r <= last_regno; ++r)
1688 const char *comma = "";
1689 int len;
1691 if (first_regno == last_regno)
1692 fprintf (stderr, "%s:\t", reg_name);
1693 else
1694 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1696 len = 8;
1697 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1698 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1700 if (len > 70)
1702 fprintf (stderr, ",\n\t");
1703 len = 8;
1704 comma = "";
1707 if (rs6000_hard_regno_nregs[m][r] > 1)
1708 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1709 rs6000_hard_regno_nregs[m][r]);
1710 else
1711 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1713 comma = ", ";
1716 if (call_used_regs[r])
1718 if (len > 70)
1720 fprintf (stderr, ",\n\t");
1721 len = 8;
1722 comma = "";
1725 len += fprintf (stderr, "%s%s", comma, "call-used");
1726 comma = ", ";
1729 if (fixed_regs[r])
1731 if (len > 70)
1733 fprintf (stderr, ",\n\t");
1734 len = 8;
1735 comma = "";
1738 len += fprintf (stderr, "%s%s", comma, "fixed");
1739 comma = ", ";
1742 if (len > 70)
1744 fprintf (stderr, ",\n\t");
1745 comma = "";
1748 len += fprintf (stderr, "%sreg-class = %s", comma,
1749 reg_class_names[(int)rs6000_regno_regclass[r]]);
1750 comma = ", ";
1752 if (len > 70)
1754 fprintf (stderr, ",\n\t");
1755 comma = "";
1758 fprintf (stderr, "%sregno = %d\n", comma, r);
1762 #define DEBUG_FMT_ID "%-32s= "
1763 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1764 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1765 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
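/* For instance, fprintf (stderr, DEBUG_FMT_S, "cmodel", "small") prints
   "cmodel" left-justified in a 32-column field followed by "= small".  */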
1767 /* Print various interesting information with -mdebug=reg. */
1768 static void
1769 rs6000_debug_reg_global (void)
1771 static const char *const tf[2] = { "false", "true" };
1772 const char *nl = (const char *)0;
1773 int m;
1774 size_t m1, m2, v;
1775 char costly_num[20];
1776 char nop_num[20];
1777 char flags_buffer[40];
1778 const char *costly_str;
1779 const char *nop_str;
1780 const char *trace_str;
1781 const char *abi_str;
1782 const char *cmodel_str;
1783 struct cl_target_option cl_opts;
1785 /* Map enum rs6000_vector to string. */
1786 static const char *rs6000_debug_vector_unit[] = {
1787 "none",
1788 "altivec",
1789 "vsx",
1790 "p8_vector",
1791 "paired",
1792 "spe",
1793 "other"
1796 /* Modes we want tieable information on. */
1797 static const enum machine_mode print_tieable_modes[] = {
1798 QImode,
1799 HImode,
1800 SImode,
1801 DImode,
1802 TImode,
1803 PTImode,
1804 SFmode,
1805 DFmode,
1806 TFmode,
1807 SDmode,
1808 DDmode,
1809 TDmode,
1810 V8QImode,
1811 V4HImode,
1812 V2SImode,
1813 V16QImode,
1814 V8HImode,
1815 V4SImode,
1816 V2DImode,
1817 V32QImode,
1818 V16HImode,
1819 V8SImode,
1820 V4DImode,
1821 V2SFmode,
1822 V4SFmode,
1823 V2DFmode,
1824 V8SFmode,
1825 V4DFmode,
1826 CCmode,
1827 CCUNSmode,
1828 CCEQmode,
1831 /* Virtual regs we are interested in. */
1832   static const struct {
1833 int regno; /* register number. */
1834 const char *name; /* register name. */
1835 } virtual_regs[] = {
1836 { STACK_POINTER_REGNUM, "stack pointer:" },
1837 { TOC_REGNUM, "toc: " },
1838 { STATIC_CHAIN_REGNUM, "static chain: " },
1839 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
1840 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
1841 { ARG_POINTER_REGNUM, "arg pointer: " },
1842 { FRAME_POINTER_REGNUM, "frame pointer:" },
1843 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
1844 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
1845 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
1846 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
1847 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
1848 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
1849 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
1850     { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
1851 { LAST_VIRTUAL_REGISTER, "last virtual: " },
1854 fputs ("\nHard register information:\n", stderr);
1855 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
1856 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
1857 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1858 LAST_ALTIVEC_REGNO,
1859 "vs");
1860 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1861 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1862 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1863 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1864 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1865 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1866 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1867 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1869 fputs ("\nVirtual/stack/frame registers:\n", stderr);
1870 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
1871 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
1873 fprintf (stderr,
1874 "\n"
1875 "d reg_class = %s\n"
1876 "f reg_class = %s\n"
1877 "v reg_class = %s\n"
1878 "wa reg_class = %s\n"
1879 "wd reg_class = %s\n"
1880 "wf reg_class = %s\n"
1881 "wg reg_class = %s\n"
1882 "wl reg_class = %s\n"
1883 "wm reg_class = %s\n"
1884 "wr reg_class = %s\n"
1885 "ws reg_class = %s\n"
1886 "wt reg_class = %s\n"
1887 "wv reg_class = %s\n"
1888 "wx reg_class = %s\n"
1889 "wz reg_class = %s\n"
1890 "\n",
1891 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1892 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1893 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1894 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1895 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1896 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1897 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
1898 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
1899 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
1900 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
1901 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
1902 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
1903 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
1904 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
1905 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
1907 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1908 if (rs6000_vector_unit[m] || rs6000_vector_mem[m]
1909 || (rs6000_vector_reload[m][0] != CODE_FOR_nothing)
1910 || (rs6000_vector_reload[m][1] != CODE_FOR_nothing))
1912 nl = "\n";
1913 fprintf (stderr,
1914 "Vector mode: %-5s arithmetic: %-10s move: %-10s "
1915 "reload-out: %c reload-in: %c\n",
1916 GET_MODE_NAME (m),
1917 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1918 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ],
1919 (rs6000_vector_reload[m][0] != CODE_FOR_nothing) ? 'y' : 'n',
1920 (rs6000_vector_reload[m][1] != CODE_FOR_nothing) ? 'y' : 'n');
1923 if (nl)
1924 fputs (nl, stderr);
1926 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
1928 enum machine_mode mode1 = print_tieable_modes[m1];
1929 bool first_time = true;
1931 nl = (const char *)0;
1932 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
1934 enum machine_mode mode2 = print_tieable_modes[m2];
1935 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
1937 if (first_time)
1939 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
1940 nl = "\n";
1941 first_time = false;
1944 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
1948 if (!first_time)
1949 fputs ("\n", stderr);
1952 if (nl)
1953 fputs (nl, stderr);
1955 if (rs6000_recip_control)
1957 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1959 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1960 if (rs6000_recip_bits[m])
1962 fprintf (stderr,
1963 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1964 GET_MODE_NAME (m),
1965 (RS6000_RECIP_AUTO_RE_P (m)
1966 ? "auto"
1967 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1968 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1969 ? "auto"
1970 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1973 fputs ("\n", stderr);
1976 if (rs6000_cpu_index >= 0)
1978 const char *name = processor_target_table[rs6000_cpu_index].name;
1979 HOST_WIDE_INT flags
1980 = processor_target_table[rs6000_cpu_index].target_enable;
1982 sprintf (flags_buffer, "-mcpu=%s flags", name);
1983 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
1985 else
1986 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
1988 if (rs6000_tune_index >= 0)
1990 const char *name = processor_target_table[rs6000_tune_index].name;
1991 HOST_WIDE_INT flags
1992 = processor_target_table[rs6000_tune_index].target_enable;
1994 sprintf (flags_buffer, "-mtune=%s flags", name);
1995 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
1997 else
1998 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2000 cl_target_option_save (&cl_opts, &global_options);
2001 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2002 rs6000_isa_flags);
2004 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2005 rs6000_isa_flags_explicit);
2007 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2008 rs6000_builtin_mask);
2010 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2012 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2013 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2015 switch (rs6000_sched_costly_dep)
2017 case max_dep_latency:
2018 costly_str = "max_dep_latency";
2019 break;
2021 case no_dep_costly:
2022 costly_str = "no_dep_costly";
2023 break;
2025 case all_deps_costly:
2026 costly_str = "all_deps_costly";
2027 break;
2029 case true_store_to_load_dep_costly:
2030 costly_str = "true_store_to_load_dep_costly";
2031 break;
2033 case store_to_load_dep_costly:
2034 costly_str = "store_to_load_dep_costly";
2035 break;
2037 default:
2038 costly_str = costly_num;
2039 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2040 break;
2043 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2045 switch (rs6000_sched_insert_nops)
2047 case sched_finish_regroup_exact:
2048 nop_str = "sched_finish_regroup_exact";
2049 break;
2051 case sched_finish_pad_groups:
2052 nop_str = "sched_finish_pad_groups";
2053 break;
2055 case sched_finish_none:
2056 nop_str = "sched_finish_none";
2057 break;
2059 default:
2060 nop_str = nop_num;
2061 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2062 break;
2065 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2067 switch (rs6000_sdata)
2069 default:
2070 case SDATA_NONE:
2071 break;
2073 case SDATA_DATA:
2074 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2075 break;
2077 case SDATA_SYSV:
2078 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2079 break;
2081 case SDATA_EABI:
2082 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2083 break;
2087 switch (rs6000_traceback)
2089 case traceback_default: trace_str = "default"; break;
2090 case traceback_none: trace_str = "none"; break;
2091 case traceback_part: trace_str = "part"; break;
2092 case traceback_full: trace_str = "full"; break;
2093 default: trace_str = "unknown"; break;
2096 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2098 switch (rs6000_current_cmodel)
2100 case CMODEL_SMALL: cmodel_str = "small"; break;
2101 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2102 case CMODEL_LARGE: cmodel_str = "large"; break;
2103 default: cmodel_str = "unknown"; break;
2106 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2108 switch (rs6000_current_abi)
2110 case ABI_NONE: abi_str = "none"; break;
2111 case ABI_AIX: abi_str = "aix"; break;
2112 case ABI_V4: abi_str = "V4"; break;
2113 case ABI_DARWIN: abi_str = "darwin"; break;
2114 default: abi_str = "unknown"; break;
2117 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2119 if (rs6000_altivec_abi)
2120 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2122 if (rs6000_spe_abi)
2123 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2125 if (rs6000_darwin64_abi)
2126 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2128 if (rs6000_float_gprs)
2129 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2131 if (TARGET_LINK_STACK)
2132 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2134 if (targetm.lra_p ())
2135 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2137 if (TARGET_P8_FUSION)
2138 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2139 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2141 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2142 TARGET_SECURE_PLT ? "secure" : "bss");
2143 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2144 aix_struct_return ? "aix" : "sysv");
2145 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2146 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2147 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2148 tf[!!rs6000_align_branch_targets]);
2149 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2150 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2151 rs6000_long_double_type_size);
2152 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2153 (int)rs6000_sched_restricted_insns_priority);
2154 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2155 (int)END_BUILTINS);
2156 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2157 (int)RS6000_BUILTIN_COUNT);
2160 /* Initialize the various global tables that are based on register size. */
2161 static void
2162 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2164 int r, m, c;
2165 int align64;
2166 int align32;
2168 /* Precalculate REGNO_REG_CLASS. */
2169 rs6000_regno_regclass[0] = GENERAL_REGS;
2170 for (r = 1; r < 32; ++r)
2171 rs6000_regno_regclass[r] = BASE_REGS;
2173 for (r = 32; r < 64; ++r)
2174 rs6000_regno_regclass[r] = FLOAT_REGS;
2176 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2177 rs6000_regno_regclass[r] = NO_REGS;
2179 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2180 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2182 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2183 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2184 rs6000_regno_regclass[r] = CR_REGS;
2186 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2187 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2188 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2189 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2190 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2191 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2192 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2193 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2194 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2196   /* Precalculate the mapping from register class to the simpler reload
2197      register class.  We don't need all of the register classes that are
2198      combinations of different classes, just the simple ones that have constraint letters.  */
2199 for (c = 0; c < N_REG_CLASSES; c++)
2200 reg_class_to_reg_type[c] = NO_REG_TYPE;
2202 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2203 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2204 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2205 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2206 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2207 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2208 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2209 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2210 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2211 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2212 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2213 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2215 if (TARGET_VSX)
2217 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2218 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2220 else
2222 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2223 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2226   /* Precalculate vector information; this must be set up before
2227      rs6000_hard_regno_nregs_internal below is called.  */
2228 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2230 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
2231 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
2232 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
2235   for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2236 rs6000_constraints[c] = NO_REGS;
2238   /* The VSX hardware allows native alignment for vectors, but we control whether
2239      the compiler believes it can use native alignment or must still use 128-bit alignment.  */
2240 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2242 align64 = 64;
2243 align32 = 32;
2245 else
2247 align64 = 128;
2248 align32 = 128;
2251 /* V2DF mode, VSX only. */
2252 if (TARGET_VSX)
2254 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2255 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2256 rs6000_vector_align[V2DFmode] = align64;
2259 /* V4SF mode, either VSX or Altivec. */
2260 if (TARGET_VSX)
2262 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2263 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2264 rs6000_vector_align[V4SFmode] = align32;
2266 else if (TARGET_ALTIVEC)
2268 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2269 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2270 rs6000_vector_align[V4SFmode] = align32;
2273   /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly use VSX
2274      for loads and stores.  */
2275 if (TARGET_ALTIVEC)
2277 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2278 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2279 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2280 rs6000_vector_align[V4SImode] = align32;
2281 rs6000_vector_align[V8HImode] = align32;
2282 rs6000_vector_align[V16QImode] = align32;
2284 if (TARGET_VSX)
2286 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2287 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2288 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2290 else
2292 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2293 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2294 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2298   /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit.
2299      Allow it under VSX to do insert/splat/extract; Altivec doesn't have 64-bit integer support.  */
2300 if (TARGET_VSX)
2302 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2303 rs6000_vector_unit[V2DImode]
2304 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2305 rs6000_vector_align[V2DImode] = align64;
2308 /* DFmode, see if we want to use the VSX unit. */
2309 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2311 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2312 rs6000_vector_mem[DFmode]
2313 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2314 rs6000_vector_align[DFmode] = align64;
2317   /* Allow TImode in VSX registers and set the VSX memory macros.  */
2318 if (TARGET_VSX && TARGET_VSX_TIMODE)
2320 rs6000_vector_mem[TImode] = VECTOR_VSX;
2321 rs6000_vector_align[TImode] = align64;
2324 /* TODO add SPE and paired floating point vector support. */
2326 /* Register class constraints for the constraints that depend on compile
2327 switches. */
2328 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2329 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2331 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2332 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2334 if (TARGET_VSX)
2336 /* At present, we just use VSX_REGS, but we have different constraints
2337 based on the use, in case we want to fine tune the default register
2338 class used. wa = any VSX register, wf = register class to use for
2339      V4SF, wd = register class to use for V2DF, and ws = register class to
2340 use for DF scalars. */
2341 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2342 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2343 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2344 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2345 ? VSX_REGS
2346 : FLOAT_REGS);
2347 if (TARGET_VSX_TIMODE)
2348 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;
2351 /* Add conditional constraints based on various options, to allow us to
2352 collapse multiple insn patterns. */
2353 if (TARGET_ALTIVEC)
2354 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2356 if (TARGET_MFPGPR)
2357 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2359 if (TARGET_LFIWAX)
2360 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;
2362 if (TARGET_DIRECT_MOVE)
2363 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2365 if (TARGET_POWERPC64)
2366 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2368 if (TARGET_P8_VECTOR)
2369 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2371 if (TARGET_STFIWX)
2372 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;
2374 if (TARGET_LFIWZX)
2375 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
2377   /* Set up the direct move combinations.  */
2378 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2380 reload_fpr_gpr[m] = CODE_FOR_nothing;
2381 reload_gpr_vsx[m] = CODE_FOR_nothing;
2382 reload_vsx_gpr[m] = CODE_FOR_nothing;
2385 /* Set up the reload helper and direct move functions. */
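  /* A note on indexing: rs6000_vector_reload[mode][0] holds the store
     (output reload) pattern and [1] the load (input reload) pattern,
     matching the reload-out/reload-in columns printed by -mdebug=reg.  */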
2386 if (TARGET_VSX || TARGET_ALTIVEC)
2388 if (TARGET_64BIT)
2390 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2391 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2392 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2393 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2394 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2395 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2396 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2397 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2398 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2399 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2400 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2401 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2402 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2404 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2405 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2406 rs6000_vector_reload[DDmode][0] = CODE_FOR_reload_dd_di_store;
2407 rs6000_vector_reload[DDmode][1] = CODE_FOR_reload_dd_di_load;
2409 if (TARGET_P8_VECTOR)
2411 rs6000_vector_reload[SFmode][0] = CODE_FOR_reload_sf_di_store;
2412 rs6000_vector_reload[SFmode][1] = CODE_FOR_reload_sf_di_load;
2413 rs6000_vector_reload[SDmode][0] = CODE_FOR_reload_sd_di_store;
2414 rs6000_vector_reload[SDmode][1] = CODE_FOR_reload_sd_di_load;
2416 if (TARGET_VSX_TIMODE)
2418 rs6000_vector_reload[TImode][0] = CODE_FOR_reload_ti_di_store;
2419 rs6000_vector_reload[TImode][1] = CODE_FOR_reload_ti_di_load;
2421 if (TARGET_DIRECT_MOVE)
2423 if (TARGET_POWERPC64)
2425 reload_gpr_vsx[TImode] = CODE_FOR_reload_gpr_from_vsxti;
2426 reload_gpr_vsx[V2DFmode] = CODE_FOR_reload_gpr_from_vsxv2df;
2427 reload_gpr_vsx[V2DImode] = CODE_FOR_reload_gpr_from_vsxv2di;
2428 reload_gpr_vsx[V4SFmode] = CODE_FOR_reload_gpr_from_vsxv4sf;
2429 reload_gpr_vsx[V4SImode] = CODE_FOR_reload_gpr_from_vsxv4si;
2430 reload_gpr_vsx[V8HImode] = CODE_FOR_reload_gpr_from_vsxv8hi;
2431 reload_gpr_vsx[V16QImode] = CODE_FOR_reload_gpr_from_vsxv16qi;
2432 reload_gpr_vsx[SFmode] = CODE_FOR_reload_gpr_from_vsxsf;
2434 reload_vsx_gpr[TImode] = CODE_FOR_reload_vsx_from_gprti;
2435 reload_vsx_gpr[V2DFmode] = CODE_FOR_reload_vsx_from_gprv2df;
2436 reload_vsx_gpr[V2DImode] = CODE_FOR_reload_vsx_from_gprv2di;
2437 reload_vsx_gpr[V4SFmode] = CODE_FOR_reload_vsx_from_gprv4sf;
2438 reload_vsx_gpr[V4SImode] = CODE_FOR_reload_vsx_from_gprv4si;
2439 reload_vsx_gpr[V8HImode] = CODE_FOR_reload_vsx_from_gprv8hi;
2440 reload_vsx_gpr[V16QImode] = CODE_FOR_reload_vsx_from_gprv16qi;
2441 reload_vsx_gpr[SFmode] = CODE_FOR_reload_vsx_from_gprsf;
2443 else
2445 reload_fpr_gpr[DImode] = CODE_FOR_reload_fpr_from_gprdi;
2446 reload_fpr_gpr[DDmode] = CODE_FOR_reload_fpr_from_gprdd;
2447 reload_fpr_gpr[DFmode] = CODE_FOR_reload_fpr_from_gprdf;
2451 else
2453 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2454 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2455 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2456 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2457 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2458 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2459 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2460 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2461 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2462 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2463 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2464 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2465 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2467 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2468 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2469 rs6000_vector_reload[DDmode][0] = CODE_FOR_reload_dd_si_store;
2470 rs6000_vector_reload[DDmode][1] = CODE_FOR_reload_dd_si_load;
2472 if (TARGET_P8_VECTOR)
2474 rs6000_vector_reload[SFmode][0] = CODE_FOR_reload_sf_si_store;
2475 rs6000_vector_reload[SFmode][1] = CODE_FOR_reload_sf_si_load;
2476 rs6000_vector_reload[SDmode][0] = CODE_FOR_reload_sd_si_store;
2477 rs6000_vector_reload[SDmode][1] = CODE_FOR_reload_sd_si_load;
2479 if (TARGET_VSX_TIMODE)
2481 rs6000_vector_reload[TImode][0] = CODE_FOR_reload_ti_si_store;
2482 rs6000_vector_reload[TImode][1] = CODE_FOR_reload_ti_si_load;
2487 /* Precalculate HARD_REGNO_NREGS. */
2488 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2489 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2490 rs6000_hard_regno_nregs[m][r]
2491 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2493 /* Precalculate HARD_REGNO_MODE_OK. */
2494 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2495 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2496 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2497 rs6000_hard_regno_mode_ok_p[m][r] = true;
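  /* These two cached tables back the HARD_REGNO_NREGS and
     HARD_REGNO_MODE_OK macros, so the per-insn queries reduce to simple
     array lookups.  */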
2499 /* Precalculate CLASS_MAX_NREGS sizes. */
2500 for (c = 0; c < LIM_REG_CLASSES; ++c)
2502 int reg_size;
2504 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2505 reg_size = UNITS_PER_VSX_WORD;
2507 else if (c == ALTIVEC_REGS)
2508 reg_size = UNITS_PER_ALTIVEC_WORD;
2510 else if (c == FLOAT_REGS)
2511 reg_size = UNITS_PER_FP_WORD;
2513 else
2514 reg_size = UNITS_PER_WORD;
2516 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2518 int reg_size2 = reg_size;
2520 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2521 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2522 && (m == TDmode || m == TFmode))
2523 reg_size2 = UNITS_PER_FP_WORD;
2525 rs6000_class_max_nregs[m][c]
2526 = (GET_MODE_SIZE (m) + reg_size2 - 1) / reg_size2;
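	  /* E.g. TFmode is 16 bytes, so even for a 16-byte VSX class the
	     override above makes this report 2 registers rather than 1.  */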
2530 if (TARGET_E500_DOUBLE)
2531 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2533   /* Calculate the modes for which to automatically generate code using the
2534      reciprocal divide and square root estimate instructions.  In the future,
2535      possibly generate the instructions even if the user did not specify
2536      -mrecip.  The older machines' double-precision reciprocal sqrt estimate
2537      is not accurate enough.  */
2538 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2539 if (TARGET_FRES)
2540 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2541 if (TARGET_FRE)
2542 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2543 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2544 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2545 if (VECTOR_UNIT_VSX_P (V2DFmode))
2546 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
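  /* The RS6000_RECIP_MASK_HAVE_* bits record only that the hardware has
     the estimate instruction; the RS6000_RECIP_MASK_AUTO_* bits set below
     control whether we actually generate it automatically.  */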
2548 if (TARGET_FRSQRTES)
2549 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2550 if (TARGET_FRSQRTE)
2551 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2552 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2553 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2554 if (VECTOR_UNIT_VSX_P (V2DFmode))
2555 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2557 if (rs6000_recip_control)
2559 if (!flag_finite_math_only)
2560 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2561 if (flag_trapping_math)
2562 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2563 if (!flag_reciprocal_math)
2564 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2565 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2567 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2568 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2569 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2571 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2572 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2573 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2575 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2576 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2577 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2579 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2580 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2581 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2583 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2584 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2585 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2587 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2588 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2589 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2591 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2592 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2593 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2595 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2596 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2597 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2601 if (global_init_p || TARGET_DEBUG_TARGET)
2603 if (TARGET_DEBUG_REG)
2604 rs6000_debug_reg_global ();
2606 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2607 fprintf (stderr,
2608 "SImode variable mult cost = %d\n"
2609 "SImode constant mult cost = %d\n"
2610 "SImode short constant mult cost = %d\n"
2611 "DImode multipliciation cost = %d\n"
2612 "SImode division cost = %d\n"
2613 "DImode division cost = %d\n"
2614 "Simple fp operation cost = %d\n"
2615 "DFmode multiplication cost = %d\n"
2616 "SFmode division cost = %d\n"
2617 "DFmode division cost = %d\n"
2618 "cache line size = %d\n"
2619 "l1 cache size = %d\n"
2620 "l2 cache size = %d\n"
2621 "simultaneous prefetches = %d\n"
2622 "\n",
2623 rs6000_cost->mulsi,
2624 rs6000_cost->mulsi_const,
2625 rs6000_cost->mulsi_const9,
2626 rs6000_cost->muldi,
2627 rs6000_cost->divsi,
2628 rs6000_cost->divdi,
2629 rs6000_cost->fp,
2630 rs6000_cost->dmul,
2631 rs6000_cost->sdiv,
2632 rs6000_cost->ddiv,
2633 rs6000_cost->cache_line_size,
2634 rs6000_cost->l1_cache_size,
2635 rs6000_cost->l2_cache_size,
2636 rs6000_cost->simultaneous_prefetches);
2640 #if TARGET_MACHO
2641 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2643 static void
2644 darwin_rs6000_override_options (void)
2646   /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2647      off.  */
2648 rs6000_altivec_abi = 1;
2649 TARGET_ALTIVEC_VRSAVE = 1;
2650 rs6000_current_abi = ABI_DARWIN;
2652 if (DEFAULT_ABI == ABI_DARWIN
2653 && TARGET_64BIT)
2654 darwin_one_byte_bool = 1;
2656 if (TARGET_64BIT && ! TARGET_POWERPC64)
2658 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2659 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2661 if (flag_mkernel)
2663 rs6000_default_long_calls = 1;
2664 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
2667 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2668 Altivec. */
2669 if (!flag_mkernel && !flag_apple_kext
2670 && TARGET_64BIT
2671 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
2672 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2674   /* Unless the user (not the configurer) has explicitly overridden it with
2675      -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4 unless targeting
2676      the kernel.  */
2677 if (!flag_mkernel
2678 && !flag_apple_kext
2679 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2680 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
2681 && ! global_options_set.x_rs6000_cpu_index)
2683 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2686 #endif
2688 /* If not otherwise specified by a target, make 'long double' equivalent to
2689 'double'. */
2691 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2692 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2693 #endif
2695 /* Return the builtin mask of the various options that could affect which
2696    builtins are available.  In the past we used target_flags, but we've run
2697    out of bits, and some options like SPE and PAIRED are no longer in
2698    target_flags.  */
2700 HOST_WIDE_INT
2701 rs6000_builtin_mask_calculate (void)
2703 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2704 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2705 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2706 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2707 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2708 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2709 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2710 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2711 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2712 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
2713 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
2714 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0));
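/* Presumably each builtin carries a mask of the RS6000_BTM_* bits it
   requires, and it is enabled only when those bits are all present in the
   value computed here; see the uses of rs6000_builtin_mask.  */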
2717 /* Override command line options. Mostly we process the processor type and
2718 sometimes adjust other TARGET_ options. */
2720 static bool
2721 rs6000_option_override_internal (bool global_init_p)
2723 bool ret = true;
2724 bool have_cpu = false;
2726 /* The default cpu requested at configure time, if any. */
2727 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2729 HOST_WIDE_INT set_masks;
2730 int cpu_index;
2731 int tune_index;
2732 struct cl_target_option *main_target_opt
2733 = ((global_init_p || target_option_default_node == NULL)
2734 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2736 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2737 library functions, so warn about it. The flag may be useful for
2738 performance studies from time to time though, so don't disable it
2739 entirely. */
2740 if (global_options_set.x_rs6000_alignment_flags
2741 && rs6000_alignment_flags == MASK_ALIGN_POWER
2742 && DEFAULT_ABI == ABI_DARWIN
2743 && TARGET_64BIT)
2744 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2745 " it is incompatible with the installed C and C++ libraries");
2747   /* Numerous experiments show that IRA-based loop pressure calculation
2748      works better for RTL loop invariant motion on targets with enough
2749      (>= 32) registers.  It is an expensive optimization, so it is enabled
2750      only when optimizing for peak performance.  */
2751 if (optimize >= 3 && global_init_p)
2752 flag_ira_loop_pressure = 1;
2754 /* Set the pointer size. */
2755 if (TARGET_64BIT)
2757 rs6000_pmode = (int)DImode;
2758 rs6000_pointer_size = 64;
2760 else
2762 rs6000_pmode = (int)SImode;
2763 rs6000_pointer_size = 32;
2766 /* Some OSs don't support saving the high part of 64-bit registers on context
2767 switch. Other OSs don't support saving Altivec registers. On those OSs,
2768 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
2769 if the user wants either, the user must explicitly specify them and we
2770 won't interfere with the user's specification. */
2772 set_masks = POWERPC_MASKS;
2773 #ifdef OS_MISSING_POWERPC64
2774 if (OS_MISSING_POWERPC64)
2775 set_masks &= ~OPTION_MASK_POWERPC64;
2776 #endif
2777 #ifdef OS_MISSING_ALTIVEC
2778 if (OS_MISSING_ALTIVEC)
2779 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
2780 #endif
2782 /* Don't override by the processor default if given explicitly. */
2783 set_masks &= ~rs6000_isa_flags_explicit;
2785 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
2786 the cpu in a target attribute or pragma, but did not specify a tuning
2787 option, use the cpu for the tuning option rather than the option specified
2788 with -mtune on the command line. Process a '--with-cpu' configuration
2789      request as an implicit -mcpu.  */
2790 if (rs6000_cpu_index >= 0)
2792 cpu_index = rs6000_cpu_index;
2793 have_cpu = true;
2795 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2797 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2798 have_cpu = true;
2800 else if (implicit_cpu)
2802 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
2803 have_cpu = true;
2805 else
2807 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
2808 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2809 have_cpu = false;
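  /* Summarizing the precedence: an explicit -mcpu= wins, then a cpu from a
     target attribute or pragma, then the configure-time --with-cpu default,
     and finally a bare "powerpc"/"powerpc64" fallback that does not count
     as having a cpu.  */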
2812 gcc_assert (cpu_index >= 0);
2814 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
2815 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
2816 with those from the cpu, except for options that were explicitly set. If
2817 we don't have a cpu, do not override the target bits set in
2818 TARGET_DEFAULT. */
2819 if (have_cpu)
2821 rs6000_isa_flags &= ~set_masks;
2822 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2823 & set_masks);
2825 else
2826 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2827 & ~rs6000_isa_flags_explicit);
2829 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
2830 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
2831 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
2832 to using rs6000_isa_flags, we need to do the initialization here. */
2833 if (!have_cpu)
2834 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
2836 if (rs6000_tune_index >= 0)
2837 tune_index = rs6000_tune_index;
2838 else if (have_cpu)
2839 rs6000_tune_index = tune_index = cpu_index;
2840 else
2842 size_t i;
2843 enum processor_type tune_proc
2844 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2846 tune_index = -1;
2847 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2848 if (processor_target_table[i].processor == tune_proc)
2850 rs6000_tune_index = tune_index = i;
2851 break;
2855 gcc_assert (tune_index >= 0);
2856 rs6000_cpu = processor_target_table[tune_index].processor;
2858 /* Pick defaults for SPE related control flags. Do this early to make sure
2859 that the TARGET_ macros are representative ASAP. */
2861 int spe_capable_cpu =
2862 (rs6000_cpu == PROCESSOR_PPC8540
2863 || rs6000_cpu == PROCESSOR_PPC8548);
2865 if (!global_options_set.x_rs6000_spe_abi)
2866 rs6000_spe_abi = spe_capable_cpu;
2868 if (!global_options_set.x_rs6000_spe)
2869 rs6000_spe = spe_capable_cpu;
2871 if (!global_options_set.x_rs6000_float_gprs)
2872 rs6000_float_gprs =
2873 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2874 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2875 : 0);
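  /* The values 1 and 2 presumably correspond to the -mfloat-gprs=single
     and -mfloat-gprs=double settings: the 8540 supports only single
     precision in GPRs, while the 8548 also supports double.  */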
2878 if (global_options_set.x_rs6000_spe_abi
2879 && rs6000_spe_abi
2880 && !TARGET_SPE_ABI)
2881 error ("not configured for SPE ABI");
2883 if (global_options_set.x_rs6000_spe
2884 && rs6000_spe
2885 && !TARGET_SPE)
2886 error ("not configured for SPE instruction set");
2888 if (main_target_opt != NULL
2889 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2890 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2891 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2892 error ("target attribute or pragma changes SPE ABI");
2894 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2895 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2896 || rs6000_cpu == PROCESSOR_PPCE5500)
2898 if (TARGET_ALTIVEC)
2899 error ("AltiVec not supported in this target");
2900 if (TARGET_SPE)
2901 error ("SPE not supported in this target");
2903 if (rs6000_cpu == PROCESSOR_PPCE6500)
2905 if (TARGET_SPE)
2906 error ("SPE not supported in this target");
2909 /* Disable Cell microcode if we are optimizing for the Cell
2910 and not optimizing for size. */
2911 if (rs6000_gen_cell_microcode == -1)
2912 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2913 && !optimize_size);
2915 /* If we are optimizing big endian systems for space and it's OK to
2916 use instructions that would be microcoded on the Cell, use the
2917 load/store multiple and string instructions. */
2918 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2919 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
2920 | OPTION_MASK_STRING);
2922   /* Don't allow -mmultiple or -mstring on little endian systems
2923      unless the cpu is a 750, because the hardware doesn't support the
2924      instructions used in little endian mode, and they cause an alignment
2925      trap.  The 750 does not cause an alignment trap (except when the
2926      target address is unaligned).  */
2928 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2930 if (TARGET_MULTIPLE)
2932 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
2933 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
2934 warning (0, "-mmultiple is not supported on little endian systems");
2937 if (TARGET_STRING)
2939 rs6000_isa_flags &= ~OPTION_MASK_STRING;
2940 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
2941 warning (0, "-mstring is not supported on little endian systems");
2945 /* Add some warnings for VSX. */
2946 if (TARGET_VSX)
2948 const char *msg = NULL;
2949 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2950 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2952 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
2953 msg = N_("-mvsx requires hardware floating point");
2954 else
2955 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
2957 else if (TARGET_PAIRED_FLOAT)
2958 msg = N_("-mvsx and -mpaired are incompatible");
2959       /* The hardware will allow VSX with little endian, but until we make
2960 	 sure things like vector select, etc. work, don't allow VSX on little
2961 	 endian systems at this point.  */
2962 else if (!BYTES_BIG_ENDIAN)
2963 msg = N_("-mvsx used with little endian code");
2964 else if (TARGET_AVOID_XFORM > 0)
2965 msg = N_("-mvsx needs indexed addressing");
2966 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
2967 & OPTION_MASK_ALTIVEC))
2969 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
2970 msg = N_("-mvsx and -mno-altivec are incompatible");
2971 else
2972 msg = N_("-mno-altivec disables vsx");
2975 if (msg)
2977 warning (0, msg);
2978 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
2979 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
2983 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
2984 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
2986 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2987 unless the user explicitly used the -mno-<option> to disable the code. */
2988 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
2989 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2990 else if (TARGET_VSX)
2991 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2992 else if (TARGET_POPCNTD)
2993 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
2994 else if (TARGET_DFP)
2995 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2996 else if (TARGET_CMPB)
2997 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
2998 else if (TARGET_FPRND)
2999 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3000 else if (TARGET_POPCNTB)
3001 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3002 else if (TARGET_ALTIVEC)
3003 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
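  /* This chain relies on each ISA_*_MASKS_* value being cumulative (each
     one assumed to include the previous ISA's masks, per rs6000-cpus.def),
     so testing from newest to oldest feature is sufficient.  */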
3005 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3007 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3008 error ("-mcrypto requires -maltivec");
3009 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3012 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3014 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3015 error ("-mdirect-move requires -mvsx");
3016 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3019 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3021 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3022 error ("-mpower8-vector requires -maltivec");
3023 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3026 if (TARGET_P8_VECTOR && !TARGET_VSX)
3028 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3029 error ("-mpower8-vector requires -mvsx");
3030 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3033 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3035 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3036 error ("-mvsx-timode requires -mvsx");
3037 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3040   /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
3041      silently turn off quad memory mode.  */
3042 if (TARGET_QUAD_MEMORY && !TARGET_POWERPC64)
3044 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3045 warning (0, N_("-mquad-memory requires 64-bit mode"));
3047 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3050 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3051 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3053 /* E500mc does "better" if we inline more aggressively. Respect the
3054 user's opinion, though. */
3055 if (rs6000_block_move_inline_limit == 0
3056 && (rs6000_cpu == PROCESSOR_PPCE500MC
3057 || rs6000_cpu == PROCESSOR_PPCE500MC64
3058 || rs6000_cpu == PROCESSOR_PPCE5500
3059 || rs6000_cpu == PROCESSOR_PPCE6500))
3060 rs6000_block_move_inline_limit = 128;
3062 /* store_one_arg depends on expand_block_move to handle at least the
3063 size of reg_parm_stack_space. */
3064 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3065 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
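  /* That is, keep the limit at or above 64 bytes for 64-bit (32 for
     32-bit), the size of the register parameter save area, so inline block
     moves always cover what store_one_arg needs.  */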
3067 if (global_init_p)
3069       /* If the appropriate debug option is enabled, replace the target hooks
3070 	 with debug versions that call the real version and then print
3071 	 debugging information.  */
3072 if (TARGET_DEBUG_COST)
3074 targetm.rtx_costs = rs6000_debug_rtx_costs;
3075 targetm.address_cost = rs6000_debug_address_cost;
3076 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3079 if (TARGET_DEBUG_ADDR)
3081 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3082 targetm.legitimize_address = rs6000_debug_legitimize_address;
3083 rs6000_secondary_reload_class_ptr
3084 = rs6000_debug_secondary_reload_class;
3085 rs6000_secondary_memory_needed_ptr
3086 = rs6000_debug_secondary_memory_needed;
3087 rs6000_cannot_change_mode_class_ptr
3088 = rs6000_debug_cannot_change_mode_class;
3089 rs6000_preferred_reload_class_ptr
3090 = rs6000_debug_preferred_reload_class;
3091 rs6000_legitimize_reload_address_ptr
3092 = rs6000_debug_legitimize_reload_address;
3093 rs6000_mode_dependent_address_ptr
3094 = rs6000_debug_mode_dependent_address;
3097 if (rs6000_veclibabi_name)
3099 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3100 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3101 else
3103 error ("unknown vectorization library ABI type (%s) for "
3104 "-mveclibabi= switch", rs6000_veclibabi_name);
3105 ret = false;
3110 if (!global_options_set.x_rs6000_long_double_type_size)
3112 if (main_target_opt != NULL
3113 && (main_target_opt->x_rs6000_long_double_type_size
3114 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3115 error ("target attribute or pragma changes long double size");
3116 else
3117 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3120 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3121 if (!global_options_set.x_rs6000_ieeequad)
3122 rs6000_ieeequad = 1;
3123 #endif
3125   /* Disable VSX and Altivec silently if the user switched cpus to power7 in
3126      a target attribute or pragma which automatically enables both options,
3127      unless the altivec ABI was set.  The altivec ABI is on by default for
3128      64-bit, but not for 32-bit.  */
3129 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3130 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3131 & ~rs6000_isa_flags_explicit);
3133 /* Enable Altivec ABI for AIX -maltivec. */
3134 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3136 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3137 error ("target attribute or pragma changes AltiVec ABI");
3138 else
3139 rs6000_altivec_abi = 1;
3142 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3143 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3144 be explicitly overridden in either case. */
3145 if (TARGET_ELF)
3147 if (!global_options_set.x_rs6000_altivec_abi
3148 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3150 if (main_target_opt != NULL &&
3151 !main_target_opt->x_rs6000_altivec_abi)
3152 error ("target attribute or pragma changes AltiVec ABI");
3153 else
3154 rs6000_altivec_abi = 1;
3158 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3159 So far, the only darwin64 targets are also MACH-O. */
3160 if (TARGET_MACHO
3161 && DEFAULT_ABI == ABI_DARWIN
3162 && TARGET_64BIT)
3164 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3165 error ("target attribute or pragma changes darwin64 ABI");
3166 else
3168 rs6000_darwin64_abi = 1;
3169 /* Default to natural alignment, for better performance. */
3170 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3174   /* Place FP constants in the constant pool instead of the TOC if section
3175      anchors are enabled.  */
3176 if (flag_section_anchors
3177 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3178 TARGET_NO_FP_IN_TOC = 1;
3180 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3181 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3183 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3184 SUBTARGET_OVERRIDE_OPTIONS;
3185 #endif
3186 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3187 SUBSUBTARGET_OVERRIDE_OPTIONS;
3188 #endif
3189 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3190 SUB3TARGET_OVERRIDE_OPTIONS;
3191 #endif
3193 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3194 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3196   /* For the E500 family of cores, reset the single/double FP flags to let us
3197      check that they remain constant across attributes or pragmas.  Also,
3198      clear a possible request for string instructions, which are not
3199      supported there and which we might have silently enabled above for -Os.
3201      For other families, clear ISEL in case it was set implicitly.
3204 switch (rs6000_cpu)
3206 case PROCESSOR_PPC8540:
3207 case PROCESSOR_PPC8548:
3208 case PROCESSOR_PPCE500MC:
3209 case PROCESSOR_PPCE500MC64:
3210 case PROCESSOR_PPCE5500:
3211 case PROCESSOR_PPCE6500:
3213 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3214 rs6000_double_float = TARGET_E500_DOUBLE;
3216 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3218 break;
3220 default:
3222 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3223 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3225 break;
3228 if (main_target_opt)
3230 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3231 error ("target attribute or pragma changes single precision floating "
3232 "point");
3233 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3234 error ("target attribute or pragma changes double precision floating "
3235 "point");
3238 /* Detect invalid option combinations with E500. */
3239 CHECK_E500_OPTIONS;
3241 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3242 && rs6000_cpu != PROCESSOR_POWER5
3243 && rs6000_cpu != PROCESSOR_POWER6
3244 && rs6000_cpu != PROCESSOR_POWER7
3245 && rs6000_cpu != PROCESSOR_POWER8
3246 && rs6000_cpu != PROCESSOR_PPCA2
3247 && rs6000_cpu != PROCESSOR_CELL
3248 && rs6000_cpu != PROCESSOR_PPC476);
3249 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3250 || rs6000_cpu == PROCESSOR_POWER5
3251 || rs6000_cpu == PROCESSOR_POWER7
3252 || rs6000_cpu == PROCESSOR_POWER8);
3253 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3254 || rs6000_cpu == PROCESSOR_POWER5
3255 || rs6000_cpu == PROCESSOR_POWER6
3256 || rs6000_cpu == PROCESSOR_POWER7
3257 || rs6000_cpu == PROCESSOR_POWER8
3258 || rs6000_cpu == PROCESSOR_PPCE500MC
3259 || rs6000_cpu == PROCESSOR_PPCE500MC64
3260 || rs6000_cpu == PROCESSOR_PPCE5500
3261 || rs6000_cpu == PROCESSOR_PPCE6500);
3263 /* Allow debug switches to override the above settings. These are set to -1
3264 in rs6000.opt to indicate the user hasn't directly set the switch. */
3265 if (TARGET_ALWAYS_HINT >= 0)
3266 rs6000_always_hint = TARGET_ALWAYS_HINT;
3268 if (TARGET_SCHED_GROUPS >= 0)
3269 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3271 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3272 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3274 rs6000_sched_restricted_insns_priority
3275 = (rs6000_sched_groups ? 1 : 0);
3277 /* Handle -msched-costly-dep option. */
3278 rs6000_sched_costly_dep
3279 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3281 if (rs6000_sched_costly_dep_str)
3283 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3284 rs6000_sched_costly_dep = no_dep_costly;
3285 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3286 rs6000_sched_costly_dep = all_deps_costly;
3287 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3288 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3289 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3290 rs6000_sched_costly_dep = store_to_load_dep_costly;
3291 else
3292 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3293 atoi (rs6000_sched_costly_dep_str));
3296 /* Handle -minsert-sched-nops option. */
3297 rs6000_sched_insert_nops
3298 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3300 if (rs6000_sched_insert_nops_str)
3302 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3303 rs6000_sched_insert_nops = sched_finish_none;
3304 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3305 rs6000_sched_insert_nops = sched_finish_pad_groups;
3306 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3307 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3308 else
3309 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3310 atoi (rs6000_sched_insert_nops_str));
3313 if (global_init_p)
3315 #ifdef TARGET_REGNAMES
3316 /* If the user desires alternate register names, copy in the
3317 alternate names now. */
3318 if (TARGET_REGNAMES)
3319 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3320 #endif
3322 /* Set aix_struct_return last, after the ABI is determined.
3323 If -maix-struct-return or -msvr4-struct-return was explicitly
3324 used, don't override with the ABI default. */
3325 if (!global_options_set.x_aix_struct_return)
3326 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3328 #if 0
3329 /* IBM XL compiler defaults to unsigned bitfields. */
3330 if (TARGET_XL_COMPAT)
3331 flag_signed_bitfields = 0;
3332 #endif
3334 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3335 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3337 if (TARGET_TOC)
3338 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3340 /* We can only guarantee the availability of DI pseudo-ops when
3341 assembling for 64-bit targets. */
3342 if (!TARGET_64BIT)
3344 targetm.asm_out.aligned_op.di = NULL;
3345 targetm.asm_out.unaligned_op.di = NULL;
3349 /* Set branch target alignment, if not optimizing for size. */
3350 if (!optimize_size)
3352 /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
3353 aligned 8-byte to avoid misprediction by the branch predictor. */
3354 if (rs6000_cpu == PROCESSOR_TITAN
3355 || rs6000_cpu == PROCESSOR_CELL)
3357 if (align_functions <= 0)
3358 align_functions = 8;
3359 if (align_jumps <= 0)
3360 align_jumps = 8;
3361 if (align_loops <= 0)
3362 align_loops = 8;
3364 if (rs6000_align_branch_targets)
3366 if (align_functions <= 0)
3367 align_functions = 16;
3368 if (align_jumps <= 0)
3369 align_jumps = 16;
3370 if (align_loops <= 0)
3372 can_override_loop_align = 1;
3373 align_loops = 16;
3376 if (align_jumps_max_skip <= 0)
3377 align_jumps_max_skip = 15;
3378 if (align_loops_max_skip <= 0)
3379 align_loops_max_skip = 15;
3382 /* Arrange to save and restore machine status around nested functions. */
3383 init_machine_status = rs6000_init_machine_status;
3385 /* We should always be splitting complex arguments, but we can't break
3386 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3387 if (DEFAULT_ABI != ABI_AIX)
3388 targetm.calls.split_complex_arg = NULL;
3391 /* Initialize rs6000_cost with the appropriate target costs. */
3392 if (optimize_size)
3393 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3394 else
3395 switch (rs6000_cpu)
3397 case PROCESSOR_RS64A:
3398 rs6000_cost = &rs64a_cost;
3399 break;
3401 case PROCESSOR_MPCCORE:
3402 rs6000_cost = &mpccore_cost;
3403 break;
3405 case PROCESSOR_PPC403:
3406 rs6000_cost = &ppc403_cost;
3407 break;
3409 case PROCESSOR_PPC405:
3410 rs6000_cost = &ppc405_cost;
3411 break;
3413 case PROCESSOR_PPC440:
3414 rs6000_cost = &ppc440_cost;
3415 break;
3417 case PROCESSOR_PPC476:
3418 rs6000_cost = &ppc476_cost;
3419 break;
3421 case PROCESSOR_PPC601:
3422 rs6000_cost = &ppc601_cost;
3423 break;
3425 case PROCESSOR_PPC603:
3426 rs6000_cost = &ppc603_cost;
3427 break;
3429 case PROCESSOR_PPC604:
3430 rs6000_cost = &ppc604_cost;
3431 break;
3433 case PROCESSOR_PPC604e:
3434 rs6000_cost = &ppc604e_cost;
3435 break;
3437 case PROCESSOR_PPC620:
3438 rs6000_cost = &ppc620_cost;
3439 break;
3441 case PROCESSOR_PPC630:
3442 rs6000_cost = &ppc630_cost;
3443 break;
3445 case PROCESSOR_CELL:
3446 rs6000_cost = &ppccell_cost;
3447 break;
3449 case PROCESSOR_PPC750:
3450 case PROCESSOR_PPC7400:
3451 rs6000_cost = &ppc750_cost;
3452 break;
3454 case PROCESSOR_PPC7450:
3455 rs6000_cost = &ppc7450_cost;
3456 break;
3458 case PROCESSOR_PPC8540:
3459 case PROCESSOR_PPC8548:
3460 rs6000_cost = &ppc8540_cost;
3461 break;
3463 case PROCESSOR_PPCE300C2:
3464 case PROCESSOR_PPCE300C3:
3465 rs6000_cost = &ppce300c2c3_cost;
3466 break;
3468 case PROCESSOR_PPCE500MC:
3469 rs6000_cost = &ppce500mc_cost;
3470 break;
3472 case PROCESSOR_PPCE500MC64:
3473 rs6000_cost = &ppce500mc64_cost;
3474 break;
3476 case PROCESSOR_PPCE5500:
3477 rs6000_cost = &ppce5500_cost;
3478 break;
3480 case PROCESSOR_PPCE6500:
3481 rs6000_cost = &ppce6500_cost;
3482 break;
3484 case PROCESSOR_TITAN:
3485 rs6000_cost = &titan_cost;
3486 break;
3488 case PROCESSOR_POWER4:
3489 case PROCESSOR_POWER5:
3490 rs6000_cost = &power4_cost;
3491 break;
3493 case PROCESSOR_POWER6:
3494 rs6000_cost = &power6_cost;
3495 break;
3497 case PROCESSOR_POWER7:
3498 rs6000_cost = &power7_cost;
3499 break;
3501 case PROCESSOR_POWER8:
3502 rs6000_cost = &power8_cost;
3503 break;
3505 case PROCESSOR_PPCA2:
3506 rs6000_cost = &ppca2_cost;
3507 break;
3509 default:
3510 gcc_unreachable ();
3513 if (global_init_p)
3515 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3516 rs6000_cost->simultaneous_prefetches,
3517 global_options.x_param_values,
3518 global_options_set.x_param_values);
3519 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3520 global_options.x_param_values,
3521 global_options_set.x_param_values);
3522 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3523 rs6000_cost->cache_line_size,
3524 global_options.x_param_values,
3525 global_options_set.x_param_values);
3526 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3527 global_options.x_param_values,
3528 global_options_set.x_param_values);
3530 /* Increase loop peeling limits based on performance analysis. */
3531 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3532 global_options.x_param_values,
3533 global_options_set.x_param_values);
3534 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3535 global_options.x_param_values,
3536 global_options_set.x_param_values);
3538 /* If using typedef char *va_list, signal that
3539 __builtin_va_start (&ap, 0) can be optimized to
3540 ap = __builtin_next_arg (0). */
3541 if (DEFAULT_ABI != ABI_V4)
3542 targetm.expand_builtin_va_start = NULL;
3545 /* Set up single/double float flags.
3546 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3547 then set both flags. */
3548 if (TARGET_HARD_FLOAT && TARGET_FPRS
3549 && rs6000_single_float == 0 && rs6000_double_float == 0)
3550 rs6000_single_float = rs6000_double_float = 1;
3552 /* If not explicitly specified via option, decide whether to generate indexed
3553 load/store instructions. */
3554 if (TARGET_AVOID_XFORM == -1)
3555 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3556 DERAT mispredict penalty.  However, the LVE and STVE AltiVec instructions
3557 need indexed accesses and the type used is the scalar type of the element
3558 being loaded or stored. */
3559 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3560 && !TARGET_ALTIVEC);
3562 /* Set the -mrecip options. */
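/* The string is a comma-separated list of entries; a leading '!' on an
   entry inverts it, and the keyword "default" picks a mask based on
   whether -mrecip-precision is in effect.  Illustrative usage (entry
   names come from the recip_options table): -mrecip=default,!div.  */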
3563 if (rs6000_recip_name)
3565 char *p = ASTRDUP (rs6000_recip_name);
3566 char *q;
3567 unsigned int mask, i;
3568 bool invert;
3570 while ((q = strtok (p, ",")) != NULL)
3572 p = NULL;
3573 if (*q == '!')
3575 invert = true;
3576 q++;
3578 else
3579 invert = false;
3581 if (!strcmp (q, "default"))
3582 mask = ((TARGET_RECIP_PRECISION)
3583 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3584 else
3586 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3587 if (!strcmp (q, recip_options[i].string))
3589 mask = recip_options[i].mask;
3590 break;
3593 if (i == ARRAY_SIZE (recip_options))
3595 error ("unknown option for -mrecip=%s", q);
3596 invert = false;
3597 mask = 0;
3598 ret = false;
3602 if (invert)
3603 rs6000_recip_control &= ~mask;
3604 else
3605 rs6000_recip_control |= mask;
3609 /* Set the builtin mask of the various options used that could affect which
3610 builtins are enabled.  In the past we used target_flags, but we've run out
3611 of bits, and some options like SPE and PAIRED are no longer in
3612 target_flags. */
3613 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3614 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3616 fprintf (stderr,
3617 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
3618 rs6000_builtin_mask);
3619 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
3622 /* Initialize all of the registers. */
3623 rs6000_init_hard_regno_mode_ok (global_init_p);
3625 /* Save the initial options in case the user uses function-specific options.  */
3626 if (global_init_p)
3627 target_option_default_node = target_option_current_node
3628 = build_target_option_node ();
3630 /* If not explicitly specified via option, decide whether to generate the
3631 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3632 if (TARGET_LINK_STACK == -1)
3633 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3635 return ret;
3638 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3639 define the target cpu type. */
3641 static void
3642 rs6000_option_override (void)
3644 (void) rs6000_option_override_internal (true);
3648 /* Implement targetm.vectorize.builtin_mask_for_load. */
3649 static tree
3650 rs6000_builtin_mask_for_load (void)
3652 if (TARGET_ALTIVEC || TARGET_VSX)
3653 return altivec_builtin_mask_for_load;
3654 else
3655 return 0;
3658 /* Implement LOOP_ALIGN. */
3660 rs6000_loop_align (rtx label)
3662 basic_block bb;
3663 int ninsns;
3665 /* Don't override loop alignment if -falign-loops was specified. */
3666 if (!can_override_loop_align)
3667 return align_loops_log;
3669 bb = BLOCK_FOR_INSN (label);
3670 ninsns = num_loop_insns(bb->loop_father);
3672 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
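/* Note that LOOP_ALIGN returns a log2 value, so the 5 below requests
   2**5 == 32 byte alignment.  */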
3673 if (ninsns > 4 && ninsns <= 8
3674 && (rs6000_cpu == PROCESSOR_POWER4
3675 || rs6000_cpu == PROCESSOR_POWER5
3676 || rs6000_cpu == PROCESSOR_POWER6
3677 || rs6000_cpu == PROCESSOR_POWER7
3678 || rs6000_cpu == PROCESSOR_POWER8))
3679 return 5;
3680 else
3681 return align_loops_log;
3684 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3685 static int
3686 rs6000_loop_align_max_skip (rtx label)
3688 return (1 << rs6000_loop_align (label)) - 1;
3691 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3692 after applying N iterations.  This routine does not determine
3693 how many iterations are required to reach the desired alignment. */
3695 static bool
3696 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3698 if (is_packed)
3699 return false;
3701 if (TARGET_32BIT)
3703 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3704 return true;
3706 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3707 return true;
3709 return false;
3711 else
3713 if (TARGET_MACHO)
3714 return false;
3716 /* Assume that all other types are naturally aligned.  CHECKME! */
3717 return true;
3721 /* Return true if the vector misalignment factor is supported by the
3722 target. */
3723 static bool
3724 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3725 const_tree type,
3726 int misalignment,
3727 bool is_packed)
3729 if (TARGET_VSX)
3731 /* Return false if the movmisalign pattern is not supported for this mode. */
3732 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3733 return false;
3735 if (misalignment == -1)
3737 /* Misalignment factor is unknown at compile time but we know
3738 it's word aligned. */
3739 if (rs6000_vector_alignment_reachable (type, is_packed))
3741 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3743 if (element_size == 64 || element_size == 32)
3744 return true;
3747 return false;
3750 /* VSX supports word-aligned vectors. */
3751 if (misalignment % 4 == 0)
3752 return true;
3754 return false;
3757 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3758 static int
3759 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3760 tree vectype, int misalign)
3762 unsigned elements;
3763 tree elem_type;
3765 switch (type_of_cost)
3767 case scalar_stmt:
3768 case scalar_load:
3769 case scalar_store:
3770 case vector_stmt:
3771 case vector_load:
3772 case vector_store:
3773 case vec_to_scalar:
3774 case scalar_to_vec:
3775 case cond_branch_not_taken:
3776 return 1;
3778 case vec_perm:
3779 if (TARGET_VSX)
3780 return 3;
3781 else
3782 return 1;
3784 case vec_promote_demote:
3785 if (TARGET_VSX)
3786 return 4;
3787 else
3788 return 1;
3790 case cond_branch_taken:
3791 return 3;
3793 case unaligned_load:
3794 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3796 elements = TYPE_VECTOR_SUBPARTS (vectype);
3797 if (elements == 2)
3798 /* Double word aligned. */
3799 return 2;
3801 if (elements == 4)
3803 switch (misalign)
3805 case 8:
3806 /* Double word aligned. */
3807 return 2;
3809 case -1:
3810 /* Unknown misalignment. */
3811 case 4:
3812 case 12:
3813 /* Word aligned. */
3814 return 22;
3816 default:
3817 gcc_unreachable ();
3822 if (TARGET_ALTIVEC)
3823 /* Misaligned loads are not supported. */
3824 gcc_unreachable ();
3826 return 2;
3828 case unaligned_store:
3829 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3831 elements = TYPE_VECTOR_SUBPARTS (vectype);
3832 if (elements == 2)
3833 /* Double word aligned. */
3834 return 2;
3836 if (elements == 4)
3838 switch (misalign)
3840 case 8:
3841 /* Double word aligned. */
3842 return 2;
3844 case -1:
3845 /* Unknown misalignment. */
3846 case 4:
3847 case 12:
3848 /* Word aligned. */
3849 return 23;
3851 default:
3852 gcc_unreachable ();
3857 if (TARGET_ALTIVEC)
3858 /* Misaligned stores are not supported. */
3859 gcc_unreachable ();
3861 return 2;
3863 case vec_construct:
3864 elements = TYPE_VECTOR_SUBPARTS (vectype);
3865 elem_type = TREE_TYPE (vectype);
3866 /* 32-bit vectors loaded into registers are stored as double
3867 precision, so we need n/2 converts in addition to the usual
3868 n/2 merges to construct a vector of short floats from them. */
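/* For example, constructing a V4SF vector costs 4 + 1 = 5, while
   constructing a V4SI vector costs 4/2 + 1 = 3.  */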
3869 if (SCALAR_FLOAT_TYPE_P (elem_type)
3870 && TYPE_PRECISION (elem_type) == 32)
3871 return elements + 1;
3872 else
3873 return elements / 2 + 1;
3875 default:
3876 gcc_unreachable ();
3880 /* Implement targetm.vectorize.preferred_simd_mode. */
3882 static enum machine_mode
3883 rs6000_preferred_simd_mode (enum machine_mode mode)
3885 if (TARGET_VSX)
3886 switch (mode)
3888 case DFmode:
3889 return V2DFmode;
3890 default:;
3892 if (TARGET_ALTIVEC || TARGET_VSX)
3893 switch (mode)
3895 case SFmode:
3896 return V4SFmode;
3897 case DImode:
3898 return V2DImode;
3899 case SImode:
3900 return V4SImode;
3901 case HImode:
3902 return V8HImode;
3903 case QImode:
3904 return V16QImode;
3905 default:;
3907 if (TARGET_SPE)
3908 switch (mode)
3910 case SFmode:
3911 return V2SFmode;
3912 case SImode:
3913 return V2SImode;
3914 default:;
3916 if (TARGET_PAIRED_FLOAT
3917 && mode == SFmode)
3918 return V2SFmode;
3919 return word_mode;
3922 typedef struct _rs6000_cost_data
3924 struct loop *loop_info;
3925 unsigned cost[3];
3926 } rs6000_cost_data;
3928 /* Test for likely overcommitment of vector hardware resources. If a
3929 loop iteration is relatively large, and too large a percentage of
3930 instructions in the loop are vectorized, the cost model may not
3931 adequately reflect delays from unavailable vector resources.
3932 Penalize the loop body cost for this case. */
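/* Worked example: with vec_cost = 90 and not_vec_cost = 10, the density
   is 90 * 100 / 100 = 90% > 85%, and the size 100 > 70, so the body cost
   is penalized to 90 * (100 + 10) / 100 = 99.  */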
3934 static void
3935 rs6000_density_test (rs6000_cost_data *data)
3937 const int DENSITY_PCT_THRESHOLD = 85;
3938 const int DENSITY_SIZE_THRESHOLD = 70;
3939 const int DENSITY_PENALTY = 10;
3940 struct loop *loop = data->loop_info;
3941 basic_block *bbs = get_loop_body (loop);
3942 int nbbs = loop->num_nodes;
3943 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3944 int i, density_pct;
3946 for (i = 0; i < nbbs; i++)
3948 basic_block bb = bbs[i];
3949 gimple_stmt_iterator gsi;
3951 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3953 gimple stmt = gsi_stmt (gsi);
3954 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3956 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3957 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3958 not_vec_cost++;
3962 free (bbs);
3963 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3965 if (density_pct > DENSITY_PCT_THRESHOLD
3966 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3968 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3969 if (dump_enabled_p ())
3970 dump_printf_loc (MSG_NOTE, vect_location,
3971 "density %d%%, cost %d exceeds threshold, penalizing "
3972 "loop body cost by %d%%", density_pct,
3973 vec_cost + not_vec_cost, DENSITY_PENALTY);
3977 /* Implement targetm.vectorize.init_cost. */
3979 static void *
3980 rs6000_init_cost (struct loop *loop_info)
3982 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3983 data->loop_info = loop_info;
3984 data->cost[vect_prologue] = 0;
3985 data->cost[vect_body] = 0;
3986 data->cost[vect_epilogue] = 0;
3987 return data;
3990 /* Implement targetm.vectorize.add_stmt_cost. */
3992 static unsigned
3993 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3994 struct _stmt_vec_info *stmt_info, int misalign,
3995 enum vect_cost_model_location where)
3997 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3998 unsigned retval = 0;
4000 if (flag_vect_cost_model)
4002 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4003 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4004 misalign);
4005 /* Statements in an inner loop relative to the loop being
4006 vectorized are weighted more heavily. The value here is
4007 arbitrary and could potentially be improved with analysis. */
4008 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4009 count *= 50; /* FIXME. */
4011 retval = (unsigned) (count * stmt_cost);
4012 cost_data->cost[where] += retval;
4015 return retval;
4018 /* Implement targetm.vectorize.finish_cost. */
4020 static void
4021 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4022 unsigned *body_cost, unsigned *epilogue_cost)
4024 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4026 if (cost_data->loop_info)
4027 rs6000_density_test (cost_data);
4029 *prologue_cost = cost_data->cost[vect_prologue];
4030 *body_cost = cost_data->cost[vect_body];
4031 *epilogue_cost = cost_data->cost[vect_epilogue];
4034 /* Implement targetm.vectorize.destroy_cost_data. */
4036 static void
4037 rs6000_destroy_cost_data (void *data)
4039 free (data);
4042 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4043 library with vectorized intrinsics. */
4045 static tree
4046 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4048 char name[32];
4049 const char *suffix = NULL;
4050 tree fntype, new_fndecl, bdecl = NULL_TREE;
4051 int n_args = 1;
4052 const char *bname;
4053 enum machine_mode el_mode, in_mode;
4054 int n, in_n;
4056 /* Libmass is suitable for unsafe math only as it does not correctly support
4057 parts of IEEE with the required precision such as denormals. Only support
4058 it if we have VSX to use the simd d2 or f4 functions.
4059 XXX: Add variable length support. */
4060 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4061 return NULL_TREE;
4063 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4064 n = TYPE_VECTOR_SUBPARTS (type_out);
4065 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4066 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4067 if (el_mode != in_mode
4068 || n != in_n)
4069 return NULL_TREE;
4071 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4073 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4074 switch (fn)
4076 case BUILT_IN_ATAN2:
4077 case BUILT_IN_HYPOT:
4078 case BUILT_IN_POW:
4079 n_args = 2;
4080 /* fall through */
4082 case BUILT_IN_ACOS:
4083 case BUILT_IN_ACOSH:
4084 case BUILT_IN_ASIN:
4085 case BUILT_IN_ASINH:
4086 case BUILT_IN_ATAN:
4087 case BUILT_IN_ATANH:
4088 case BUILT_IN_CBRT:
4089 case BUILT_IN_COS:
4090 case BUILT_IN_COSH:
4091 case BUILT_IN_ERF:
4092 case BUILT_IN_ERFC:
4093 case BUILT_IN_EXP2:
4094 case BUILT_IN_EXP:
4095 case BUILT_IN_EXPM1:
4096 case BUILT_IN_LGAMMA:
4097 case BUILT_IN_LOG10:
4098 case BUILT_IN_LOG1P:
4099 case BUILT_IN_LOG2:
4100 case BUILT_IN_LOG:
4101 case BUILT_IN_SIN:
4102 case BUILT_IN_SINH:
4103 case BUILT_IN_SQRT:
4104 case BUILT_IN_TAN:
4105 case BUILT_IN_TANH:
4106 bdecl = builtin_decl_implicit (fn);
4107 suffix = "d2"; /* pow -> powd2 */
4108 if (el_mode != DFmode
4109 || n != 2
4110 || !bdecl)
4111 return NULL_TREE;
4112 break;
4114 case BUILT_IN_ATAN2F:
4115 case BUILT_IN_HYPOTF:
4116 case BUILT_IN_POWF:
4117 n_args = 2;
4118 /* fall through */
4120 case BUILT_IN_ACOSF:
4121 case BUILT_IN_ACOSHF:
4122 case BUILT_IN_ASINF:
4123 case BUILT_IN_ASINHF:
4124 case BUILT_IN_ATANF:
4125 case BUILT_IN_ATANHF:
4126 case BUILT_IN_CBRTF:
4127 case BUILT_IN_COSF:
4128 case BUILT_IN_COSHF:
4129 case BUILT_IN_ERFF:
4130 case BUILT_IN_ERFCF:
4131 case BUILT_IN_EXP2F:
4132 case BUILT_IN_EXPF:
4133 case BUILT_IN_EXPM1F:
4134 case BUILT_IN_LGAMMAF:
4135 case BUILT_IN_LOG10F:
4136 case BUILT_IN_LOG1PF:
4137 case BUILT_IN_LOG2F:
4138 case BUILT_IN_LOGF:
4139 case BUILT_IN_SINF:
4140 case BUILT_IN_SINHF:
4141 case BUILT_IN_SQRTF:
4142 case BUILT_IN_TANF:
4143 case BUILT_IN_TANHF:
4144 bdecl = builtin_decl_implicit (fn);
4145 suffix = "4"; /* powf -> powf4 */
4146 if (el_mode != SFmode
4147 || n != 4
4148 || !bdecl)
4149 return NULL_TREE;
4150 break;
4152 default:
4153 return NULL_TREE;
4156 else
4157 return NULL_TREE;
4159 gcc_assert (suffix != NULL);
4160 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4161 if (!bname)
4162 return NULL_TREE;
4164 strcpy (name, bname + sizeof ("__builtin_") - 1);
4165 strcat (name, suffix);
4167 if (n_args == 1)
4168 fntype = build_function_type_list (type_out, type_in, NULL);
4169 else if (n_args == 2)
4170 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4171 else
4172 gcc_unreachable ();
4174 /* Build a function declaration for the vectorized function. */
4175 new_fndecl = build_decl (BUILTINS_LOCATION,
4176 FUNCTION_DECL, get_identifier (name), fntype);
4177 TREE_PUBLIC (new_fndecl) = 1;
4178 DECL_EXTERNAL (new_fndecl) = 1;
4179 DECL_IS_NOVOPS (new_fndecl) = 1;
4180 TREE_READONLY (new_fndecl) = 1;
4182 return new_fndecl;
4185 /* Returns a function decl for a vectorized version of the builtin function
4186 FNDECL with result vector type TYPE_OUT and argument type TYPE_IN, or
4187 NULL_TREE if it is not available. */
4189 static tree
4190 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4191 tree type_in)
4193 enum machine_mode in_mode, out_mode;
4194 int in_n, out_n;
4196 if (TARGET_DEBUG_BUILTIN)
4197 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4198 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4199 GET_MODE_NAME (TYPE_MODE (type_out)),
4200 GET_MODE_NAME (TYPE_MODE (type_in)));
4202 if (TREE_CODE (type_out) != VECTOR_TYPE
4203 || TREE_CODE (type_in) != VECTOR_TYPE
4204 || !TARGET_VECTORIZE_BUILTINS)
4205 return NULL_TREE;
4207 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4208 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4209 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4210 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4212 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4214 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4215 switch (fn)
4217 case BUILT_IN_CLZIMAX:
4218 case BUILT_IN_CLZLL:
4219 case BUILT_IN_CLZL:
4220 case BUILT_IN_CLZ:
4221 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4223 if (out_mode == QImode && out_n == 16)
4224 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4225 else if (out_mode == HImode && out_n == 8)
4226 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4227 else if (out_mode == SImode && out_n == 4)
4228 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4229 else if (out_mode == DImode && out_n == 2)
4230 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4232 break;
4233 case BUILT_IN_COPYSIGN:
4234 if (VECTOR_UNIT_VSX_P (V2DFmode)
4235 && out_mode == DFmode && out_n == 2
4236 && in_mode == DFmode && in_n == 2)
4237 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4238 break;
4239 case BUILT_IN_COPYSIGNF:
4240 if (out_mode != SFmode || out_n != 4
4241 || in_mode != SFmode || in_n != 4)
4242 break;
4243 if (VECTOR_UNIT_VSX_P (V4SFmode))
4244 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4245 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4246 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4247 break;
4248 case BUILT_IN_POPCOUNTIMAX:
4249 case BUILT_IN_POPCOUNTLL:
4250 case BUILT_IN_POPCOUNTL:
4251 case BUILT_IN_POPCOUNT:
4252 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4254 if (out_mode == QImode && out_n == 16)
4255 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4256 else if (out_mode == HImode && out_n == 8)
4257 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4258 else if (out_mode == SImode && out_n == 4)
4259 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4260 else if (out_mode == DImode && out_n == 2)
4261 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4263 break;
4264 case BUILT_IN_SQRT:
4265 if (VECTOR_UNIT_VSX_P (V2DFmode)
4266 && out_mode == DFmode && out_n == 2
4267 && in_mode == DFmode && in_n == 2)
4268 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4269 break;
4270 case BUILT_IN_SQRTF:
4271 if (VECTOR_UNIT_VSX_P (V4SFmode)
4272 && out_mode == SFmode && out_n == 4
4273 && in_mode == SFmode && in_n == 4)
4274 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4275 break;
4276 case BUILT_IN_CEIL:
4277 if (VECTOR_UNIT_VSX_P (V2DFmode)
4278 && out_mode == DFmode && out_n == 2
4279 && in_mode == DFmode && in_n == 2)
4280 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4281 break;
4282 case BUILT_IN_CEILF:
4283 if (out_mode != SFmode || out_n != 4
4284 || in_mode != SFmode || in_n != 4)
4285 break;
4286 if (VECTOR_UNIT_VSX_P (V4SFmode))
4287 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4288 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4289 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4290 break;
4291 case BUILT_IN_FLOOR:
4292 if (VECTOR_UNIT_VSX_P (V2DFmode)
4293 && out_mode == DFmode && out_n == 2
4294 && in_mode == DFmode && in_n == 2)
4295 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4296 break;
4297 case BUILT_IN_FLOORF:
4298 if (out_mode != SFmode || out_n != 4
4299 || in_mode != SFmode || in_n != 4)
4300 break;
4301 if (VECTOR_UNIT_VSX_P (V4SFmode))
4302 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4303 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4304 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4305 break;
4306 case BUILT_IN_FMA:
4307 if (VECTOR_UNIT_VSX_P (V2DFmode)
4308 && out_mode == DFmode && out_n == 2
4309 && in_mode == DFmode && in_n == 2)
4310 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4311 break;
4312 case BUILT_IN_FMAF:
4313 if (VECTOR_UNIT_VSX_P (V4SFmode)
4314 && out_mode == SFmode && out_n == 4
4315 && in_mode == SFmode && in_n == 4)
4316 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4317 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4318 && out_mode == SFmode && out_n == 4
4319 && in_mode == SFmode && in_n == 4)
4320 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4321 break;
4322 case BUILT_IN_TRUNC:
4323 if (VECTOR_UNIT_VSX_P (V2DFmode)
4324 && out_mode == DFmode && out_n == 2
4325 && in_mode == DFmode && in_n == 2)
4326 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4327 break;
4328 case BUILT_IN_TRUNCF:
4329 if (out_mode != SFmode || out_n != 4
4330 || in_mode != SFmode || in_n != 4)
4331 break;
4332 if (VECTOR_UNIT_VSX_P (V4SFmode))
4333 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4334 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4335 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4336 break;
4337 case BUILT_IN_NEARBYINT:
4338 if (VECTOR_UNIT_VSX_P (V2DFmode)
4339 && flag_unsafe_math_optimizations
4340 && out_mode == DFmode && out_n == 2
4341 && in_mode == DFmode && in_n == 2)
4342 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4343 break;
4344 case BUILT_IN_NEARBYINTF:
4345 if (VECTOR_UNIT_VSX_P (V4SFmode)
4346 && flag_unsafe_math_optimizations
4347 && out_mode == SFmode && out_n == 4
4348 && in_mode == SFmode && in_n == 4)
4349 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4350 break;
4351 case BUILT_IN_RINT:
4352 if (VECTOR_UNIT_VSX_P (V2DFmode)
4353 && !flag_trapping_math
4354 && out_mode == DFmode && out_n == 2
4355 && in_mode == DFmode && in_n == 2)
4356 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4357 break;
4358 case BUILT_IN_RINTF:
4359 if (VECTOR_UNIT_VSX_P (V4SFmode)
4360 && !flag_trapping_math
4361 && out_mode == SFmode && out_n == 4
4362 && in_mode == SFmode && in_n == 4)
4363 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4364 break;
4365 default:
4366 break;
4370 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4372 enum rs6000_builtins fn
4373 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4374 switch (fn)
4376 case RS6000_BUILTIN_RSQRTF:
4377 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4378 && out_mode == SFmode && out_n == 4
4379 && in_mode == SFmode && in_n == 4)
4380 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4381 break;
4382 case RS6000_BUILTIN_RSQRT:
4383 if (VECTOR_UNIT_VSX_P (V2DFmode)
4384 && out_mode == DFmode && out_n == 2
4385 && in_mode == DFmode && in_n == 2)
4386 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4387 break;
4388 case RS6000_BUILTIN_RECIPF:
4389 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4390 && out_mode == SFmode && out_n == 4
4391 && in_mode == SFmode && in_n == 4)
4392 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4393 break;
4394 case RS6000_BUILTIN_RECIP:
4395 if (VECTOR_UNIT_VSX_P (V2DFmode)
4396 && out_mode == DFmode && out_n == 2
4397 && in_mode == DFmode && in_n == 2)
4398 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4399 break;
4400 default:
4401 break;
4405 /* Generate calls to libmass if appropriate. */
4406 if (rs6000_veclib_handler)
4407 return rs6000_veclib_handler (fndecl, type_out, type_in);
4409 return NULL_TREE;
4412 /* Default CPU string for rs6000*_file_start functions. */
4413 static const char *rs6000_default_cpu;
4415 /* Do anything needed at the start of the asm file. */
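/* With -fverbose-asm, this emits a summary comment such as (illustrative,
   assuming '#' is the assembler comment marker):
     # rs6000/powerpc options: --with-cpu=power7 -mcpu=power7  */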
4417 static void
4418 rs6000_file_start (void)
4420 char buffer[80];
4421 const char *start = buffer;
4422 FILE *file = asm_out_file;
4424 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4426 default_file_start ();
4428 if (flag_verbose_asm)
4430 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4432 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4434 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4435 start = "";
4438 if (global_options_set.x_rs6000_cpu_index)
4440 fprintf (file, "%s -mcpu=%s", start,
4441 processor_target_table[rs6000_cpu_index].name);
4442 start = "";
4445 if (global_options_set.x_rs6000_tune_index)
4447 fprintf (file, "%s -mtune=%s", start,
4448 processor_target_table[rs6000_tune_index].name);
4449 start = "";
4452 if (PPC405_ERRATUM77)
4454 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4455 start = "";
4458 #ifdef USING_ELFOS_H
4459 switch (rs6000_sdata)
4461 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4462 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4463 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4464 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4467 if (rs6000_sdata && g_switch_value)
4469 fprintf (file, "%s -G %d", start,
4470 g_switch_value);
4471 start = "";
4473 #endif
4475 if (*start == '\0')
4476 putc ('\n', file);
4479 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
4481 switch_to_section (toc_section);
4482 switch_to_section (text_section);
4487 /* Return nonzero if this function is known to have a null epilogue. */
4490 direct_return (void)
4492 if (reload_completed)
4494 rs6000_stack_t *info = rs6000_stack_info ();
4496 if (info->first_gp_reg_save == 32
4497 && info->first_fp_reg_save == 64
4498 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4499 && ! info->lr_save_p
4500 && ! info->cr_save_p
4501 && info->vrsave_mask == 0
4502 && ! info->push_p)
4503 return 1;
4506 return 0;
4509 /* Return the number of instructions it takes to form a constant in an
4510 integer register. */
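/* For example, 0x7fff loads with a single addi, 0x12340000 with a single
   addis, and 0x12345678 needs two insns (addis + ori).  On 64-bit targets
   a worst-case constant takes 5 insns: 2 for the high 32 bits, 2 for the
   low 32 bits, plus the shift that combines them.  */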
4513 num_insns_constant_wide (HOST_WIDE_INT value)
4515 /* signed constant loadable with addi */
4516 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4517 return 1;
4519 /* constant loadable with addis */
4520 else if ((value & 0xffff) == 0
4521 && (value >> 31 == -1 || value >> 31 == 0))
4522 return 1;
4524 else if (TARGET_POWERPC64)
4526 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4527 HOST_WIDE_INT high = value >> 31;
4529 if (high == 0 || high == -1)
4530 return 2;
4532 high >>= 1;
4534 if (low == 0)
4535 return num_insns_constant_wide (high) + 1;
4536 else if (high == 0)
4537 return num_insns_constant_wide (low) + 1;
4538 else
4539 return (num_insns_constant_wide (high)
4540 + num_insns_constant_wide (low) + 1);
4543 else
4544 return 2;
4548 num_insns_constant (rtx op, enum machine_mode mode)
4550 HOST_WIDE_INT low, high;
4552 switch (GET_CODE (op))
4554 case CONST_INT:
4555 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4556 && mask64_operand (op, mode))
4557 return 2;
4558 else
4559 return num_insns_constant_wide (INTVAL (op));
4561 case CONST_DOUBLE:
4562 if (mode == SFmode || mode == SDmode)
4564 long l;
4565 REAL_VALUE_TYPE rv;
4567 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4568 if (DECIMAL_FLOAT_MODE_P (mode))
4569 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4570 else
4571 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4572 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4575 long l[2];
4576 REAL_VALUE_TYPE rv;
4578 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4579 if (DECIMAL_FLOAT_MODE_P (mode))
4580 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4581 else
4582 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4583 high = l[WORDS_BIG_ENDIAN == 0];
4584 low = l[WORDS_BIG_ENDIAN != 0];
4586 if (TARGET_32BIT)
4587 return (num_insns_constant_wide (low)
4588 + num_insns_constant_wide (high));
4589 else
4591 if ((high == 0 && low >= 0)
4592 || (high == -1 && low < 0))
4593 return num_insns_constant_wide (low);
4595 else if (mask64_operand (op, mode))
4596 return 2;
4598 else if (low == 0)
4599 return num_insns_constant_wide (high) + 1;
4601 else
4602 return (num_insns_constant_wide (high)
4603 + num_insns_constant_wide (low) + 1);
4606 default:
4607 gcc_unreachable ();
4611 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4612 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4613 corresponding element of the vector, but for V4SFmode and V2SFmode,
4614 the corresponding "float" is interpreted as an SImode integer. */
4616 HOST_WIDE_INT
4617 const_vector_elt_as_int (rtx op, unsigned int elt)
4619 rtx tmp;
4621 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4622 gcc_assert (GET_MODE (op) != V2DImode
4623 && GET_MODE (op) != V2DFmode);
4625 tmp = CONST_VECTOR_ELT (op, elt);
4626 if (GET_MODE (op) == V4SFmode
4627 || GET_MODE (op) == V2SFmode)
4628 tmp = gen_lowpart (SImode, tmp);
4629 return INTVAL (tmp);
4632 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4633 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4634 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4635 all items are set to the same value and contain COPIES replicas of the
4636 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4637 operand and the others are set to the value of the operand's msb. */
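/* Illustrative example: the V8HImode vector { 5, 5, 5, 5, 5, 5, 5, 5 }
   matches with STEP 1, COPIES 1 as a vspltish 5, while { 0x0505, ... }
   matches with STEP 1, COPIES 2 as a vspltisb 5, each halfword holding
   two copies of the byte operand.  */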
4639 static bool
4640 vspltis_constant (rtx op, unsigned step, unsigned copies)
4642 enum machine_mode mode = GET_MODE (op);
4643 enum machine_mode inner = GET_MODE_INNER (mode);
4645 unsigned i;
4646 unsigned nunits;
4647 unsigned bitsize;
4648 unsigned mask;
4650 HOST_WIDE_INT val;
4651 HOST_WIDE_INT splat_val;
4652 HOST_WIDE_INT msb_val;
4654 if (mode == V2DImode || mode == V2DFmode)
4655 return false;
4657 nunits = GET_MODE_NUNITS (mode);
4658 bitsize = GET_MODE_BITSIZE (inner);
4659 mask = GET_MODE_MASK (inner);
4661 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
4662 splat_val = val;
4663 msb_val = val > 0 ? 0 : -1;
4665 /* Construct the value to be splatted, if possible. If not, return 0. */
4666 for (i = 2; i <= copies; i *= 2)
4668 HOST_WIDE_INT small_val;
4669 bitsize /= 2;
4670 small_val = splat_val >> bitsize;
4671 mask >>= bitsize;
4672 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4673 return false;
4674 splat_val = small_val;
4677 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4678 if (EASY_VECTOR_15 (splat_val))
4681 /* Also check if we can splat, and then add the result to itself. Do so if
4682 the value is positive, or if the splat instruction is using OP's mode;
4683 for splat_val < 0, the splat and the add should use the same mode. */
4684 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4685 && (splat_val >= 0 || (step == 1 && copies == 1)))
4688 /* Also check if we are loading up the most significant bit, which can be done by
4689 loading up -1 and shifting the value left by -1. */
4690 else if (EASY_VECTOR_MSB (splat_val, inner))
4693 else
4694 return false;
4696 /* Check if VAL is present in every STEP-th element, and the
4697 other elements are filled with its most significant bit. */
4698 for (i = 0; i < nunits - 1; ++i)
4700 HOST_WIDE_INT desired_val;
4701 if (((BYTES_BIG_ENDIAN ? i + 1 : i) & (step - 1)) == 0)
4702 desired_val = val;
4703 else
4704 desired_val = msb_val;
4706 if (desired_val != const_vector_elt_as_int (op, i))
4707 return false;
4710 return true;
4714 /* Return true if OP is of the given MODE and can be synthesized
4715 with a vspltisb, vspltish or vspltisw. */
4717 bool
4718 easy_altivec_constant (rtx op, enum machine_mode mode)
4720 unsigned step, copies;
4722 if (mode == VOIDmode)
4723 mode = GET_MODE (op);
4724 else if (mode != GET_MODE (op))
4725 return false;
4727 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4728 constants. */
4729 if (mode == V2DFmode)
4730 return zero_constant (op, mode);
4732 if (mode == V2DImode)
4734 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4735 easy. */
4736 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4737 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4738 return false;
4740 if (zero_constant (op, mode))
4741 return true;
4743 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4744 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4745 return true;
4747 return false;
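/* The attempts below halve STEP or, once STEP reaches 1, double COPIES.
   For V4SImode, for instance, the three tries use (STEP, COPIES) of
   (1, 1), (1, 2) and (1, 4), i.e. vspltisw, vspltish and vspltisb.  */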
4750 /* Start with a vspltisw. */
4751 step = GET_MODE_NUNITS (mode) / 4;
4752 copies = 1;
4754 if (vspltis_constant (op, step, copies))
4755 return true;
4757 /* Then try with a vspltish. */
4758 if (step == 1)
4759 copies <<= 1;
4760 else
4761 step >>= 1;
4763 if (vspltis_constant (op, step, copies))
4764 return true;
4766 /* And finally a vspltisb. */
4767 if (step == 1)
4768 copies <<= 1;
4769 else
4770 step >>= 1;
4772 if (vspltis_constant (op, step, copies))
4773 return true;
4775 return false;
4778 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4779 result is OP. Abort if it is not possible. */
4782 gen_easy_altivec_constant (rtx op)
4784 enum machine_mode mode = GET_MODE (op);
4785 int nunits = GET_MODE_NUNITS (mode);
4786 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
4787 unsigned step = nunits / 4;
4788 unsigned copies = 1;
4790 /* Start with a vspltisw. */
4791 if (vspltis_constant (op, step, copies))
4792 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
4794 /* Then try with a vspltish. */
4795 if (step == 1)
4796 copies <<= 1;
4797 else
4798 step >>= 1;
4800 if (vspltis_constant (op, step, copies))
4801 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
4803 /* And finally a vspltisb. */
4804 if (step == 1)
4805 copies <<= 1;
4806 else
4807 step >>= 1;
4809 if (vspltis_constant (op, step, copies))
4810 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
4812 gcc_unreachable ();
4815 const char *
4816 output_vec_const_move (rtx *operands)
4818 int cst, cst2;
4819 enum machine_mode mode;
4820 rtx dest, vec;
4822 dest = operands[0];
4823 vec = operands[1];
4824 mode = GET_MODE (dest);
4826 if (TARGET_VSX)
4828 if (zero_constant (vec, mode))
4829 return "xxlxor %x0,%x0,%x0";
4831 if (mode == V2DImode
4832 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4833 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4834 return "vspltisw %0,-1";
4837 if (TARGET_ALTIVEC)
4839 rtx splat_vec;
4840 if (zero_constant (vec, mode))
4841 return "vxor %0,%0,%0";
4843 splat_vec = gen_easy_altivec_constant (vec);
4844 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4845 operands[1] = XEXP (splat_vec, 0);
4846 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4847 return "#";
4849 switch (GET_MODE (splat_vec))
4851 case V4SImode:
4852 return "vspltisw %0,%1";
4854 case V8HImode:
4855 return "vspltish %0,%1";
4857 case V16QImode:
4858 return "vspltisb %0,%1";
4860 default:
4861 gcc_unreachable ();
4865 gcc_assert (TARGET_SPE);
4867 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4868 pattern of V1DI, V4HI, and V2SF.
4870 FIXME: We should probably return # and add post reload
4871 splitters for these, but this way is so easy ;-). */
4872 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4873 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4874 operands[1] = CONST_VECTOR_ELT (vec, 0);
4875 operands[2] = CONST_VECTOR_ELT (vec, 1);
4876 if (cst == cst2)
4877 return "li %0,%1\n\tevmergelo %0,%0,%0";
4878 else
4879 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4882 /* Initialize paired-float vector TARGET to VALS. */
4884 void
4885 paired_expand_vector_init (rtx target, rtx vals)
4887 enum machine_mode mode = GET_MODE (target);
4888 int n_elts = GET_MODE_NUNITS (mode);
4889 int n_var = 0;
4890 rtx x, new_rtx, tmp, constant_op, op1, op2;
4891 int i;
4893 for (i = 0; i < n_elts; ++i)
4895 x = XVECEXP (vals, 0, i);
4896 if (!(CONST_INT_P (x)
4897 || GET_CODE (x) == CONST_DOUBLE
4898 || GET_CODE (x) == CONST_FIXED))
4899 ++n_var;
4901 if (n_var == 0)
4903 /* Load from constant pool. */
4904 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4905 return;
4908 if (n_var == 2)
4910 /* The vector is initialized only with non-constants. */
4911 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4912 XVECEXP (vals, 0, 1));
4914 emit_move_insn (target, new_rtx);
4915 return;
4918 /* One field is non-constant and the other one is a constant. Load the
4919 constant from the constant pool and use the ps_merge instruction to
4920 construct the whole vector. */
4921 op1 = XVECEXP (vals, 0, 0);
4922 op2 = XVECEXP (vals, 0, 1);
4924 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4926 tmp = gen_reg_rtx (GET_MODE (constant_op));
4927 emit_move_insn (tmp, constant_op);
4929 if (CONSTANT_P (op1))
4930 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4931 else
4932 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4934 emit_move_insn (target, new_rtx);
4937 void
4938 paired_expand_vector_move (rtx operands[])
4940 rtx op0 = operands[0], op1 = operands[1];
4942 emit_move_insn (op0, op1);
4945 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4946 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4947 operands for the relation operation RCODE.  This is a recursive
4948 function. */
4950 static void
4951 paired_emit_vector_compare (enum rtx_code rcode,
4952 rtx dest, rtx op0, rtx op1,
4953 rtx cc_op0, rtx cc_op1)
4955 rtx tmp = gen_reg_rtx (V2SFmode);
4956 rtx tmp1, max, min;
4958 gcc_assert (TARGET_PAIRED_FLOAT);
4959 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4961 switch (rcode)
4963 case LT:
4964 case LTU:
4965 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4966 return;
4967 case GE:
4968 case GEU:
4969 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4970 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4971 return;
4972 case LE:
4973 case LEU:
4974 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4975 return;
4976 case GT:
4977 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4978 return;
4979 case EQ:
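/* Equality is synthesized from min/max: max = sel (a-b, a, b) and
   min = sel (b-a, a, b), so min - max >= 0 iff a == b (NaNs aside),
   which drives the final sel.  */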
4980 tmp1 = gen_reg_rtx (V2SFmode);
4981 max = gen_reg_rtx (V2SFmode);
4982 min = gen_reg_rtx (V2SFmode);
4983 gen_reg_rtx (V2SFmode);
4985 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4986 emit_insn (gen_selv2sf4
4987 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4988 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4989 emit_insn (gen_selv2sf4
4990 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4991 emit_insn (gen_subv2sf3 (tmp1, min, max));
4992 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4993 return;
4994 case NE:
4995 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4996 return;
4997 case UNLE:
4998 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4999 return;
5000 case UNLT:
5001 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5002 return;
5003 case UNGE:
5004 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5005 return;
5006 case UNGT:
5007 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5008 return;
5009 default:
5010 gcc_unreachable ();
5013 return;
5016 /* Emit vector conditional expression.
5017 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5018 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5021 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5022 rtx cond, rtx cc_op0, rtx cc_op1)
5024 enum rtx_code rcode = GET_CODE (cond);
5026 if (!TARGET_PAIRED_FLOAT)
5027 return 0;
5029 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5031 return 1;
5034 /* Initialize vector TARGET to VALS. */
5036 void
5037 rs6000_expand_vector_init (rtx target, rtx vals)
5039 enum machine_mode mode = GET_MODE (target);
5040 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5041 int n_elts = GET_MODE_NUNITS (mode);
5042 int n_var = 0, one_var = -1;
5043 bool all_same = true, all_const_zero = true;
5044 rtx x, mem;
5045 int i;
5047 for (i = 0; i < n_elts; ++i)
5049 x = XVECEXP (vals, 0, i);
5050 if (!(CONST_INT_P (x)
5051 || GET_CODE (x) == CONST_DOUBLE
5052 || GET_CODE (x) == CONST_FIXED))
5053 ++n_var, one_var = i;
5054 else if (x != CONST0_RTX (inner_mode))
5055 all_const_zero = false;
5057 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5058 all_same = false;
5061 if (n_var == 0)
5063 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5064 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5065 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5067 /* Zero register. */
5068 emit_insn (gen_rtx_SET (VOIDmode, target,
5069 gen_rtx_XOR (mode, target, target)));
5070 return;
5072 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5074 /* Splat immediate. */
5075 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5076 return;
5078 else
5080 /* Load from constant pool. */
5081 emit_move_insn (target, const_vec);
5082 return;
5086 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5087 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5089 rtx op0 = XVECEXP (vals, 0, 0);
5090 rtx op1 = XVECEXP (vals, 0, 1);
5091 if (all_same)
5093 if (!MEM_P (op0) && !REG_P (op0))
5094 op0 = force_reg (inner_mode, op0);
5095 if (mode == V2DFmode)
5096 emit_insn (gen_vsx_splat_v2df (target, op0));
5097 else
5098 emit_insn (gen_vsx_splat_v2di (target, op0));
5100 else
5102 op0 = force_reg (inner_mode, op0);
5103 op1 = force_reg (inner_mode, op1);
5104 if (mode == V2DFmode)
5105 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5106 else
5107 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5109 return;
5112 /* With single precision floating point on VSX, note that internally single
5113 precision is actually represented as a double.  Either make 2 V2DF
5114 vectors and convert those vectors to single precision, or do one
5115 conversion and splat the result to the other elements. */
5116 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5118 if (all_same)
5120 rtx freg = gen_reg_rtx (V4SFmode);
5121 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5122 rtx cvt = ((TARGET_XSCVDPSPN)
5123 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5124 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5126 emit_insn (cvt);
5127 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
5129 else
5131 rtx dbl_even = gen_reg_rtx (V2DFmode);
5132 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5133 rtx flt_even = gen_reg_rtx (V4SFmode);
5134 rtx flt_odd = gen_reg_rtx (V4SFmode);
5135 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5136 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5137 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5138 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5140 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5141 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5142 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5143 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5144 rs6000_expand_extract_even (target, flt_even, flt_odd);
5146 return;
5149 /* Store value to stack temp. Load vector element. Splat. However, splat
5150 of 64-bit items is not supported on Altivec. */
5151 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5153 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5154 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5155 XVECEXP (vals, 0, 0));
5156 x = gen_rtx_UNSPEC (VOIDmode,
5157 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5158 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5159 gen_rtvec (2,
5160 gen_rtx_SET (VOIDmode,
5161 target, mem),
5162 x)));
5163 x = gen_rtx_VEC_SELECT (inner_mode, target,
5164 gen_rtx_PARALLEL (VOIDmode,
5165 gen_rtvec (1, const0_rtx)));
5166 emit_insn (gen_rtx_SET (VOIDmode, target,
5167 gen_rtx_VEC_DUPLICATE (mode, x)));
5168 return;
5171 /* One field is non-constant. Load constant then overwrite
5172 varying field. */
5173 if (n_var == 1)
5175 rtx copy = copy_rtx (vals);
5177 /* Load constant part of vector, substitute neighboring value for
5178 varying element. */
5179 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5180 rs6000_expand_vector_init (target, copy);
5182 /* Insert variable. */
5183 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5184 return;
5187 /* Construct the vector in memory one field at a time
5188 and load the whole vector. */
5189 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5190 for (i = 0; i < n_elts; i++)
5191 emit_move_insn (adjust_address_nv (mem, inner_mode,
5192 i * GET_MODE_SIZE (inner_mode)),
5193 XVECEXP (vals, 0, i));
5194 emit_move_insn (target, mem);
5197 /* Set field ELT of TARGET to VAL. */
5199 void
5200 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5202 enum machine_mode mode = GET_MODE (target);
5203 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5204 rtx reg = gen_reg_rtx (mode);
5205 rtx mask, mem, x;
5206 int width = GET_MODE_SIZE (inner_mode);
5207 int i;
5209 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5211 rtx (*set_func) (rtx, rtx, rtx, rtx)
5212 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5213 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5214 return;
5217 /* Load single variable value. */
5218 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5219 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5220 x = gen_rtx_UNSPEC (VOIDmode,
5221 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5222 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5223 gen_rtvec (2,
5224 gen_rtx_SET (VOIDmode,
5225 reg, mem),
5226 x)));
5228 /* Linear sequence. */
5229 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5230 for (i = 0; i < 16; ++i)
5231 XVECEXP (mask, 0, i) = GEN_INT (i);
5233 /* Set permute mask to insert element into target. */
5234 for (i = 0; i < width; ++i)
5235 XVECEXP (mask, 0, elt*width + i)
5236 = GEN_INT (i + 0x10);
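/* Illustrative example: inserting element 1 of a V4SI vector (width 4)
   rewrites mask bytes 4..7 to 0x10..0x13; vperm selector values of 0x10
   and up take bytes from the second source (REG, holding the new value),
   so only those four bytes are replaced.  */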
5237 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5238 x = gen_rtx_UNSPEC (mode,
5239 gen_rtvec (3, target, reg,
5240 force_reg (V16QImode, x)),
5241 UNSPEC_VPERM);
5242 emit_insn (gen_rtx_SET (VOIDmode, target, x));
5245 /* Extract field ELT from VEC into TARGET. */
5247 void
5248 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5250 enum machine_mode mode = GET_MODE (vec);
5251 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5252 rtx mem;
5254 if (VECTOR_MEM_VSX_P (mode))
5256 switch (mode)
5258 default:
5259 break;
5260 case V2DFmode:
5261 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5262 return;
5263 case V2DImode:
5264 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5265 return;
5266 case V4SFmode:
5267 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5268 return;
5272 /* Allocate mode-sized buffer. */
5273 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5275 emit_move_insn (mem, vec);
5277 /* Add offset to field within buffer matching vector element. */
5278 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5280 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5283 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5284 implement ANDing by the mask IN. */
5285 void
5286 build_mask64_2_operands (rtx in, rtx *out)
5288 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5289 int shift;
5291 gcc_assert (GET_CODE (in) == CONST_INT);
5293 c = INTVAL (in);
5294 if (c & 1)
5296 /* Assume c initially something like 0x00fff000000fffff. The idea
5297 is to rotate the word so that the middle ^^^^^^ group of zeros
5298 is at the MS end and can be cleared with an rldicl mask. We then
5299 rotate back and clear off the MS ^^ group of zeros with a
5300 second rldicl. */
5301 c = ~c; /* c == 0xff000ffffff00000 */
5302 lsb = c & -c; /* lsb == 0x0000000000100000 */
5303 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5304 c = ~c; /* c == 0x00fff000000fffff */
5305 c &= -lsb; /* c == 0x00fff00000000000 */
5306 lsb = c & -c; /* lsb == 0x0000100000000000 */
5307 c = ~c; /* c == 0xff000fffffffffff */
5308 c &= -lsb; /* c == 0xff00000000000000 */
5309 shift = 0;
5310 while ((lsb >>= 1) != 0)
5311 shift++; /* shift == 44 on exit from loop */
5312 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5313 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5314 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5316 else
5318 /* Assume c initially something like 0xff000f0000000000. The idea
5319 is to rotate the word so that the ^^^ middle group of zeros
5320 is at the LS end and can be cleared with an rldicr mask. We then
5321 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5322 a second rldicr. */
5323 lsb = c & -c; /* lsb == 0x0000010000000000 */
5324 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5325 c = ~c; /* c == 0x00fff0ffffffffff */
5326 c &= -lsb; /* c == 0x00fff00000000000 */
5327 lsb = c & -c; /* lsb == 0x0000100000000000 */
5328 c = ~c; /* c == 0xff000fffffffffff */
5329 c &= -lsb; /* c == 0xff00000000000000 */
5330 shift = 0;
5331 while ((lsb >>= 1) != 0)
5332 shift++; /* shift == 44 on exit from loop */
5333 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5334 m1 >>= shift; /* m1 == 0x0000000000000fff */
5335 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5338 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5339 masks will be all 1's. We are guaranteed more than one transition. */
5340 out[0] = GEN_INT (64 - shift);
5341 out[1] = GEN_INT (m1);
5342 out[2] = GEN_INT (shift);
5343 out[3] = GEN_INT (m2);
5346 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5348 bool
5349 invalid_e500_subreg (rtx op, enum machine_mode mode)
5351 if (TARGET_E500_DOUBLE)
5353 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5354 subreg:TI and reg:TF. Decimal float modes are like integer
5355 modes (only low part of each register used) for this
5356 purpose. */
5357 if (GET_CODE (op) == SUBREG
5358 && (mode == SImode || mode == DImode || mode == TImode
5359 || mode == DDmode || mode == TDmode || mode == PTImode)
5360 && REG_P (SUBREG_REG (op))
5361 && (GET_MODE (SUBREG_REG (op)) == DFmode
5362 || GET_MODE (SUBREG_REG (op)) == TFmode))
5363 return true;
5365 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5366 reg:TI. */
5367 if (GET_CODE (op) == SUBREG
5368 && (mode == DFmode || mode == TFmode)
5369 && REG_P (SUBREG_REG (op))
5370 && (GET_MODE (SUBREG_REG (op)) == DImode
5371 || GET_MODE (SUBREG_REG (op)) == TImode
5372 || GET_MODE (SUBREG_REG (op)) == PTImode
5373 || GET_MODE (SUBREG_REG (op)) == DDmode
5374 || GET_MODE (SUBREG_REG (op)) == TDmode))
5375 return true;
5378 if (TARGET_SPE
5379 && GET_CODE (op) == SUBREG
5380 && mode == SImode
5381 && REG_P (SUBREG_REG (op))
5382 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5383 return true;
5385 return false;
5388 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5389 selects whether the alignment is ABI-mandated, optional, or
5390 both ABI-mandated and optional alignment. */
5392 unsigned int
5393 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5395 if (how != align_opt)
5397 if (TREE_CODE (type) == VECTOR_TYPE)
5399 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5400 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5402 if (align < 64)
5403 align = 64;
5405 else if (align < 128)
5406 align = 128;
5408 else if (TARGET_E500_DOUBLE
5409 && TREE_CODE (type) == REAL_TYPE
5410 && TYPE_MODE (type) == DFmode)
5412 if (align < 64)
5413 align = 64;
5417 if (how != align_abi)
5419 if (TREE_CODE (type) == ARRAY_TYPE
5420 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5422 if (align < BITS_PER_WORD)
5423 align = BITS_PER_WORD;
5427 return align;
5430 /* AIX increases natural record alignment to doubleword if the first
5431 field is an FP double, while the FP fields remain word aligned. */
5433 unsigned int
5434 rs6000_special_round_type_align (tree type, unsigned int computed,
5435 unsigned int specified)
5437 unsigned int align = MAX (computed, specified);
5438 tree field = TYPE_FIELDS (type);
5440 /* Skip all non-field decls. */
5441 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5442 field = DECL_CHAIN (field);
5444 if (field != NULL && field != type)
5446 type = TREE_TYPE (field);
5447 while (TREE_CODE (type) == ARRAY_TYPE)
5448 type = TREE_TYPE (type);
5450 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5451 align = MAX (align, 64);
5454 return align;
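/* [Editor's sketch, not from the GCC sources.] What the AIX rule above
   means for user code, assuming 32-bit AIX: because the first field is
   an FP double, the record's natural alignment is raised to a
   doubleword, so the struct pads out to 16 bytes -- even though the
   double itself is only guaranteed word alignment when the record is
   nested inside another aggregate.  */

struct aix_example
{
  double d;  /* first field is an FP double: record alignment becomes 64 */
  int i;     /* 4 bytes of data ... */
};           /* ... plus 4 bytes of tail padding: sizeof == 16, not 12 */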
5457 /* Darwin increases record alignment to the natural alignment of
5458 the first field. */
5460 unsigned int
5461 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5462 unsigned int specified)
5464 unsigned int align = MAX (computed, specified);
5466 if (TYPE_PACKED (type))
5467 return align;
5469 /* Find the first field, looking down into aggregates. */
5470 do {
5471 tree field = TYPE_FIELDS (type);
5472 /* Skip all non-field decls. */
5473 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5474 field = DECL_CHAIN (field);
5475 if (! field)
5476 break;
5477 /* A packed field does not contribute any extra alignment. */
5478 if (DECL_PACKED (field))
5479 return align;
5480 type = TREE_TYPE (field);
5481 while (TREE_CODE (type) == ARRAY_TYPE)
5482 type = TREE_TYPE (type);
5483 } while (AGGREGATE_TYPE_P (type));
5485 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5486 align = MAX (align, TYPE_ALIGN (type));
5488 return align;
5491 /* Return 1 for an operand in small memory on V.4/eabi. */
5493 int
5494 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5495 enum machine_mode mode ATTRIBUTE_UNUSED)
5497 #if TARGET_ELF
5498 rtx sym_ref;
5500 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5501 return 0;
5503 if (DEFAULT_ABI != ABI_V4)
5504 return 0;
5506 /* Vector and float memory instructions have a limited offset on the
5507 SPE, so using a vector or float variable directly as an operand is
5508 not useful. */
5509 if (TARGET_SPE
5510 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5511 return 0;
5513 if (GET_CODE (op) == SYMBOL_REF)
5514 sym_ref = op;
5516 else if (GET_CODE (op) != CONST
5517 || GET_CODE (XEXP (op, 0)) != PLUS
5518 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5519 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5520 return 0;
5522 else
5524 rtx sum = XEXP (op, 0);
5525 HOST_WIDE_INT summand;
5527 /* We have to be careful here, because it is the referenced address
5528 that must be within 32k of _SDA_BASE_, not just the symbol. */
5529 summand = INTVAL (XEXP (sum, 1));
5530 if (summand < 0 || summand > g_switch_value)
5531 return 0;
5533 sym_ref = XEXP (sum, 0);
5536 return SYMBOL_REF_SMALL_P (sym_ref);
5537 #else
5538 return 0;
5539 #endif
5542 /* Return true if either operand is a general purpose register. */
5544 bool
5545 gpr_or_gpr_p (rtx op0, rtx op1)
5547 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5548 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5551 /* Return true if this is a move direct operation between GPR registers and
5552 floating point/VSX registers. */
5554 bool
5555 direct_move_p (rtx op0, rtx op1)
5557 int regno0, regno1;
5559 if (!REG_P (op0) || !REG_P (op1))
5560 return false;
5562 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
5563 return false;
5565 regno0 = REGNO (op0);
5566 regno1 = REGNO (op1);
5567 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
5568 return false;
5570 if (INT_REGNO_P (regno0))
5571 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
5573 else if (INT_REGNO_P (regno1))
5575 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
5576 return true;
5578 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
5579 return true;
5582 return false;
5585 /* Return true if this is a load or store quad operation. */
5587 bool
5588 quad_load_store_p (rtx op0, rtx op1)
5590 bool ret;
5592 if (!TARGET_QUAD_MEMORY)
5593 ret = false;
5595 else if (REG_P (op0) && MEM_P (op1))
5596 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
5597 && quad_memory_operand (op1, GET_MODE (op1))
5598 && !reg_overlap_mentioned_p (op0, op1));
5600 else if (MEM_P (op0) && REG_P (op1))
5601 ret = (quad_memory_operand (op0, GET_MODE (op0))
5602 && quad_int_reg_operand (op1, GET_MODE (op1)));
5604 else
5605 ret = false;
5607 if (TARGET_DEBUG_ADDR)
5609 fprintf (stderr, "\n========== quad_load_store, return %s\n",
5610 ret ? "true" : "false");
5611 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
5614 return ret;
5617 /* Given an address, return a constant offset term if one exists. */
5619 static rtx
5620 address_offset (rtx op)
5622 if (GET_CODE (op) == PRE_INC
5623 || GET_CODE (op) == PRE_DEC)
5624 op = XEXP (op, 0);
5625 else if (GET_CODE (op) == PRE_MODIFY
5626 || GET_CODE (op) == LO_SUM)
5627 op = XEXP (op, 1);
5629 if (GET_CODE (op) == CONST)
5630 op = XEXP (op, 0);
5632 if (GET_CODE (op) == PLUS)
5633 op = XEXP (op, 1);
5635 if (CONST_INT_P (op))
5636 return op;
5638 return NULL_RTX;
5641 /* Return true if the MEM operand is a memory operand suitable for use
5642 with a (full width, possibly multiple) gpr load/store. On
5643 powerpc64 this means the offset must be divisible by 4.
5644 Implements 'Y' constraint.
5646 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5647 a constraint function we know the operand has satisfied a suitable
5648 memory predicate. Also accept some odd rtl generated by reload
5649 (see rs6000_legitimize_reload_address for various forms). It is
5650 important that reload rtl be accepted by appropriate constraints
5651 but not by the operand predicate.
5653 Offsetting a lo_sum should not be allowed, except where we know by
5654 alignment that a 32k boundary is not crossed, but see the ???
5655 comment in rs6000_legitimize_reload_address. Note that by
5656 "offsetting" here we mean a further offset to access parts of the
5657 MEM. It's fine to have a lo_sum where the inner address is offset
5658 from a sym, since the same sym+offset will appear in the high part
5659 of the address calculation. */
5661 bool
5662 mem_operand_gpr (rtx op, enum machine_mode mode)
5664 unsigned HOST_WIDE_INT offset;
5665 int extra;
5666 rtx addr = XEXP (op, 0);
5668 op = address_offset (addr);
5669 if (op == NULL_RTX)
5670 return true;
5672 offset = INTVAL (op);
5673 if (TARGET_POWERPC64 && (offset & 3) != 0)
5674 return false;
5676 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5677 gcc_assert (extra >= 0);
5679 if (GET_CODE (addr) == LO_SUM)
5680 /* For lo_sum addresses, we must allow any offset except one that
5681 causes a wrap, so test only the low 16 bits. */
5682 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
5684 return offset + 0x8000 < 0x10000u - extra;
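/* [Editor's sketch, not from the GCC sources.] The lo_sum adjustment
   above, ((offset & 0xffff) ^ 0x8000) - 0x8000, sign-extends the low
   16 bits of OFFSET using only unsigned-safe operations; the same
   idiom recurs throughout this file. A standalone check:  */

#include <assert.h>
#include <stdint.h>

static int64_t
sext16 (uint64_t offset)
{
  return (int64_t) ((offset & 0xffff) ^ 0x8000) - 0x8000;
}

int
main (void)
{
  assert (sext16 (0x0000) == 0);
  assert (sext16 (0x7fff) == 32767);      /* largest positive 16-bit */
  assert (sext16 (0x8000) == -32768);     /* smallest negative 16-bit */
  assert (sext16 (0x12348001) == -32767); /* high bits are discarded */
  return 0;
}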
5687 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5689 static bool
5690 reg_offset_addressing_ok_p (enum machine_mode mode)
5692 switch (mode)
5694 case V16QImode:
5695 case V8HImode:
5696 case V4SFmode:
5697 case V4SImode:
5698 case V2DFmode:
5699 case V2DImode:
5700 case TImode:
5701 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
5702 TImode is not a vector mode, if we want to use the VSX registers to
5703 move it around, we need to restrict ourselves to reg+reg
5704 addressing. */
5705 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5706 return false;
5707 break;
5709 case V4HImode:
5710 case V2SImode:
5711 case V1DImode:
5712 case V2SFmode:
5713 /* Paired vector modes. Only reg+reg addressing is valid. */
5714 if (TARGET_PAIRED_FLOAT)
5715 return false;
5716 break;
5718 case SDmode:
5719 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
5720 addressing for the LFIWZX and STFIWX instructions. */
5721 if (TARGET_NO_SDMODE_STACK)
5722 return false;
5723 break;
5725 default:
5726 break;
5729 return true;
5732 static bool
5733 virtual_stack_registers_memory_p (rtx op)
5735 int regnum;
5737 if (GET_CODE (op) == REG)
5738 regnum = REGNO (op);
5740 else if (GET_CODE (op) == PLUS
5741 && GET_CODE (XEXP (op, 0)) == REG
5742 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5743 regnum = REGNO (XEXP (op, 0));
5745 else
5746 return false;
5748 return (regnum >= FIRST_VIRTUAL_REGISTER
5749 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5752 /* Return true if a MODE sized memory accesses to OP plus OFFSET
5753 is known to not straddle a 32k boundary. */
5755 static bool
5756 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5757 enum machine_mode mode)
5759 tree decl, type;
5760 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
5762 if (GET_CODE (op) != SYMBOL_REF)
5763 return false;
5765 dsize = GET_MODE_SIZE (mode);
5766 decl = SYMBOL_REF_DECL (op);
5767 if (!decl)
5769 if (dsize == 0)
5770 return false;
5772 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5773 replacing memory addresses with an anchor plus offset. We
5774 could find the decl by rummaging around in the block->objects
5775 VEC for the given offset but that seems like too much work. */
5776 dalign = BITS_PER_UNIT;
5777 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5778 && SYMBOL_REF_ANCHOR_P (op)
5779 && SYMBOL_REF_BLOCK (op) != NULL)
5781 struct object_block *block = SYMBOL_REF_BLOCK (op);
5783 dalign = block->alignment;
5784 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5786 else if (CONSTANT_POOL_ADDRESS_P (op))
5788 /* It would be nice to have get_pool_align (). */
5789 enum machine_mode cmode = get_pool_mode (op);
5791 dalign = GET_MODE_ALIGNMENT (cmode);
5794 else if (DECL_P (decl))
5796 dalign = DECL_ALIGN (decl);
5798 if (dsize == 0)
5800 /* Allow BLKmode when the entire object is known to not
5801 cross a 32k boundary. */
5802 if (!DECL_SIZE_UNIT (decl))
5803 return false;
5805 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5806 return false;
5808 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5809 if (dsize > 32768)
5810 return false;
5812 return dalign / BITS_PER_UNIT >= dsize;
5815 else
5817 type = TREE_TYPE (decl);
5819 dalign = TYPE_ALIGN (type);
5820 if (CONSTANT_CLASS_P (decl))
5821 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5822 else
5823 dalign = DATA_ALIGNMENT (decl, dalign);
5825 if (dsize == 0)
5827 /* BLKmode, check the entire object. */
5828 if (TREE_CODE (decl) == STRING_CST)
5829 dsize = TREE_STRING_LENGTH (decl);
5830 else if (TYPE_SIZE_UNIT (type)
5831 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5832 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5833 else
5834 return false;
5835 if (dsize > 32768)
5836 return false;
5838 return dalign / BITS_PER_UNIT >= dsize;
5842 /* Find how many bits of the alignment we know for this access. */
5843 mask = dalign / BITS_PER_UNIT - 1;
5844 lsb = offset & -offset;
5845 mask &= lsb - 1;
5846 dalign = mask + 1;
5848 return dalign >= dsize;
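/* [Editor's sketch, not from the GCC sources.] The branch-free tail of
   the function above computes the guaranteed alignment of BASE+OFFSET:
   the smaller of the object's alignment and the lowest set bit of
   OFFSET, with OFFSET == 0 leaving the alignment untouched.  */

#include <assert.h>
#include <stdint.h>

static uint64_t
known_align (uint64_t dalign_bytes, uint64_t offset)
{
  uint64_t mask = dalign_bytes - 1; /* dalign_bytes is a power of two */
  uint64_t lsb = offset & -offset;  /* lowest set bit; 0 if offset == 0 */
  mask &= lsb - 1;                  /* keep only the bits below the lsb */
  return mask + 1;
}

int
main (void)
{
  assert (known_align (16, 0) == 16);  /* offset 0: full object alignment */
  assert (known_align (16, 8) == 8);   /* limited by the offset */
  assert (known_align (16, 12) == 4);
  assert (known_align (4, 32) == 4);   /* limited by the object */
  return 0;
}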
5851 static bool
5852 constant_pool_expr_p (rtx op)
5854 rtx base, offset;
5856 split_const (op, &base, &offset);
5857 return (GET_CODE (base) == SYMBOL_REF
5858 && CONSTANT_POOL_ADDRESS_P (base)
5859 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5862 static const_rtx tocrel_base, tocrel_offset;
5864 /* Return true if OP is a toc pointer relative address (the output
5865 of create_TOC_reference). If STRICT, do not match high part or
5866 non-split -mcmodel=large/medium toc pointer relative addresses. */
5868 bool
5869 toc_relative_expr_p (const_rtx op, bool strict)
5871 if (!TARGET_TOC)
5872 return false;
5874 if (TARGET_CMODEL != CMODEL_SMALL)
5876 /* Only match the low part. */
5877 if (GET_CODE (op) == LO_SUM
5878 && REG_P (XEXP (op, 0))
5879 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5880 op = XEXP (op, 1);
5881 else if (strict)
5882 return false;
5885 tocrel_base = op;
5886 tocrel_offset = const0_rtx;
5887 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5889 tocrel_base = XEXP (op, 0);
5890 tocrel_offset = XEXP (op, 1);
5893 return (GET_CODE (tocrel_base) == UNSPEC
5894 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5897 /* Return true if X is a constant pool address, and also for cmodel=medium
5898 if X is a toc-relative address known to be offsettable within MODE. */
5900 bool
5901 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5902 bool strict)
5904 return (toc_relative_expr_p (x, strict)
5905 && (TARGET_CMODEL != CMODEL_MEDIUM
5906 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5907 || mode == QImode
5908 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5909 INTVAL (tocrel_offset), mode)));
5912 static bool
5913 legitimate_small_data_p (enum machine_mode mode, rtx x)
5915 return (DEFAULT_ABI == ABI_V4
5916 && !flag_pic && !TARGET_TOC
5917 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5918 && small_data_operand (x, mode));
5921 /* SPE offset addressing is limited to 5-bits worth of double words. */
5922 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
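/* [Editor's sketch, not from the GCC sources.] The mask test above
   accepts exactly the 32 doubleword-aligned byte offsets 0, 8, ...,
   248 -- a 5-bit doubleword count scaled by 8:  */

#include <assert.h>

int
main (void)
{
  int n, hits = 0;
  for (n = 0; n < 256; n++)
    if ((n & ~0xf8) == 0)       /* SPE_CONST_OFFSET_OK */
      {
        assert (n % 8 == 0);    /* doubleword aligned */
        hits++;
      }
  assert (hits == 32);          /* 5 bits worth of doublewords */
  assert ((256 & ~0xf8) != 0);  /* too large */
  assert ((-8 & ~0xf8) != 0);   /* negative offsets rejected */
  return 0;
}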
5924 bool
5925 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5926 bool strict, bool worst_case)
5928 unsigned HOST_WIDE_INT offset;
5929 unsigned int extra;
5931 if (GET_CODE (x) != PLUS)
5932 return false;
5933 if (!REG_P (XEXP (x, 0)))
5934 return false;
5935 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5936 return false;
5937 if (!reg_offset_addressing_ok_p (mode))
5938 return virtual_stack_registers_memory_p (x);
5939 if (legitimate_constant_pool_address_p (x, mode, strict))
5940 return true;
5941 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5942 return false;
5944 offset = INTVAL (XEXP (x, 1));
5945 extra = 0;
5946 switch (mode)
5948 case V4HImode:
5949 case V2SImode:
5950 case V1DImode:
5951 case V2SFmode:
5952 /* SPE vector modes. */
5953 return SPE_CONST_OFFSET_OK (offset);
5955 case DFmode:
5956 case DDmode:
5957 case DImode:
5958 /* On e500v2, we may have:
5960 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
5962 Which gets addressed with evldd instructions. */
5963 if (TARGET_E500_DOUBLE)
5964 return SPE_CONST_OFFSET_OK (offset);
5966 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5967 addressing. */
5968 if (VECTOR_MEM_VSX_P (mode))
5969 return false;
5971 if (!worst_case)
5972 break;
5973 if (!TARGET_POWERPC64)
5974 extra = 4;
5975 else if (offset & 3)
5976 return false;
5977 break;
5979 case TFmode:
5980 case TDmode:
5981 case TImode:
5982 case PTImode:
5983 if (TARGET_E500_DOUBLE)
5984 return (SPE_CONST_OFFSET_OK (offset)
5985 && SPE_CONST_OFFSET_OK (offset + 8));
5987 extra = 8;
5988 if (!worst_case)
5989 break;
5990 if (!TARGET_POWERPC64)
5991 extra = 12;
5992 else if (offset & 3)
5993 return false;
5994 break;
5996 default:
5997 break;
6000 offset += 0x8000;
6001 return offset < 0x10000 - extra;
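/* [Editor's sketch, not from the GCC sources.] The final test above is
   a biased unsigned comparison: adding 0x8000 maps the signed 16-bit
   displacement range [-0x8000, 0x7fff] onto [0, 0xffff], so one
   unsigned compare checks both bounds, and EXTRA trims the upper bound
   so the highest-addressed word of a multi-word access (e.g. TImode)
   still gets a valid 16-bit displacement.  */

#include <assert.h>
#include <stdint.h>

static int
offset_ok (int64_t offset, unsigned extra)
{
  return (uint64_t) (offset + 0x8000) < 0x10000u - extra;
}

int
main (void)
{
  assert (offset_ok (-0x8000, 0));  /* lowest displacement */
  assert (offset_ok (0x7fff, 0));   /* highest, single-word access */
  assert (!offset_ok (0x8000, 0));  /* just out of range */
  assert (!offset_ok (-0x8001, 0));
  assert (offset_ok (0x7ff7, 8));   /* 16-byte access: offset + 8 fits */
  assert (!offset_ok (0x7ff8, 8));  /* offset + 8 would not fit */
  return 0;
}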
6004 bool
6005 legitimate_indexed_address_p (rtx x, int strict)
6007 rtx op0, op1;
6009 if (GET_CODE (x) != PLUS)
6010 return false;
6012 op0 = XEXP (x, 0);
6013 op1 = XEXP (x, 1);
6015 /* Recognize the rtl generated by reload which we know will later be
6016 replaced with proper base and index regs. */
6017 if (!strict
6018 && reload_in_progress
6019 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6020 && REG_P (op1))
6021 return true;
6023 return (REG_P (op0) && REG_P (op1)
6024 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6025 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6026 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6027 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6030 bool
6031 avoiding_indexed_address_p (enum machine_mode mode)
6033 /* Avoid indexed addressing for modes that have non-indexed
6034 load/store instruction forms. */
6035 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6038 bool
6039 legitimate_indirect_address_p (rtx x, int strict)
6041 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6044 bool
6045 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6047 if (!TARGET_MACHO || !flag_pic
6048 || mode != SImode || GET_CODE (x) != MEM)
6049 return false;
6050 x = XEXP (x, 0);
6052 if (GET_CODE (x) != LO_SUM)
6053 return false;
6054 if (GET_CODE (XEXP (x, 0)) != REG)
6055 return false;
6056 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6057 return false;
6058 x = XEXP (x, 1);
6060 return CONSTANT_P (x);
6063 static bool
6064 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6066 if (GET_CODE (x) != LO_SUM)
6067 return false;
6068 if (GET_CODE (XEXP (x, 0)) != REG)
6069 return false;
6070 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6071 return false;
6072 /* Restrict addressing for DI because of our SUBREG hackery. */
6073 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6074 return false;
6075 x = XEXP (x, 1);
6077 if (TARGET_ELF || TARGET_MACHO)
6079 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
6080 return false;
6081 if (TARGET_TOC)
6082 return false;
6083 if (GET_MODE_NUNITS (mode) != 1)
6084 return false;
6085 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6086 && !(/* ??? Assume floating point reg based on mode? */
6087 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6088 && (mode == DFmode || mode == DDmode)))
6089 return false;
6091 return CONSTANT_P (x);
6094 return false;
6098 /* Try machine-dependent ways of modifying an illegitimate address
6099 to be legitimate. If we find one, return the new, valid address.
6100 This is used from only one place: `memory_address' in explow.c.
6102 OLDX is the address as it was before break_out_memory_refs was
6103 called. In some cases it is useful to look at this to decide what
6104 needs to be done.
6106 It is always safe for this function to do nothing. It exists to
6107 recognize opportunities to optimize the output.
6109 On RS/6000, first check for the sum of a register with a constant
6110 integer that is out of range. If so, generate code to add the
6111 constant with the low-order 16 bits masked to the register and force
6112 this result into another register (this can be done with `cau').
6113 Then generate an address of REG+(CONST&0xffff), allowing for the
6114 possibility of bit 16 being a one.
6116 Then check for the sum of a register and something not constant; try to
6117 load the non-constant part into a register and return the sum. */
6119 static rtx
6120 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6121 enum machine_mode mode)
6123 unsigned int extra;
6125 if (!reg_offset_addressing_ok_p (mode))
6127 if (virtual_stack_registers_memory_p (x))
6128 return x;
6130 /* In theory we should not be seeing addresses of the form reg+0,
6131 but just in case it is generated, optimize it away. */
6132 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6133 return force_reg (Pmode, XEXP (x, 0));
6135 /* For TImode with load/store quad, restrict addresses to just a single
6136 pointer, so it works with both GPRs and VSX registers. */
6137 /* Make sure both operands are registers. */
6138 else if (GET_CODE (x) == PLUS
6139 && (mode != TImode || !TARGET_QUAD_MEMORY))
6140 return gen_rtx_PLUS (Pmode,
6141 force_reg (Pmode, XEXP (x, 0)),
6142 force_reg (Pmode, XEXP (x, 1)));
6143 else
6144 return force_reg (Pmode, x);
6146 if (GET_CODE (x) == SYMBOL_REF)
6148 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6149 if (model != 0)
6150 return rs6000_legitimize_tls_address (x, model);
6153 extra = 0;
6154 switch (mode)
6156 case TFmode:
6157 case TDmode:
6158 case TImode:
6159 case PTImode:
6160 /* As in legitimate_offset_address_p we do not assume
6161 worst-case. The mode here is just a hint as to the registers
6162 used. A TImode is usually in gprs, but may actually be in
6163 fprs. Leave worst-case scenario for reload to handle via
6164 insn constraints. PTImode is only GPRs. */
6165 extra = 8;
6166 break;
6167 default:
6168 break;
6171 if (GET_CODE (x) == PLUS
6172 && GET_CODE (XEXP (x, 0)) == REG
6173 && GET_CODE (XEXP (x, 1)) == CONST_INT
6174 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6175 >= 0x10000 - extra)
6176 && !(SPE_VECTOR_MODE (mode)
6177 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6179 HOST_WIDE_INT high_int, low_int;
6180 rtx sum;
6181 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6182 if (low_int >= 0x8000 - extra)
6183 low_int = 0;
6184 high_int = INTVAL (XEXP (x, 1)) - low_int;
6185 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6186 GEN_INT (high_int)), 0);
6187 return plus_constant (Pmode, sum, low_int);
6189 else if (GET_CODE (x) == PLUS
6190 && GET_CODE (XEXP (x, 0)) == REG
6191 && GET_CODE (XEXP (x, 1)) != CONST_INT
6192 && GET_MODE_NUNITS (mode) == 1
6193 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6194 || (/* ??? Assume floating point reg based on mode? */
6195 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6196 && (mode == DFmode || mode == DDmode)))
6197 && !avoiding_indexed_address_p (mode))
6199 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6200 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6202 else if (SPE_VECTOR_MODE (mode)
6203 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6205 if (mode == DImode)
6206 return x;
6207 /* We accept [reg + reg] and [reg + OFFSET]. */
6209 if (GET_CODE (x) == PLUS)
6211 rtx op1 = XEXP (x, 0);
6212 rtx op2 = XEXP (x, 1);
6213 rtx y;
6215 op1 = force_reg (Pmode, op1);
6217 if (GET_CODE (op2) != REG
6218 && (GET_CODE (op2) != CONST_INT
6219 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6220 || (GET_MODE_SIZE (mode) > 8
6221 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6222 op2 = force_reg (Pmode, op2);
6224 /* We can't always do [reg + reg] for these, because [reg +
6225 reg + offset] is not a legitimate addressing mode. */
6226 y = gen_rtx_PLUS (Pmode, op1, op2);
6228 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6229 return force_reg (Pmode, y);
6230 else
6231 return y;
6234 return force_reg (Pmode, x);
6236 else if ((TARGET_ELF
6237 #if TARGET_MACHO
6238 || !MACHO_DYNAMIC_NO_PIC_P
6239 #endif
6241 && TARGET_32BIT
6242 && TARGET_NO_TOC
6243 && ! flag_pic
6244 && GET_CODE (x) != CONST_INT
6245 && GET_CODE (x) != CONST_DOUBLE
6246 && CONSTANT_P (x)
6247 && GET_MODE_NUNITS (mode) == 1
6248 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6249 || (/* ??? Assume floating point reg based on mode? */
6250 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6251 && (mode == DFmode || mode == DDmode))))
6253 rtx reg = gen_reg_rtx (Pmode);
6254 if (TARGET_ELF)
6255 emit_insn (gen_elf_high (reg, x));
6256 else
6257 emit_insn (gen_macho_high (reg, x));
6258 return gen_rtx_LO_SUM (Pmode, reg, x);
6260 else if (TARGET_TOC
6261 && GET_CODE (x) == SYMBOL_REF
6262 && constant_pool_expr_p (x)
6263 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6264 return create_TOC_reference (x, NULL_RTX);
6265 else
6266 return x;
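/* [Editor's sketch, not from the GCC sources.] The HIGH_INT/LOW_INT
   split in rs6000_legitimize_address above: LOW_INT is the
   sign-extended low 16 bits and HIGH_INT the remainder, so HIGH_INT is
   always a multiple of 0x10000 (loadable with one addis) and LOW_INT
   always fits the 16-bit displacement field of the memory insn.  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t val, low, high;

  val = 0x12345678;
  low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  high = val - low;
  assert (low == 0x5678 && high == 0x12340000);

  val = 0x1234affe;                /* low half >= 0x8000: low goes
                                      negative, high rounds up instead */
  low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  high = val - low;
  assert (low == -0x5002 && high == 0x12350000);

  assert ((high & 0xffff) == 0);            /* one addis suffices */
  assert (low >= -0x8000 && low < 0x8000);  /* fits a 16-bit d-field */
  assert (high + low == val);               /* nothing lost */
  return 0;
}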
6269 /* Debug version of rs6000_legitimize_address. */
6270 static rtx
6271 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6273 rtx ret;
6274 rtx insns;
6276 start_sequence ();
6277 ret = rs6000_legitimize_address (x, oldx, mode);
6278 insns = get_insns ();
6279 end_sequence ();
6281 if (ret != x)
6283 fprintf (stderr,
6284 "\nrs6000_legitimize_address: mode %s, old code %s, "
6285 "new code %s, modified\n",
6286 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6287 GET_RTX_NAME (GET_CODE (ret)));
6289 fprintf (stderr, "Original address:\n");
6290 debug_rtx (x);
6292 fprintf (stderr, "oldx:\n");
6293 debug_rtx (oldx);
6295 fprintf (stderr, "New address:\n");
6296 debug_rtx (ret);
6298 if (insns)
6300 fprintf (stderr, "Insns added:\n");
6301 debug_rtx_list (insns, 20);
6304 else
6306 fprintf (stderr,
6307 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6308 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6310 debug_rtx (x);
6313 if (insns)
6314 emit_insn (insns);
6316 return ret;
6319 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6320 We need to emit DTP-relative relocations. */
6322 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6323 static void
6324 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6326 switch (size)
6328 case 4:
6329 fputs ("\t.long\t", file);
6330 break;
6331 case 8:
6332 fputs (DOUBLE_INT_ASM_OP, file);
6333 break;
6334 default:
6335 gcc_unreachable ();
6337 output_addr_const (file, x);
6338 fputs ("@dtprel+0x8000", file);
6341 /* In the name of slightly smaller debug output, and to cater to
6342 general assembler lossage, recognize various UNSPEC sequences
6343 and turn them back into a direct symbol reference. */
6345 static rtx
6346 rs6000_delegitimize_address (rtx orig_x)
6348 rtx x, y, offset;
6350 orig_x = delegitimize_mem_from_attrs (orig_x);
6351 x = orig_x;
6352 if (MEM_P (x))
6353 x = XEXP (x, 0);
6355 y = x;
6356 if (TARGET_CMODEL != CMODEL_SMALL
6357 && GET_CODE (y) == LO_SUM)
6358 y = XEXP (y, 1);
6360 offset = NULL_RTX;
6361 if (GET_CODE (y) == PLUS
6362 && GET_MODE (y) == Pmode
6363 && CONST_INT_P (XEXP (y, 1)))
6365 offset = XEXP (y, 1);
6366 y = XEXP (y, 0);
6369 if (GET_CODE (y) == UNSPEC
6370 && XINT (y, 1) == UNSPEC_TOCREL)
6372 #ifdef ENABLE_CHECKING
6373 if (REG_P (XVECEXP (y, 0, 1))
6374 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6376 /* All good. */
6378 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6380 /* Weirdness alert. df_note_compute can replace r2 with a
6381 debug_expr when this unspec is in a debug_insn.
6382 Seen in gcc.dg/pr51957-1.c */
6384 else
6386 debug_rtx (orig_x);
6387 abort ();
6389 #endif
6390 y = XVECEXP (y, 0, 0);
6392 #ifdef HAVE_AS_TLS
6393 /* Do not associate thread-local symbols with the original
6394 constant pool symbol. */
6395 if (TARGET_XCOFF
6396 && GET_CODE (y) == SYMBOL_REF
6397 && CONSTANT_POOL_ADDRESS_P (y)
6398 && SYMBOL_REF_TLS_MODEL (get_pool_constant (y)) >= TLS_MODEL_REAL)
6399 return orig_x;
6400 #endif
6402 if (offset != NULL_RTX)
6403 y = gen_rtx_PLUS (Pmode, y, offset);
6404 if (!MEM_P (orig_x))
6405 return y;
6406 else
6407 return replace_equiv_address_nv (orig_x, y);
6410 if (TARGET_MACHO
6411 && GET_CODE (orig_x) == LO_SUM
6412 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6414 y = XEXP (XEXP (orig_x, 1), 0);
6415 if (GET_CODE (y) == UNSPEC
6416 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6417 return XVECEXP (y, 0, 0);
6420 return orig_x;
6423 /* Return true if X shouldn't be emitted into the debug info.
6424 The linker doesn't like .toc section references from
6425 .debug_* sections, so reject .toc section symbols. */
6427 static bool
6428 rs6000_const_not_ok_for_debug_p (rtx x)
6430 if (GET_CODE (x) == SYMBOL_REF
6431 && CONSTANT_POOL_ADDRESS_P (x))
6433 rtx c = get_pool_constant (x);
6434 enum machine_mode cmode = get_pool_mode (x);
6435 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6436 return true;
6439 return false;
6442 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6444 static GTY(()) rtx rs6000_tls_symbol;
6445 static rtx
6446 rs6000_tls_get_addr (void)
6448 if (!rs6000_tls_symbol)
6449 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
6451 return rs6000_tls_symbol;
6454 /* Construct the SYMBOL_REF for TLS GOT references. */
6456 static GTY(()) rtx rs6000_got_symbol;
6457 static rtx
6458 rs6000_got_sym (void)
6460 if (!rs6000_got_symbol)
6462 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
6463 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
6464 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
6467 return rs6000_got_symbol;
6470 /* AIX Thread-Local Address support. */
6472 static rtx
6473 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
6475 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
6476 const char *name;
6477 char *tlsname;
6479 name = XSTR (addr, 0);
6480 /* Append TLS CSECT qualifier, unless the symbol already is qualified
6481 or the symbol will be in the TLS private data section. */
6482 if (name[strlen (name) - 1] != ']'
6483 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
6484 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
6486 tlsname = XALLOCAVEC (char, strlen (name) + 4);
6487 strcpy (tlsname, name);
6488 strcat (tlsname,
6489 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
6490 tlsaddr = copy_rtx (addr);
6491 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
6493 else
6494 tlsaddr = addr;
6496 /* Place addr into TOC constant pool. */
6497 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
6499 /* Output the TOC entry and create the MEM referencing the value. */
6500 if (constant_pool_expr_p (XEXP (sym, 0))
6501 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
6503 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
6504 mem = gen_const_mem (Pmode, tocref);
6505 set_mem_alias_set (mem, get_TOC_alias_set ());
6507 else
6508 return sym;
6510 /* Use global-dynamic for local-dynamic. */
6511 if (model == TLS_MODEL_GLOBAL_DYNAMIC
6512 || model == TLS_MODEL_LOCAL_DYNAMIC)
6514 /* Create new TOC reference for @m symbol. */
6515 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
6516 tlsname = XALLOCAVEC (char, strlen (name) + 1);
6517 strcpy (tlsname, "*LCM");
6518 strcat (tlsname, name + 3);
6519 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
6520 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
6521 tocref = create_TOC_reference (modaddr, NULL_RTX);
6522 rtx modmem = gen_const_mem (Pmode, tocref);
6523 set_mem_alias_set (modmem, get_TOC_alias_set ());
6525 rtx modreg = gen_reg_rtx (Pmode);
6526 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
6528 tmpreg = gen_reg_rtx (Pmode);
6529 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6531 dest = gen_reg_rtx (Pmode);
6532 if (TARGET_32BIT)
6533 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
6534 else
6535 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
6536 return dest;
6538 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
6539 else if (TARGET_32BIT)
6541 tlsreg = gen_reg_rtx (SImode);
6542 emit_insn (gen_tls_get_tpointer (tlsreg));
6544 else
6545 tlsreg = gen_rtx_REG (DImode, 13);
6547 /* Load the TOC value into temporary register. */
6548 tmpreg = gen_reg_rtx (Pmode);
6549 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6550 set_unique_reg_note (get_last_insn (), REG_EQUAL,
6551 gen_rtx_MINUS (Pmode, addr, tlsreg));
6553 /* Add TOC symbol value to TLS pointer. */
6554 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
6556 return dest;
6559 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
6560 this (thread-local) address. */
6562 static rtx
6563 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
6565 rtx dest, insn;
6567 if (TARGET_XCOFF)
6568 return rs6000_legitimize_tls_address_aix (addr, model);
6570 dest = gen_reg_rtx (Pmode);
6571 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
6573 rtx tlsreg;
6575 if (TARGET_64BIT)
6577 tlsreg = gen_rtx_REG (Pmode, 13);
6578 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
6580 else
6582 tlsreg = gen_rtx_REG (Pmode, 2);
6583 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
6585 emit_insn (insn);
6587 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
6589 rtx tlsreg, tmp;
6591 tmp = gen_reg_rtx (Pmode);
6592 if (TARGET_64BIT)
6594 tlsreg = gen_rtx_REG (Pmode, 13);
6595 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
6597 else
6599 tlsreg = gen_rtx_REG (Pmode, 2);
6600 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
6602 emit_insn (insn);
6603 if (TARGET_64BIT)
6604 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
6605 else
6606 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
6607 emit_insn (insn);
6609 else
6611 rtx r3, got, tga, tmp1, tmp2, call_insn;
6613 /* We currently use relocations like @got@tlsgd for tls, which
6614 means the linker will handle allocation of tls entries, placing
6615 them in the .got section. So use a pointer to the .got section,
6616 not one to secondary TOC sections used by 64-bit -mminimal-toc,
6617 or to secondary GOT sections used by 32-bit -fPIC. */
6618 if (TARGET_64BIT)
6619 got = gen_rtx_REG (Pmode, 2);
6620 else
6622 if (flag_pic == 1)
6623 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
6624 else
6626 rtx gsym = rs6000_got_sym ();
6627 got = gen_reg_rtx (Pmode);
6628 if (flag_pic == 0)
6629 rs6000_emit_move (got, gsym, Pmode);
6630 else
6632 rtx mem, lab, last;
6634 tmp1 = gen_reg_rtx (Pmode);
6635 tmp2 = gen_reg_rtx (Pmode);
6636 mem = gen_const_mem (Pmode, tmp1);
6637 lab = gen_label_rtx ();
6638 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
6639 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
6640 if (TARGET_LINK_STACK)
6641 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
6642 emit_move_insn (tmp2, mem);
6643 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
6644 set_unique_reg_note (last, REG_EQUAL, gsym);
6649 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
6651 tga = rs6000_tls_get_addr ();
6652 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
6653 1, const0_rtx, Pmode);
6655 r3 = gen_rtx_REG (Pmode, 3);
6656 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6657 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
6658 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6659 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
6660 else if (DEFAULT_ABI == ABI_V4)
6661 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
6662 else
6663 gcc_unreachable ();
6664 call_insn = last_call_insn ();
6665 PATTERN (call_insn) = insn;
6666 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6667 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6668 pic_offset_table_rtx);
6670 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
6672 tga = rs6000_tls_get_addr ();
6673 tmp1 = gen_reg_rtx (Pmode);
6674 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
6675 1, const0_rtx, Pmode);
6677 r3 = gen_rtx_REG (Pmode, 3);
6678 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6679 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
6680 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6681 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
6682 else if (DEFAULT_ABI == ABI_V4)
6683 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
6684 else
6685 gcc_unreachable ();
6686 call_insn = last_call_insn ();
6687 PATTERN (call_insn) = insn;
6688 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6689 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6690 pic_offset_table_rtx);
6692 if (rs6000_tls_size == 16)
6694 if (TARGET_64BIT)
6695 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
6696 else
6697 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
6699 else if (rs6000_tls_size == 32)
6701 tmp2 = gen_reg_rtx (Pmode);
6702 if (TARGET_64BIT)
6703 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
6704 else
6705 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6706 emit_insn (insn);
6707 if (TARGET_64BIT)
6708 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6709 else
6710 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6712 else
6714 tmp2 = gen_reg_rtx (Pmode);
6715 if (TARGET_64BIT)
6716 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6717 else
6718 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6719 emit_insn (insn);
6720 insn = gen_rtx_SET (Pmode, dest,
6721 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6723 emit_insn (insn);
6725 else
6727 /* IE, or 64-bit offset LE. */
6728 tmp2 = gen_reg_rtx (Pmode);
6729 if (TARGET_64BIT)
6730 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6731 else
6732 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6733 emit_insn (insn);
6734 if (TARGET_64BIT)
6735 insn = gen_tls_tls_64 (dest, tmp2, addr);
6736 else
6737 insn = gen_tls_tls_32 (dest, tmp2, addr);
6738 emit_insn (insn);
6742 return dest;
6745 /* Return 1 if X contains a thread-local symbol. */
6747 static bool
6748 rs6000_tls_referenced_p (rtx x)
6750 if (! TARGET_HAVE_TLS)
6751 return false;
6753 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6756 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6758 static bool
6759 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6761 if (GET_CODE (x) == HIGH
6762 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6763 return true;
6765 /* A TLS symbol in the TOC cannot contain a sum. */
6766 if (GET_CODE (x) == CONST
6767 && GET_CODE (XEXP (x, 0)) == PLUS
6768 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6769 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
6770 return true;
6772 /* Do not place an ELF TLS symbol in the constant pool. */
6773 return TARGET_ELF && rs6000_tls_referenced_p (x);
6776 /* Return 1 if *X is a thread-local symbol. This is the same as
6777 rs6000_tls_symbol_ref except for the type of the unused argument. */
6779 static int
6780 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6782 return RS6000_SYMBOL_REF_TLS_P (*x);
6785 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6786 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6787 can be addressed relative to the toc pointer. */
6789 static bool
6790 use_toc_relative_ref (rtx sym)
6792 return ((constant_pool_expr_p (sym)
6793 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6794 get_pool_mode (sym)))
6795 || (TARGET_CMODEL == CMODEL_MEDIUM
6796 && SYMBOL_REF_LOCAL_P (sym)));
6799 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6800 replace the input X, or the original X if no replacement is called for.
6801 The output parameter *WIN is 1 if the calling macro should goto WIN,
6802 0 if it should not.
6804 For RS/6000, we wish to handle large displacements off a base
6805 register by splitting the addend across an addi/addis and the mem insn.
6806 This cuts the number of extra insns needed from 3 to 1.
6808 On Darwin, we use this to generate code for floating point constants.
6809 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6810 The Darwin code is inside #if TARGET_MACHO because only then are the
6811 machopic_* functions defined. */
6812 static rtx
6813 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6814 int opnum, int type,
6815 int ind_levels ATTRIBUTE_UNUSED, int *win)
6817 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6819 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6820 DFmode/DImode MEM. */
6821 if (reg_offset_p
6822 && opnum == 1
6823 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6824 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6825 reg_offset_p = false;
6827 /* We must recognize output that we have already generated ourselves. */
6828 if (GET_CODE (x) == PLUS
6829 && GET_CODE (XEXP (x, 0)) == PLUS
6830 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6831 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6832 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6834 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6835 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6836 opnum, (enum reload_type) type);
6837 *win = 1;
6838 return x;
6841 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6842 if (GET_CODE (x) == LO_SUM
6843 && GET_CODE (XEXP (x, 0)) == HIGH)
6845 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6846 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6847 opnum, (enum reload_type) type);
6848 *win = 1;
6849 return x;
6852 #if TARGET_MACHO
6853 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6854 && GET_CODE (x) == LO_SUM
6855 && GET_CODE (XEXP (x, 0)) == PLUS
6856 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6857 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6858 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6859 && machopic_operand_p (XEXP (x, 1)))
6861 /* Result of previous invocation of this function on Darwin
6862 floating point constant. */
6863 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6864 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6865 opnum, (enum reload_type) type);
6866 *win = 1;
6867 return x;
6869 #endif
6871 if (TARGET_CMODEL != CMODEL_SMALL
6872 && reg_offset_p
6873 && small_toc_ref (x, VOIDmode))
6875 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6876 x = gen_rtx_LO_SUM (Pmode, hi, x);
6877 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6878 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6879 opnum, (enum reload_type) type);
6880 *win = 1;
6881 return x;
6884 if (GET_CODE (x) == PLUS
6885 && GET_CODE (XEXP (x, 0)) == REG
6886 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6887 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6888 && GET_CODE (XEXP (x, 1)) == CONST_INT
6889 && reg_offset_p
6890 && !SPE_VECTOR_MODE (mode)
6891 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6892 || mode == DDmode || mode == TDmode
6893 || mode == DImode))
6894 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
6896 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6897 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6898 HOST_WIDE_INT high
6899 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6901 /* Check for 32-bit overflow. */
6902 if (high + low != val)
6904 *win = 0;
6905 return x;
6908 /* Reload the high part into a base reg; leave the low part
6909 in the mem directly. */
6911 x = gen_rtx_PLUS (GET_MODE (x),
6912 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6913 GEN_INT (high)),
6914 GEN_INT (low));
6916 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6917 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6918 opnum, (enum reload_type) type);
6919 *win = 1;
6920 return x;
6923 if (GET_CODE (x) == SYMBOL_REF
6924 && reg_offset_p
6925 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
6926 && !SPE_VECTOR_MODE (mode)
6927 #if TARGET_MACHO
6928 && DEFAULT_ABI == ABI_DARWIN
6929 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6930 && machopic_symbol_defined_p (x)
6931 #else
6932 && DEFAULT_ABI == ABI_V4
6933 && !flag_pic
6934 #endif
6935 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6936 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6937 without fprs.
6938 ??? Assume floating point reg based on mode? This assumption is
6939 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6940 where reload ends up doing a DFmode load of a constant from
6941 mem using two gprs. Unfortunately, at this point reload
6942 hasn't yet selected regs so poking around in reload data
6943 won't help and even if we could figure out the regs reliably,
6944 we'd still want to allow this transformation when the mem is
6945 naturally aligned. Since we say the address is good here, we
6946 can't disable offsets from LO_SUMs in mem_operand_gpr.
6947 FIXME: Allow offset from lo_sum for other modes too, when
6948 mem is sufficiently aligned. */
6949 && mode != TFmode
6950 && mode != TDmode
6951 && (mode != TImode || !TARGET_VSX_TIMODE)
6952 && mode != PTImode
6953 && (mode != DImode || TARGET_POWERPC64)
6954 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6955 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6957 #if TARGET_MACHO
6958 if (flag_pic)
6960 rtx offset = machopic_gen_offset (x);
6961 x = gen_rtx_LO_SUM (GET_MODE (x),
6962 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6963 gen_rtx_HIGH (Pmode, offset)), offset);
6965 else
6966 #endif
6967 x = gen_rtx_LO_SUM (GET_MODE (x),
6968 gen_rtx_HIGH (Pmode, x), x);
6970 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6971 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6972 opnum, (enum reload_type) type);
6973 *win = 1;
6974 return x;
6977 /* Reload an offset address wrapped by an AND that represents the
6978 masking of the lower bits. Strip the outer AND and let reload
6979 convert the offset address into an indirect address. For VSX,
6980 force reload to create the address with an AND in a separate
6981 register, because we can't guarantee an altivec register will
6982 be used. */
6983 if (VECTOR_MEM_ALTIVEC_P (mode)
6984 && GET_CODE (x) == AND
6985 && GET_CODE (XEXP (x, 0)) == PLUS
6986 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6987 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6988 && GET_CODE (XEXP (x, 1)) == CONST_INT
6989 && INTVAL (XEXP (x, 1)) == -16)
6991 x = XEXP (x, 0);
6992 *win = 1;
6993 return x;
6996 if (TARGET_TOC
6997 && reg_offset_p
6998 && GET_CODE (x) == SYMBOL_REF
6999 && use_toc_relative_ref (x))
7001 x = create_TOC_reference (x, NULL_RTX);
7002 if (TARGET_CMODEL != CMODEL_SMALL)
7003 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7004 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7005 opnum, (enum reload_type) type);
7006 *win = 1;
7007 return x;
7009 *win = 0;
7010 return x;
7013 /* Debug version of rs6000_legitimize_reload_address. */
7014 static rtx
7015 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7016 int opnum, int type,
7017 int ind_levels, int *win)
7019 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7020 ind_levels, win);
7021 fprintf (stderr,
7022 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7023 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7024 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7025 debug_rtx (x);
7027 if (x == ret)
7028 fprintf (stderr, "Same address returned\n");
7029 else if (!ret)
7030 fprintf (stderr, "NULL returned\n");
7031 else
7033 fprintf (stderr, "New address:\n");
7034 debug_rtx (ret);
7037 return ret;
7040 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7041 that is a valid memory address for an instruction.
7042 The MODE argument is the machine mode for the MEM expression
7043 that wants to use this address.
7045 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7046 refers to a constant pool entry of an address (or the sum of it
7047 plus a constant), a short (16-bit signed) constant plus a register,
7048 the sum of two registers, or a register indirect, possibly with an
7049 auto-increment. For DFmode, DDmode and DImode with a constant plus
7050 register, we must ensure that both words are addressable, or that on
7051 PowerPC64 the offset is word aligned.
7053 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7054 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7055 because adjacent memory cells are accessed by adding word-sized offsets
7056 during assembly output. */
7057 static bool
7058 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7060 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7062 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7063 if (VECTOR_MEM_ALTIVEC_P (mode)
7064 && GET_CODE (x) == AND
7065 && GET_CODE (XEXP (x, 1)) == CONST_INT
7066 && INTVAL (XEXP (x, 1)) == -16)
7067 x = XEXP (x, 0);
7069 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7070 return 0;
7071 if (legitimate_indirect_address_p (x, reg_ok_strict))
7072 return 1;
7073 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7074 && !ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7075 && !SPE_VECTOR_MODE (mode)
7076 && mode != TFmode
7077 && mode != TDmode
7078 && mode != TImode
7079 && mode != PTImode
7080 /* Restrict addressing for DI because of our SUBREG hackery. */
7081 && !(TARGET_E500_DOUBLE
7082 && (mode == DFmode || mode == DDmode || mode == DImode))
7083 && TARGET_UPDATE
7084 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7085 return 1;
7086 if (virtual_stack_registers_memory_p (x))
7087 return 1;
7088 if (reg_offset_p && legitimate_small_data_p (mode, x))
7089 return 1;
7090 if (reg_offset_p
7091 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
7092 return 1;
7093 /* For TImode, if we have load/store quad, only allow register indirect
7094 addresses. This will allow the values to go in either GPRs or VSX
7095 registers without reloading. The vector types would tend to go into VSX
7096 registers, so we allow REG+REG, while TImode seems somewhat split, in that
7097 some uses are GPR based, and some VSX based. */
7098 if (mode == TImode && TARGET_QUAD_MEMORY)
7099 return 0;
7100 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7101 if (! reg_ok_strict
7102 && reg_offset_p
7103 && GET_CODE (x) == PLUS
7104 && GET_CODE (XEXP (x, 0)) == REG
7105 && (XEXP (x, 0) == virtual_stack_vars_rtx
7106 || XEXP (x, 0) == arg_pointer_rtx)
7107 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7108 return 1;
7109 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7110 return 1;
7111 if (mode != TFmode
7112 && mode != TDmode
7113 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7114 || TARGET_POWERPC64
7115 || (mode != DFmode && mode != DDmode)
7116 || (TARGET_E500_DOUBLE && mode != DDmode))
7117 && (TARGET_POWERPC64 || mode != DImode)
7118 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7119 && mode != PTImode
7120 && !avoiding_indexed_address_p (mode)
7121 && legitimate_indexed_address_p (x, reg_ok_strict))
7122 return 1;
7123 if (GET_CODE (x) == PRE_MODIFY
7124 && mode != TImode
7125 && mode != PTImode
7126 && mode != TFmode
7127 && mode != TDmode
7128 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7129 || TARGET_POWERPC64
7130 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
7131 && (TARGET_POWERPC64 || mode != DImode)
7132 && !ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7133 && !SPE_VECTOR_MODE (mode)
7134 /* Restrict addressing for DI because of our SUBREG hackery. */
7135 && !(TARGET_E500_DOUBLE
7136 && (mode == DFmode || mode == DDmode || mode == DImode))
7137 && TARGET_UPDATE
7138 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7139 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7140 reg_ok_strict, false)
7141 || (!avoiding_indexed_address_p (mode)
7142 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7143 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7144 return 1;
7145 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7146 return 1;
7147 return 0;
7150 /* Debug version of rs6000_legitimate_address_p. */
7151 static bool
7152 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7153 bool reg_ok_strict)
7155 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7156 fprintf (stderr,
7157 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7158 "strict = %d, code = %s\n",
7159 ret ? "true" : "false",
7160 GET_MODE_NAME (mode),
7161 reg_ok_strict,
7162 GET_RTX_NAME (GET_CODE (x)));
7163 debug_rtx (x);
7165 return ret;
7168 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7170 static bool
7171 rs6000_mode_dependent_address_p (const_rtx addr,
7172 addr_space_t as ATTRIBUTE_UNUSED)
7174 return rs6000_mode_dependent_address_ptr (addr);
7177 /* Return true if ADDR (a legitimate address expression)
7178 has an effect that depends on the machine mode it is used for.
7180 On the RS/6000 this is true of all integral offsets (since AltiVec
7181 and VSX modes don't allow them) and of any pre-increment or decrement.
7183 ??? Except that due to conceptual problems in offsettable_address_p
7184 we can't really report the problems of integral offsets. So leave
7185 this assuming that the adjustable offset must be valid for the
7186 sub-words of a TFmode operand, which is what we had before. */
7188 static bool
7189 rs6000_mode_dependent_address (const_rtx addr)
7191 switch (GET_CODE (addr))
7193 case PLUS:
7194 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7195 is considered a legitimate address before reload, so there
7196 are no offset restrictions in that case. Note that this
7197 condition is safe in strict mode because any address involving
7198 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7199 been rejected as illegitimate. */
7200 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7201 && XEXP (addr, 0) != arg_pointer_rtx
7202 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7204 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
7205 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7207 break;
7209 case LO_SUM:
7210 /* Anything in the constant pool is sufficiently aligned that
7211 all bytes have the same high part address. */
7212 return !legitimate_constant_pool_address_p (addr, QImode, false);
7214 /* Auto-increment cases are now treated generically in recog.c. */
7215 case PRE_MODIFY:
7216 return TARGET_UPDATE;
7218 /* AND is only allowed in Altivec loads. */
7219 case AND:
7220 return true;
7222 default:
7223 break;
7226 return false;
7229 /* Debug version of rs6000_mode_dependent_address. */
7230 static bool
7231 rs6000_debug_mode_dependent_address (const_rtx addr)
7233 bool ret = rs6000_mode_dependent_address (addr);
7235 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7236 ret ? "true" : "false");
7237 debug_rtx (addr);
7239 return ret;
7242 /* Implement FIND_BASE_TERM. */
7244 rtx
7245 rs6000_find_base_term (rtx op)
7247 rtx base;
7249 base = op;
7250 if (GET_CODE (base) == CONST)
7251 base = XEXP (base, 0);
7252 if (GET_CODE (base) == PLUS)
7253 base = XEXP (base, 0);
7254 if (GET_CODE (base) == UNSPEC)
7255 switch (XINT (base, 1))
7257 case UNSPEC_TOCREL:
7258 case UNSPEC_MACHOPIC_OFFSET:
7259 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7260 for aliasing purposes. */
7261 return XVECEXP (base, 0, 0);
7264 return op;
7267 /* More elaborate version of recog's offsettable_memref_p predicate
7268 that works around the ??? note of rs6000_mode_dependent_address.
7269 In particular it accepts
7271 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7273 in 32-bit mode, that the recog predicate rejects. */
7275 static bool
7276 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7278 bool worst_case;
7280 if (!MEM_P (op))
7281 return false;
7283 /* First mimic offsettable_memref_p. */
7284 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7285 return true;
7287 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7288 the latter predicate knows nothing about the mode of the memory
7289 reference and, therefore, assumes that it is the largest supported
7290 mode (TFmode). As a consequence, legitimate offsettable memory
7291 references are rejected. rs6000_legitimate_offset_address_p contains
7292 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7293 at least with a little bit of help here given that we know the
7294 actual registers used. */
7295 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7296 || GET_MODE_SIZE (reg_mode) == 4);
7297 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7298 true, worst_case);
7301 /* Change register usage conditional on target flags. */
7302 static void
7303 rs6000_conditional_register_usage (void)
7305 int i;
7307 if (TARGET_DEBUG_TARGET)
7308 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7310 /* Set MQ register fixed (already call_used) so that it will not be
7311 allocated. */
7312 fixed_regs[64] = 1;
7314 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7315 if (TARGET_64BIT)
7316 fixed_regs[13] = call_used_regs[13]
7317 = call_really_used_regs[13] = 1;
7319 /* Conditionally disable FPRs. */
7320 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7321 for (i = 32; i < 64; i++)
7322 fixed_regs[i] = call_used_regs[i]
7323 = call_really_used_regs[i] = 1;
7325 /* The TOC register is not killed across calls in a way that is
7326 visible to the compiler. */
7327 if (DEFAULT_ABI == ABI_AIX)
7328 call_really_used_regs[2] = 0;
7330 if (DEFAULT_ABI == ABI_V4
7331 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7332 && flag_pic == 2)
7333 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7335 if (DEFAULT_ABI == ABI_V4
7336 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7337 && flag_pic == 1)
7338 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7339 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7340 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7342 if (DEFAULT_ABI == ABI_DARWIN
7343 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7344 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7345 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7346 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7348 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7349 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7350 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7352 if (TARGET_SPE)
7354 global_regs[SPEFSCR_REGNO] = 1;
7355 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7356 registers in prologues and epilogues. We no longer use r14
7357 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7358 pool for link-compatibility with older versions of GCC. Once
7359 "old" code has died out, we can return r14 to the allocation
7360 pool. */
7361 fixed_regs[14]
7362 = call_used_regs[14]
7363 = call_really_used_regs[14] = 1;
7366 if (!TARGET_ALTIVEC && !TARGET_VSX)
7368 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7369 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7370 call_really_used_regs[VRSAVE_REGNO] = 1;
7373 if (TARGET_ALTIVEC || TARGET_VSX)
7374 global_regs[VSCR_REGNO] = 1;
7376 if (TARGET_ALTIVEC_ABI)
7378 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7379 call_used_regs[i] = call_really_used_regs[i] = 1;
7381 /* AIX reserves VR20:31 in non-extended ABI mode. */
7382 if (TARGET_XCOFF)
7383 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7384 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7388 /* Try to output insns to set TARGET equal to the constant C if it can
7389 be done in less than N insns. Do all computations in MODE.
7390 Returns the place where the output has been placed if it can be
7391 done and the insns have been emitted. If it would take more than N
7392 insns, zero is returned and no insns are emitted. */
7394 rtx
7395 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
7396 rtx source, int n ATTRIBUTE_UNUSED)
7398 rtx result, insn, set;
7399 HOST_WIDE_INT c0, c1;
7401 switch (mode)
7403 case QImode:
7404 case HImode:
7405 if (dest == NULL)
7406 dest = gen_reg_rtx (mode);
7407 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7408 return dest;
7410 case SImode:
7411 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7413 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
7414 GEN_INT (INTVAL (source)
7415 & (~ (HOST_WIDE_INT) 0xffff))));
7416 emit_insn (gen_rtx_SET (VOIDmode, dest,
7417 gen_rtx_IOR (SImode, copy_rtx (result),
7418 GEN_INT (INTVAL (source) & 0xffff))));
7419 result = dest;
7420 break;
7422 case DImode:
7423 switch (GET_CODE (source))
7425 case CONST_INT:
7426 c0 = INTVAL (source);
7427 c1 = -(c0 < 0);
7428 break;
7430 default:
7431 gcc_unreachable ();
7434 result = rs6000_emit_set_long_const (dest, c0, c1);
7435 break;
7437 default:
7438 gcc_unreachable ();
7441 insn = get_last_insn ();
7442 set = single_set (insn);
7443 if (! CONSTANT_P (SET_SRC (set)))
7444 set_unique_reg_note (insn, REG_EQUAL, source);
7446 return result;
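/* A minimal sketch (plain C, not compiler code) of the SImode path
   above: the high 16 bits of the constant are loaded first, as a lis
   instruction would, and the low 16 bits are then IORed in, as ori
   would.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  int32_t c = 0x12345678;
  int32_t hi = c & ~0xffff;  /* first SET: high half only */
  int32_t lo = c & 0xffff;   /* second SET: IOR of the low half */
  printf ("0x%x\n", (unsigned) (hi | lo)); /* prints 0x12345678 */
  return 0;
}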
7449 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
7450 fall back to a straightforward decomposition. We do this to avoid
7451 exponential run times encountered when looking for longer sequences
7452 with rs6000_emit_set_const. */
7453 static rtx
7454 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
7456 if (!TARGET_POWERPC64)
7458 rtx operand1, operand2;
7460 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
7461 DImode);
7462 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
7463 DImode);
7464 emit_move_insn (operand1, GEN_INT (c1));
7465 emit_move_insn (operand2, GEN_INT (c2));
7467 else
7469 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7471 ud1 = c1 & 0xffff;
7472 ud2 = (c1 & 0xffff0000) >> 16;
7473 c2 = c1 >> 32;
7474 ud3 = c2 & 0xffff;
7475 ud4 = (c2 & 0xffff0000) >> 16;
7477 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7478 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7479 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7481 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7482 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7484 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7485 - 0x80000000));
7486 if (ud1 != 0)
7487 emit_move_insn (copy_rtx (dest),
7488 gen_rtx_IOR (DImode, copy_rtx (dest),
7489 GEN_INT (ud1)));
7491 else if (ud3 == 0 && ud4 == 0)
7493 gcc_assert (ud2 & 0x8000);
7494 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7495 - 0x80000000));
7496 if (ud1 != 0)
7497 emit_move_insn (copy_rtx (dest),
7498 gen_rtx_IOR (DImode, copy_rtx (dest),
7499 GEN_INT (ud1)));
7500 emit_move_insn (copy_rtx (dest),
7501 gen_rtx_ZERO_EXTEND (DImode,
7502 gen_lowpart (SImode,
7503 copy_rtx (dest))));
7505 else if ((ud4 == 0xffff && (ud3 & 0x8000))
7506 || (ud4 == 0 && ! (ud3 & 0x8000)))
7508 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
7509 - 0x80000000));
7510 if (ud2 != 0)
7511 emit_move_insn (copy_rtx (dest),
7512 gen_rtx_IOR (DImode, copy_rtx (dest),
7513 GEN_INT (ud2)));
7514 emit_move_insn (copy_rtx (dest),
7515 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7516 GEN_INT (16)));
7517 if (ud1 != 0)
7518 emit_move_insn (copy_rtx (dest),
7519 gen_rtx_IOR (DImode, copy_rtx (dest),
7520 GEN_INT (ud1)));
7522 else
7524 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
7525 - 0x80000000));
7526 if (ud3 != 0)
7527 emit_move_insn (copy_rtx (dest),
7528 gen_rtx_IOR (DImode, copy_rtx (dest),
7529 GEN_INT (ud3)));
7531 emit_move_insn (copy_rtx (dest),
7532 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7533 GEN_INT (32)));
7534 if (ud2 != 0)
7535 emit_move_insn (copy_rtx (dest),
7536 gen_rtx_IOR (DImode, copy_rtx (dest),
7537 GEN_INT (ud2 << 16)));
7538 if (ud1 != 0)
7539 emit_move_insn (copy_rtx (dest),
7540 gen_rtx_IOR (DImode, copy_rtx (dest),
7541 GEN_INT (ud1)));
7544 return dest;
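/* A standalone sketch (plain C, not GCC code) of the splitting above:
   the 64-bit constant is viewed as four 16-bit chunks ud4:ud3:ud2:ud1,
   and which chunks are redundant sign extension decides how many insns
   are emitted.  The (x ^ 0x8000) - 0x8000 idiom sign-extends a 16-bit
   chunk, matching what a li/lis immediate produces.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  int64_t c1 = 0x123456789abcdef0LL;
  int64_t c2 = c1 >> 32;
  unsigned ud1 = c1 & 0xffff;
  unsigned ud2 = (c1 & 0xffff0000) >> 16;
  unsigned ud3 = c2 & 0xffff;
  unsigned ud4 = (c2 & 0xffff0000) >> 16;

  printf ("ud4=%04x ud3=%04x ud2=%04x ud1=%04x\n", ud4, ud3, ud2, ud1);
  /* prints ud4=1234 ud3=5678 ud2=9abc ud1=def0 */
  printf ("%d\n", (0x9abc ^ 0x8000) - 0x8000); /* prints -25924 */
  return 0;
}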
7547 /* Helper for the following. Get rid of [r+r] memory refs
7548 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
7550 static void
7551 rs6000_eliminate_indexed_memrefs (rtx operands[2])
7553 if (reload_in_progress)
7554 return;
7556 if (GET_CODE (operands[0]) == MEM
7557 && GET_CODE (XEXP (operands[0], 0)) != REG
7558 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
7559 GET_MODE (operands[0]), false))
7560 operands[0]
7561 = replace_equiv_address (operands[0],
7562 copy_addr_to_reg (XEXP (operands[0], 0)));
7564 if (GET_CODE (operands[1]) == MEM
7565 && GET_CODE (XEXP (operands[1], 0)) != REG
7566 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
7567 GET_MODE (operands[1]), false))
7568 operands[1]
7569 = replace_equiv_address (operands[1],
7570 copy_addr_to_reg (XEXP (operands[1], 0)));
7573 /* Emit a move from SOURCE to DEST in mode MODE. */
7574 void
7575 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
7577 rtx operands[2];
7578 operands[0] = dest;
7579 operands[1] = source;
7581 if (TARGET_DEBUG_ADDR)
7583 fprintf (stderr,
7584 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
7585 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
7586 GET_MODE_NAME (mode),
7587 reload_in_progress,
7588 reload_completed,
7589 can_create_pseudo_p ());
7590 debug_rtx (dest);
7591 fprintf (stderr, "source:\n");
7592 debug_rtx (source);
7595 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
7596 if (GET_CODE (operands[1]) == CONST_DOUBLE
7597 && ! FLOAT_MODE_P (mode)
7598 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
7600 /* FIXME. This should never happen. */
7601 /* Since it seems that it does, do the safe thing and convert
7602 to a CONST_INT. */
7603 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
7605 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
7606 || FLOAT_MODE_P (mode)
7607 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
7608 || CONST_DOUBLE_LOW (operands[1]) < 0)
7609 && (CONST_DOUBLE_HIGH (operands[1]) != -1
7610 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
7612 /* Check if GCC is setting up a block move that will end up using FP
7613 registers as temporaries. We must make sure this is acceptable. */
7614 if (GET_CODE (operands[0]) == MEM
7615 && GET_CODE (operands[1]) == MEM
7616 && mode == DImode
7617 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
7618 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
7619 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
7620 ? 32 : MEM_ALIGN (operands[0])))
7621 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
7622 ? 32
7623 : MEM_ALIGN (operands[1]))))
7624 && ! MEM_VOLATILE_P (operands [0])
7625 && ! MEM_VOLATILE_P (operands [1]))
7627 emit_move_insn (adjust_address (operands[0], SImode, 0),
7628 adjust_address (operands[1], SImode, 0));
7629 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
7630 adjust_address (copy_rtx (operands[1]), SImode, 4));
7631 return;
7634 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
7635 && !gpc_reg_operand (operands[1], mode))
7636 operands[1] = force_reg (mode, operands[1]);
7638 /* Recognize the case where operand[1] is a reference to thread-local
7639 data and load its address to a register. */
7640 if (rs6000_tls_referenced_p (operands[1]))
7642 enum tls_model model;
7643 rtx tmp = operands[1];
7644 rtx addend = NULL;
7646 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
7648 addend = XEXP (XEXP (tmp, 0), 1);
7649 tmp = XEXP (XEXP (tmp, 0), 0);
7652 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
7653 model = SYMBOL_REF_TLS_MODEL (tmp);
7654 gcc_assert (model != 0);
7656 tmp = rs6000_legitimize_tls_address (tmp, model);
7657 if (addend)
7659 tmp = gen_rtx_PLUS (mode, tmp, addend);
7660 tmp = force_operand (tmp, operands[0]);
7662 operands[1] = tmp;
7665 /* Handle the case where reload calls us with an invalid address. */
7666 if (reload_in_progress && mode == Pmode
7667 && (! general_operand (operands[1], mode)
7668 || ! nonimmediate_operand (operands[0], mode)))
7669 goto emit_set;
7671 /* 128-bit constant floating-point values on Darwin should really be
7672 loaded as two parts. */
7673 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
7674 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
7676 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
7677 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7678 DFmode);
7679 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7680 GET_MODE_SIZE (DFmode)),
7681 simplify_gen_subreg (DFmode, operands[1], mode,
7682 GET_MODE_SIZE (DFmode)),
7683 DFmode);
7684 return;
7687 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7688 cfun->machine->sdmode_stack_slot =
7689 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7691 if (reload_in_progress
7692 && mode == SDmode
7693 && cfun->machine->sdmode_stack_slot != NULL_RTX
7694 && MEM_P (operands[0])
7695 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7696 && REG_P (operands[1]))
7698 if (FP_REGNO_P (REGNO (operands[1])))
7700 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7701 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7702 emit_insn (gen_movsd_store (mem, operands[1]));
7704 else if (INT_REGNO_P (REGNO (operands[1])))
7706 rtx mem = adjust_address_nv (operands[0], mode, 4);
7707 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7708 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7710 else
7711 gcc_unreachable();
7712 return;
7714 if (reload_in_progress
7715 && mode == SDmode
7716 && REG_P (operands[0])
7717 && MEM_P (operands[1])
7718 && cfun->machine->sdmode_stack_slot != NULL_RTX
7719 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7721 if (FP_REGNO_P (REGNO (operands[0])))
7723 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7724 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7725 emit_insn (gen_movsd_load (operands[0], mem));
7727 else if (INT_REGNO_P (REGNO (operands[0])))
7729 rtx mem = adjust_address_nv (operands[1], mode, 4);
7730 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7731 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7733 else
7734 gcc_unreachable();
7735 return;
7738 /* FIXME: In the long term, this switch statement should go away
7739 and be replaced by a sequence of tests based on things like
7740 mode == Pmode. */
7741 switch (mode)
7743 case HImode:
7744 case QImode:
7745 if (CONSTANT_P (operands[1])
7746 && GET_CODE (operands[1]) != CONST_INT)
7747 operands[1] = force_const_mem (mode, operands[1]);
7748 break;
7750 case TFmode:
7751 case TDmode:
7752 rs6000_eliminate_indexed_memrefs (operands);
7753 /* fall through */
7755 case DFmode:
7756 case DDmode:
7757 case SFmode:
7758 case SDmode:
7759 if (CONSTANT_P (operands[1])
7760 && ! easy_fp_constant (operands[1], mode))
7761 operands[1] = force_const_mem (mode, operands[1]);
7762 break;
7764 case V16QImode:
7765 case V8HImode:
7766 case V4SFmode:
7767 case V4SImode:
7768 case V4HImode:
7769 case V2SFmode:
7770 case V2SImode:
7771 case V1DImode:
7772 case V2DFmode:
7773 case V2DImode:
7774 if (CONSTANT_P (operands[1])
7775 && !easy_vector_constant (operands[1], mode))
7776 operands[1] = force_const_mem (mode, operands[1]);
7777 break;
7779 case SImode:
7780 case DImode:
7781 /* Use default pattern for address of ELF small data. */
7782 if (TARGET_ELF
7783 && mode == Pmode
7784 && DEFAULT_ABI == ABI_V4
7785 && (GET_CODE (operands[1]) == SYMBOL_REF
7786 || GET_CODE (operands[1]) == CONST)
7787 && small_data_operand (operands[1], mode))
7789 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7790 return;
7793 if (DEFAULT_ABI == ABI_V4
7794 && mode == Pmode && mode == SImode
7795 && flag_pic == 1 && got_operand (operands[1], mode))
7797 emit_insn (gen_movsi_got (operands[0], operands[1]));
7798 return;
7801 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7802 && TARGET_NO_TOC
7803 && ! flag_pic
7804 && mode == Pmode
7805 && CONSTANT_P (operands[1])
7806 && GET_CODE (operands[1]) != HIGH
7807 && GET_CODE (operands[1]) != CONST_INT)
7809 rtx target = (!can_create_pseudo_p ()
7810 ? operands[0]
7811 : gen_reg_rtx (mode));
7813 /* If this is a function address on -mcall-aixdesc,
7814 convert it to the address of the descriptor. */
7815 if (DEFAULT_ABI == ABI_AIX
7816 && GET_CODE (operands[1]) == SYMBOL_REF
7817 && XSTR (operands[1], 0)[0] == '.')
7819 const char *name = XSTR (operands[1], 0);
7820 rtx new_ref;
7821 while (*name == '.')
7822 name++;
7823 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7824 CONSTANT_POOL_ADDRESS_P (new_ref)
7825 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7826 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7827 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7828 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7829 operands[1] = new_ref;
7832 if (DEFAULT_ABI == ABI_DARWIN)
7834 #if TARGET_MACHO
7835 if (MACHO_DYNAMIC_NO_PIC_P)
7837 /* Take care of any required data indirection. */
7838 operands[1] = rs6000_machopic_legitimize_pic_address (
7839 operands[1], mode, operands[0]);
7840 if (operands[0] != operands[1])
7841 emit_insn (gen_rtx_SET (VOIDmode,
7842 operands[0], operands[1]));
7843 return;
7845 #endif
7846 emit_insn (gen_macho_high (target, operands[1]));
7847 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7848 return;
7851 emit_insn (gen_elf_high (target, operands[1]));
7852 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7853 return;
7856 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7857 and we have put it in the TOC, we just need to make a TOC-relative
7858 reference to it. */
7859 if (TARGET_TOC
7860 && GET_CODE (operands[1]) == SYMBOL_REF
7861 && use_toc_relative_ref (operands[1]))
7862 operands[1] = create_TOC_reference (operands[1], operands[0]);
7863 else if (mode == Pmode
7864 && CONSTANT_P (operands[1])
7865 && GET_CODE (operands[1]) != HIGH
7866 && ((GET_CODE (operands[1]) != CONST_INT
7867 && ! easy_fp_constant (operands[1], mode))
7868 || (GET_CODE (operands[1]) == CONST_INT
7869 && (num_insns_constant (operands[1], mode)
7870 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7871 || (GET_CODE (operands[0]) == REG
7872 && FP_REGNO_P (REGNO (operands[0]))))
7873 && !toc_relative_expr_p (operands[1], false)
7874 && (TARGET_CMODEL == CMODEL_SMALL
7875 || can_create_pseudo_p ()
7876 || (REG_P (operands[0])
7877 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7880 #if TARGET_MACHO
7881 /* Darwin uses a special PIC legitimizer. */
7882 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7884 operands[1] =
7885 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7886 operands[0]);
7887 if (operands[0] != operands[1])
7888 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7889 return;
7891 #endif
7893 /* If we are to limit the number of things we put in the TOC and
7894 this is a symbol plus a constant we can add in one insn,
7895 just put the symbol in the TOC and add the constant. Don't do
7896 this if reload is in progress. */
7897 if (GET_CODE (operands[1]) == CONST
7898 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7899 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7900 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7901 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7902 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7903 && ! side_effects_p (operands[0]))
7905 rtx sym =
7906 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7907 rtx other = XEXP (XEXP (operands[1], 0), 1);
7909 sym = force_reg (mode, sym);
7910 emit_insn (gen_add3_insn (operands[0], sym, other));
7911 return;
7914 operands[1] = force_const_mem (mode, operands[1]);
7916 if (TARGET_TOC
7917 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7918 && constant_pool_expr_p (XEXP (operands[1], 0))
7919 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7920 get_pool_constant (XEXP (operands[1], 0)),
7921 get_pool_mode (XEXP (operands[1], 0))))
7923 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7924 operands[0]);
7925 operands[1] = gen_const_mem (mode, tocref);
7926 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7929 break;
7931 case TImode:
7932 if (!VECTOR_MEM_VSX_P (TImode))
7933 rs6000_eliminate_indexed_memrefs (operands);
7934 break;
7936 case PTImode:
7937 rs6000_eliminate_indexed_memrefs (operands);
7938 break;
7940 default:
7941 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7944 /* Above, we may have called force_const_mem which may have returned
7945 an invalid address. If we can, fix this up; otherwise, reload will
7946 have to deal with it. */
7947 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7948 operands[1] = validize_mem (operands[1]);
7950 emit_set:
7951 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7954 /* Return true if a structure, union or array containing FIELD should be
7955 accessed using `BLKmode'.
7957 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7958 entire thing in a DI and use subregs to access the internals.
7959 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7960 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7961 best thing to do is set structs to BLKmode and avoid Severe Tire
7962 Damage.
7964 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
7965 fit into one GPR, whereas DI still needs two. */
7967 static bool
7968 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7970 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7971 || (TARGET_E500_DOUBLE && mode == DFmode));
7974 /* Nonzero if we can use a floating-point register to pass this arg. */
7975 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7976 (SCALAR_FLOAT_MODE_P (MODE) \
7977 && (CUM)->fregno <= FP_ARG_MAX_REG \
7978 && TARGET_HARD_FLOAT && TARGET_FPRS)
7980 /* Nonzero if we can use an AltiVec register to pass this arg. */
7981 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7982 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7983 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7984 && TARGET_ALTIVEC_ABI \
7985 && (NAMED))
7987 /* Return a nonzero value to say that the function value should be
7988 returned in memory, just as large structures always are. TYPE will be
7989 the data type of the value, and FNTYPE will be the type of the
7990 function doing the returning, or @code{NULL} for libcalls.
7992 The AIX ABI for the RS/6000 specifies that all structures are
7993 returned in memory. The Darwin ABI does the same.
7995 For the Darwin 64 Bit ABI, a function result can be returned in
7996 registers or in memory, depending on the size of the return data
7997 type. If it is returned in registers, the value occupies the same
7998 registers as it would if it were the first and only function
7999 argument. Otherwise, the function places its result in memory at
8000 the location pointed to by GPR3.
8002 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8003 but a draft put them in memory, and GCC used to implement the draft
8004 instead of the final standard. Therefore, aix_struct_return
8005 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8006 compatibility can change DRAFT_V4_STRUCT_RET to override the
8007 default, and -m switches get the final word. See
8008 rs6000_option_override_internal for more details.
8010 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8011 long double support is enabled. These values are returned in memory.
8013 int_size_in_bytes returns -1 for variable size objects, which go in
8014 memory always. The cast to unsigned makes -1 > 8. */
8016 static bool
8017 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8019 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8020 if (TARGET_MACHO
8021 && rs6000_darwin64_abi
8022 && TREE_CODE (type) == RECORD_TYPE
8023 && int_size_in_bytes (type) > 0)
8025 CUMULATIVE_ARGS valcum;
8026 rtx valret;
8028 valcum.words = 0;
8029 valcum.fregno = FP_ARG_MIN_REG;
8030 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8031 /* Do a trial code generation as if this were going to be passed
8032 as an argument; if any part goes in memory, we return NULL. */
8033 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8034 if (valret)
8035 return false;
8036 /* Otherwise fall through to more conventional ABI rules. */
8039 if (AGGREGATE_TYPE_P (type)
8040 && (aix_struct_return
8041 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
8042 return true;
8044 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8045 modes only exist for GCC vector types if -maltivec. */
8046 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
8047 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
8048 return false;
8050 /* Return synthetic vectors in memory. */
8051 if (TREE_CODE (type) == VECTOR_TYPE
8052 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8054 static bool warned_for_return_big_vectors = false;
8055 if (!warned_for_return_big_vectors)
8057 warning (0, "GCC vector returned by reference: "
8058 "non-standard ABI extension with no compatibility guarantee");
8059 warned_for_return_big_vectors = true;
8061 return true;
8064 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
8065 return true;
8067 return false;
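/* A sketch (plain C, not GCC code) of the cast discussed above:
   int_size_in_bytes returns -1 for variable-sized types, and the
   unsigned comparison turns that into a huge value, so the "> 8" test
   also sends variable-sized aggregates to memory.  */
#include <stdio.h>

int
main (void)
{
  long long size = -1; /* stand-in for a variable-sized aggregate */
  printf ("%d\n", (unsigned long long) size > 8); /* prints 1 */
  return 0;
}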
8070 #ifdef HAVE_AS_GNU_ATTRIBUTE
8071 /* Return TRUE if a call to function FNDECL may be one that
8072 potentially affects the function calling ABI of the object file. */
8074 static bool
8075 call_ABI_of_interest (tree fndecl)
8077 if (cgraph_state == CGRAPH_STATE_EXPANSION)
8079 struct cgraph_node *c_node;
8081 /* Libcalls are always interesting. */
8082 if (fndecl == NULL_TREE)
8083 return true;
8085 /* Any call to an external function is interesting. */
8086 if (DECL_EXTERNAL (fndecl))
8087 return true;
8089 /* Interesting functions that we are emitting in this object file. */
8090 c_node = cgraph_get_node (fndecl);
8091 c_node = cgraph_function_or_thunk_node (c_node, NULL);
8092 return !cgraph_only_called_directly_p (c_node);
8094 return false;
8096 #endif
8098 /* Initialize a variable CUM of type CUMULATIVE_ARGS
8099 for a call to a function whose data type is FNTYPE.
8100 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
8102 For incoming args we set the number of arguments in the prototype large
8103 so we never return a PARALLEL. */
8105 void
8106 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
8107 rtx libname ATTRIBUTE_UNUSED, int incoming,
8108 int libcall, int n_named_args,
8109 tree fndecl ATTRIBUTE_UNUSED,
8110 enum machine_mode return_mode ATTRIBUTE_UNUSED)
8112 static CUMULATIVE_ARGS zero_cumulative;
8114 *cum = zero_cumulative;
8115 cum->words = 0;
8116 cum->fregno = FP_ARG_MIN_REG;
8117 cum->vregno = ALTIVEC_ARG_MIN_REG;
8118 cum->prototype = (fntype && prototype_p (fntype));
8119 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
8120 ? CALL_LIBCALL : CALL_NORMAL);
8121 cum->sysv_gregno = GP_ARG_MIN_REG;
8122 cum->stdarg = stdarg_p (fntype);
8124 cum->nargs_prototype = 0;
8125 if (incoming || cum->prototype)
8126 cum->nargs_prototype = n_named_args;
8128 /* Check for a longcall attribute. */
8129 if ((!fntype && rs6000_default_long_calls)
8130 || (fntype
8131 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
8132 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
8133 cum->call_cookie |= CALL_LONG;
8135 if (TARGET_DEBUG_ARG)
8137 fprintf (stderr, "\ninit_cumulative_args:");
8138 if (fntype)
8140 tree ret_type = TREE_TYPE (fntype);
8141 fprintf (stderr, " ret code = %s,",
8142 tree_code_name[ (int)TREE_CODE (ret_type) ]);
8145 if (cum->call_cookie & CALL_LONG)
8146 fprintf (stderr, " longcall,");
8148 fprintf (stderr, " proto = %d, nargs = %d\n",
8149 cum->prototype, cum->nargs_prototype);
8152 #ifdef HAVE_AS_GNU_ATTRIBUTE
8153 if (DEFAULT_ABI == ABI_V4)
8155 cum->escapes = call_ABI_of_interest (fndecl);
8156 if (cum->escapes)
8158 tree return_type;
8160 if (fntype)
8162 return_type = TREE_TYPE (fntype);
8163 return_mode = TYPE_MODE (return_type);
8165 else
8166 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
8168 if (return_type != NULL)
8170 if (TREE_CODE (return_type) == RECORD_TYPE
8171 && TYPE_TRANSPARENT_AGGR (return_type))
8173 return_type = TREE_TYPE (first_field (return_type));
8174 return_mode = TYPE_MODE (return_type);
8176 if (AGGREGATE_TYPE_P (return_type)
8177 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
8178 <= 8))
8179 rs6000_returns_struct = true;
8181 if (SCALAR_FLOAT_MODE_P (return_mode))
8182 rs6000_passes_float = true;
8183 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
8184 || SPE_VECTOR_MODE (return_mode))
8185 rs6000_passes_vector = true;
8188 #endif
8190 if (fntype
8191 && !TARGET_ALTIVEC
8192 && TARGET_ALTIVEC_ABI
8193 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
8195 error ("cannot return value in vector register because"
8196 " altivec instructions are disabled, use -maltivec"
8197 " to enable them");
8201 /* Return true if TYPE must be passed on the stack and not in registers. */
8203 static bool
8204 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
8206 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
8207 return must_pass_in_stack_var_size (mode, type);
8208 else
8209 return must_pass_in_stack_var_size_or_pad (mode, type);
8212 /* If defined, a C expression which determines whether, and in which
8213 direction, to pad out an argument with extra space. The value
8214 should be of type `enum direction': either `upward' to pad above
8215 the argument, `downward' to pad below, or `none' to inhibit
8216 padding.
8218 For the AIX ABI structs are always stored left shifted in their
8219 argument slot. */
8221 enum direction
8222 function_arg_padding (enum machine_mode mode, const_tree type)
8224 #ifndef AGGREGATE_PADDING_FIXED
8225 #define AGGREGATE_PADDING_FIXED 0
8226 #endif
8227 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
8228 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
8229 #endif
8231 if (!AGGREGATE_PADDING_FIXED)
8233 /* GCC used to pass structures of the same size as integer types as
8234 if they were in fact integers, ignoring FUNCTION_ARG_PADDING;
8235 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
8236 passed padded downward, except that -mstrict-align further
8237 muddied the water in that multi-component structures of 2 and 4
8238 bytes in size were passed padded upward.
8240 The following arranges for best compatibility with previous
8241 versions of gcc, but removes the -mstrict-align dependency. */
8242 if (BYTES_BIG_ENDIAN)
8244 HOST_WIDE_INT size = 0;
8246 if (mode == BLKmode)
8248 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
8249 size = int_size_in_bytes (type);
8251 else
8252 size = GET_MODE_SIZE (mode);
8254 if (size == 1 || size == 2 || size == 4)
8255 return downward;
8257 return upward;
8260 if (AGGREGATES_PAD_UPWARD_ALWAYS)
8262 if (type != 0 && AGGREGATE_TYPE_P (type))
8263 return upward;
8266 /* Fall back to the default. */
8267 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
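/* A sketch (plain C, not GCC code) of the BYTES_BIG_ENDIAN rule above:
   only sizes 1, 2 and 4 pad downward, mirroring the old behavior of
   passing small structures as if they were integers.  */
#include <stdio.h>

static const char *
pad_direction (long size)
{
  return (size == 1 || size == 2 || size == 4) ? "downward" : "upward";
}

int
main (void)
{
  for (long size = 1; size <= 5; size++)
    printf ("size %ld: %s\n", size, pad_direction (size));
  return 0;
}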
8270 /* If defined, a C expression that gives the alignment boundary, in bits,
8271 of an argument with the specified mode and type. If it is not defined,
8272 PARM_BOUNDARY is used for all arguments.
8274 V.4 wants long longs and doubles to be double word aligned. Just
8275 testing the mode size is a boneheaded way to do this as it means
8276 that other types such as complex int are also double word aligned.
8277 However, we're stuck with this because changing the ABI might break
8278 existing library interfaces.
8280 Doubleword align SPE vectors.
8281 Quadword align Altivec/VSX vectors.
8282 Quadword align large synthetic vector types. */
8284 static unsigned int
8285 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
8287 if (DEFAULT_ABI == ABI_V4
8288 && (GET_MODE_SIZE (mode) == 8
8289 || (TARGET_HARD_FLOAT
8290 && TARGET_FPRS
8291 && (mode == TFmode || mode == TDmode))))
8292 return 64;
8293 else if (SPE_VECTOR_MODE (mode)
8294 || (type && TREE_CODE (type) == VECTOR_TYPE
8295 && int_size_in_bytes (type) >= 8
8296 && int_size_in_bytes (type) < 16))
8297 return 64;
8298 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8299 || (type && TREE_CODE (type) == VECTOR_TYPE
8300 && int_size_in_bytes (type) >= 16))
8301 return 128;
8302 else if (TARGET_MACHO
8303 && rs6000_darwin64_abi
8304 && mode == BLKmode
8305 && type && TYPE_ALIGN (type) > 64)
8306 return 128;
8307 else
8308 return PARM_BOUNDARY;
8311 /* For a function parm of MODE and TYPE, return the starting word in
8312 the parameter area. NWORDS of the parameter area are already used. */
8314 static unsigned int
8315 rs6000_parm_start (enum machine_mode mode, const_tree type,
8316 unsigned int nwords)
8318 unsigned int align;
8319 unsigned int parm_offset;
8321 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
8322 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
8323 return nwords + (-(parm_offset + nwords) & align);
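/* A sketch (plain C, not GCC code) of the rounding above.  The
   parameter save area does not start at the top of the frame
   (parm_offset is 2 words for V.4, 6 for 32-bit AIX), so the alignment
   is computed on parm_offset + nwords and only the padding is added to
   nwords.  */
#include <stdio.h>

static unsigned int
parm_start (unsigned int parm_offset, unsigned int nwords,
	    unsigned int align)
{
  return nwords + (-(parm_offset + nwords) & align);
}

int
main (void)
{
  /* 32-bit AIX, one word already used, doubleword alignment: the
     argument starts at word 2, i.e. at byte 24 + 2*4 = 32.  */
  printf ("%u\n", parm_start (6, 1, 1)); /* prints 2 */
  return 0;
}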
8326 /* Compute the size (in words) of a function argument. */
8328 static unsigned long
8329 rs6000_arg_size (enum machine_mode mode, const_tree type)
8331 unsigned long size;
8333 if (mode != BLKmode)
8334 size = GET_MODE_SIZE (mode);
8335 else
8336 size = int_size_in_bytes (type);
8338 if (TARGET_32BIT)
8339 return (size + 3) >> 2;
8340 else
8341 return (size + 7) >> 3;
8344 /* Use this to flush pending int fields. */
8346 static void
8347 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
8348 HOST_WIDE_INT bitpos, int final)
8350 unsigned int startbit, endbit;
8351 int intregs, intoffset;
8352 enum machine_mode mode;
8354 /* Handle the situations where a float is taking up the first half
8355 of the GPR, and the other half is empty (typically due to
8356 alignment restrictions). We can detect this by an 8-byte-aligned
8357 int field, or by seeing that this is the final flush for this
8358 argument. Count the word and continue on. */
8359 if (cum->floats_in_gpr == 1
8360 && (cum->intoffset % 64 == 0
8361 || (cum->intoffset == -1 && final)))
8363 cum->words++;
8364 cum->floats_in_gpr = 0;
8367 if (cum->intoffset == -1)
8368 return;
8370 intoffset = cum->intoffset;
8371 cum->intoffset = -1;
8372 cum->floats_in_gpr = 0;
8374 if (intoffset % BITS_PER_WORD != 0)
8376 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8377 MODE_INT, 0);
8378 if (mode == BLKmode)
8380 /* We couldn't find an appropriate mode, which happens,
8381 e.g., in packed structs when there are 3 bytes to load.
8382 Back intoffset back to the beginning of the word in this
8383 case. */
8384 intoffset = intoffset & -BITS_PER_WORD;
8388 startbit = intoffset & -BITS_PER_WORD;
8389 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8390 intregs = (endbit - startbit) / BITS_PER_WORD;
8391 cum->words += intregs;
8392 /* words should be unsigned. */
8393 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
8395 int pad = (endbit/BITS_PER_WORD) - cum->words;
8396 cum->words += pad;
8400 /* The darwin64 ABI calls for us to recurse down through structs,
8401 looking for elements passed in registers. Unfortunately, we have
8402 to track int register count here also because of misalignments
8403 in powerpc alignment mode. */
8405 static void
8406 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
8407 const_tree type,
8408 HOST_WIDE_INT startbitpos)
8410 tree f;
8412 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8413 if (TREE_CODE (f) == FIELD_DECL)
8415 HOST_WIDE_INT bitpos = startbitpos;
8416 tree ftype = TREE_TYPE (f);
8417 enum machine_mode mode;
8418 if (ftype == error_mark_node)
8419 continue;
8420 mode = TYPE_MODE (ftype);
8422 if (DECL_SIZE (f) != 0
8423 && host_integerp (bit_position (f), 1))
8424 bitpos += int_bit_position (f);
8426 /* ??? FIXME: else assume zero offset. */
8428 if (TREE_CODE (ftype) == RECORD_TYPE)
8429 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
8430 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
8432 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
8433 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
8434 cum->fregno += n_fpregs;
8435 /* Single-precision floats present a special problem for
8436 us, because they are smaller than an 8-byte GPR, and so
8437 the structure-packing rules combined with the standard
8438 varargs behavior mean that we want to pack float/float
8439 and float/int combinations into a single register's
8440 space. This is complicated by the arg advance flushing,
8441 which works on arbitrarily large groups of int-type
8442 fields. */
8443 if (mode == SFmode)
8445 if (cum->floats_in_gpr == 1)
8447 /* Two floats in a word; count the word and reset
8448 the float count. */
8449 cum->words++;
8450 cum->floats_in_gpr = 0;
8452 else if (bitpos % 64 == 0)
8454 /* A float at the beginning of an 8-byte word;
8455 count it and put off adjusting cum->words until
8456 we see if an arg advance flush is going to do it
8457 for us. */
8458 cum->floats_in_gpr++;
8460 else
8462 /* The float is at the end of a word, preceded
8463 by integer fields, so the arg advance flush
8464 just above has already set cum->words and
8465 everything is taken care of. */
8468 else
8469 cum->words += n_fpregs;
8471 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
8473 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
8474 cum->vregno++;
8475 cum->words += 2;
8477 else if (cum->intoffset == -1)
8478 cum->intoffset = bitpos;
8482 /* Check for an item that needs to be considered specially under the darwin 64
8483 bit ABI. These are record types where the mode is BLK or the structure is
8484 8 bytes in size. */
8485 static int
8486 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
8488 return rs6000_darwin64_abi
8489 && ((mode == BLKmode
8490 && TREE_CODE (type) == RECORD_TYPE
8491 && int_size_in_bytes (type) > 0)
8492 || (type && TREE_CODE (type) == RECORD_TYPE
8493 && int_size_in_bytes (type) == 8)) ? 1 : 0;
8496 /* Update the data in CUM to advance over an argument
8497 of mode MODE and data type TYPE.
8498 (TYPE is null for libcalls where that information may not be available.)
8500 Note that for args passed by reference, function_arg will be called
8501 with MODE and TYPE set to that of the pointer to the arg, not the arg
8502 itself. */
8504 static void
8505 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8506 const_tree type, bool named, int depth)
8508 /* Only tick off an argument if we're not recursing. */
8509 if (depth == 0)
8510 cum->nargs_prototype--;
8512 #ifdef HAVE_AS_GNU_ATTRIBUTE
8513 if (DEFAULT_ABI == ABI_V4
8514 && cum->escapes)
8516 if (SCALAR_FLOAT_MODE_P (mode))
8517 rs6000_passes_float = true;
8518 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
8519 rs6000_passes_vector = true;
8520 else if (SPE_VECTOR_MODE (mode)
8521 && !cum->stdarg
8522 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8523 rs6000_passes_vector = true;
8525 #endif
8527 if (TARGET_ALTIVEC_ABI
8528 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8529 || (type && TREE_CODE (type) == VECTOR_TYPE
8530 && int_size_in_bytes (type) == 16)))
8532 bool stack = false;
8534 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8536 cum->vregno++;
8537 if (!TARGET_ALTIVEC)
8538 error ("cannot pass argument in vector register because"
8539 " altivec instructions are disabled, use -maltivec"
8540 " to enable them");
8542 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
8543 even if it is going to be passed in a vector register.
8544 Darwin does the same for variable-argument functions. */
8545 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
8546 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
8547 stack = true;
8549 else
8550 stack = true;
8552 if (stack)
8554 int align;
8556 /* Vector parameters must be 16-byte aligned. This places
8557 them at 2 mod 4 in terms of words in 32-bit mode, since
8558 the parameter save area starts at offset 24 from the
8559 stack. In 64-bit mode, they just have to start on an
8560 even word, since the parameter save area is 16-byte
8561 aligned. Space for GPRs is reserved even if the argument
8562 will be passed in memory. */
8563 if (TARGET_32BIT)
8564 align = (2 - cum->words) & 3;
8565 else
8566 align = cum->words & 1;
8567 cum->words += align + rs6000_arg_size (mode, type);
8569 if (TARGET_DEBUG_ARG)
8571 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
8572 cum->words, align);
8573 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
8574 cum->nargs_prototype, cum->prototype,
8575 GET_MODE_NAME (mode));
8579 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
8580 && !cum->stdarg
8581 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8582 cum->sysv_gregno++;
8584 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8586 int size = int_size_in_bytes (type);
8587 /* Variable sized types have size == -1 and are
8588 treated as if consisting entirely of ints.
8589 Pad to 16 byte boundary if needed. */
8590 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8591 && (cum->words % 2) != 0)
8592 cum->words++;
8593 /* For varargs, we can just go up by the size of the struct. */
8594 if (!named)
8595 cum->words += (size + 7) / 8;
8596 else
8598 /* It is tempting to say int register count just goes up by
8599 sizeof(type)/8, but this is wrong in a case such as
8600 { int; double; int; } [powerpc alignment]. We have to
8601 grovel through the fields for these too. */
8602 cum->intoffset = 0;
8603 cum->floats_in_gpr = 0;
8604 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
8605 rs6000_darwin64_record_arg_advance_flush (cum,
8606 size * BITS_PER_UNIT, 1);
8608 if (TARGET_DEBUG_ARG)
8610 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
8611 cum->words, TYPE_ALIGN (type), size);
8612 fprintf (stderr,
8613 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
8614 cum->nargs_prototype, cum->prototype,
8615 GET_MODE_NAME (mode));
8618 else if (DEFAULT_ABI == ABI_V4)
8620 if (TARGET_HARD_FLOAT && TARGET_FPRS
8621 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8622 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8623 || (mode == TFmode && !TARGET_IEEEQUAD)
8624 || mode == SDmode || mode == DDmode || mode == TDmode))
8626 /* _Decimal128 must use an even/odd register pair. This assumes
8627 that the register number is odd when fregno is odd. */
8628 if (mode == TDmode && (cum->fregno % 2) == 1)
8629 cum->fregno++;
8631 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8632 <= FP_ARG_V4_MAX_REG)
8633 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8634 else
8636 cum->fregno = FP_ARG_V4_MAX_REG + 1;
8637 if (mode == DFmode || mode == TFmode
8638 || mode == DDmode || mode == TDmode)
8639 cum->words += cum->words & 1;
8640 cum->words += rs6000_arg_size (mode, type);
8643 else
8645 int n_words = rs6000_arg_size (mode, type);
8646 int gregno = cum->sysv_gregno;
8648 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8649 (r7,r8) or (r9,r10). As does any other 2 word item such
8650 as complex int due to a historical mistake. */
8651 if (n_words == 2)
8652 gregno += (1 - gregno) & 1;
8654 /* Multi-reg args are not split between registers and stack. */
8655 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8657 /* Long long and SPE vectors are aligned on the stack.
8658 So are other 2 word items such as complex int due to
8659 a historical mistake. */
8660 if (n_words == 2)
8661 cum->words += cum->words & 1;
8662 cum->words += n_words;
8665 /* Note: we continue to accumulate gregno even after we have started
8666 spilling to the stack; this tells expand_builtin_saveregs that
8667 spilling has begun. */
8668 cum->sysv_gregno = gregno + n_words;
8671 if (TARGET_DEBUG_ARG)
8673 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8674 cum->words, cum->fregno);
8675 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
8676 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
8677 fprintf (stderr, "mode = %4s, named = %d\n",
8678 GET_MODE_NAME (mode), named);
8681 else
8683 int n_words = rs6000_arg_size (mode, type);
8684 int start_words = cum->words;
8685 int align_words = rs6000_parm_start (mode, type, start_words);
8687 cum->words = align_words + n_words;
8689 if (SCALAR_FLOAT_MODE_P (mode)
8690 && TARGET_HARD_FLOAT && TARGET_FPRS)
8692 /* _Decimal128 must be passed in an even/odd float register pair.
8693 This assumes that the register number is odd when fregno is
8694 odd. */
8695 if (mode == TDmode && (cum->fregno % 2) == 1)
8696 cum->fregno++;
8697 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8700 if (TARGET_DEBUG_ARG)
8702 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8703 cum->words, cum->fregno);
8704 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8705 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8706 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8707 named, align_words - start_words, depth);
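/* A sketch (plain C, not GCC code) of the 32-bit vector alignment
   computed above: the parameter save area begins 24 bytes into the
   frame, so a 16-byte-aligned slot must start at a word index that is
   2 mod 4, and (2 - words) & 3 is exactly the padding that gets
   there.  */
#include <stdio.h>

int
main (void)
{
  for (int words = 0; words < 5; words++)
    {
      int align = (2 - words) & 3;
      printf ("words=%d pad=%d byte=%d\n",
	      words, align, 24 + 4 * (words + align));
    }
  return 0; /* every printed byte offset is a multiple of 16 */
}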
8712 static void
8713 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8714 const_tree type, bool named)
8716 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8720 static rtx
8721 spe_build_register_parallel (enum machine_mode mode, int gregno)
8723 rtx r1, r3, r5, r7;
8725 switch (mode)
8727 case DFmode:
8728 r1 = gen_rtx_REG (DImode, gregno);
8729 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8730 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8732 case DCmode:
8733 case TFmode:
8734 r1 = gen_rtx_REG (DImode, gregno);
8735 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8736 r3 = gen_rtx_REG (DImode, gregno + 2);
8737 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8738 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8740 case TCmode:
8741 r1 = gen_rtx_REG (DImode, gregno);
8742 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8743 r3 = gen_rtx_REG (DImode, gregno + 2);
8744 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8745 r5 = gen_rtx_REG (DImode, gregno + 4);
8746 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8747 r7 = gen_rtx_REG (DImode, gregno + 6);
8748 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8749 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8751 default:
8752 gcc_unreachable ();
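/* Illustrative result (assumed shape, not from a real dump): for a
   TFmode argument starting at r5, the code above builds

     (parallel:TF [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   where each EXPR_LIST pairs a register with the byte offset of the
   piece of the value that it carries.  */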
8756 /* Determine where to put a SIMD argument on the SPE. */
8757 static rtx
8758 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8759 const_tree type)
8761 int gregno = cum->sysv_gregno;
8763 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8764 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8765 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8766 || mode == DCmode || mode == TCmode))
8768 int n_words = rs6000_arg_size (mode, type);
8770 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8771 if (mode == DFmode)
8772 gregno += (1 - gregno) & 1;
8774 /* Multi-reg args are not split between registers and stack. */
8775 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8776 return NULL_RTX;
8778 return spe_build_register_parallel (mode, gregno);
8780 if (cum->stdarg)
8782 int n_words = rs6000_arg_size (mode, type);
8784 /* SPE vectors are put in odd registers. */
8785 if (n_words == 2 && (gregno & 1) == 0)
8786 gregno += 1;
8788 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8790 rtx r1, r2;
8791 enum machine_mode m = SImode;
8793 r1 = gen_rtx_REG (m, gregno);
8794 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8795 r2 = gen_rtx_REG (m, gregno + 1);
8796 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8797 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8799 else
8800 return NULL_RTX;
8802 else
8804 if (gregno <= GP_ARG_MAX_REG)
8805 return gen_rtx_REG (mode, gregno);
8806 else
8807 return NULL_RTX;
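/* A sketch (plain C, not GCC code) of the pairing adjustment used
   above and in the V.4 code: (1 - gregno) & 1 is 1 exactly when gregno
   is even, so the bump rounds an even register number up to the next
   odd one, yielding odd/even pairs such as (r5,r6).  */
#include <stdio.h>

int
main (void)
{
  for (int gregno = 3; gregno <= 6; gregno++)
    printf ("r%d -> r%d\n", gregno, gregno + ((1 - gregno) & 1));
  /* prints r3 -> r3, r4 -> r5, r5 -> r5, r6 -> r7 */
  return 0;
}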
8811 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8812 structure between cum->intoffset and bitpos to integer registers. */
8814 static void
8815 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8816 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8818 enum machine_mode mode;
8819 unsigned int regno;
8820 unsigned int startbit, endbit;
8821 int this_regno, intregs, intoffset;
8822 rtx reg;
8824 if (cum->intoffset == -1)
8825 return;
8827 intoffset = cum->intoffset;
8828 cum->intoffset = -1;
8830 /* If this is the trailing part of a word, try to only load that
8831 much into the register. Otherwise load the whole register. Note
8832 that in the latter case we may pick up unwanted bits. It's not a
8833 problem at the moment, but we may wish to revisit this. */
8835 if (intoffset % BITS_PER_WORD != 0)
8837 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8838 MODE_INT, 0);
8839 if (mode == BLKmode)
8841 /* We couldn't find an appropriate mode, which happens,
8842 e.g., in packed structs when there are 3 bytes to load.
8843 Back intoffset back to the beginning of the word in this
8844 case. */
8845 intoffset = intoffset & -BITS_PER_WORD;
8846 mode = word_mode;
8849 else
8850 mode = word_mode;
8852 startbit = intoffset & -BITS_PER_WORD;
8853 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8854 intregs = (endbit - startbit) / BITS_PER_WORD;
8855 this_regno = cum->words + intoffset / BITS_PER_WORD;
8857 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8858 cum->use_stack = 1;
8860 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8861 if (intregs <= 0)
8862 return;
8864 intoffset /= BITS_PER_UNIT;
8865 do
8867 regno = GP_ARG_MIN_REG + this_regno;
8868 reg = gen_rtx_REG (mode, regno);
8869 rvec[(*k)++] =
8870 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8872 this_regno += 1;
8873 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8874 mode = word_mode;
8875 intregs -= 1;
8877 while (intregs > 0);
8880 /* Recursive workhorse for the following. */
8882 static void
8883 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8884 HOST_WIDE_INT startbitpos, rtx rvec[],
8885 int *k)
8887 tree f;
8889 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8890 if (TREE_CODE (f) == FIELD_DECL)
8892 HOST_WIDE_INT bitpos = startbitpos;
8893 tree ftype = TREE_TYPE (f);
8894 enum machine_mode mode;
8895 if (ftype == error_mark_node)
8896 continue;
8897 mode = TYPE_MODE (ftype);
8899 if (DECL_SIZE (f) != 0
8900 && host_integerp (bit_position (f), 1))
8901 bitpos += int_bit_position (f);
8903 /* ??? FIXME: else assume zero offset. */
8905 if (TREE_CODE (ftype) == RECORD_TYPE)
8906 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8907 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8909 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8910 #if 0
8911 switch (mode)
8913 case SCmode: mode = SFmode; break;
8914 case DCmode: mode = DFmode; break;
8915 case TCmode: mode = TFmode; break;
8916 default: break;
8918 #endif
8919 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8920 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8922 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8923 && (mode == TFmode || mode == TDmode));
8924 /* Long double or _Decimal128 split over regs and memory. */
8925 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8926 cum->use_stack = 1;
8928 rvec[(*k)++]
8929 = gen_rtx_EXPR_LIST (VOIDmode,
8930 gen_rtx_REG (mode, cum->fregno++),
8931 GEN_INT (bitpos / BITS_PER_UNIT));
8932 if (mode == TFmode || mode == TDmode)
8933 cum->fregno++;
8935 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8937 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8938 rvec[(*k)++]
8939 = gen_rtx_EXPR_LIST (VOIDmode,
8940 gen_rtx_REG (mode, cum->vregno++),
8941 GEN_INT (bitpos / BITS_PER_UNIT));
8943 else if (cum->intoffset == -1)
8944 cum->intoffset = bitpos;
8948 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8949 the register(s) to be used for each field and subfield of a struct
8950 being passed by value, along with the offset of where the
8951 register's value may be found in the block. FP fields go in FP
8952 registers, vector fields go in vector registers, and everything
8953 else goes in int registers, packed as in memory.
8955 This code is also used for function return values. RETVAL indicates
8956 whether this is the case.
8958 Much of this is taken from the SPARC V9 port, which has a similar
8959 calling convention. */
8961 static rtx
8962 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8963 bool named, bool retval)
8965 rtx rvec[FIRST_PSEUDO_REGISTER];
8966 int k = 1, kbase = 1;
8967 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8968 /* This is a copy; modifications are not visible to our caller. */
8969 CUMULATIVE_ARGS copy_cum = *orig_cum;
8970 CUMULATIVE_ARGS *cum = &copy_cum;
8972 /* Pad to 16 byte boundary if needed. */
8973 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8974 && (cum->words % 2) != 0)
8975 cum->words++;
8977 cum->intoffset = 0;
8978 cum->use_stack = 0;
8979 cum->named = named;
8981 /* Put entries into rvec[] for individual FP and vector fields, and
8982 for the chunks of memory that go in int regs. Note we start at
8983 element 1; 0 is reserved for an indication of using memory, and
8984 may or may not be filled in below. */
8985 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8986 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8988 /* If any part of the struct went on the stack put all of it there.
8989 This hack is because the generic code for
8990 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8991 parts of the struct are not at the beginning. */
8992 if (cum->use_stack)
8994 if (retval)
8995 return NULL_RTX; /* doesn't go in registers at all */
8996 kbase = 0;
8997 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8999 if (k > 1 || cum->use_stack)
9000 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
9001 else
9002 return NULL_RTX;
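/* Illustrative result (assumed shape; register numbers are purely
   hypothetical): for a small struct with a leading double field, the
   PARALLEL built above might look like

     (parallel:BLK [(expr_list (reg:DF 33) (const_int 0))
                    (expr_list (reg:DI 4) (const_int 8))])

   with rvec[0] instead holding (expr_list (nil) (const_int 0)) when
   any part of the struct spills to the stack.  */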
9005 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
9007 static rtx
9008 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
9009 int align_words)
9011 int n_units;
9012 int i, k;
9013 rtx rvec[GP_ARG_NUM_REG + 1];
9015 if (align_words >= GP_ARG_NUM_REG)
9016 return NULL_RTX;
9018 n_units = rs6000_arg_size (mode, type);
9020 /* Optimize the simple case where the arg fits in one gpr, except in
9021 the case of BLKmode due to assign_parms assuming that registers are
9022 BITS_PER_WORD wide. */
9023 if (n_units == 0
9024 || (n_units == 1 && mode != BLKmode))
9025 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9027 k = 0;
9028 if (align_words + n_units > GP_ARG_NUM_REG)
9029 /* Not all of the arg fits in gprs. Say that it goes in memory too,
9030 using a magic NULL_RTX component.
9031 This is not strictly correct. Only some of the arg belongs in
9032 memory, not all of it. However, the normal scheme using
9034 function_arg_partial_nregs can result in unusual subregs, e.g.
9034 (subreg:SI (reg:DF) 4), which are not handled well. The code to
9035 store the whole arg to memory is often more efficient than code
9036 to store pieces, and we know that space is available in the right
9037 place for the whole arg. */
9038 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9040 i = 0;
9041 do
9043 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
9044 rtx off = GEN_INT (i++ * 4);
9045 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9047 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
9049 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
9052 /* Determine where to put an argument to a function.
9053 Value is zero to push the argument on the stack,
9054 or a hard register in which to store the argument.
9056 MODE is the argument's machine mode.
9057 TYPE is the data type of the argument (as a tree).
9058 This is null for libcalls where that information may
9059 not be available.
9060 CUM is a variable of type CUMULATIVE_ARGS which gives info about
9061 the preceding args and about the function being called. It is
9062 not modified in this routine.
9063 NAMED is nonzero if this argument is a named parameter
9064 (otherwise it is an extra parameter matching an ellipsis).
9066 On RS/6000 the first eight words of non-FP are normally in registers
9067 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
9068 Under V.4, the first 8 FP args are in registers.
9070 If this is floating-point and no prototype is specified, we use
9071 both an FP and integer register (or possibly FP reg and stack). Library
9072 functions (when CALL_LIBCALL is set) always have the proper types for args,
9073 so we can pass the FP value just in one register. emit_library_function
9074 doesn't support PARALLEL anyway.
9076 Note that for args passed by reference, function_arg will be called
9077 with MODE and TYPE set to that of the pointer to the arg, not the arg
9078 itself. */
9080 static rtx
9081 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9082 const_tree type, bool named)
9084 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9085 enum rs6000_abi abi = DEFAULT_ABI;
9087 /* Return a marker indicating whether we need to set or clear the
9088 bit in CR1 that V.4 uses to say fp args were passed in registers.
9089 Assume that we don't need the marker for software floating point,
9090 or compiler generated library calls. */
9091 if (mode == VOIDmode)
9093 if (abi == ABI_V4
9094 && (cum->call_cookie & CALL_LIBCALL) == 0
9095 && (cum->stdarg
9096 || (cum->nargs_prototype < 0
9097 && (cum->prototype || TARGET_NO_PROTOTYPE))))
9099 /* For the SPE, we need to crxor CR6 always. */
9100 if (TARGET_SPE_ABI)
9101 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
9102 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
9103 return GEN_INT (cum->call_cookie
9104 | ((cum->fregno == FP_ARG_MIN_REG)
9105 ? CALL_V4_SET_FP_ARGS
9106 : CALL_V4_CLEAR_FP_ARGS));
9109 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
9112 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9114 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
9115 if (rslt != NULL_RTX)
9116 return rslt;
9117 /* Else fall through to usual handling. */
9120 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
9121 if (TARGET_64BIT && ! cum->prototype)
9123 /* Vector parameters get passed in a vector register and also in
9124 GPRs or memory, in the absence of a prototype. */
9125 int align_words;
9126 rtx slot;
9127 align_words = (cum->words + 1) & ~1;
9129 if (align_words >= GP_ARG_NUM_REG)
9131 slot = NULL_RTX;
9133 else
9135 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9137 return gen_rtx_PARALLEL (mode,
9138 gen_rtvec (2,
9139 gen_rtx_EXPR_LIST (VOIDmode,
9140 slot, const0_rtx),
9141 gen_rtx_EXPR_LIST (VOIDmode,
9142 gen_rtx_REG (mode, cum->vregno),
9143 const0_rtx)));
9145 else
9146 return gen_rtx_REG (mode, cum->vregno);
9147 else if (TARGET_ALTIVEC_ABI
9148 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
9149 || (type && TREE_CODE (type) == VECTOR_TYPE
9150 && int_size_in_bytes (type) == 16)))
9152 if (named || abi == ABI_V4)
9153 return NULL_RTX;
9154 else
9156 /* Vector parameters to varargs functions under AIX or Darwin
9157 get passed in memory and possibly also in GPRs. */
9158 int align, align_words, n_words;
9159 enum machine_mode part_mode;
9161 /* Vector parameters must be 16-byte aligned. This places them at
9162 2 mod 4 in terms of words in 32-bit mode, since the parameter
9163 save area starts at offset 24 from the stack. In 64-bit mode,
9164 they just have to start on an even word, since the parameter
9165 save area is 16-byte aligned. */
9166 if (TARGET_32BIT)
9167 align = (2 - cum->words) & 3;
9168 else
9169 align = cum->words & 1;
9170 align_words = cum->words + align;
9172 /* Out of registers? Memory, then. */
9173 if (align_words >= GP_ARG_NUM_REG)
9174 return NULL_RTX;
9176 if (TARGET_32BIT && TARGET_POWERPC64)
9177 return rs6000_mixed_function_arg (mode, type, align_words);
9179 /* The vector value goes in GPRs. Only the part of the
9180 value in GPRs is reported here. */
9181 part_mode = mode;
9182 n_words = rs6000_arg_size (mode, type);
9183 if (align_words + n_words > GP_ARG_NUM_REG)
9184 /* Fortunately, there are only two possibilities: the value
9185 is either wholly in GPRs or half in GPRs and half not. */
9186 part_mode = DImode;
9188 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
9191 else if (TARGET_SPE_ABI && TARGET_SPE
9192 && (SPE_VECTOR_MODE (mode)
9193 || (TARGET_E500_DOUBLE && (mode == DFmode
9194 || mode == DCmode
9195 || mode == TFmode
9196 || mode == TCmode))))
9197 return rs6000_spe_function_arg (cum, mode, type);
9199 else if (abi == ABI_V4)
9201 if (TARGET_HARD_FLOAT && TARGET_FPRS
9202 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9203 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9204 || (mode == TFmode && !TARGET_IEEEQUAD)
9205 || mode == SDmode || mode == DDmode || mode == TDmode))
9207 /* _Decimal128 must use an even/odd register pair. This assumes
9208 that the register number is odd when fregno is odd. */
9209 if (mode == TDmode && (cum->fregno % 2) == 1)
9210 cum->fregno++;
9212 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9213 <= FP_ARG_V4_MAX_REG)
9214 return gen_rtx_REG (mode, cum->fregno);
9215 else
9216 return NULL_RTX;
9218 else
9220 int n_words = rs6000_arg_size (mode, type);
9221 int gregno = cum->sysv_gregno;
9223 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9224 (r7,r8) or (r9,r10), as is any other 2-word item such
9225 as complex int, due to a historical mistake. */
9226 if (n_words == 2)
9227 gregno += (1 - gregno) & 1;
9229 /* Multi-reg args are not split between registers and stack. */
9230 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9231 return NULL_RTX;
9233 if (TARGET_32BIT && TARGET_POWERPC64)
9234 return rs6000_mixed_function_arg (mode, type,
9235 gregno - GP_ARG_MIN_REG);
9236 return gen_rtx_REG (mode, gregno);
9239 else
9241 int align_words = rs6000_parm_start (mode, type, cum->words);
9243 /* _Decimal128 must be passed in an even/odd float register pair.
9244 This assumes that the register number is odd when fregno is odd. */
9245 if (mode == TDmode && (cum->fregno % 2) == 1)
9246 cum->fregno++;
9248 if (USE_FP_FOR_ARG_P (cum, mode, type))
9250 rtx rvec[GP_ARG_NUM_REG + 1];
9251 rtx r;
9252 int k;
9253 bool needs_psave;
9254 enum machine_mode fmode = mode;
9255 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9257 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9259 /* Currently, we only ever need one reg here because complex
9260 doubles are split. */
9261 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9262 && (fmode == TFmode || fmode == TDmode));
9264 /* Long double or _Decimal128 split over regs and memory. */
9265 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
9268 /* Do we also need to pass this arg in the parameter save
9269 area? */
9270 needs_psave = (type
9271 && (cum->nargs_prototype <= 0
9272 || (DEFAULT_ABI == ABI_AIX
9273 && TARGET_XL_COMPAT
9274 && align_words >= GP_ARG_NUM_REG)));
9276 if (!needs_psave && mode == fmode)
9277 return gen_rtx_REG (fmode, cum->fregno);
9279 k = 0;
9280 if (needs_psave)
9282 /* Describe the part that goes in gprs or the stack.
9283 This piece must come first, before the fprs. */
9284 if (align_words < GP_ARG_NUM_REG)
9286 unsigned long n_words = rs6000_arg_size (mode, type);
9288 if (align_words + n_words > GP_ARG_NUM_REG
9289 || (TARGET_32BIT && TARGET_POWERPC64))
9291 /* If this is partially on the stack, then we only
9292 include the portion actually in registers here. */
9293 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
9294 rtx off;
9295 int i = 0;
9296 if (align_words + n_words > GP_ARG_NUM_REG)
9297 /* Not all of the arg fits in gprs. Say that it
9298 goes in memory too, using a magic NULL_RTX
9299 component. Also see comment in
9300 rs6000_mixed_function_arg for why the normal
9301 function_arg_partial_nregs scheme doesn't work
9302 in this case. */
9303 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
9304 const0_rtx);
9307 r = gen_rtx_REG (rmode,
9308 GP_ARG_MIN_REG + align_words);
9309 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
9310 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9312 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
9314 else
9316 /* The whole arg fits in gprs. */
9317 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9318 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9321 else
9322 /* It's entirely in memory. */
9323 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9326 /* Describe where this piece goes in the fprs. */
9327 r = gen_rtx_REG (fmode, cum->fregno);
9328 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9330 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
9332 else if (align_words < GP_ARG_NUM_REG)
9334 if (TARGET_32BIT && TARGET_POWERPC64)
9335 return rs6000_mixed_function_arg (mode, type, align_words);
9337 if (mode == BLKmode)
9338 mode = Pmode;
9340 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9342 else
9343 return NULL_RTX;
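/* Illustrative sketch (not taken from the sources themselves): for an
   unprototyped 32-bit AIX call such as

     extern double sink ();
     sink (3.0);

   the rules above make the returned PARALLEL describe 3.0 as living
   both in f1 and in r3/r4 (or on the stack), since without a prototype
   the callee may expect the value in either place.  */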
9347 /* For an arg passed partly in registers and partly in memory, this is
9348 the number of bytes passed in registers. For args passed entirely in
9349 registers or entirely in memory, zero. When an arg is described by a
9350 PARALLEL, perhaps using more than one register type, this function
9351 returns the number of bytes used by the first element of the PARALLEL. */
9353 static int
9354 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9355 tree type, bool named)
9357 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9358 int ret = 0;
9359 int align_words;
9361 if (DEFAULT_ABI == ABI_V4)
9362 return 0;
9364 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
9365 && cum->nargs_prototype >= 0)
9366 return 0;
9368 /* In this complicated case we just disable the partial_nregs code. */
9369 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9370 return 0;
9372 align_words = rs6000_parm_start (mode, type, cum->words);
9374 if (USE_FP_FOR_ARG_P (cum, mode, type))
9376 /* If we are passing this arg in the fixed parameter save area
9377 (gprs or memory) as well as fprs, then this function should
9378 return the number of partial bytes passed in the parameter
9379 save area rather than partial bytes passed in fprs. */
9380 if (type
9381 && (cum->nargs_prototype <= 0
9382 || (DEFAULT_ABI == ABI_AIX
9383 && TARGET_XL_COMPAT
9384 && align_words >= GP_ARG_NUM_REG)))
9385 return 0;
9386 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
9387 > FP_ARG_MAX_REG + 1)
9388 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
9389 else if (cum->nargs_prototype >= 0)
9390 return 0;
9393 if (align_words < GP_ARG_NUM_REG
9394 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
9395 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
9397 if (ret != 0 && TARGET_DEBUG_ARG)
9398 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
9400 return ret;
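/* Worked example (illustrative): on 64-bit AIX, GP_ARG_NUM_REG is 8
   (r3..r10).  A 32-byte struct whose first word lands at align_words
   == 6 gets only r9 and r10, so the computation above yields
   (8 - 6) * 8 = 16 partial bytes; the remaining 16 bytes go to the
   parameter save area on the stack.  */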
9403 /* A C expression that indicates when an argument must be passed by
9404 reference. If nonzero for an argument, a copy of that argument is
9405 made in memory and a pointer to the argument is passed instead of
9406 the argument itself. The pointer is passed in whatever way is
9407 appropriate for passing a pointer to that type.
9409 Under V.4, aggregates and long double are passed by reference.
9411 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
9412 reference unless the AltiVec vector extension ABI is in force.
9414 As an extension to all ABIs, variable sized types are passed by
9415 reference. */
9417 static bool
9418 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
9419 enum machine_mode mode, const_tree type,
9420 bool named ATTRIBUTE_UNUSED)
9422 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
9424 if (TARGET_DEBUG_ARG)
9425 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
9426 return 1;
9429 if (!type)
9430 return 0;
9432 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
9434 if (TARGET_DEBUG_ARG)
9435 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
9436 return 1;
9439 if (int_size_in_bytes (type) < 0)
9441 if (TARGET_DEBUG_ARG)
9442 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
9443 return 1;
9446 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9447 modes only exist for GCC vector types if -maltivec. */
9448 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
9450 if (TARGET_DEBUG_ARG)
9451 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
9452 return 1;
9455 /* Pass synthetic vectors in memory. */
9456 if (TREE_CODE (type) == VECTOR_TYPE
9457 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
9459 static bool warned_for_pass_big_vectors = false;
9460 if (TARGET_DEBUG_ARG)
9461 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
9462 if (!warned_for_pass_big_vectors)
9464 warning (0, "GCC vector passed by reference: "
9465 "non-standard ABI extension with no compatibility guarantee");
9466 warned_for_pass_big_vectors = true;
9468 return 1;
9471 return 0;
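/* Illustrative example (not part of the sources) of the V.4 rules
   checked above:

     struct big { int x[4]; };
     void f (struct big b);

   Here the caller makes a copy of B and passes its address, while
   plain scalars such as int and double still go by value.  */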
9474 static void
9475 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
9477 int i;
9478 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
9480 if (nregs == 0)
9481 return;
9483 for (i = 0; i < nregs; i++)
9485 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
9486 if (reload_completed)
9488 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
9489 tem = NULL_RTX;
9490 else
9491 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
9492 i * GET_MODE_SIZE (reg_mode));
9494 else
9495 tem = replace_equiv_address (tem, XEXP (tem, 0));
9497 gcc_assert (tem);
9499 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
9503 /* Perform any actions needed for a function that is receiving a
9504 variable number of arguments.
9506 CUM is as above.
9508 MODE and TYPE are the mode and type of the current parameter.
9510 PRETEND_SIZE is a variable that should be set to the amount of stack
9511 that must be pushed by the prolog to pretend that our caller pushed it.
9514 Normally, this macro will push all remaining incoming registers on the
9515 stack and set PRETEND_SIZE to the length of the registers pushed. */
9517 static void
9518 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
9519 tree type, int *pretend_size ATTRIBUTE_UNUSED,
9520 int no_rtl)
9522 CUMULATIVE_ARGS next_cum;
9523 int reg_size = TARGET_32BIT ? 4 : 8;
9524 rtx save_area = NULL_RTX, mem;
9525 int first_reg_offset;
9526 alias_set_type set;
9528 /* Skip the last named argument. */
9529 next_cum = *get_cumulative_args (cum);
9530 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
9532 if (DEFAULT_ABI == ABI_V4)
9534 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
9536 if (! no_rtl)
9538 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
9539 HOST_WIDE_INT offset = 0;
9541 /* Try to optimize the size of the varargs save area.
9542 The ABI requires that ap.reg_save_area is doubleword
9543 aligned, but we don't need to allocate space for all
9544 the bytes, only for those to which we will actually save
9545 anything. */
9546 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
9547 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
9548 if (TARGET_HARD_FLOAT && TARGET_FPRS
9549 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9550 && cfun->va_list_fpr_size)
9552 if (gpr_reg_num)
9553 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
9554 * UNITS_PER_FP_WORD;
9555 if (cfun->va_list_fpr_size
9556 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9557 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
9558 else
9559 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9560 * UNITS_PER_FP_WORD;
9562 if (gpr_reg_num)
9564 offset = -((first_reg_offset * reg_size) & ~7);
9565 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
9567 gpr_reg_num = cfun->va_list_gpr_size;
9568 if (reg_size == 4 && (first_reg_offset & 1))
9569 gpr_reg_num++;
9571 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
9573 else if (fpr_size)
9574 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
9575 * UNITS_PER_FP_WORD
9576 - (int) (GP_ARG_NUM_REG * reg_size);
9578 if (gpr_size + fpr_size)
9580 rtx reg_save_area
9581 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
9582 gcc_assert (GET_CODE (reg_save_area) == MEM);
9583 reg_save_area = XEXP (reg_save_area, 0);
9584 if (GET_CODE (reg_save_area) == PLUS)
9586 gcc_assert (XEXP (reg_save_area, 0)
9587 == virtual_stack_vars_rtx);
9588 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
9589 offset += INTVAL (XEXP (reg_save_area, 1));
9591 else
9592 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
9595 cfun->machine->varargs_save_offset = offset;
9596 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
9599 else
9601 first_reg_offset = next_cum.words;
9602 save_area = virtual_incoming_args_rtx;
9604 if (targetm.calls.must_pass_in_stack (mode, type))
9605 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
9608 set = get_varargs_alias_set ();
9609 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
9610 && cfun->va_list_gpr_size)
9612 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
9614 if (va_list_gpr_counter_field)
9615 /* V4 va_list_gpr_size counts number of registers needed. */
9616 n_gpr = cfun->va_list_gpr_size;
9617 else
9618 /* char * va_list instead counts number of bytes needed. */
9619 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
9621 if (nregs > n_gpr)
9622 nregs = n_gpr;
9624 mem = gen_rtx_MEM (BLKmode,
9625 plus_constant (Pmode, save_area,
9626 first_reg_offset * reg_size));
9627 MEM_NOTRAP_P (mem) = 1;
9628 set_mem_alias_set (mem, set);
9629 set_mem_align (mem, BITS_PER_WORD);
9631 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
9632 nregs);
9635 /* Save FP registers if needed. */
9636 if (DEFAULT_ABI == ABI_V4
9637 && TARGET_HARD_FLOAT && TARGET_FPRS
9638 && ! no_rtl
9639 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9640 && cfun->va_list_fpr_size)
9642 int fregno = next_cum.fregno, nregs;
9643 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
9644 rtx lab = gen_label_rtx ();
9645 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
9646 * UNITS_PER_FP_WORD);
9648 emit_jump_insn
9649 (gen_rtx_SET (VOIDmode,
9650 pc_rtx,
9651 gen_rtx_IF_THEN_ELSE (VOIDmode,
9652 gen_rtx_NE (VOIDmode, cr1,
9653 const0_rtx),
9654 gen_rtx_LABEL_REF (VOIDmode, lab),
9655 pc_rtx)));
9657 for (nregs = 0;
9658 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
9659 fregno++, off += UNITS_PER_FP_WORD, nregs++)
9661 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9662 ? DFmode : SFmode,
9663 plus_constant (Pmode, save_area, off));
9664 MEM_NOTRAP_P (mem) = 1;
9665 set_mem_alias_set (mem, set);
9666 set_mem_align (mem, GET_MODE_ALIGNMENT (
9667 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9668 ? DFmode : SFmode));
9669 emit_move_insn (mem, gen_rtx_REG (
9670 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9671 ? DFmode : SFmode, fregno));
9674 emit_label (lab);
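/* Layout sketch of the V.4 register save area built above (32-bit
   hard-float case; illustrative only):

     offset  0..31    r3..r10  (GP_ARG_NUM_REG * reg_size bytes)
     offset 32..95    f1..f8   (8 * UNITS_PER_FP_WORD bytes)

   which is why the FP loop starts at off = GP_ARG_NUM_REG * reg_size.
   The conditional branch on CR1 above skips the FPR stores when the
   caller indicated that no FP arguments were passed in registers.  */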
9678 /* Create the va_list data type. */
9680 static tree
9681 rs6000_build_builtin_va_list (void)
9683 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9685 /* For AIX, prefer 'char *' because that's what the system
9686 header files like. */
9687 if (DEFAULT_ABI != ABI_V4)
9688 return build_pointer_type (char_type_node);
9690 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9691 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9692 get_identifier ("__va_list_tag"), record);
9694 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9695 unsigned_char_type_node);
9696 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9697 unsigned_char_type_node);
9698 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9699 every user file. */
9700 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9701 get_identifier ("reserved"), short_unsigned_type_node);
9702 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9703 get_identifier ("overflow_arg_area"),
9704 ptr_type_node);
9705 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9706 get_identifier ("reg_save_area"),
9707 ptr_type_node);
9709 va_list_gpr_counter_field = f_gpr;
9710 va_list_fpr_counter_field = f_fpr;
9712 DECL_FIELD_CONTEXT (f_gpr) = record;
9713 DECL_FIELD_CONTEXT (f_fpr) = record;
9714 DECL_FIELD_CONTEXT (f_res) = record;
9715 DECL_FIELD_CONTEXT (f_ovf) = record;
9716 DECL_FIELD_CONTEXT (f_sav) = record;
9718 TYPE_STUB_DECL (record) = type_decl;
9719 TYPE_NAME (record) = type_decl;
9720 TYPE_FIELDS (record) = f_gpr;
9721 DECL_CHAIN (f_gpr) = f_fpr;
9722 DECL_CHAIN (f_fpr) = f_res;
9723 DECL_CHAIN (f_res) = f_ovf;
9724 DECL_CHAIN (f_ovf) = f_sav;
9726 layout_type (record);
9728 /* The correct type is an array type of one element. */
9729 return build_array_type (record, build_index_type (size_zero_node));
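/* For reference, the record built above is equivalent to this C
   declaration (a sketch for documentation, not compiled in):  */
#if 0
typedef struct __va_list_tag
{
  unsigned char gpr;          /* index of the next free GPR slot (r3..r10) */
  unsigned char fpr;          /* index of the next free FPR slot (f1..f8) */
  unsigned short reserved;    /* padding, named to placate -Wpadded */
  void *overflow_arg_area;    /* args that were passed on the stack */
  void *reg_save_area;        /* GPR block followed by FPR block */
} __va_list_sketch[1];        /* an array type of one element */
#endif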
9732 /* Implement va_start. */
9734 static void
9735 rs6000_va_start (tree valist, rtx nextarg)
9737 HOST_WIDE_INT words, n_gpr, n_fpr;
9738 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9739 tree gpr, fpr, ovf, sav, t;
9741 /* Only SVR4 needs something special. */
9742 if (DEFAULT_ABI != ABI_V4)
9744 std_expand_builtin_va_start (valist, nextarg);
9745 return;
9748 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9749 f_fpr = DECL_CHAIN (f_gpr);
9750 f_res = DECL_CHAIN (f_fpr);
9751 f_ovf = DECL_CHAIN (f_res);
9752 f_sav = DECL_CHAIN (f_ovf);
9754 valist = build_simple_mem_ref (valist);
9755 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9756 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9757 f_fpr, NULL_TREE);
9758 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9759 f_ovf, NULL_TREE);
9760 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9761 f_sav, NULL_TREE);
9763 /* Count number of gp and fp argument registers used. */
9764 words = crtl->args.info.words;
9765 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9766 GP_ARG_NUM_REG);
9767 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9768 FP_ARG_NUM_REG);
9770 if (TARGET_DEBUG_ARG)
9771 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9772 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9773 words, n_gpr, n_fpr);
9775 if (cfun->va_list_gpr_size)
9777 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9778 build_int_cst (NULL_TREE, n_gpr));
9779 TREE_SIDE_EFFECTS (t) = 1;
9780 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9783 if (cfun->va_list_fpr_size)
9785 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9786 build_int_cst (NULL_TREE, n_fpr));
9787 TREE_SIDE_EFFECTS (t) = 1;
9788 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9790 #ifdef HAVE_AS_GNU_ATTRIBUTE
9791 if (call_ABI_of_interest (cfun->decl))
9792 rs6000_passes_float = true;
9793 #endif
9796 /* Find the overflow area. */
9797 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9798 if (words != 0)
9799 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9800 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9801 TREE_SIDE_EFFECTS (t) = 1;
9802 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9804 /* If there were no va_arg invocations, don't set up the register
9805 save area. */
9806 if (!cfun->va_list_gpr_size
9807 && !cfun->va_list_fpr_size
9808 && n_gpr < GP_ARG_NUM_REG
9809 && n_fpr < FP_ARG_V4_MAX_REG)
9810 return;
9812 /* Find the register save area. */
9813 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9814 if (cfun->machine->varargs_save_offset)
9815 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9816 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9817 TREE_SIDE_EFFECTS (t) = 1;
9818 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
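/* In C terms the V.4 va_start above does roughly this (illustrative
   pseudo-code, using the struct sketched after
   rs6000_build_builtin_va_list):

     ap->gpr = <count of named args passed in r3..r10>;
     ap->fpr = <count of named args passed in f1..f8>;
     ap->overflow_arg_area = <address of the first stack-passed arg>;
     ap->reg_save_area = <block stored by setup_incoming_varargs>;  */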
9821 /* Implement va_arg. */
9823 static tree
9824 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9825 gimple_seq *post_p)
9827 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9828 tree gpr, fpr, ovf, sav, reg, t, u;
9829 int size, rsize, n_reg, sav_ofs, sav_scale;
9830 tree lab_false, lab_over, addr;
9831 int align;
9832 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9833 int regalign = 0;
9834 gimple stmt;
9836 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9838 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9839 return build_va_arg_indirect_ref (t);
9842 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9843 earlier version of gcc, with the property that it always applied alignment
9844 adjustments to the va-args (even for zero-sized types). The cheapest way
9845 to deal with this is to replicate the effect of the part of
9846 std_gimplify_va_arg_expr that carries out the align adjust, for the
9847 relevant case.
9848 We don't need to check for pass-by-reference because of the test above.
9849 We can return a simplified answer, since we know there's no offset to add. */
9851 if (TARGET_MACHO
9852 && rs6000_darwin64_abi
9853 && integer_zerop (TYPE_SIZE (type)))
9855 unsigned HOST_WIDE_INT align, boundary;
9856 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9857 align = PARM_BOUNDARY / BITS_PER_UNIT;
9858 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9859 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9860 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9861 boundary /= BITS_PER_UNIT;
9862 if (boundary > align)
9864 tree t;
9865 /* This updates arg ptr by the amount that would be necessary
9866 to align the zero-sized (but not zero-alignment) item. */
9867 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9868 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9869 gimplify_and_add (t, pre_p);
9871 t = fold_convert (sizetype, valist_tmp);
9872 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9873 fold_convert (TREE_TYPE (valist),
9874 fold_build2 (BIT_AND_EXPR, sizetype, t,
9875 size_int (-boundary))));
9876 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9877 gimplify_and_add (t, pre_p);
9879 /* Since it is zero-sized there's no increment for the item itself. */
9880 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9881 return build_va_arg_indirect_ref (valist_tmp);
9884 if (DEFAULT_ABI != ABI_V4)
9886 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9888 tree elem_type = TREE_TYPE (type);
9889 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9890 int elem_size = GET_MODE_SIZE (elem_mode);
9892 if (elem_size < UNITS_PER_WORD)
9894 tree real_part, imag_part;
9895 gimple_seq post = NULL;
9897 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9898 &post);
9899 /* Copy the value into a temporary, lest the formal temporary
9900 be reused out from under us. */
9901 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9902 gimple_seq_add_seq (pre_p, post);
9904 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9905 post_p);
9907 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9911 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9914 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9915 f_fpr = DECL_CHAIN (f_gpr);
9916 f_res = DECL_CHAIN (f_fpr);
9917 f_ovf = DECL_CHAIN (f_res);
9918 f_sav = DECL_CHAIN (f_ovf);
9920 valist = build_va_arg_indirect_ref (valist);
9921 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9922 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9923 f_fpr, NULL_TREE);
9924 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9925 f_ovf, NULL_TREE);
9926 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9927 f_sav, NULL_TREE);
9929 size = int_size_in_bytes (type);
9930 rsize = (size + 3) / 4;
9931 align = 1;
9933 if (TARGET_HARD_FLOAT && TARGET_FPRS
9934 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9935 || (TARGET_DOUBLE_FLOAT
9936 && (TYPE_MODE (type) == DFmode
9937 || TYPE_MODE (type) == TFmode
9938 || TYPE_MODE (type) == SDmode
9939 || TYPE_MODE (type) == DDmode
9940 || TYPE_MODE (type) == TDmode))))
9942 /* FP args go in FP registers, if present. */
9943 reg = fpr;
9944 n_reg = (size + 7) / 8;
9945 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9946 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9947 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9948 align = 8;
9950 else
9952 /* Otherwise into GP registers. */
9953 reg = gpr;
9954 n_reg = rsize;
9955 sav_ofs = 0;
9956 sav_scale = 4;
9957 if (n_reg == 2)
9958 align = 8;
9961 /* Pull the value out of the saved registers.... */
9963 lab_over = NULL;
9964 addr = create_tmp_var (ptr_type_node, "addr");
9966 /* AltiVec vectors never go in registers when -mabi=altivec. */
9967 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9968 align = 16;
9969 else
9971 lab_false = create_artificial_label (input_location);
9972 lab_over = create_artificial_label (input_location);
9974 /* Long long and SPE vectors are aligned in the registers,
9975 as is any other 2-gpr item such as complex int, due to a
9976 historical mistake. */
9977 u = reg;
9978 if (n_reg == 2 && reg == gpr)
9980 regalign = 1;
9981 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9982 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9983 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9984 unshare_expr (reg), u);
9986 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9987 reg number is 0 for f1, so we want to make it odd. */
9988 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9990 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9991 build_int_cst (TREE_TYPE (reg), 1));
9992 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
9995 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9996 t = build2 (GE_EXPR, boolean_type_node, u, t);
9997 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9998 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9999 gimplify_and_add (t, pre_p);
10001 t = sav;
10002 if (sav_ofs)
10003 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
10005 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
10006 build_int_cst (TREE_TYPE (reg), n_reg));
10007 u = fold_convert (sizetype, u);
10008 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
10009 t = fold_build_pointer_plus (t, u);
10011 /* _Decimal32 varargs are located in the second word of the 64-bit
10012 FP register for 32-bit binaries. */
10013 if (!TARGET_POWERPC64
10014 && TARGET_HARD_FLOAT && TARGET_FPRS
10015 && TYPE_MODE (type) == SDmode)
10016 t = fold_build_pointer_plus_hwi (t, size);
10018 gimplify_assign (addr, t, pre_p);
10020 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
10022 stmt = gimple_build_label (lab_false);
10023 gimple_seq_add_stmt (pre_p, stmt);
10025 if ((n_reg == 2 && !regalign) || n_reg > 2)
10027 /* Ensure that we don't find any more args in regs.
10028 Alignment has already been taken care of for the special cases. */
10029 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
10033 /* ... otherwise out of the overflow area. */
10035 /* Care for on-stack alignment if needed. */
10036 t = ovf;
10037 if (align != 1)
10039 t = fold_build_pointer_plus_hwi (t, align - 1);
10040 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
10041 build_int_cst (TREE_TYPE (t), -align));
10043 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
10045 gimplify_assign (unshare_expr (addr), t, pre_p);
10047 t = fold_build_pointer_plus_hwi (t, size);
10048 gimplify_assign (unshare_expr (ovf), t, pre_p);
10050 if (lab_over)
10052 stmt = gimple_build_label (lab_over);
10053 gimple_seq_add_stmt (pre_p, stmt);
10056 if (STRICT_ALIGNMENT
10057 && (TYPE_ALIGN (type)
10058 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
10060 /* The value (of type complex double, for example) may not be
10061 aligned in memory in the saved registers, so copy via a
10062 temporary. (This is the same code as used for SPARC.) */
10063 tree tmp = create_tmp_var (type, "va_arg_tmp");
10064 tree dest_addr = build_fold_addr_expr (tmp);
10066 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
10067 3, dest_addr, addr, size_int (rsize * 4));
10069 gimplify_and_add (copy, pre_p);
10070 addr = dest_addr;
10073 addr = fold_convert (ptrtype, addr);
10074 return build_va_arg_indirect_ref (addr);
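/* The gimple emitted above corresponds roughly to this C sketch of
   the SVR4 va_arg algorithm for a register-class TYPE (illustrative
   only; align () stands for rounding up to the required boundary):

     if (ap->reg > 8 - n_reg)
       goto on_stack;
     addr = ap->reg_save_area + sav_ofs + ap->reg * sav_scale;
     ap->reg += n_reg;
     goto done;
   on_stack:
     ap->reg = 8;
     addr = align (ap->overflow_arg_area, align);
     ap->overflow_arg_area = addr + size;
   done:
     result = *(TYPE *) addr;  */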
10077 /* Builtins. */
10079 static void
10080 def_builtin (const char *name, tree type, enum rs6000_builtins code)
10082 tree t;
10083 unsigned classify = rs6000_builtin_info[(int)code].attr;
10084 const char *attr_string = "";
10086 gcc_assert (name != NULL);
10087 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
10089 if (rs6000_builtin_decls[(int)code])
10090 fatal_error ("internal error: builtin function %s already processed", name);
10092 rs6000_builtin_decls[(int)code] = t =
10093 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
10095 /* Set any special attributes. */
10096 if ((classify & RS6000_BTC_CONST) != 0)
10098 /* const function, function only depends on the inputs. */
10099 TREE_READONLY (t) = 1;
10100 TREE_NOTHROW (t) = 1;
10101 attr_string = ", const";
10103 else if ((classify & RS6000_BTC_PURE) != 0)
10105 /* pure function, function can read global memory, but does not set any
10106 external state. */
10107 DECL_PURE_P (t) = 1;
10108 TREE_NOTHROW (t) = 1;
10109 attr_string = ", pure";
10111 else if ((classify & RS6000_BTC_FP) != 0)
10113 /* Function is a math function. If rounding mode is on, then treat the
10114 function as not reading global memory, but it can have arbitrary side
10115 effects. If it is off, then assume the function is a const function.
10116 This mimics the ATTR_MATHFN_FPROUNDING attribute in
10117 builtin-attribute.def that is used for the math functions. */
10118 TREE_NOTHROW (t) = 1;
10119 if (flag_rounding_math)
10121 DECL_PURE_P (t) = 1;
10122 DECL_IS_NOVOPS (t) = 1;
10123 attr_string = ", fp, pure";
10125 else
10127 TREE_READONLY (t) = 1;
10128 attr_string = ", fp, const";
10131 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
10132 gcc_unreachable ();
10134 if (TARGET_DEBUG_BUILTIN)
10135 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
10136 (int)code, name, attr_string);
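/* The builtin tables that follow rely on the classic X-macro trick:
   rs6000-builtin.def invokes one RS6000_BUILTIN_* macro per builtin,
   and before each #include exactly one of those macros is redefined
   to emit a table entry while the rest expand to nothing.  A minimal
   sketch of the pattern (hypothetical entry, not the real .def):

     #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
       { MASK, ICODE, NAME, ENUM },
     static const struct builtin_description bdesc_2arg[] =
     {
     #include "rs6000-builtin.def"
     };  */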
10139 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
10141 #undef RS6000_BUILTIN_1
10142 #undef RS6000_BUILTIN_2
10143 #undef RS6000_BUILTIN_3
10144 #undef RS6000_BUILTIN_A
10145 #undef RS6000_BUILTIN_D
10146 #undef RS6000_BUILTIN_E
10147 #undef RS6000_BUILTIN_P
10148 #undef RS6000_BUILTIN_Q
10149 #undef RS6000_BUILTIN_S
10150 #undef RS6000_BUILTIN_X
10152 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10153 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10154 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
10155 { MASK, ICODE, NAME, ENUM },
10157 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10158 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10159 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10160 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10161 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10162 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10163 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10165 static const struct builtin_description bdesc_3arg[] =
10167 #include "rs6000-builtin.def"
10170 /* DST operations: void foo (void *, const int, const char). */
10172 #undef RS6000_BUILTIN_1
10173 #undef RS6000_BUILTIN_2
10174 #undef RS6000_BUILTIN_3
10175 #undef RS6000_BUILTIN_A
10176 #undef RS6000_BUILTIN_D
10177 #undef RS6000_BUILTIN_E
10178 #undef RS6000_BUILTIN_P
10179 #undef RS6000_BUILTIN_Q
10180 #undef RS6000_BUILTIN_S
10181 #undef RS6000_BUILTIN_X
10183 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10184 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10185 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10186 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10187 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
10188 { MASK, ICODE, NAME, ENUM },
10190 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10191 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10192 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10193 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10194 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10196 static const struct builtin_description bdesc_dst[] =
10198 #include "rs6000-builtin.def"
10201 /* Simple binary operations: VECc = foo (VECa, VECb). */
10203 #undef RS6000_BUILTIN_1
10204 #undef RS6000_BUILTIN_2
10205 #undef RS6000_BUILTIN_3
10206 #undef RS6000_BUILTIN_A
10207 #undef RS6000_BUILTIN_D
10208 #undef RS6000_BUILTIN_E
10209 #undef RS6000_BUILTIN_P
10210 #undef RS6000_BUILTIN_Q
10211 #undef RS6000_BUILTIN_S
10212 #undef RS6000_BUILTIN_X
10214 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10215 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
10216 { MASK, ICODE, NAME, ENUM },
10218 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10219 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10220 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10221 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10222 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10223 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10224 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10225 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10227 static const struct builtin_description bdesc_2arg[] =
10229 #include "rs6000-builtin.def"
10232 #undef RS6000_BUILTIN_1
10233 #undef RS6000_BUILTIN_2
10234 #undef RS6000_BUILTIN_3
10235 #undef RS6000_BUILTIN_A
10236 #undef RS6000_BUILTIN_D
10237 #undef RS6000_BUILTIN_E
10238 #undef RS6000_BUILTIN_P
10239 #undef RS6000_BUILTIN_Q
10240 #undef RS6000_BUILTIN_S
10241 #undef RS6000_BUILTIN_X
10243 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10244 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10245 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10246 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10247 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10248 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10249 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
10250 { MASK, ICODE, NAME, ENUM },
10252 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10253 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10254 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10256 /* AltiVec predicates. */
10258 static const struct builtin_description bdesc_altivec_preds[] =
10260 #include "rs6000-builtin.def"
10263 /* SPE predicates. */
10264 #undef RS6000_BUILTIN_1
10265 #undef RS6000_BUILTIN_2
10266 #undef RS6000_BUILTIN_3
10267 #undef RS6000_BUILTIN_A
10268 #undef RS6000_BUILTIN_D
10269 #undef RS6000_BUILTIN_E
10270 #undef RS6000_BUILTIN_P
10271 #undef RS6000_BUILTIN_Q
10272 #undef RS6000_BUILTIN_S
10273 #undef RS6000_BUILTIN_X
10275 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10276 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10277 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10278 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10279 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10280 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10281 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10282 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10283 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
10284 { MASK, ICODE, NAME, ENUM },
10286 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10288 static const struct builtin_description bdesc_spe_predicates[] =
10290 #include "rs6000-builtin.def"
10293 /* SPE evsel predicates. */
10294 #undef RS6000_BUILTIN_1
10295 #undef RS6000_BUILTIN_2
10296 #undef RS6000_BUILTIN_3
10297 #undef RS6000_BUILTIN_A
10298 #undef RS6000_BUILTIN_D
10299 #undef RS6000_BUILTIN_E
10300 #undef RS6000_BUILTIN_P
10301 #undef RS6000_BUILTIN_Q
10302 #undef RS6000_BUILTIN_S
10303 #undef RS6000_BUILTIN_X
10305 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10306 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10307 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10308 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10309 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10310 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
10311 { MASK, ICODE, NAME, ENUM },
10313 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10314 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10315 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10316 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10318 static const struct builtin_description bdesc_spe_evsel[] =
10320 #include "rs6000-builtin.def"
10323 /* PAIRED predicates. */
10324 #undef RS6000_BUILTIN_1
10325 #undef RS6000_BUILTIN_2
10326 #undef RS6000_BUILTIN_3
10327 #undef RS6000_BUILTIN_A
10328 #undef RS6000_BUILTIN_D
10329 #undef RS6000_BUILTIN_E
10330 #undef RS6000_BUILTIN_P
10331 #undef RS6000_BUILTIN_Q
10332 #undef RS6000_BUILTIN_S
10333 #undef RS6000_BUILTIN_X
10335 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10336 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10337 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10338 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10339 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10340 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10341 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10342 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
10343 { MASK, ICODE, NAME, ENUM },
10345 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10346 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10348 static const struct builtin_description bdesc_paired_preds[] =
10350 #include "rs6000-builtin.def"
10353 /* ABS* operations. */
10355 #undef RS6000_BUILTIN_1
10356 #undef RS6000_BUILTIN_2
10357 #undef RS6000_BUILTIN_3
10358 #undef RS6000_BUILTIN_A
10359 #undef RS6000_BUILTIN_D
10360 #undef RS6000_BUILTIN_E
10361 #undef RS6000_BUILTIN_P
10362 #undef RS6000_BUILTIN_Q
10363 #undef RS6000_BUILTIN_S
10364 #undef RS6000_BUILTIN_X
10366 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10367 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10368 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10369 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
10370 { MASK, ICODE, NAME, ENUM },
10372 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10373 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10374 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10375 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10376 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10377 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10379 static const struct builtin_description bdesc_abs[] =
10381 #include "rs6000-builtin.def"
10384 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
10385 foo (VECa). */
10387 #undef RS6000_BUILTIN_1
10388 #undef RS6000_BUILTIN_2
10389 #undef RS6000_BUILTIN_3
10390 #undef RS6000_BUILTIN_A
10391 #undef RS6000_BUILTIN_E
10392 #undef RS6000_BUILTIN_D
10393 #undef RS6000_BUILTIN_P
10394 #undef RS6000_BUILTIN_Q
10395 #undef RS6000_BUILTIN_S
10396 #undef RS6000_BUILTIN_X
10398 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
10399 { MASK, ICODE, NAME, ENUM },
10401 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10402 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10403 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10404 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10405 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10406 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10407 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10408 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10409 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10411 static const struct builtin_description bdesc_1arg[] =
10413 #include "rs6000-builtin.def"
10416 #undef RS6000_BUILTIN_1
10417 #undef RS6000_BUILTIN_2
10418 #undef RS6000_BUILTIN_3
10419 #undef RS6000_BUILTIN_A
10420 #undef RS6000_BUILTIN_D
10421 #undef RS6000_BUILTIN_E
10422 #undef RS6000_BUILTIN_P
10423 #undef RS6000_BUILTIN_Q
10424 #undef RS6000_BUILTIN_S
10425 #undef RS6000_BUILTIN_X
10427 /* Return true if a builtin function is overloaded. */
10428 bool
10429 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
10431 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
10434 /* Expand an expression EXP that calls a builtin without arguments. */
10435 static rtx
10436 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
10438 rtx pat;
10439 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10441 if (icode == CODE_FOR_nothing)
10442 /* Builtin not supported on this processor. */
10443 return 0;
10445 if (target == 0
10446 || GET_MODE (target) != tmode
10447 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10448 target = gen_reg_rtx (tmode);
10450 pat = GEN_FCN (icode) (target);
10451 if (! pat)
10452 return 0;
10453 emit_insn (pat);
10455 return target;
10459 static rtx
10460 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
10462 rtx pat;
10463 tree arg0 = CALL_EXPR_ARG (exp, 0);
10464 rtx op0 = expand_normal (arg0);
10465 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10466 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10468 if (icode == CODE_FOR_nothing)
10469 /* Builtin not supported on this processor. */
10470 return 0;
10472 /* If we got invalid arguments, bail out before generating bad rtl. */
10473 if (arg0 == error_mark_node)
10474 return const0_rtx;
10476 if (icode == CODE_FOR_altivec_vspltisb
10477 || icode == CODE_FOR_altivec_vspltish
10478 || icode == CODE_FOR_altivec_vspltisw
10479 || icode == CODE_FOR_spe_evsplatfi
10480 || icode == CODE_FOR_spe_evsplati)
10482 /* Only allow 5-bit *signed* literals. */
10483 if (GET_CODE (op0) != CONST_INT
10484 || INTVAL (op0) > 15
10485 || INTVAL (op0) < -16)
10487 error ("argument 1 must be a 5-bit signed literal");
10488 return const0_rtx;
10492 if (target == 0
10493 || GET_MODE (target) != tmode
10494 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10495 target = gen_reg_rtx (tmode);
10497 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10498 op0 = copy_to_mode_reg (mode0, op0);
10500 pat = GEN_FCN (icode) (target, op0);
10501 if (! pat)
10502 return 0;
10503 emit_insn (pat);
10505 return target;
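/* Example of the 5-bit signed literal check above (illustrative):
   vec_splat_s32 (-16) and vec_splat_s32 (15) are accepted, while
   vec_splat_s32 (16) or a run-time variable is diagnosed, because
   vspltisw can only encode immediates in the range -16..15.  */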
10508 static rtx
10509 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
10511 rtx pat, scratch1, scratch2;
10512 tree arg0 = CALL_EXPR_ARG (exp, 0);
10513 rtx op0 = expand_normal (arg0);
10514 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10515 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10517 /* If we have invalid arguments, bail out before generating bad rtl. */
10518 if (arg0 == error_mark_node)
10519 return const0_rtx;
10521 if (target == 0
10522 || GET_MODE (target) != tmode
10523 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10524 target = gen_reg_rtx (tmode);
10526 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10527 op0 = copy_to_mode_reg (mode0, op0);
10529 scratch1 = gen_reg_rtx (mode0);
10530 scratch2 = gen_reg_rtx (mode0);
10532 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
10533 if (! pat)
10534 return 0;
10535 emit_insn (pat);
10537 return target;
10540 static rtx
10541 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
10543 rtx pat;
10544 tree arg0 = CALL_EXPR_ARG (exp, 0);
10545 tree arg1 = CALL_EXPR_ARG (exp, 1);
10546 rtx op0 = expand_normal (arg0);
10547 rtx op1 = expand_normal (arg1);
10548 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10549 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10550 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10552 if (icode == CODE_FOR_nothing)
10553 /* Builtin not supported on this processor. */
10554 return 0;
10556 /* If we got invalid arguments, bail out before generating bad rtl. */
10557 if (arg0 == error_mark_node || arg1 == error_mark_node)
10558 return const0_rtx;
10560 if (icode == CODE_FOR_altivec_vcfux
10561 || icode == CODE_FOR_altivec_vcfsx
10562 || icode == CODE_FOR_altivec_vctsxs
10563 || icode == CODE_FOR_altivec_vctuxs
10564 || icode == CODE_FOR_altivec_vspltb
10565 || icode == CODE_FOR_altivec_vsplth
10566 || icode == CODE_FOR_altivec_vspltw
10567 || icode == CODE_FOR_spe_evaddiw
10568 || icode == CODE_FOR_spe_evldd
10569 || icode == CODE_FOR_spe_evldh
10570 || icode == CODE_FOR_spe_evldw
10571 || icode == CODE_FOR_spe_evlhhesplat
10572 || icode == CODE_FOR_spe_evlhhossplat
10573 || icode == CODE_FOR_spe_evlhhousplat
10574 || icode == CODE_FOR_spe_evlwhe
10575 || icode == CODE_FOR_spe_evlwhos
10576 || icode == CODE_FOR_spe_evlwhou
10577 || icode == CODE_FOR_spe_evlwhsplat
10578 || icode == CODE_FOR_spe_evlwwsplat
10579 || icode == CODE_FOR_spe_evrlwi
10580 || icode == CODE_FOR_spe_evslwi
10581 || icode == CODE_FOR_spe_evsrwis
10582 || icode == CODE_FOR_spe_evsubifw
10583 || icode == CODE_FOR_spe_evsrwiu)
10585 /* Only allow 5-bit unsigned literals. */
10586 STRIP_NOPS (arg1);
10587 if (TREE_CODE (arg1) != INTEGER_CST
10588 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10590 error ("argument 2 must be a 5-bit unsigned literal");
10591 return const0_rtx;
10595 if (target == 0
10596 || GET_MODE (target) != tmode
10597 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10598 target = gen_reg_rtx (tmode);
10600 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10601 op0 = copy_to_mode_reg (mode0, op0);
10602 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10603 op1 = copy_to_mode_reg (mode1, op1);
10605 pat = GEN_FCN (icode) (target, op0, op1);
10606 if (! pat)
10607 return 0;
10608 emit_insn (pat);
10610 return target;
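/* Example of the 5-bit unsigned literal check above (illustrative):
   the element selector of __builtin_altivec_vspltw must be a
   compile-time constant with no bits outside 0x1f; a variable or an
   out-of-range value is rejected with the error just emitted.  */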
10613 static rtx
10614 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10616 rtx pat, scratch;
10617 tree cr6_form = CALL_EXPR_ARG (exp, 0);
10618 tree arg0 = CALL_EXPR_ARG (exp, 1);
10619 tree arg1 = CALL_EXPR_ARG (exp, 2);
10620 rtx op0 = expand_normal (arg0);
10621 rtx op1 = expand_normal (arg1);
10622 enum machine_mode tmode = SImode;
10623 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10624 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10625 int cr6_form_int;
10627 if (TREE_CODE (cr6_form) != INTEGER_CST)
10629 error ("argument 1 of __builtin_altivec_predicate must be a constant");
10630 return const0_rtx;
10632 else
10633 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
10635 gcc_assert (mode0 == mode1);
10637 /* If we have invalid arguments, bail out before generating bad rtl. */
10638 if (arg0 == error_mark_node || arg1 == error_mark_node)
10639 return const0_rtx;
10641 if (target == 0
10642 || GET_MODE (target) != tmode
10643 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10644 target = gen_reg_rtx (tmode);
10646 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10647 op0 = copy_to_mode_reg (mode0, op0);
10648 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10649 op1 = copy_to_mode_reg (mode1, op1);
10651 scratch = gen_reg_rtx (mode0);
10653 pat = GEN_FCN (icode) (scratch, op0, op1);
10654 if (! pat)
10655 return 0;
10656 emit_insn (pat);
10658 /* The vec_any* and vec_all* predicates use the same opcodes for two
10659 different operations, but the bits in CR6 will be different
10660 depending on what information we want. So we have to play tricks
10661 with CR6 to get the right bits out.
10663 If you think this is disgusting, look at the specs for the
10664 AltiVec predicates. */
10666 switch (cr6_form_int)
10668 case 0:
10669 emit_insn (gen_cr6_test_for_zero (target));
10670 break;
10671 case 1:
10672 emit_insn (gen_cr6_test_for_zero_reverse (target));
10673 break;
10674 case 2:
10675 emit_insn (gen_cr6_test_for_lt (target));
10676 break;
10677 case 3:
10678 emit_insn (gen_cr6_test_for_lt_reverse (target));
10679 break;
10680 default:
10681 error ("argument 1 of __builtin_altivec_predicate is out of range");
10682 break;
10685 return target;
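/* The four cr6_form values decoded above match the __CR6_* codes that
   altivec.h passes as the first argument of the *_p predicate
   builtins: 0 (__CR6_EQ), 1 (__CR6_EQ_REV), 2 (__CR6_LT) and
   3 (__CR6_LT_REV).  This is how vec_all_* and vec_any_* share one
   compare instruction while reading different CR6 bits.  */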
10688 static rtx
10689 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10691 rtx pat, addr;
10692 tree arg0 = CALL_EXPR_ARG (exp, 0);
10693 tree arg1 = CALL_EXPR_ARG (exp, 1);
10694 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10695 enum machine_mode mode0 = Pmode;
10696 enum machine_mode mode1 = Pmode;
10697 rtx op0 = expand_normal (arg0);
10698 rtx op1 = expand_normal (arg1);
10700 if (icode == CODE_FOR_nothing)
10701 /* Builtin not supported on this processor. */
10702 return 0;
10704 /* If we got invalid arguments, bail out before generating bad rtl. */
10705 if (arg0 == error_mark_node || arg1 == error_mark_node)
10706 return const0_rtx;
10708 if (target == 0
10709 || GET_MODE (target) != tmode
10710 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10711 target = gen_reg_rtx (tmode);
10713 op1 = copy_to_mode_reg (mode1, op1);
10715 if (op0 == const0_rtx)
10717 addr = gen_rtx_MEM (tmode, op1);
10719 else
10721 op0 = copy_to_mode_reg (mode0, op0);
10722 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10725 pat = GEN_FCN (icode) (target, addr);
10727 if (! pat)
10728 return 0;
10729 emit_insn (pat);
10731 return target;
10734 static rtx
10735 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10737 rtx pat, addr;
10738 tree arg0 = CALL_EXPR_ARG (exp, 0);
10739 tree arg1 = CALL_EXPR_ARG (exp, 1);
10740 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10741 enum machine_mode mode0 = Pmode;
10742 enum machine_mode mode1 = Pmode;
10743 rtx op0 = expand_normal (arg0);
10744 rtx op1 = expand_normal (arg1);
10746 if (icode == CODE_FOR_nothing)
10747 /* Builtin not supported on this processor. */
10748 return 0;
10750 /* If we got invalid arguments, bail out before generating bad rtl. */
10751 if (arg0 == error_mark_node || arg1 == error_mark_node)
10752 return const0_rtx;
10754 if (target == 0
10755 || GET_MODE (target) != tmode
10756 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10757 target = gen_reg_rtx (tmode);
10759 op1 = copy_to_mode_reg (mode1, op1);
10761 if (op0 == const0_rtx)
10763 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10765 else
10767 op0 = copy_to_mode_reg (mode0, op0);
10768 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10771 pat = GEN_FCN (icode) (target, addr);
10773 if (! pat)
10774 return 0;
10775 emit_insn (pat);
10777 return target;
10780 static rtx
10781 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10783 tree arg0 = CALL_EXPR_ARG (exp, 0);
10784 tree arg1 = CALL_EXPR_ARG (exp, 1);
10785 tree arg2 = CALL_EXPR_ARG (exp, 2);
10786 rtx op0 = expand_normal (arg0);
10787 rtx op1 = expand_normal (arg1);
10788 rtx op2 = expand_normal (arg2);
10789 rtx pat;
10790 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10791 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10792 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10794 /* Invalid arguments. Bail before doing anything stoopid! */
10795 if (arg0 == error_mark_node
10796 || arg1 == error_mark_node
10797 || arg2 == error_mark_node)
10798 return const0_rtx;
10800 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10801 op0 = copy_to_mode_reg (mode2, op0);
10802 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10803 op1 = copy_to_mode_reg (mode0, op1);
10804 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10805 op2 = copy_to_mode_reg (mode1, op2);
10807 pat = GEN_FCN (icode) (op1, op2, op0);
10808 if (pat)
10809 emit_insn (pat);
10810 return NULL_RTX;
10813 static rtx
10814 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10816 tree arg0 = CALL_EXPR_ARG (exp, 0);
10817 tree arg1 = CALL_EXPR_ARG (exp, 1);
10818 tree arg2 = CALL_EXPR_ARG (exp, 2);
10819 rtx op0 = expand_normal (arg0);
10820 rtx op1 = expand_normal (arg1);
10821 rtx op2 = expand_normal (arg2);
10822 rtx pat, addr;
10823 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10824 enum machine_mode mode1 = Pmode;
10825 enum machine_mode mode2 = Pmode;
10827 /* Invalid arguments. Bail before doing anything stoopid! */
10828 if (arg0 == error_mark_node
10829 || arg1 == error_mark_node
10830 || arg2 == error_mark_node)
10831 return const0_rtx;
10833 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10834 op0 = copy_to_mode_reg (tmode, op0);
10836 op2 = copy_to_mode_reg (mode2, op2);
10838 if (op1 == const0_rtx)
10840 addr = gen_rtx_MEM (tmode, op2);
10842 else
10844 op1 = copy_to_mode_reg (mode1, op1);
10845 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10848 pat = GEN_FCN (icode) (addr, op0);
10849 if (pat)
10850 emit_insn (pat);
10851 return NULL_RTX;
10854 static rtx
10855 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10857 tree arg0 = CALL_EXPR_ARG (exp, 0);
10858 tree arg1 = CALL_EXPR_ARG (exp, 1);
10859 tree arg2 = CALL_EXPR_ARG (exp, 2);
10860 rtx op0 = expand_normal (arg0);
10861 rtx op1 = expand_normal (arg1);
10862 rtx op2 = expand_normal (arg2);
10863 rtx pat, addr;
10864 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10865 enum machine_mode smode = insn_data[icode].operand[1].mode;
10866 enum machine_mode mode1 = Pmode;
10867 enum machine_mode mode2 = Pmode;
10869 /* Invalid arguments. Bail before doing anything stoopid! */
10870 if (arg0 == error_mark_node
10871 || arg1 == error_mark_node
10872 || arg2 == error_mark_node)
10873 return const0_rtx;
10875 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10876 op0 = copy_to_mode_reg (smode, op0);
10878 op2 = copy_to_mode_reg (mode2, op2);
10880 if (op1 == const0_rtx)
10882 addr = gen_rtx_MEM (tmode, op2);
10884 else
10886 op1 = copy_to_mode_reg (mode1, op1);
10887 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10890 pat = GEN_FCN (icode) (addr, op0);
10891 if (pat)
10892 emit_insn (pat);
10893 return NULL_RTX;
10896 static rtx
10897 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10899 rtx pat;
10900 tree arg0 = CALL_EXPR_ARG (exp, 0);
10901 tree arg1 = CALL_EXPR_ARG (exp, 1);
10902 tree arg2 = CALL_EXPR_ARG (exp, 2);
10903 rtx op0 = expand_normal (arg0);
10904 rtx op1 = expand_normal (arg1);
10905 rtx op2 = expand_normal (arg2);
10906 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10907 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10908 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10909 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10911 if (icode == CODE_FOR_nothing)
10912 /* Builtin not supported on this processor. */
10913 return 0;
10915 /* If we got invalid arguments, bail out before generating bad rtl. */
10916 if (arg0 == error_mark_node
10917 || arg1 == error_mark_node
10918 || arg2 == error_mark_node)
10919 return const0_rtx;
10921 /* Check and prepare argument depending on the instruction code.
10923 Note that a switch statement instead of the sequence of tests
10924 would be incorrect as many of the CODE_FOR values could be
10925 CODE_FOR_nothing and that would yield multiple alternatives
10926 with identical values. We'd never reach here at runtime in
10927 this case. */
10928 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10929 || icode == CODE_FOR_altivec_vsldoi_v4si
10930 || icode == CODE_FOR_altivec_vsldoi_v8hi
10931 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10933 /* Only allow 4-bit unsigned literals. */
10934 STRIP_NOPS (arg2);
10935 if (TREE_CODE (arg2) != INTEGER_CST
10936 || TREE_INT_CST_LOW (arg2) & ~0xf)
10938 error ("argument 3 must be a 4-bit unsigned literal");
10939 return const0_rtx;
10942 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10943 || icode == CODE_FOR_vsx_xxpermdi_v2di
10944 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10945 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10946 || icode == CODE_FOR_vsx_xxsldwi_v4si
10947 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10948 || icode == CODE_FOR_vsx_xxsldwi_v2di
10949 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10951 /* Only allow 2-bit unsigned literals. */
10952 STRIP_NOPS (arg2);
10953 if (TREE_CODE (arg2) != INTEGER_CST
10954 || TREE_INT_CST_LOW (arg2) & ~0x3)
10956 error ("argument 3 must be a 2-bit unsigned literal");
10957 return const0_rtx;
10960 else if (icode == CODE_FOR_vsx_set_v2df
10961 || icode == CODE_FOR_vsx_set_v2di)
10963 /* Only allow 1-bit unsigned literals. */
10964 STRIP_NOPS (arg2);
10965 if (TREE_CODE (arg2) != INTEGER_CST
10966 || TREE_INT_CST_LOW (arg2) & ~0x1)
10968 error ("argument 3 must be a 1-bit unsigned literal");
10969 return const0_rtx;
10972 else if (icode == CODE_FOR_crypto_vshasigmaw
10973 || icode == CODE_FOR_crypto_vshasigmad)
10975 /* Check whether the 2nd and 3rd arguments are integer constants and in
10976 range and prepare arguments. */
10977 STRIP_NOPS (arg1);
10978 if (TREE_CODE (arg1) != INTEGER_CST
10979 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
10981 error ("argument 2 must be 0 or 1");
10982 return const0_rtx;
10985 STRIP_NOPS (arg2);
10986 if (TREE_CODE (arg2) != INTEGER_CST
10987 || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15))
10989 error ("argument 3 must be in the range 0..15");
10990 return const0_rtx;
10994 if (target == 0
10995 || GET_MODE (target) != tmode
10996 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10997 target = gen_reg_rtx (tmode);
10999 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11000 op0 = copy_to_mode_reg (mode0, op0);
11001 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11002 op1 = copy_to_mode_reg (mode1, op1);
11003 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11004 op2 = copy_to_mode_reg (mode2, op2);
11006 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
11007 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
11008 else
11009 pat = GEN_FCN (icode) (target, op0, op1, op2);
11010 if (! pat)
11011 return 0;
11012 emit_insn (pat);
11014 return target;
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
                           bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}

/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
                            bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
        arg0 = CALL_EXPR_ARG (exp, 0);
        arg1 = CALL_EXPR_ARG (exp, 1);
        arg2 = CALL_EXPR_ARG (exp, 2);
        op0 = expand_normal (arg0);
        op1 = expand_normal (arg1);
        op2 = expand_normal (arg2);
        mode0 = insn_data[d->icode].operand[0].mode;
        mode1 = insn_data[d->icode].operand[1].mode;

        /* Invalid arguments, bail out before generating bad rtl.  */
        if (arg0 == error_mark_node
            || arg1 == error_mark_node
            || arg2 == error_mark_node)
          return const0_rtx;

        *expandedp = true;
        STRIP_NOPS (arg2);
        if (TREE_CODE (arg2) != INTEGER_CST
            || TREE_INT_CST_LOW (arg2) & ~0x3)
          {
            error ("argument to %qs must be a 2-bit unsigned literal", d->name);
            return const0_rtx;
          }

        if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
          op0 = copy_to_mode_reg (Pmode, op0);
        if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
          op1 = copy_to_mode_reg (mode1, op1);

        pat = GEN_FCN (d->icode) (op0, op1, op2);
        if (pat != 0)
          emit_insn (pat);

        return NULL_RTX;
      }

  return NULL_RTX;
}

/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}

/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}

/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}

/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}

/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
        return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
        emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
        return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
          || TREE_INT_CST_LOW (arg0) & ~0x3)
        {
          error ("argument to dss must be a 2-bit unsigned literal");
          return const0_rtx;
        }

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
                                        exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
                                        exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
                                        exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
                                        exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
                                        exp, target, true);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
                                        exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
                                        exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
                                        exp, target, false);
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}

/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}

/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};

/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
          || TREE_INT_CST_LOW (arg1) & ~0x1f)
        {
          error ("argument 2 must be a 5-bit unsigned literal");
          return const0_rtx;
        }
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
                                         exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
                                         exp, target);
    default:
      break;
    }

  d = bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
        return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
        emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}

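/* Expand a paired-single predicate builtin: emit one CCFP comparison, then
   read the CR bit selected by the constant form argument (0..3).  */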
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}

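/* Expand an SPE predicate builtin: emit one CC comparison, then read the CR
   bit selected by the constant form argument (all/any/upper/lower).  */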
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the OV/EQ/LT/GT bits for
     the all/any/upper/lower variants, respectively.  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
         could generate (ordered:SI (reg:CC xx) (const_int 0)), but
         that's ugly and will make validate_condition_mode die.
         So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}

/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}

/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else
    error ("Builtin function %s is not supported with the current options",
           name);
}

/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                       enum machine_mode mode ATTRIBUTE_UNUSED,
                       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = ((icode != CODE_FOR_nothing)
                           ? get_insn_name ((int)icode)
                           : "nothing");
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
        {
        default:                   name3 = "unknown";   break;
        case RS6000_BTC_SPECIAL:   name3 = "special";   break;
        case RS6000_BTC_UNARY:     name3 = "unary";     break;
        case RS6000_BTC_BINARY:    name3 = "binary";    break;
        case RS6000_BTC_TERNARY:   name3 = "ternary";   break;
        case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
        case RS6000_BTC_ABS:       name3 = "abs";       break;
        case RS6000_BTC_EVSEL:     name3 = "evsel";     break;
        case RS6000_BTC_DST:       name3 = "dst";       break;
        }

      fprintf (stderr,
               "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
               (name1) ? name1 : "---", fcode,
               (name2) ? name2 : "---", (int)icode,
               name3,
               func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
                                           ? CODE_FOR_bpermd_di
                                           : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
                                           target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
                                            ? CODE_FOR_rs6000_mftb_di
                                            : CODE_FOR_rs6000_mftb_si),
                                           target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
        int icode = (int) CODE_FOR_altivec_lvsr;
        enum machine_mode tmode = insn_data[icode].operand[0].mode;
        enum machine_mode mode = insn_data[icode].operand[1].mode;
        tree arg;
        rtx op, addr, pat;

        gcc_assert (TARGET_ALTIVEC);

        arg = CALL_EXPR_ARG (exp, 0);
        gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
        op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
        addr = memory_address (mode, op);
        if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
          op = addr;
        else
          {
            /* For the load case need to negate the address.  */
            op = gen_reg_rtx (GET_MODE (addr));
            emit_insn (gen_rtx_SET (VOIDmode, op,
                                    gen_rtx_NEG (GET_MODE (addr), addr)));
          }
        op = gen_rtx_MEM (mode, op);

        if (target == 0
            || GET_MODE (target) != tmode
            || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
          target = gen_reg_rtx (tmode);

        pat = GEN_FCN (icode) (target, op);
        if (!pat)
          return 0;
        emit_insn (pat);

        return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
         constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
        {
          exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
                                 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
        }
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}

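/* Create the builtin vector types and declare all of the rs6000 builtin
   functions appropriate for the current target options.  */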
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  enum machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
             (TARGET_PAIRED_FLOAT) ? ", paired" : "",
             (TARGET_SPE) ? ", spe" : "",
             (TARGET_ALTIVEC) ? ", altivec" : "",
             (TARGET_VSX) ? ", vsx" : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = build_vector_type (intDI_type_node, 2);
  V2DF_type_node = build_vector_type (double_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  void_type_internal_node = void_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector long", V2DI_type_node);
  TYPE_NAME (V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
  TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
  TYPE_NAME (bool_V2DI_type_node) = tdecl;

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
                                 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
                                      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
                                      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}

/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}

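/* Create the SPE builtin types and declare the SPE builtin functions
   (stores, loads, splats, predicates, and evsel variants).  */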
static void
spe_init_builtins (void)
{
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  const struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type_list (opaque_V2SI_type_node,
                                opaque_V2SI_type_node,
                                opaque_V2SI_type_node,
                                opaque_V2SI_type_node,
                                opaque_V2SI_type_node,
                                NULL_TREE);

  tree v2sf_ftype_4_v2sf
    = build_function_type_list (opaque_V2SF_type_node,
                                opaque_V2SF_type_node,
                                opaque_V2SF_type_node,
                                opaque_V2SF_type_node,
                                opaque_V2SF_type_node,
                                NULL_TREE);

  tree int_ftype_int_v2si_v2si
    = build_function_type_list (integer_type_node,
                                integer_type_node,
                                opaque_V2SI_type_node,
                                opaque_V2SI_type_node,
                                NULL_TREE);

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
                                integer_type_node,
                                opaque_V2SF_type_node,
                                opaque_V2SF_type_node,
                                NULL_TREE);

  tree void_ftype_v2si_puint_int
    = build_function_type_list (void_type_node,
                                opaque_V2SI_type_node,
                                puint_type_node,
                                integer_type_node,
                                NULL_TREE);

  tree void_ftype_v2si_puint_char
    = build_function_type_list (void_type_node,
                                opaque_V2SI_type_node,
                                puint_type_node,
                                char_type_node,
                                NULL_TREE);

  tree void_ftype_v2si_pv2si_int
    = build_function_type_list (void_type_node,
                                opaque_V2SI_type_node,
                                opaque_p_V2SI_type_node,
                                integer_type_node,
                                NULL_TREE);

  tree void_ftype_v2si_pv2si_char
    = build_function_type_list (void_type_node,
                                opaque_V2SI_type_node,
                                opaque_p_V2SI_type_node,
                                char_type_node,
                                NULL_TREE);

  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree int_ftype_void
    = build_function_type_list (integer_type_node, NULL_TREE);

  tree v2si_ftype_pv2si_int
    = build_function_type_list (opaque_V2SI_type_node,
                                opaque_p_V2SI_type_node,
                                integer_type_node,
                                NULL_TREE);

  tree v2si_ftype_puint_int
    = build_function_type_list (opaque_V2SI_type_node,
                                puint_type_node,
                                integer_type_node,
                                NULL_TREE);

  tree v2si_ftype_pushort_int
    = build_function_type_list (opaque_V2SI_type_node,
                                pushort_type_node,
                                integer_type_node,
                                NULL_TREE);

  tree v2si_ftype_signed_char
    = build_function_type_list (opaque_V2SI_type_node,
                                signed_char_type_node,
                                NULL_TREE);

  add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);

  /* Initialize irregular SPE builtins.  */
  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
        {
        case V2SImode:
          type = int_ftype_int_v2si_v2si;
          break;
        case V2SFmode:
          type = int_ftype_int_v2sf_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
        {
        case V2SImode:
          type = v2si_ftype_4_v2si;
          break;
        case V2SFmode:
          type = v2sf_ftype_4_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}

12590 static void
12591 paired_init_builtins (void)
12593 const struct builtin_description *d;
12594 size_t i;
12596 tree int_ftype_int_v2sf_v2sf
12597 = build_function_type_list (integer_type_node,
12598 integer_type_node,
12599 V2SF_type_node,
12600 V2SF_type_node,
12601 NULL_TREE);
12602 tree pcfloat_type_node =
12603 build_pointer_type (build_qualified_type
12604 (float_type_node, TYPE_QUAL_CONST));
12606 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
12607 long_integer_type_node,
12608 pcfloat_type_node,
12609 NULL_TREE);
12610 tree void_ftype_v2sf_long_pcfloat =
12611 build_function_type_list (void_type_node,
12612 V2SF_type_node,
12613 long_integer_type_node,
12614 pcfloat_type_node,
12615 NULL_TREE);
12618 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
12619 PAIRED_BUILTIN_LX);
12622 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
12623 PAIRED_BUILTIN_STX);
12625 /* Predicates. */
12626 d = bdesc_paired_preds;
12627 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
12629 tree type;
12631 if (TARGET_DEBUG_BUILTIN)
12632 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
12633 (int)i, get_insn_name (d->icode), (int)d->icode,
12634 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
12636 switch (insn_data[d->icode].operand[1].mode)
12638 case V2SFmode:
12639 type = int_ftype_int_v2sf_v2sf;
12640 break;
12641 default:
12642 gcc_unreachable ();
12645 def_builtin (d->name, type, d->code);
12649 static void
12650 altivec_init_builtins (void)
12652 const struct builtin_description *d;
12653 size_t i;
12654 tree ftype;
12655 tree decl;
12657 tree pvoid_type_node = build_pointer_type (void_type_node);
12659 tree pcvoid_type_node
12660 = build_pointer_type (build_qualified_type (void_type_node,
12661 TYPE_QUAL_CONST));
12663 tree int_ftype_opaque
12664 = build_function_type_list (integer_type_node,
12665 opaque_V4SI_type_node, NULL_TREE);
12666 tree opaque_ftype_opaque
12667 = build_function_type_list (integer_type_node, NULL_TREE);
12668 tree opaque_ftype_opaque_int
12669 = build_function_type_list (opaque_V4SI_type_node,
12670 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
12671 tree opaque_ftype_opaque_opaque_int
12672 = build_function_type_list (opaque_V4SI_type_node,
12673 opaque_V4SI_type_node, opaque_V4SI_type_node,
12674 integer_type_node, NULL_TREE);
12675 tree int_ftype_int_opaque_opaque
12676 = build_function_type_list (integer_type_node,
12677 integer_type_node, opaque_V4SI_type_node,
12678 opaque_V4SI_type_node, NULL_TREE);
12679 tree int_ftype_int_v4si_v4si
12680 = build_function_type_list (integer_type_node,
12681 integer_type_node, V4SI_type_node,
12682 V4SI_type_node, NULL_TREE);
12683 tree int_ftype_int_v2di_v2di
12684 = build_function_type_list (integer_type_node,
12685 integer_type_node, V2DI_type_node,
12686 V2DI_type_node, NULL_TREE);
12687 tree void_ftype_v4si
12688 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
12689 tree v8hi_ftype_void
12690 = build_function_type_list (V8HI_type_node, NULL_TREE);
12691 tree void_ftype_void
12692 = build_function_type_list (void_type_node, NULL_TREE);
12693 tree void_ftype_int
12694 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
12696 tree opaque_ftype_long_pcvoid
12697 = build_function_type_list (opaque_V4SI_type_node,
12698 long_integer_type_node, pcvoid_type_node,
12699 NULL_TREE);
12700 tree v16qi_ftype_long_pcvoid
12701 = build_function_type_list (V16QI_type_node,
12702 long_integer_type_node, pcvoid_type_node,
12703 NULL_TREE);
12704 tree v8hi_ftype_long_pcvoid
12705 = build_function_type_list (V8HI_type_node,
12706 long_integer_type_node, pcvoid_type_node,
12707 NULL_TREE);
12708 tree v4si_ftype_long_pcvoid
12709 = build_function_type_list (V4SI_type_node,
12710 long_integer_type_node, pcvoid_type_node,
12711 NULL_TREE);
12712 tree v4sf_ftype_long_pcvoid
12713 = build_function_type_list (V4SF_type_node,
12714 long_integer_type_node, pcvoid_type_node,
12715 NULL_TREE);
12716 tree v2df_ftype_long_pcvoid
12717 = build_function_type_list (V2DF_type_node,
12718 long_integer_type_node, pcvoid_type_node,
12719 NULL_TREE);
12720 tree v2di_ftype_long_pcvoid
12721 = build_function_type_list (V2DI_type_node,
12722 long_integer_type_node, pcvoid_type_node,
12723 NULL_TREE);
12725 tree void_ftype_opaque_long_pvoid
12726 = build_function_type_list (void_type_node,
12727 opaque_V4SI_type_node, long_integer_type_node,
12728 pvoid_type_node, NULL_TREE);
12729 tree void_ftype_v4si_long_pvoid
12730 = build_function_type_list (void_type_node,
12731 V4SI_type_node, long_integer_type_node,
12732 pvoid_type_node, NULL_TREE);
12733 tree void_ftype_v16qi_long_pvoid
12734 = build_function_type_list (void_type_node,
12735 V16QI_type_node, long_integer_type_node,
12736 pvoid_type_node, NULL_TREE);
12737 tree void_ftype_v8hi_long_pvoid
12738 = build_function_type_list (void_type_node,
12739 V8HI_type_node, long_integer_type_node,
12740 pvoid_type_node, NULL_TREE);
12741 tree void_ftype_v4sf_long_pvoid
12742 = build_function_type_list (void_type_node,
12743 V4SF_type_node, long_integer_type_node,
12744 pvoid_type_node, NULL_TREE);
12745 tree void_ftype_v2df_long_pvoid
12746 = build_function_type_list (void_type_node,
12747 V2DF_type_node, long_integer_type_node,
12748 pvoid_type_node, NULL_TREE);
12749 tree void_ftype_v2di_long_pvoid
12750 = build_function_type_list (void_type_node,
12751 V2DI_type_node, long_integer_type_node,
12752 pvoid_type_node, NULL_TREE);
12753 tree int_ftype_int_v8hi_v8hi
12754 = build_function_type_list (integer_type_node,
12755 integer_type_node, V8HI_type_node,
12756 V8HI_type_node, NULL_TREE);
12757 tree int_ftype_int_v16qi_v16qi
12758 = build_function_type_list (integer_type_node,
12759 integer_type_node, V16QI_type_node,
12760 V16QI_type_node, NULL_TREE);
12761 tree int_ftype_int_v4sf_v4sf
12762 = build_function_type_list (integer_type_node,
12763 integer_type_node, V4SF_type_node,
12764 V4SF_type_node, NULL_TREE);
12765 tree int_ftype_int_v2df_v2df
12766 = build_function_type_list (integer_type_node,
12767 integer_type_node, V2DF_type_node,
12768 V2DF_type_node, NULL_TREE);
12769 tree v2di_ftype_v2di
12770 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
12771 tree v4si_ftype_v4si
12772 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12773 tree v8hi_ftype_v8hi
12774 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12775 tree v16qi_ftype_v16qi
12776 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12777 tree v4sf_ftype_v4sf
12778 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12779 tree v2df_ftype_v2df
12780 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12781 tree void_ftype_pcvoid_int_int
12782 = build_function_type_list (void_type_node,
12783 pcvoid_type_node, integer_type_node,
12784 integer_type_node, NULL_TREE);
12786 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12787 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12788 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12789 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12790 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12791 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12792 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12793 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12794 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12795 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12796 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12797 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12798 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12799 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12800 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12801 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12802 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12803 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12804 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12805 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12806 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12807 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12808 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12809 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12810 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12811 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12812 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12813 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12814 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12815 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12817 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12818 VSX_BUILTIN_LXVD2X_V2DF);
12819 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12820 VSX_BUILTIN_LXVD2X_V2DI);
12821 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12822 VSX_BUILTIN_LXVW4X_V4SF);
12823 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12824 VSX_BUILTIN_LXVW4X_V4SI);
12825 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12826 VSX_BUILTIN_LXVW4X_V8HI);
12827 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12828 VSX_BUILTIN_LXVW4X_V16QI);
12829 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12830 VSX_BUILTIN_STXVD2X_V2DF);
12831 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12832 VSX_BUILTIN_STXVD2X_V2DI);
12833 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12834 VSX_BUILTIN_STXVW4X_V4SF);
12835 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12836 VSX_BUILTIN_STXVW4X_V4SI);
12837 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12838 VSX_BUILTIN_STXVW4X_V8HI);
12839 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12840 VSX_BUILTIN_STXVW4X_V16QI);
12841 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12842 VSX_BUILTIN_VEC_LD);
12843 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12844 VSX_BUILTIN_VEC_ST);
12846 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12847 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12848 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12850 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12851 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12852 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12853 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12854 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12855 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12856 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12857 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12858 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12859 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12860 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12861 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
12863 /* Cell builtins. */
12864 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12865 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12866 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12867 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12869 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12870 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12871 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12872 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12874 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12875 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12876 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12877 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12879 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12880 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12881 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12882 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12884 /* Add the DST variants. */
12885 d = bdesc_dst;
12886 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12887 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12889 /* Initialize the predicates. */
12890 d = bdesc_altivec_preds;
12891 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12893 enum machine_mode mode1;
12894 tree type;
12896 if (rs6000_overloaded_builtin_p (d->code))
12897 mode1 = VOIDmode;
12898 else
12899 mode1 = insn_data[d->icode].operand[1].mode;
12901 switch (mode1)
12903 case VOIDmode:
12904 type = int_ftype_int_opaque_opaque;
12905 break;
12906 case V2DImode:
12907 type = int_ftype_int_v2di_v2di;
12908 break;
12909 case V4SImode:
12910 type = int_ftype_int_v4si_v4si;
12911 break;
12912 case V8HImode:
12913 type = int_ftype_int_v8hi_v8hi;
12914 break;
12915 case V16QImode:
12916 type = int_ftype_int_v16qi_v16qi;
12917 break;
12918 case V4SFmode:
12919 type = int_ftype_int_v4sf_v4sf;
12920 break;
12921 case V2DFmode:
12922 type = int_ftype_int_v2df_v2df;
12923 break;
12924 default:
12925 gcc_unreachable ();
12928 def_builtin (d->name, type, d->code);
12931 /* Initialize the abs* operators. */
12932 d = bdesc_abs;
12933 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12935 enum machine_mode mode0;
12936 tree type;
12938 mode0 = insn_data[d->icode].operand[0].mode;
12940 switch (mode0)
12942 case V2DImode:
12943 type = v2di_ftype_v2di;
12944 break;
12945 case V4SImode:
12946 type = v4si_ftype_v4si;
12947 break;
12948 case V8HImode:
12949 type = v8hi_ftype_v8hi;
12950 break;
12951 case V16QImode:
12952 type = v16qi_ftype_v16qi;
12953 break;
12954 case V4SFmode:
12955 type = v4sf_ftype_v4sf;
12956 break;
12957 case V2DFmode:
12958 type = v2df_ftype_v2df;
12959 break;
12960 default:
12961 gcc_unreachable ();
12964 def_builtin (d->name, type, d->code);
12967 /* Initialize target builtin that implements
12968 targetm.vectorize.builtin_mask_for_load. */
12970 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12971 v16qi_ftype_long_pcvoid,
12972 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12973 BUILT_IN_MD, NULL, NULL_TREE);
12974 TREE_READONLY (decl) = 1;
12975 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12976 altivec_builtin_mask_for_load = decl;
12978 /* Access to the vec_init patterns. */
12979 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12980 integer_type_node, integer_type_node,
12981 integer_type_node, NULL_TREE);
12982 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12984 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12985 short_integer_type_node,
12986 short_integer_type_node,
12987 short_integer_type_node,
12988 short_integer_type_node,
12989 short_integer_type_node,
12990 short_integer_type_node,
12991 short_integer_type_node, NULL_TREE);
12992 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12994 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12995 char_type_node, char_type_node,
12996 char_type_node, char_type_node,
12997 char_type_node, char_type_node,
12998 char_type_node, char_type_node,
12999 char_type_node, char_type_node,
13000 char_type_node, char_type_node,
13001 char_type_node, char_type_node,
13002 char_type_node, NULL_TREE);
13003 def_builtin ("__builtin_vec_init_v16qi", ftype,
13004 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
13006 ftype = build_function_type_list (V4SF_type_node, float_type_node,
13007 float_type_node, float_type_node,
13008 float_type_node, NULL_TREE);
13009 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
13011 /* VSX builtins. */
13012 ftype = build_function_type_list (V2DF_type_node, double_type_node,
13013 double_type_node, NULL_TREE);
13014 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
13016 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
13017 intDI_type_node, NULL_TREE);
13018 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
13020 /* Access to the vec_set patterns. */
13021 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
13022 intSI_type_node,
13023 integer_type_node, NULL_TREE);
13024 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
13026 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
13027 intHI_type_node,
13028 integer_type_node, NULL_TREE);
13029 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
13031 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
13032 intQI_type_node,
13033 integer_type_node, NULL_TREE);
13034 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
13036 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
13037 float_type_node,
13038 integer_type_node, NULL_TREE);
13039 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
13041 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
13042 double_type_node,
13043 integer_type_node, NULL_TREE);
13044 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
13046 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
13047 intDI_type_node,
13048 integer_type_node, NULL_TREE);
13049 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
13051 /* Access to the vec_extract patterns. */
13052 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
13053 integer_type_node, NULL_TREE);
13054 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
13056 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
13057 integer_type_node, NULL_TREE);
13058 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
13060 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
13061 integer_type_node, NULL_TREE);
13062 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
13064 ftype = build_function_type_list (float_type_node, V4SF_type_node,
13065 integer_type_node, NULL_TREE);
13066 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
13068 ftype = build_function_type_list (double_type_node, V2DF_type_node,
13069 integer_type_node, NULL_TREE);
13070 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
13072 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
13073 integer_type_node, NULL_TREE);
13074 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
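/* Illustrative sketch (not part of the blob above; assumes compilation
   with -maltivec): once registered, the builtins defined here are callable
   directly from user code with the signatures given, e.g.
   __builtin_altivec_lvx takes a long offset plus a const void * address
   and yields a V4SI value.  */
#if 0
#include <altivec.h>

int
first_element (const int *p)
{
  /* Load 16 aligned bytes at p, then extract element 0.  */
  __vector signed int v = __builtin_altivec_lvx (0, p);
  return __builtin_vec_ext_v4si (v, 0);
}
#endif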
13077 /* Hash function for builtin functions with up to 3 arguments and a return
13078 type. */
13079 static unsigned
13080 builtin_hash_function (const void *hash_entry)
13082 unsigned ret = 0;
13083 int i;
13084 const struct builtin_hash_struct *bh =
13085 (const struct builtin_hash_struct *) hash_entry;
13087 for (i = 0; i < 4; i++)
13089 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
13090 ret = (ret * 2) + bh->uns_p[i];
13093 return ret;
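/* Illustrative standalone sketch of the hashing scheme above (hypothetical
   names; NMODES stands in for MAX_MACHINE_MODE): each of the four
   mode/signedness pairs is folded in positionally, so two signatures that
   differ in any mode or uns_p flag hash differently.  */
#if 0
#define NMODES 256  /* stand-in for MAX_MACHINE_MODE */

unsigned
hash4 (const unsigned mode[4], const unsigned uns[4])
{
  unsigned ret = 0;
  int i;
  for (i = 0; i < 4; i++)
    {
      ret = ret * NMODES + mode[i];  /* mix in the machine mode */
      ret = ret * 2 + uns[i];        /* mix in the signedness flag */
    }
  return ret;
}
#endif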
13096 /* Compare builtin hash entries H1 and H2 for equivalence. */
13097 static int
13098 builtin_hash_eq (const void *h1, const void *h2)
13100 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
13101 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
13103 return ((p1->mode[0] == p2->mode[0])
13104 && (p1->mode[1] == p2->mode[1])
13105 && (p1->mode[2] == p2->mode[2])
13106 && (p1->mode[3] == p2->mode[3])
13107 && (p1->uns_p[0] == p2->uns_p[0])
13108 && (p1->uns_p[1] == p2->uns_p[1])
13109 && (p1->uns_p[2] == p2->uns_p[2])
13110 && (p1->uns_p[3] == p2->uns_p[3]));
13113 /* Map types for builtin functions with an explicit return type and up to 3
13114 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
13115 of the unused arguments. */
13116 static tree
13117 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
13118 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
13119 enum rs6000_builtins builtin, const char *name)
13121 struct builtin_hash_struct h;
13122 struct builtin_hash_struct *h2;
13123 void **found;
13124 int num_args = 3;
13125 int i;
13126 tree ret_type = NULL_TREE;
13127 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
13129 /* Create builtin_hash_table. */
13130 if (builtin_hash_table == NULL)
13131 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
13132 builtin_hash_eq, NULL);
13134 h.type = NULL_TREE;
13135 h.mode[0] = mode_ret;
13136 h.mode[1] = mode_arg0;
13137 h.mode[2] = mode_arg1;
13138 h.mode[3] = mode_arg2;
13139 h.uns_p[0] = 0;
13140 h.uns_p[1] = 0;
13141 h.uns_p[2] = 0;
13142 h.uns_p[3] = 0;
13144 /* If the builtin produces unsigned results or takes unsigned
13145 arguments, and it is returned as a decl for the vectorizer (such as
13146 widening multiplies, permute), make sure the arguments and return value
13147 are type correct. */
13148 switch (builtin)
13150 /* unsigned 1 argument functions. */
13151 case CRYPTO_BUILTIN_VSBOX:
13152 case P8V_BUILTIN_VGBBD:
13153 h.uns_p[0] = 1;
13154 h.uns_p[1] = 1;
13155 break;
13157 /* unsigned 2 argument functions. */
13158 case ALTIVEC_BUILTIN_VMULEUB_UNS:
13159 case ALTIVEC_BUILTIN_VMULEUH_UNS:
13160 case ALTIVEC_BUILTIN_VMULOUB_UNS:
13161 case ALTIVEC_BUILTIN_VMULOUH_UNS:
13162 case CRYPTO_BUILTIN_VCIPHER:
13163 case CRYPTO_BUILTIN_VCIPHERLAST:
13164 case CRYPTO_BUILTIN_VNCIPHER:
13165 case CRYPTO_BUILTIN_VNCIPHERLAST:
13166 case CRYPTO_BUILTIN_VPMSUMB:
13167 case CRYPTO_BUILTIN_VPMSUMH:
13168 case CRYPTO_BUILTIN_VPMSUMW:
13169 case CRYPTO_BUILTIN_VPMSUMD:
13170 case CRYPTO_BUILTIN_VPMSUM:
13171 h.uns_p[0] = 1;
13172 h.uns_p[1] = 1;
13173 h.uns_p[2] = 1;
13174 break;
13176 /* unsigned 3 argument functions. */
13177 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
13178 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
13179 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
13180 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
13181 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
13182 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
13183 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
13184 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
13185 case VSX_BUILTIN_VPERM_16QI_UNS:
13186 case VSX_BUILTIN_VPERM_8HI_UNS:
13187 case VSX_BUILTIN_VPERM_4SI_UNS:
13188 case VSX_BUILTIN_VPERM_2DI_UNS:
13189 case VSX_BUILTIN_XXSEL_16QI_UNS:
13190 case VSX_BUILTIN_XXSEL_8HI_UNS:
13191 case VSX_BUILTIN_XXSEL_4SI_UNS:
13192 case VSX_BUILTIN_XXSEL_2DI_UNS:
13193 case CRYPTO_BUILTIN_VPERMXOR:
13194 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
13195 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
13196 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
13197 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
13198 case CRYPTO_BUILTIN_VSHASIGMAW:
13199 case CRYPTO_BUILTIN_VSHASIGMAD:
13200 case CRYPTO_BUILTIN_VSHASIGMA:
13201 h.uns_p[0] = 1;
13202 h.uns_p[1] = 1;
13203 h.uns_p[2] = 1;
13204 h.uns_p[3] = 1;
13205 break;
13207 /* signed permute functions with unsigned char mask. */
13208 case ALTIVEC_BUILTIN_VPERM_16QI:
13209 case ALTIVEC_BUILTIN_VPERM_8HI:
13210 case ALTIVEC_BUILTIN_VPERM_4SI:
13211 case ALTIVEC_BUILTIN_VPERM_4SF:
13212 case ALTIVEC_BUILTIN_VPERM_2DI:
13213 case ALTIVEC_BUILTIN_VPERM_2DF:
13214 case VSX_BUILTIN_VPERM_16QI:
13215 case VSX_BUILTIN_VPERM_8HI:
13216 case VSX_BUILTIN_VPERM_4SI:
13217 case VSX_BUILTIN_VPERM_4SF:
13218 case VSX_BUILTIN_VPERM_2DI:
13219 case VSX_BUILTIN_VPERM_2DF:
13220 h.uns_p[3] = 1;
13221 break;
13223 /* unsigned args, signed return. */
13224 case VSX_BUILTIN_XVCVUXDDP_UNS:
13225 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
13226 h.uns_p[1] = 1;
13227 break;
13229 /* signed args, unsigned return. */
13230 case VSX_BUILTIN_XVCVDPUXDS_UNS:
13231 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
13232 h.uns_p[0] = 1;
13233 break;
13235 default:
13236 break;
13239 /* Figure out how many args are present. */
13240 while (num_args > 0 && h.mode[num_args] == VOIDmode)
13241 num_args--;
13243 if (num_args == 0)
13244 fatal_error ("internal error: builtin function %s had no type", name);
13246 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
13247 if (!ret_type && h.uns_p[0])
13248 ret_type = builtin_mode_to_type[h.mode[0]][0];
13250 if (!ret_type)
13251 fatal_error ("internal error: builtin function %s had an unexpected "
13252 "return type %s", name, GET_MODE_NAME (h.mode[0]));
13254 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
13255 arg_type[i] = NULL_TREE;
13257 for (i = 0; i < num_args; i++)
13259 int m = (int) h.mode[i+1];
13260 int uns_p = h.uns_p[i+1];
13262 arg_type[i] = builtin_mode_to_type[m][uns_p];
13263 if (!arg_type[i] && uns_p)
13264 arg_type[i] = builtin_mode_to_type[m][0];
13266 if (!arg_type[i])
13267 fatal_error ("internal error: builtin function %s, argument %d "
13268 "had unexpected argument type %s", name, i,
13269 GET_MODE_NAME (m));
13272 found = htab_find_slot (builtin_hash_table, &h, INSERT);
13273 if (*found == NULL)
13275 h2 = ggc_alloc_builtin_hash_struct ();
13276 *h2 = h;
13277 *found = (void *)h2;
13279 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
13280 arg_type[2], NULL_TREE);
13283 return ((struct builtin_hash_struct *)(*found))->type;
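/* Worked example (illustrative): a call such as
     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
                            builtin, name)
   trims num_args from 3 to 2 because the trailing mode is VOIDmode, maps
   each remaining mode through builtin_mode_to_type, and caches a type
   equivalent to
     build_function_type_list (V4SI_type_node, V4SI_type_node,
                               V4SI_type_node, NULL_TREE)
   so later builtins with the same mode/signedness signature reuse it.  */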
13286 static void
13287 rs6000_common_init_builtins (void)
13289 const struct builtin_description *d;
13290 size_t i;
13292 tree opaque_ftype_opaque = NULL_TREE;
13293 tree opaque_ftype_opaque_opaque = NULL_TREE;
13294 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
13295 tree v2si_ftype_qi = NULL_TREE;
13296 tree v2si_ftype_v2si_qi = NULL_TREE;
13297 tree v2si_ftype_int_qi = NULL_TREE;
13298 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
13300 if (!TARGET_PAIRED_FLOAT)
13302 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
13303 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
13306 /* Paired and SPE builtins are only available if you build a compiler with
13307 the appropriate options, so only create those builtins with the
13308 appropriate compiler option. Create Altivec and VSX builtins on machines
13309 with at least the general purpose extensions (970 and newer) to allow the
13310 use of the target attribute. */
13312 if (TARGET_EXTRA_BUILTINS)
13313 builtin_mask |= RS6000_BTM_COMMON;
13315 /* Add the ternary operators. */
13316 d = bdesc_3arg;
13317 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
13319 tree type;
13320 HOST_WIDE_INT mask = d->mask;
13322 if ((mask & builtin_mask) != mask)
13324 if (TARGET_DEBUG_BUILTIN)
13325 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
13326 continue;
13329 if (rs6000_overloaded_builtin_p (d->code))
13331 if (! (type = opaque_ftype_opaque_opaque_opaque))
13332 type = opaque_ftype_opaque_opaque_opaque
13333 = build_function_type_list (opaque_V4SI_type_node,
13334 opaque_V4SI_type_node,
13335 opaque_V4SI_type_node,
13336 opaque_V4SI_type_node,
13337 NULL_TREE);
13339 else
13341 enum insn_code icode = d->icode;
13342 if (d->name == 0)
13344 if (TARGET_DEBUG_BUILTIN)
13345 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
13346 (long unsigned)i);
13348 continue;
13351 if (icode == CODE_FOR_nothing)
13353 if (TARGET_DEBUG_BUILTIN)
13354 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
13355 d->name);
13357 continue;
13360 type = builtin_function_type (insn_data[icode].operand[0].mode,
13361 insn_data[icode].operand[1].mode,
13362 insn_data[icode].operand[2].mode,
13363 insn_data[icode].operand[3].mode,
13364 d->code, d->name);
13367 def_builtin (d->name, type, d->code);
13370 /* Add the binary operators. */
13371 d = bdesc_2arg;
13372 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13374 enum machine_mode mode0, mode1, mode2;
13375 tree type;
13376 HOST_WIDE_INT mask = d->mask;
13378 if ((mask & builtin_mask) != mask)
13380 if (TARGET_DEBUG_BUILTIN)
13381 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
13382 continue;
13385 if (rs6000_overloaded_builtin_p (d->code))
13387 if (! (type = opaque_ftype_opaque_opaque))
13388 type = opaque_ftype_opaque_opaque
13389 = build_function_type_list (opaque_V4SI_type_node,
13390 opaque_V4SI_type_node,
13391 opaque_V4SI_type_node,
13392 NULL_TREE);
13394 else
13396 enum insn_code icode = d->icode;
13397 if (d->name == 0)
13399 if (TARGET_DEBUG_BUILTIN)
13400 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
13401 (long unsigned)i);
13403 continue;
13406 if (icode == CODE_FOR_nothing)
13408 if (TARGET_DEBUG_BUILTIN)
13409 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
13410 d->name);
13412 continue;
13415 mode0 = insn_data[icode].operand[0].mode;
13416 mode1 = insn_data[icode].operand[1].mode;
13417 mode2 = insn_data[icode].operand[2].mode;
13419 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
13421 if (! (type = v2si_ftype_v2si_qi))
13422 type = v2si_ftype_v2si_qi
13423 = build_function_type_list (opaque_V2SI_type_node,
13424 opaque_V2SI_type_node,
13425 char_type_node,
13426 NULL_TREE);
13429 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
13430 && mode2 == QImode)
13432 if (! (type = v2si_ftype_int_qi))
13433 type = v2si_ftype_int_qi
13434 = build_function_type_list (opaque_V2SI_type_node,
13435 integer_type_node,
13436 char_type_node,
13437 NULL_TREE);
13440 else
13441 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
13442 d->code, d->name);
13445 def_builtin (d->name, type, d->code);
13448 /* Add the simple unary operators. */
13449 d = bdesc_1arg;
13450 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13452 enum machine_mode mode0, mode1;
13453 tree type;
13454 HOST_WIDE_INT mask = d->mask;
13456 if ((mask & builtin_mask) != mask)
13458 if (TARGET_DEBUG_BUILTIN)
13459 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
13460 continue;
13463 if (rs6000_overloaded_builtin_p (d->code))
13465 if (! (type = opaque_ftype_opaque))
13466 type = opaque_ftype_opaque
13467 = build_function_type_list (opaque_V4SI_type_node,
13468 opaque_V4SI_type_node,
13469 NULL_TREE);
13471 else
13473 enum insn_code icode = d->icode;
13474 if (d->name == 0)
13476 if (TARGET_DEBUG_BUILTIN)
13477 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
13478 (long unsigned)i);
13480 continue;
13483 if (icode == CODE_FOR_nothing)
13485 if (TARGET_DEBUG_BUILTIN)
13486 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
13487 d->name);
13489 continue;
13492 mode0 = insn_data[icode].operand[0].mode;
13493 mode1 = insn_data[icode].operand[1].mode;
13495 if (mode0 == V2SImode && mode1 == QImode)
13497 if (! (type = v2si_ftype_qi))
13498 type = v2si_ftype_qi
13499 = build_function_type_list (opaque_V2SI_type_node,
13500 char_type_node,
13501 NULL_TREE);
13504 else
13505 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
13506 d->code, d->name);
13509 def_builtin (d->name, type, d->code);
13513 static void
13514 rs6000_init_libfuncs (void)
13516 if (!TARGET_IEEEQUAD)
13517 /* AIX/Darwin/64-bit Linux quad floating point routines. */
13518 if (!TARGET_XL_COMPAT)
13520 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
13521 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
13522 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
13523 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
13525 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
13527 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
13528 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
13529 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
13530 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
13531 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
13532 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
13533 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
13535 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
13536 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
13537 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
13538 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
13539 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
13540 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
13541 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
13542 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
13545 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
13546 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
13548 else
13550 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
13551 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
13552 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
13553 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
13555 else
13557 /* 32-bit SVR4 quad floating point routines. */
13559 set_optab_libfunc (add_optab, TFmode, "_q_add");
13560 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
13561 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
13562 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
13563 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
13564 if (TARGET_PPC_GPOPT)
13565 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
13567 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
13568 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
13569 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
13570 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
13571 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
13572 set_optab_libfunc (le_optab, TFmode, "_q_fle");
13574 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
13575 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
13576 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
13577 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
13578 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
13579 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
13580 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
13581 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
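/* Illustrative example: with the registrations above in effect, a 128-bit
   long double addition such as
     long double f (long double a, long double b) { return a + b; }
   is lowered to a libcall (_q_add on 32-bit SVR4, or __gcc_qadd on the
   AIX/Darwin/64-bit Linux path when not in XL-compat mode) rather than
   expanded inline.  */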
13586 /* Expand a block clear operation, and return 1 if successful. Return 0
13587 if we should let the compiler generate normal code.
13589 operands[0] is the destination
13590 operands[1] is the length
13591 operands[3] is the alignment */
13593 int
13594 expand_block_clear (rtx operands[])
13596 rtx orig_dest = operands[0];
13597 rtx bytes_rtx = operands[1];
13598 rtx align_rtx = operands[3];
13599 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
13600 HOST_WIDE_INT align;
13601 HOST_WIDE_INT bytes;
13602 int offset;
13603 int clear_bytes;
13604 int clear_step;
13606 /* If this is not a fixed size clear, just call memset */
13607 if (! constp)
13608 return 0;
13610 /* This must be a fixed size alignment */
13611 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
13612 align = INTVAL (align_rtx) * BITS_PER_UNIT;
13614 /* Anything to clear? */
13615 bytes = INTVAL (bytes_rtx);
13616 if (bytes <= 0)
13617 return 1;
13619 /* Use the builtin memset after a point, to avoid huge code bloat.
13620 When optimize_size, avoid any significant code bloat; calling
13621 memset is about 4 instructions, so allow for one instruction to
13622 load zero and three to do clearing. */
13623 if (TARGET_ALTIVEC && align >= 128)
13624 clear_step = 16;
13625 else if (TARGET_POWERPC64 && align >= 32)
13626 clear_step = 8;
13627 else if (TARGET_SPE && align >= 64)
13628 clear_step = 8;
13629 else
13630 clear_step = 4;
13632 if (optimize_size && bytes > 3 * clear_step)
13633 return 0;
13634 if (! optimize_size && bytes > 8 * clear_step)
13635 return 0;
13637 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
13639 enum machine_mode mode = BLKmode;
13640 rtx dest;
13642 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
13644 clear_bytes = 16;
13645 mode = V4SImode;
13647 else if (bytes >= 8 && TARGET_SPE && align >= 64)
13649 clear_bytes = 8;
13650 mode = V2SImode;
13652 else if (bytes >= 8 && TARGET_POWERPC64
13653 /* 64-bit loads and stores require word-aligned
13654 displacements. */
13655 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13657 clear_bytes = 8;
13658 mode = DImode;
13660 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13661 { /* move 4 bytes */
13662 clear_bytes = 4;
13663 mode = SImode;
13665 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13666 { /* move 2 bytes */
13667 clear_bytes = 2;
13668 mode = HImode;
13670 else /* move 1 byte at a time */
13672 clear_bytes = 1;
13673 mode = QImode;
13676 dest = adjust_address (orig_dest, mode, offset);
13678 emit_move_insn (dest, CONST0_RTX (mode));
13681 return 1;
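/* Worked example (illustrative): clearing 16 bytes with 32-bit alignment
   on a 32-bit target without AltiVec gives clear_step == 4; when not
   optimizing for size, 16 <= 8 * 4 passes the bloat check, and the loop
   above emits four SImode stores of zero at offsets 0, 4, 8 and 12
   instead of a memset call.  */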
13685 /* Expand a block move operation, and return 1 if successful. Return 0
13686 if we should let the compiler generate normal code.
13688 operands[0] is the destination
13689 operands[1] is the source
13690 operands[2] is the length
13691 operands[3] is the alignment */
13693 #define MAX_MOVE_REG 4
13695 int
13696 expand_block_move (rtx operands[])
13698 rtx orig_dest = operands[0];
13699 rtx orig_src = operands[1];
13700 rtx bytes_rtx = operands[2];
13701 rtx align_rtx = operands[3];
13702 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
13703 int align;
13704 int bytes;
13705 int offset;
13706 int move_bytes;
13707 rtx stores[MAX_MOVE_REG];
13708 int num_reg = 0;
13710 /* If this is not a fixed size move, just call memcpy */
13711 if (! constp)
13712 return 0;
13714 /* This must be a fixed size alignment */
13715 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
13716 align = INTVAL (align_rtx) * BITS_PER_UNIT;
13718 /* Anything to move? */
13719 bytes = INTVAL (bytes_rtx);
13720 if (bytes <= 0)
13721 return 1;
13723 if (bytes > rs6000_block_move_inline_limit)
13724 return 0;
13726 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
13728 union {
13729 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
13730 rtx (*mov) (rtx, rtx);
13731 } gen_func;
13732 enum machine_mode mode = BLKmode;
13733 rtx src, dest;
13735 /* Altivec first, since it will be faster than a string move
13736 when it applies, and usually not significantly larger. */
13737 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
13739 move_bytes = 16;
13740 mode = V4SImode;
13741 gen_func.mov = gen_movv4si;
13743 else if (TARGET_SPE && bytes >= 8 && align >= 64)
13745 move_bytes = 8;
13746 mode = V2SImode;
13747 gen_func.mov = gen_movv2si;
13749 else if (TARGET_STRING
13750 && bytes > 24 /* move up to 32 bytes at a time */
13751 && ! fixed_regs[5]
13752 && ! fixed_regs[6]
13753 && ! fixed_regs[7]
13754 && ! fixed_regs[8]
13755 && ! fixed_regs[9]
13756 && ! fixed_regs[10]
13757 && ! fixed_regs[11]
13758 && ! fixed_regs[12])
13760 move_bytes = (bytes > 32) ? 32 : bytes;
13761 gen_func.movmemsi = gen_movmemsi_8reg;
13763 else if (TARGET_STRING
13764 && bytes > 16 /* move up to 24 bytes at a time */
13765 && ! fixed_regs[5]
13766 && ! fixed_regs[6]
13767 && ! fixed_regs[7]
13768 && ! fixed_regs[8]
13769 && ! fixed_regs[9]
13770 && ! fixed_regs[10])
13772 move_bytes = (bytes > 24) ? 24 : bytes;
13773 gen_func.movmemsi = gen_movmemsi_6reg;
13775 else if (TARGET_STRING
13776 && bytes > 8 /* move up to 16 bytes at a time */
13777 && ! fixed_regs[5]
13778 && ! fixed_regs[6]
13779 && ! fixed_regs[7]
13780 && ! fixed_regs[8])
13782 move_bytes = (bytes > 16) ? 16 : bytes;
13783 gen_func.movmemsi = gen_movmemsi_4reg;
13785 else if (bytes >= 8 && TARGET_POWERPC64
13786 /* 64-bit loads and stores require word-aligned
13787 displacements. */
13788 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13790 move_bytes = 8;
13791 mode = DImode;
13792 gen_func.mov = gen_movdi;
13794 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13795 { /* move up to 8 bytes at a time */
13796 move_bytes = (bytes > 8) ? 8 : bytes;
13797 gen_func.movmemsi = gen_movmemsi_2reg;
13799 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13800 { /* move 4 bytes */
13801 move_bytes = 4;
13802 mode = SImode;
13803 gen_func.mov = gen_movsi;
13805 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13806 { /* move 2 bytes */
13807 move_bytes = 2;
13808 mode = HImode;
13809 gen_func.mov = gen_movhi;
13811 else if (TARGET_STRING && bytes > 1)
13812 { /* move up to 4 bytes at a time */
13813 move_bytes = (bytes > 4) ? 4 : bytes;
13814 gen_func.movmemsi = gen_movmemsi_1reg;
13816 else /* move 1 byte at a time */
13818 move_bytes = 1;
13819 mode = QImode;
13820 gen_func.mov = gen_movqi;
13823 src = adjust_address (orig_src, mode, offset);
13824 dest = adjust_address (orig_dest, mode, offset);
13826 if (mode != BLKmode)
13828 rtx tmp_reg = gen_reg_rtx (mode);
13830 emit_insn ((*gen_func.mov) (tmp_reg, src));
13831 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13834 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13836 int i;
13837 for (i = 0; i < num_reg; i++)
13838 emit_insn (stores[i]);
13839 num_reg = 0;
13842 if (mode == BLKmode)
13844 /* Move the address into scratch registers. The movmemsi
13845 patterns require zero offset. */
13846 if (!REG_P (XEXP (src, 0)))
13848 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13849 src = replace_equiv_address (src, src_reg);
13851 set_mem_size (src, move_bytes);
13853 if (!REG_P (XEXP (dest, 0)))
13855 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13856 dest = replace_equiv_address (dest, dest_reg);
13858 set_mem_size (dest, move_bytes);
13860 emit_insn ((*gen_func.movmemsi) (dest, src,
13861 GEN_INT (move_bytes & 31),
13862 align_rtx));
13866 return 1;
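/* Worked example (illustrative): a 10-byte copy with 32-bit alignment on
   a 32-bit target without string instructions is split into SImode,
   SImode and HImode pieces at offsets 0, 4 and 8.  Each piece is loaded
   into a fresh pseudo and its store is queued in stores[]; the queue is
   flushed every MAX_MOVE_REG moves and at the end, so the loads in each
   group can be scheduled ahead of the stores.  */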
13870 /* Return a string to perform a load_multiple operation.
13871 operands[0] is the vector.
13872 operands[1] is the source address.
13873 operands[2] is the first destination register. */
13875 const char *
13876 rs6000_output_load_multiple (rtx operands[3])
13878 /* We have to handle the case where the pseudo used to contain the address
13879 is assigned to one of the output registers. */
13880 int i, j;
13881 int words = XVECLEN (operands[0], 0);
13882 rtx xop[10];
13884 if (XVECLEN (operands[0], 0) == 1)
13885 return "lwz %2,0(%1)";
13887 for (i = 0; i < words; i++)
13888 if (refers_to_regno_p (REGNO (operands[2]) + i,
13889 REGNO (operands[2]) + i + 1, operands[1], 0))
13891 if (i == words-1)
13893 xop[0] = GEN_INT (4 * (words-1));
13894 xop[1] = operands[1];
13895 xop[2] = operands[2];
13896 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13897 return "";
13899 else if (i == 0)
13901 xop[0] = GEN_INT (4 * (words-1));
13902 xop[1] = operands[1];
13903 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13904 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13905 return "";
13907 else
13909 for (j = 0; j < words; j++)
13910 if (j != i)
13912 xop[0] = GEN_INT (j * 4);
13913 xop[1] = operands[1];
13914 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13915 output_asm_insn ("lwz %2,%0(%1)", xop);
13917 xop[0] = GEN_INT (i * 4);
13918 xop[1] = operands[1];
13919 output_asm_insn ("lwz %1,%0(%1)", xop);
13920 return "";
13924 return "lswi %2,%1,%N0";
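/* Worked example (illustrative register choice): with five destination
   words r5..r9 and the source address also sitting in r9, the
   i == words-1 case above prints
     lswi 5,9,16
     lwz 9,16(9)
   loading r5..r8 first and clobbering the address register only with the
   final word.  */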
13928 /* A validation routine: say whether CODE, a condition code, and MODE
13929 match. The other alternatives either don't make sense or should
13930 never be generated. */
13932 void
13933 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13935 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13936 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13937 && GET_MODE_CLASS (mode) == MODE_CC);
13939 /* These don't make sense. */
13940 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13941 || mode != CCUNSmode);
13943 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13944 || mode == CCUNSmode);
13946 gcc_assert (mode == CCFPmode
13947 || (code != ORDERED && code != UNORDERED
13948 && code != UNEQ && code != LTGT
13949 && code != UNGT && code != UNLT
13950 && code != UNGE && code != UNLE));
13952 /* These should never be generated except for
13953 flag_finite_math_only. */
13954 gcc_assert (mode != CCFPmode
13955 || flag_finite_math_only
13956 || (code != LE && code != GE
13957 && code != UNEQ && code != LTGT
13958 && code != UNGT && code != UNLT));
13960 /* These are invalid; the information is not there. */
13961 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
13965 /* Return 1 if ANDOP is a mask that has no bits set that are not in the
13966 mask required to convert the result of a rotate insn into a shift
13967 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
13969 int
13970 includes_lshift_p (rtx shiftop, rtx andop)
13972 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13974 shift_mask <<= INTVAL (shiftop);
13976 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13979 /* Similar, but for right shift. */
13981 int
13982 includes_rshift_p (rtx shiftop, rtx andop)
13984 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13986 shift_mask >>= INTVAL (shiftop);
13988 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
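/* Worked example (illustrative): for SHIFTOP == 4, includes_lshift_p
   computes shift_mask == 0xfffffff0, so ANDOP == 0x0000fff0 qualifies
   (rotate left 4 then AND equals shift left 4 then AND, because every
   wrapped-around bit lands in the cleared low nibble), while
   ANDOP == 0x0000fff8 is rejected since bit 3 could only come from a
   wrapped-around bit.  includes_rshift_p is the mirror image.  */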
13991 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13992 to perform a left shift. It must have exactly SHIFTOP least
13993 significant 0's, then one or more 1's, then zero or more 0's. */
13995 int
13996 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13998 if (GET_CODE (andop) == CONST_INT)
14000 HOST_WIDE_INT c, lsb, shift_mask;
14002 c = INTVAL (andop);
14003 if (c == 0 || c == ~0)
14004 return 0;
14006 shift_mask = ~0;
14007 shift_mask <<= INTVAL (shiftop);
14009 /* Find the least significant one bit. */
14010 lsb = c & -c;
14012 /* It must coincide with the LSB of the shift mask. */
14013 if (-lsb != shift_mask)
14014 return 0;
14016 /* Invert to look for the next transition (if any). */
14017 c = ~c;
14019 /* Remove the low group of ones (originally low group of zeros). */
14020 c &= -lsb;
14022 /* Again find the lsb, and check we have all 1's above. */
14023 lsb = c & -c;
14024 return c == -lsb;
14026 else
14027 return 0;
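/* Worked example (illustrative, showing the low 32 bits of the 64-bit
   values) for SHIFTOP == 8 and ANDOP == 0x0000ff00:
     lsb == c & -c   == 0x00000100, and -lsb == shift_mask
                        (the 0->1 transition is exactly at bit 8);
     c = ~c          == 0xffff00ff, then c &= -lsb clears the low
                        group of ones: c == 0xffff0000;
     lsb == c & -c   == 0x00010000 and c == -lsb,
   so everything above the second transition is 1s and the mask is
   accepted for a single rldic.  */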
14030 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
14031 to perform a left shift. It must have SHIFTOP or more least
14032 significant 0's, with the remainder of the word 1's. */
14034 int
14035 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
14037 if (GET_CODE (andop) == CONST_INT)
14039 HOST_WIDE_INT c, lsb, shift_mask;
14041 shift_mask = ~0;
14042 shift_mask <<= INTVAL (shiftop);
14043 c = INTVAL (andop);
14045 /* Find the least significant one bit. */
14046 lsb = c & -c;
14048 /* It must be covered by the shift mask.
14049 This test also rejects c == 0. */
14050 if ((lsb & shift_mask) == 0)
14051 return 0;
14053 /* Check we have all 1's above the transition, and reject all 1's. */
14054 return c == -lsb && lsb != 1;
14056 else
14057 return 0;
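/* Worked example (illustrative): includes_rldicr_lshift_p accepts a solid
   run of 1s whose lowest bit is at or above SHIFTOP.  For SHIFTOP == 8,
   ANDOP == 0xffffffffffffff00 passes: lsb == 0x100 lies inside shift_mask
   and c == -lsb.  ANDOP == 0xffffffffffffff80 fails because its lowest
   set bit is below the shift amount.  */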
14060 /* Return 1 if the operands will generate valid arguments to the rlwimi
14061 instruction for an insert with right shift in 64-bit mode. The mask may
14062 not start on the first bit or stop on the last bit because the wrap-around
14063 effects of the instruction do not correspond to the semantics of the RTL insn. */
14065 int
14066 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
14068 if (INTVAL (startop) > 32
14069 && INTVAL (startop) < 64
14070 && INTVAL (sizeop) > 1
14071 && INTVAL (sizeop) + INTVAL (startop) < 64
14072 && INTVAL (shiftop) > 0
14073 && INTVAL (sizeop) + INTVAL (shiftop) < 32
14074 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
14075 return 1;
14077 return 0;
14080 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
14081 for lfq and stfq insns iff the registers are hard registers. */
14083 int
14084 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
14086 /* We might have been passed a SUBREG. */
14087 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
14088 return 0;
14090 /* We might have been passed non floating point registers. */
14091 if (!FP_REGNO_P (REGNO (reg1))
14092 || !FP_REGNO_P (REGNO (reg2)))
14093 return 0;
14095 return (REGNO (reg1) == REGNO (reg2) - 1);
14098 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
14099 addr1 and addr2 must be in consecutive memory locations
14100 (addr2 == addr1 + 8). */
14102 int
14103 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
14105 rtx addr1, addr2;
14106 unsigned int reg1, reg2;
14107 int offset1, offset2;
14109 /* The mems cannot be volatile. */
14110 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
14111 return 0;
14113 addr1 = XEXP (mem1, 0);
14114 addr2 = XEXP (mem2, 0);
14116 /* Extract an offset (if used) from the first addr. */
14117 if (GET_CODE (addr1) == PLUS)
14119 /* If not a REG, return zero. */
14120 if (GET_CODE (XEXP (addr1, 0)) != REG)
14121 return 0;
14122 else
14124 reg1 = REGNO (XEXP (addr1, 0));
14125 /* The offset must be constant! */
14126 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
14127 return 0;
14128 offset1 = INTVAL (XEXP (addr1, 1));
14131 else if (GET_CODE (addr1) != REG)
14132 return 0;
14133 else
14135 reg1 = REGNO (addr1);
14136 /* This was a simple (mem (reg)) expression. Offset is 0. */
14137 offset1 = 0;
14140 /* And now for the second addr. */
14141 if (GET_CODE (addr2) == PLUS)
14143 /* If not a REG, return zero. */
14144 if (GET_CODE (XEXP (addr2, 0)) != REG)
14145 return 0;
14146 else
14148 reg2 = REGNO (XEXP (addr2, 0));
14149 /* The offset must be constant. */
14150 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
14151 return 0;
14152 offset2 = INTVAL (XEXP (addr2, 1));
14155 else if (GET_CODE (addr2) != REG)
14156 return 0;
14157 else
14159 reg2 = REGNO (addr2);
14160 /* This was a simple (mem (reg)) expression. Offset is 0. */
14161 offset2 = 0;
14164 /* Both of these must have the same base register. */
14165 if (reg1 != reg2)
14166 return 0;
14168 /* The offset for the second addr must be 8 more than the first addr. */
14169 if (offset2 != offset1 + 8)
14170 return 0;
14172 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
14173 instructions. */
14174 return 1;
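/* Worked example (illustrative RTL, base register r9 assumed):
     mem1 == (mem:DF (reg 9))                        -> offset1 == 0
     mem2 == (mem:DF (plus (reg 9) (const_int 8)))   -> offset2 == 8
   Same base, offsets differing by exactly 8, so the two adjacent
   accesses may be combined into a single lfq or stfq.  */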
14178 rtx
14179 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
14181 static bool eliminated = false;
14182 rtx ret;
14184 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
14185 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14186 else
14188 rtx mem = cfun->machine->sdmode_stack_slot;
14189 gcc_assert (mem != NULL_RTX);
14191 if (!eliminated)
14193 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
14194 cfun->machine->sdmode_stack_slot = mem;
14195 eliminated = true;
14197 ret = mem;
14200 if (TARGET_DEBUG_ADDR)
14202 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
14203 GET_MODE_NAME (mode));
14204 if (!ret)
14205 fprintf (stderr, "\tNULL_RTX\n");
14206 else
14207 debug_rtx (ret);
14210 return ret;
14213 static tree
14214 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
14216 /* Don't walk into types. */
14217 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
14219 *walk_subtrees = 0;
14220 return NULL_TREE;
14223 switch (TREE_CODE (*tp))
14225 case VAR_DECL:
14226 case PARM_DECL:
14227 case FIELD_DECL:
14228 case RESULT_DECL:
14229 case SSA_NAME:
14230 case REAL_CST:
14231 case MEM_REF:
14232 case VIEW_CONVERT_EXPR:
14233 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
14234 return *tp;
14235 break;
14236 default:
14237 break;
14240 return NULL_TREE;
14243 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
14244 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
14245 only work on the traditional altivec registers, note if an altivec register
14246 was chosen. */
14248 static enum rs6000_reg_type
14249 register_to_reg_type (rtx reg, bool *is_altivec)
14251 HOST_WIDE_INT regno;
14252 enum reg_class rclass;
14254 if (GET_CODE (reg) == SUBREG)
14255 reg = SUBREG_REG (reg);
14257 if (!REG_P (reg))
14258 return NO_REG_TYPE;
14260 regno = REGNO (reg);
14261 if (regno >= FIRST_PSEUDO_REGISTER)
14263 if (!lra_in_progress && !reload_in_progress && !reload_completed)
14264 return PSEUDO_REG_TYPE;
14266 regno = true_regnum (reg);
14267 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
14268 return PSEUDO_REG_TYPE;
14271 gcc_assert (regno >= 0);
14273 if (is_altivec && ALTIVEC_REGNO_P (regno))
14274 *is_altivec = true;
14276 rclass = rs6000_regno_regclass[regno];
14277 return reg_class_to_reg_type[(int)rclass];
14280 /* Helper function for rs6000_secondary_reload to return true if a move to a
14281 different register class is really a simple move. */
14283 static bool
14284 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
14285 enum rs6000_reg_type from_type,
14286 enum machine_mode mode)
14288 int size;
14290 /* Add support for various direct moves available. In this function, we only
14291 look at cases where we don't need any extra registers, and one or more
14292 simple move insns are issued. At present, 32-bit integers are not allowed
14293 in FPR/VSX registers. Single precision binary floating point is not a simple
14294 move because we need to convert to the single precision memory layout.
14295 The 4-byte SDmode can be moved. */
14296 size = GET_MODE_SIZE (mode);
14297 if (TARGET_DIRECT_MOVE
14298 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
14299 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14300 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
14301 return true;
14303 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
14304 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
14305 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
14306 return true;
14308 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
14309 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
14310 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
14311 return true;
14313 return false;
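/* Illustrative example: on a 64-bit power8 target, a DImode move between
   a GPR and a VSX register satisfies the first test above
   (TARGET_DIRECT_MOVE, size == 8) and so needs no scratch register, just
   mtvsrd or mfvsrd.  SFmode is not a simple move, since the value has to
   be converted to the single precision memory layout; it is handled by
   the direct-move helper that follows.  */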
14316 /* Power8 helper function for rs6000_secondary_reload. Handle all of the
14317 special direct moves that involve allocating an extra register. Return
14318 true if such a special move is needed, recording the insn code and extra
14319 cost of the helper in SRI if it is non-null. */
14321 static bool
14322 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
14323 enum rs6000_reg_type from_type,
14324 enum machine_mode mode,
14325 secondary_reload_info *sri,
14326 bool altivec_p)
14328 bool ret = false;
14329 enum insn_code icode = CODE_FOR_nothing;
14330 int cost = 0;
14331 int size = GET_MODE_SIZE (mode);
14333 if (TARGET_POWERPC64)
14335 if (size == 16)
14337 /* Handle moving 128-bit values from GPRs to VSX registers on
14338 power8 when running in 64-bit mode using XXPERMDI to glue the two
14339 64-bit values back together. */
14340 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
14342 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
14343 icode = reload_vsx_gpr[(int)mode];
14346 /* Handle moving 128-bit values from VSX registers to GPRs on
14347 power8 when running in 64-bit mode using XXPERMDI to get access to the
14348 bottom 64-bit value. */
14349 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14351 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
14352 icode = reload_gpr_vsx[(int)mode];
14356 else if (mode == SFmode)
14358 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
14360 cost = 3; /* xscvdpspn, mfvsrd, and. */
14361 icode = reload_gpr_vsx[(int)mode];
14364 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
14366 cost = 2; /* mtvsrz, xscvspdpn. */
14367 icode = reload_vsx_gpr[(int)mode];
14393 if (!TARGET_POWERPC64 && size == 8)
14395 /* Handle moving 64-bit values from GPRs to floating point registers on
14396 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
14397 values back together. Altivec register classes must be handled
14398 specially since a different instruction is used, and the secondary
14399 reload support requires a single instruction class in the scratch
14400 register constraint. However, right now TFmode is not allowed in
14401 Altivec registers, so the pattern will never match. */
14402 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
14404 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
14405 icode = reload_fpr_gpr[(int)mode];
14409 if (icode != CODE_FOR_nothing)
14411 ret = true;
14412 if (sri)
14414 sri->icode = icode;
14415 sri->extra_cost = cost;
14419 return ret;
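/* Illustrative note (editorial addition): a worked case of the costs
   recorded above.  A TImode move from a GPR pair to a VSX register on a
   64-bit power8 selects the reload_vsx_gpr pattern, which expands to two
   mtvsrd insns (one per 64-bit half) plus one xxpermdi to glue the halves
   together, matching the extra_cost of 3 stored in SRI.  */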
14422 /* Return whether a move between two register classes can be done either
14423 directly (simple move) or via a pattern that uses a single extra temporary
14424 (using power8's direct move in this case). */
14426 static bool
14427 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
14428 enum rs6000_reg_type from_type,
14429 enum machine_mode mode,
14430 secondary_reload_info *sri,
14431 bool altivec_p)
14433 /* Fall back to load/store reloads if either type is not a register. */
14434 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
14435 return false;
14437 /* If we haven't allocated registers yet, assume the move can be done for the
14438 standard register types. */
14439 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
14440 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
14441 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
14442 return true;
14444 /* A move within the same set of registers is a simple move for
14445 non-specialized registers. */
14446 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
14447 return true;
14449 /* Check whether a simple move can be done directly. */
14450 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
14452 if (sri)
14454 sri->icode = CODE_FOR_nothing;
14455 sri->extra_cost = 0;
14457 return true;
14460 /* Now check if we can do it in a few steps. */
14461 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
14462 altivec_p);
14465 /* Inform reload about cases where moving X with a mode MODE to a register in
14466 RCLASS requires an extra scratch or immediate register. Return the class
14467 needed for the immediate register.
14469 For VSX and Altivec, we may need a register to convert sp+offset into
14470 reg+sp.
14472 For misaligned 64-bit gpr loads and stores we need a register to
14473 convert an offset address to indirect. */
14475 static reg_class_t
14476 rs6000_secondary_reload (bool in_p,
14477 rtx x,
14478 reg_class_t rclass_i,
14479 enum machine_mode mode,
14480 secondary_reload_info *sri)
14482 enum reg_class rclass = (enum reg_class) rclass_i;
14483 reg_class_t ret = ALL_REGS;
14484 enum insn_code icode;
14485 bool default_p = false;
14487 sri->icode = CODE_FOR_nothing;
14488 icode = rs6000_vector_reload[mode][in_p != false];
14490 if (REG_P (x) || register_operand (x, mode))
14492 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
14493 bool altivec_p = (rclass == ALTIVEC_REGS);
14494 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
14496 if (!in_p)
14498 enum rs6000_reg_type exchange = to_type;
14499 to_type = from_type;
14500 from_type = exchange;
14503 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
14504 altivec_p))
14506 icode = (enum insn_code)sri->icode;
14507 default_p = false;
14508 ret = NO_REGS;
14512 /* Handle vector moves with reload helper functions. */
14513 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
14515 ret = NO_REGS;
14516 sri->icode = CODE_FOR_nothing;
14517 sri->extra_cost = 0;
14519 if (GET_CODE (x) == MEM)
14521 rtx addr = XEXP (x, 0);
14523 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
14524 an extra register in that case, but it would need an extra
14525 register if the addressing is reg+reg or (reg+reg)&(-16). Special
14526 case load/store quad. */
14527 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
14529 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
14530 && GET_MODE_SIZE (mode) == 16
14531 && quad_memory_operand (x, mode))
14533 sri->icode = icode;
14534 sri->extra_cost = 2;
14537 else if (!legitimate_indirect_address_p (addr, false)
14538 && !rs6000_legitimate_offset_address_p (PTImode, addr,
14539 false, true))
14541 sri->icode = icode;
14542 /* account for splitting the loads, and converting the
14543 address from reg+reg to reg. */
14544 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
14545 + ((GET_CODE (addr) == AND) ? 1 : 0));
14548 /* Allow scalar loads to/from the traditional floating point
14549 registers, even if VSX memory is set. */
14550 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
14551 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
14552 && (legitimate_indirect_address_p (addr, false)
14553 || legitimate_indexed_address_p (addr, false)
14554 || rs6000_legitimate_offset_address_p (mode, addr,
14555 false, true)))
14556 ;
14558 /* Loads to and stores from vector registers can only do reg+reg
14559 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
14560 scalar modes loading up the traditional floating point registers
14561 to use offset addresses. */
14562 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
14563 || rclass == FLOAT_REGS || rclass == NO_REGS)
14565 if (!VECTOR_MEM_ALTIVEC_P (mode)
14566 && GET_CODE (addr) == AND
14567 && GET_CODE (XEXP (addr, 1)) == CONST_INT
14568 && INTVAL (XEXP (addr, 1)) == -16
14569 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
14570 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
14572 sri->icode = icode;
14573 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
14574 ? 2 : 1);
14576 else if (!legitimate_indirect_address_p (addr, false)
14577 && (rclass == NO_REGS
14578 || !legitimate_indexed_address_p (addr, false)))
14580 sri->icode = icode;
14581 sri->extra_cost = 1;
14583 else
14584 icode = CODE_FOR_nothing;
14586 /* Any other loads, including to pseudo registers which haven't been
14587 assigned to a register yet, default to require a scratch
14588 register. */
14589 else
14591 sri->icode = icode;
14592 sri->extra_cost = 2;
14595 else if (REG_P (x))
14597 int regno = true_regnum (x);
14599 icode = CODE_FOR_nothing;
14600 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
14601 default_p = true;
14602 else
14604 enum reg_class xclass = REGNO_REG_CLASS (regno);
14605 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
14606 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
14608 /* If memory is needed, use default_secondary_reload to create the
14609 stack slot. */
14610 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
14611 default_p = true;
14612 else
14613 ret = NO_REGS;
14616 else
14617 default_p = true;
14619 else if (TARGET_POWERPC64
14620 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
14621 && MEM_P (x)
14622 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
14624 rtx addr = XEXP (x, 0);
14625 rtx off = address_offset (addr);
14627 if (off != NULL_RTX)
14629 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
14630 unsigned HOST_WIDE_INT offset = INTVAL (off);
14632 /* We need a secondary reload when our legitimate_address_p
14633 says the address is good (as otherwise the entire address
14634 will be reloaded), and the offset is not a multiple of
14635 four or we have an address wrap. Address wrap will only
14636 occur for LO_SUMs since legitimate_offset_address_p
14637 rejects addresses for 16-byte mems that will wrap. */
14638 if (GET_CODE (addr) == LO_SUM
14639 ? (1 /* legitimate_address_p allows any offset for lo_sum */
14640 && ((offset & 3) != 0
14641 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
14642 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
14643 && (offset & 3) != 0))
14645 if (in_p)
14646 sri->icode = CODE_FOR_reload_di_load;
14647 else
14648 sri->icode = CODE_FOR_reload_di_store;
14649 sri->extra_cost = 2;
14650 ret = NO_REGS;
14652 else
14653 default_p = true;
14655 else
14656 default_p = true;
14658 else if (!TARGET_POWERPC64
14659 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
14660 && MEM_P (x)
14661 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
14663 rtx addr = XEXP (x, 0);
14664 rtx off = address_offset (addr);
14666 if (off != NULL_RTX)
14668 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
14669 unsigned HOST_WIDE_INT offset = INTVAL (off);
14671 /* We need a secondary reload when our legitimate_address_p
14672 says the address is good (as otherwise the entire address
14673 will be reloaded), and we have a wrap.
14675 legitimate_lo_sum_address_p allows LO_SUM addresses to
14676 have any offset so test for wrap in the low 16 bits.
14678 legitimate_offset_address_p checks for the range
14679 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
14680 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
14681 [0x7ff4,0x7fff] respectively, so test for the
14682 intersection of these ranges, [0x7ffc,0x7fff] and
14683 [0x7ff4,0x7ff7] respectively.
14685 Note that the address we see here may have been
14686 manipulated by legitimize_reload_address. */
14687 if (GET_CODE (addr) == LO_SUM
14688 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
14689 : offset - (0x8000 - extra) < UNITS_PER_WORD)
14691 if (in_p)
14692 sri->icode = CODE_FOR_reload_si_load;
14693 else
14694 sri->icode = CODE_FOR_reload_si_store;
14695 sri->extra_cost = 2;
14696 ret = NO_REGS;
14698 else
14699 default_p = true;
14701 else
14702 default_p = true;
14704 else
14705 default_p = true;
14707 if (default_p)
14708 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
14710 gcc_assert (ret != ALL_REGS);
14712 if (TARGET_DEBUG_ADDR)
14714 fprintf (stderr,
14715 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
14716 "mode = %s",
14717 reg_class_names[ret],
14718 in_p ? "true" : "false",
14719 reg_class_names[rclass],
14720 GET_MODE_NAME (mode));
14722 if (default_p)
14723 fprintf (stderr, ", default secondary reload");
14725 if (sri->icode != CODE_FOR_nothing)
14726 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
14727 insn_data[sri->icode].name, sri->extra_cost);
14728 else
14729 fprintf (stderr, "\n");
14731 debug_rtx (x);
14734 return ret;
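/* Illustrative sketch (editorial addition): the LO_SUM offset wrap test
   used above, pulled out as a self-contained predicate.  EXTRA is the
   number of bytes accessed beyond the first word.  XORing the low 16 bits
   with 0x8000 maps the sign-extended range [-0x8000,0x7fff] onto
   [0,0xffff], so offsets within EXTRA bytes of the +0x7fff boundary
   compare as wrapping.  */

static int
example_lo_sum_offset_wraps (unsigned long offset, unsigned int extra)
{
  return ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra;
}

/* For a 16-byte access (extra = 8), offset 0x7ffc yields 0xfffc >= 0xfff8
   and wraps; offset 0x1000 yields 0x9000 < 0xfff8 and does not.  */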
14737 /* Better tracing for rs6000_secondary_reload_inner. */
14739 static void
14740 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
14741 bool store_p)
14743 rtx set, clobber;
14745 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
14747 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
14748 store_p ? "store" : "load");
14750 if (store_p)
14751 set = gen_rtx_SET (VOIDmode, mem, reg);
14752 else
14753 set = gen_rtx_SET (VOIDmode, reg, mem);
14755 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
14756 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
14759 static void
14760 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
14761 bool store_p)
14763 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
14764 gcc_unreachable ();
14767 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
14768 to SP+reg addressing. */
14770 void
14771 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
14773 int regno = true_regnum (reg);
14774 enum machine_mode mode = GET_MODE (reg);
14775 enum reg_class rclass;
14776 rtx addr;
14777 rtx and_op2 = NULL_RTX;
14778 rtx addr_op1;
14779 rtx addr_op2;
14780 rtx scratch_or_premodify = scratch;
14781 rtx and_rtx;
14782 rtx cc_clobber;
14784 if (TARGET_DEBUG_ADDR)
14785 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
14787 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
14788 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14790 if (GET_CODE (mem) != MEM)
14791 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14793 rclass = REGNO_REG_CLASS (regno);
14794 addr = XEXP (mem, 0);
14796 switch (rclass)
14798 /* GPRs can handle reg + small constant; all other addresses need to use
14799 the scratch register. */
14800 case GENERAL_REGS:
14801 case BASE_REGS:
14802 if (GET_CODE (addr) == AND)
14804 and_op2 = XEXP (addr, 1);
14805 addr = XEXP (addr, 0);
14808 if (GET_CODE (addr) == PRE_MODIFY)
14810 scratch_or_premodify = XEXP (addr, 0);
14811 if (!REG_P (scratch_or_premodify))
14812 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14814 if (GET_CODE (XEXP (addr, 1)) != PLUS)
14815 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14817 addr = XEXP (addr, 1);
14820 if (GET_CODE (addr) == PLUS
14821 && (and_op2 != NULL_RTX
14822 || !rs6000_legitimate_offset_address_p (PTImode, addr,
14823 false, true)))
14825 addr_op1 = XEXP (addr, 0);
14826 addr_op2 = XEXP (addr, 1);
14827 if (!legitimate_indirect_address_p (addr_op1, false))
14828 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14830 if (!REG_P (addr_op2)
14831 && (GET_CODE (addr_op2) != CONST_INT
14832 || !satisfies_constraint_I (addr_op2)))
14834 if (TARGET_DEBUG_ADDR)
14836 fprintf (stderr,
14837 "\nMove plus addr to register %s, mode = %s: ",
14838 rs6000_reg_names[REGNO (scratch)],
14839 GET_MODE_NAME (mode));
14840 debug_rtx (addr_op2);
14842 rs6000_emit_move (scratch, addr_op2, Pmode);
14843 addr_op2 = scratch;
14846 emit_insn (gen_rtx_SET (VOIDmode,
14847 scratch_or_premodify,
14848 gen_rtx_PLUS (Pmode,
14849 addr_op1,
14850 addr_op2)));
14852 addr = scratch_or_premodify;
14853 scratch_or_premodify = scratch;
14855 else if (!legitimate_indirect_address_p (addr, false)
14856 && !rs6000_legitimate_offset_address_p (PTImode, addr,
14857 false, true))
14859 if (TARGET_DEBUG_ADDR)
14861 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
14862 rs6000_reg_names[REGNO (scratch_or_premodify)],
14863 GET_MODE_NAME (mode));
14864 debug_rtx (addr);
14866 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14867 addr = scratch_or_premodify;
14868 scratch_or_premodify = scratch;
14870 break;
14872 /* Float registers can do offset+reg addressing for scalar types. */
14873 case FLOAT_REGS:
14874 if (legitimate_indirect_address_p (addr, false) /* reg */
14875 || legitimate_indexed_address_p (addr, false) /* reg+reg */
14876 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
14877 && and_op2 == NULL_RTX
14878 && scratch_or_premodify == scratch
14879 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
14880 break;
14882 /* If this isn't a legacy floating point load/store, fall through to the
14883 VSX defaults. */
14885 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
14886 addresses into a scratch register. */
14887 case VSX_REGS:
14888 case ALTIVEC_REGS:
14890 /* With float regs, we need to handle the AND ourselves, since we can't
14891 use the Altivec instruction with an implicit AND -16. Allow scalar
14892 loads to float registers to use reg+offset even if VSX. */
14893 if (GET_CODE (addr) == AND
14894 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
14895 || GET_CODE (XEXP (addr, 1)) != CONST_INT
14896 || INTVAL (XEXP (addr, 1)) != -16
14897 || !VECTOR_MEM_ALTIVEC_P (mode)))
14899 and_op2 = XEXP (addr, 1);
14900 addr = XEXP (addr, 0);
14903 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
14904 as the address later. */
14905 if (GET_CODE (addr) == PRE_MODIFY
14906 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
14907 && (rclass != FLOAT_REGS
14908 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
14909 || and_op2 != NULL_RTX
14910 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
14912 scratch_or_premodify = XEXP (addr, 0);
14913 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
14914 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14916 if (GET_CODE (XEXP (addr, 1)) != PLUS)
14917 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14919 addr = XEXP (addr, 1);
14922 if (legitimate_indirect_address_p (addr, false) /* reg */
14923 || legitimate_indexed_address_p (addr, false) /* reg+reg */
14924 || (GET_CODE (addr) == AND /* Altivec memory */
14925 && rclass == ALTIVEC_REGS
14926 && GET_CODE (XEXP (addr, 1)) == CONST_INT
14927 && INTVAL (XEXP (addr, 1)) == -16
14928 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
14929 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
14930 ;
14932 else if (GET_CODE (addr) == PLUS)
14934 addr_op1 = XEXP (addr, 0);
14935 addr_op2 = XEXP (addr, 1);
14936 if (!REG_P (addr_op1))
14937 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14939 if (TARGET_DEBUG_ADDR)
14941 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
14942 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14943 debug_rtx (addr_op2);
14945 rs6000_emit_move (scratch, addr_op2, Pmode);
14946 emit_insn (gen_rtx_SET (VOIDmode,
14947 scratch_or_premodify,
14948 gen_rtx_PLUS (Pmode,
14949 addr_op1,
14950 scratch)));
14951 addr = scratch_or_premodify;
14952 scratch_or_premodify = scratch;
14955 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
14956 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
14957 || REG_P (addr))
14959 if (TARGET_DEBUG_ADDR)
14961 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
14962 rs6000_reg_names[REGNO (scratch_or_premodify)],
14963 GET_MODE_NAME (mode));
14964 debug_rtx (addr);
14967 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14968 addr = scratch_or_premodify;
14969 scratch_or_premodify = scratch;
14972 else
14973 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14975 break;
14977 default:
14978 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14981 /* If the original address involved a pre-modify, and we couldn't use the
14982 VSX memory instruction with update, and we haven't already taken care
14983 of it, store the address in the pre-modify register and use that as the
14984 address. */
14985 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
14987 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
14988 addr = scratch_or_premodify;
14991 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
14992 memory instruction, recreate the AND now, including the clobber which is
14993 generated by the general ANDSI3/ANDDI3 patterns for the
14994 andi. instruction. */
14995 if (and_op2 != NULL_RTX)
14997 if (! legitimate_indirect_address_p (addr, false))
14999 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
15000 addr = scratch;
15003 if (TARGET_DEBUG_ADDR)
15005 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
15006 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
15007 debug_rtx (and_op2);
15010 and_rtx = gen_rtx_SET (VOIDmode,
15011 scratch,
15012 gen_rtx_AND (Pmode,
15013 addr,
15014 and_op2));
15016 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
15017 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15018 gen_rtvec (2, and_rtx, cc_clobber)));
15019 addr = scratch;
15022 /* Adjust the address if it changed. */
15023 if (addr != XEXP (mem, 0))
15025 mem = replace_equiv_address_nv (mem, addr);
15026 if (TARGET_DEBUG_ADDR)
15027 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
15030 /* Now create the move. */
15031 if (store_p)
15032 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
15033 else
15034 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
15036 return;
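/* Illustrative sketch (editorial addition): the masking this function must
   sometimes recreate.  Altivec lvx/stvx ignore the low four address bits,
   so their RTL carries an explicit (and ... -16); when the scratch path is
   taken the AND is re-emitted via the general and<mode>3 patterns.  The
   mask arithmetic itself:  */

static unsigned long
example_altivec_align (unsigned long addr)
{
  return addr & ~(unsigned long) 15;  /* identical to addr & -16 */
}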
15039 /* Convert reloads involving 64-bit gprs and misaligned offset
15040 addressing, or multiple 32-bit gprs and offsets that are too large,
15041 to use indirect addressing. */
15043 void
15044 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
15046 int regno = true_regnum (reg);
15047 enum reg_class rclass;
15048 rtx addr;
15049 rtx scratch_or_premodify = scratch;
15051 if (TARGET_DEBUG_ADDR)
15053 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
15054 store_p ? "store" : "load");
15055 fprintf (stderr, "reg:\n");
15056 debug_rtx (reg);
15057 fprintf (stderr, "mem:\n");
15058 debug_rtx (mem);
15059 fprintf (stderr, "scratch:\n");
15060 debug_rtx (scratch);
15063 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
15064 gcc_assert (GET_CODE (mem) == MEM);
15065 rclass = REGNO_REG_CLASS (regno);
15066 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
15067 addr = XEXP (mem, 0);
15069 if (GET_CODE (addr) == PRE_MODIFY)
15071 scratch_or_premodify = XEXP (addr, 0);
15072 gcc_assert (REG_P (scratch_or_premodify));
15073 addr = XEXP (addr, 1);
15075 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
15077 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
15079 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
15081 /* Now create the move. */
15082 if (store_p)
15083 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
15084 else
15085 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
15087 return;
15090 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
15091 this function has any SDmode references. If we are on a power7 or later, we
15092 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
15093 can load/store the value. */
15095 static void
15096 rs6000_alloc_sdmode_stack_slot (void)
15098 tree t;
15099 basic_block bb;
15100 gimple_stmt_iterator gsi;
15102 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
15104 if (TARGET_NO_SDMODE_STACK)
15105 return;
15107 FOR_EACH_BB (bb)
15108 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
15110 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
15111 if (ret)
15113 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
15114 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
15115 SDmode, 0);
15116 return;
15120 /* Check for any SDmode parameters of the function. */
15121 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
15123 if (TREE_TYPE (t) == error_mark_node)
15124 continue;
15126 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
15127 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
15129 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
15130 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
15131 SDmode, 0);
15132 return;
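/* Editorial note (not in the original file): the slot is allocated in
   DDmode (8 bytes) rather than SDmode (4 bytes) because, without power7's
   LFIWZX/STFIWX, the FPRs can only move 4-byte decimal float data through
   the 8-byte lfd/stfd path; adjust_address_nv then provides an SDmode view
   of the 8-byte slot for the reload patterns.  */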
15137 static void
15138 rs6000_instantiate_decls (void)
15140 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
15141 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
15144 /* Given an rtx X being reloaded into a reg required to be
15145 in class CLASS, return the class of reg to actually use.
15146 In general this is just CLASS; but on some machines
15147 in some cases it is preferable to use a more restrictive class.
15149 On the RS/6000, we have to return NO_REGS when we want to reload a
15150 floating-point CONST_DOUBLE to force it to be copied to memory.
15152 We also don't want to reload integer values into floating-point
15153 registers if we can at all help it. In fact, this can
15154 cause reload to die, if it tries to generate a reload of CTR
15155 into a FP register and discovers it doesn't have the memory location
15156 required.
15158 ??? Would it be a good idea to have reload do the converse, that is
15159 try to reload floating modes into FP registers if possible?
15162 static enum reg_class
15163 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
15165 enum machine_mode mode = GET_MODE (x);
15167 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
15168 return rclass;
15170 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
15171 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
15172 && easy_vector_constant (x, mode))
15173 return ALTIVEC_REGS;
15175 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
15176 return NO_REGS;
15178 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
15179 return GENERAL_REGS;
15181 /* For VSX, prefer the traditional registers for 64-bit values because we can
15182 use the non-VSX loads. Prefer the Altivec registers if Altivec is
15183 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
15184 prefer Altivec loads. */
15185 if (rclass == VSX_REGS)
15187 if (GET_MODE_SIZE (mode) <= 8)
15188 return FLOAT_REGS;
15190 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
15191 return ALTIVEC_REGS;
15193 return rclass;
15196 return rclass;
15199 /* Debug version of rs6000_preferred_reload_class. */
15200 static enum reg_class
15201 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
15203 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
15205 fprintf (stderr,
15206 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
15207 "mode = %s, x:\n",
15208 reg_class_names[ret], reg_class_names[rclass],
15209 GET_MODE_NAME (GET_MODE (x)));
15210 debug_rtx (x);
15212 return ret;
15215 /* If we are copying between FP or AltiVec registers and anything else, we need
15216 a memory location. The exception is when we are targeting ppc64 and the
15217 direct move instructions between FPRs and GPRs are available. Also, under VSX, you
15218 can copy vector registers from the FP register set to the Altivec register
15219 set and vice versa. */
15221 static bool
15222 rs6000_secondary_memory_needed (enum reg_class from_class,
15223 enum reg_class to_class,
15224 enum machine_mode mode)
15226 enum rs6000_reg_type from_type, to_type;
15227 bool altivec_p = ((from_class == ALTIVEC_REGS)
15228 || (to_class == ALTIVEC_REGS));
15230 /* If a simple/direct move is available, we don't need secondary memory. */
15231 from_type = reg_class_to_reg_type[(int)from_class];
15232 to_type = reg_class_to_reg_type[(int)to_class];
15234 if (rs6000_secondary_reload_move (to_type, from_type, mode,
15235 (secondary_reload_info *)0, altivec_p))
15236 return false;
15238 /* If we have a floating point or vector register class, we need to use
15239 memory to transfer the data. */
15240 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
15241 return true;
15243 return false;
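/* Editorial note (not in the original file): two concrete outcomes of the
   hook above.  DImode between GPRs and VSX registers on a 64-bit power8 is
   a direct move, so no stack slot is needed; SImode between GPRs and FPRs
   has no register-to-register path here (32-bit integers are not allowed
   in FPR/VSX registers), so the copy must bounce through memory.  */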
15246 /* Debug version of rs6000_secondary_memory_needed. */
15247 static bool
15248 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
15249 enum reg_class to_class,
15250 enum machine_mode mode)
15252 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
15254 fprintf (stderr,
15255 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
15256 "to_class = %s, mode = %s\n",
15257 ret ? "true" : "false",
15258 reg_class_names[from_class],
15259 reg_class_names[to_class],
15260 GET_MODE_NAME (mode));
15262 return ret;
15265 /* Return the register class of a scratch register needed to copy IN into
15266 or out of a register in RCLASS in MODE. If it can be done directly,
15267 NO_REGS is returned. */
15269 static enum reg_class
15270 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
15271 rtx in)
15273 int regno;
15275 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
15276 #if TARGET_MACHO
15277 && MACHOPIC_INDIRECT
15278 #endif
15281 /* We cannot copy a symbolic operand directly into anything
15282 other than BASE_REGS for TARGET_ELF. So indicate that a
15283 register from BASE_REGS is needed as an intermediate
15284 register.
15286 On Darwin, pic addresses require a load from memory, which
15287 needs a base register. */
15288 if (rclass != BASE_REGS
15289 && (GET_CODE (in) == SYMBOL_REF
15290 || GET_CODE (in) == HIGH
15291 || GET_CODE (in) == LABEL_REF
15292 || GET_CODE (in) == CONST))
15293 return BASE_REGS;
15296 if (GET_CODE (in) == REG)
15298 regno = REGNO (in);
15299 if (regno >= FIRST_PSEUDO_REGISTER)
15301 regno = true_regnum (in);
15302 if (regno >= FIRST_PSEUDO_REGISTER)
15303 regno = -1;
15306 else if (GET_CODE (in) == SUBREG)
15308 regno = true_regnum (in);
15309 if (regno >= FIRST_PSEUDO_REGISTER)
15310 regno = -1;
15312 else
15313 regno = -1;
15315 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
15316 into anything. */
15317 if (rclass == GENERAL_REGS || rclass == BASE_REGS
15318 || (regno >= 0 && INT_REGNO_P (regno)))
15319 return NO_REGS;
15321 /* Constants, memory, and FP registers can go into FP registers. */
15322 if ((regno == -1 || FP_REGNO_P (regno))
15323 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
15324 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
15326 /* Memory, and FP/altivec registers can go into fp/altivec registers under
15327 VSX. However, for scalar variables, use the traditional floating point
15328 registers so that we can use offset+register addressing. */
15329 if (TARGET_VSX
15330 && (regno == -1 || VSX_REGNO_P (regno))
15331 && VSX_REG_CLASS_P (rclass))
15333 if (GET_MODE_SIZE (mode) < 16)
15334 return FLOAT_REGS;
15336 return NO_REGS;
15339 /* Memory, and AltiVec registers can go into AltiVec registers. */
15340 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
15341 && rclass == ALTIVEC_REGS)
15342 return NO_REGS;
15344 /* We can copy among the CR registers. */
15345 if ((rclass == CR_REGS || rclass == CR0_REGS)
15346 && regno >= 0 && CR_REGNO_P (regno))
15347 return NO_REGS;
15349 /* Otherwise, we need GENERAL_REGS. */
15350 return GENERAL_REGS;
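/* Editorial note (not in the original file): a worked case.  On ELF
   targets, reloading a SYMBOL_REF into FLOAT_REGS hits the first test
   above (rclass != BASE_REGS with a symbolic input), so the hook returns
   BASE_REGS: the address must be formed in a base register before the
   value can be loaded into the FPR.  */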
15353 /* Debug version of rs6000_secondary_reload_class. */
15354 static enum reg_class
15355 rs6000_debug_secondary_reload_class (enum reg_class rclass,
15356 enum machine_mode mode, rtx in)
15358 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
15359 fprintf (stderr,
15360 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
15361 "mode = %s, input rtx:\n",
15362 reg_class_names[ret], reg_class_names[rclass],
15363 GET_MODE_NAME (mode));
15364 debug_rtx (in);
15366 return ret;
15369 /* Return true if a mode change from FROM to TO is invalid for RCLASS. */
15371 static bool
15372 rs6000_cannot_change_mode_class (enum machine_mode from,
15373 enum machine_mode to,
15374 enum reg_class rclass)
15376 unsigned from_size = GET_MODE_SIZE (from);
15377 unsigned to_size = GET_MODE_SIZE (to);
15379 if (from_size != to_size)
15381 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
15383 if (reg_classes_intersect_p (xclass, rclass))
15385 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
15386 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
15388 /* Don't allow 64-bit types to overlap with 128-bit types that take a
15389 single register under VSX because the scalar part of the register
15390 is in the upper 64-bits, and not the lower 64-bits. Types like
15391 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
15392 IEEE floating point can't overlap, and neither can small
15393 values. */
15395 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
15396 return true;
15398 if (from_size < 8 || to_size < 8)
15399 return true;
15401 if (from_size == 8 && (8 * to_nregs) != to_size)
15402 return true;
15404 if (to_size == 8 && (8 * from_nregs) != from_size)
15405 return true;
15407 return false;
15409 else
15410 return false;
15413 if (TARGET_E500_DOUBLE
15414 && ((((to) == DFmode) + ((from) == DFmode)) == 1
15415 || (((to) == TFmode) + ((from) == TFmode)) == 1
15416 || (((to) == DDmode) + ((from) == DDmode)) == 1
15417 || (((to) == TDmode) + ((from) == TDmode)) == 1
15418 || (((to) == DImode) + ((from) == DImode)) == 1))
15419 return true;
15421 /* Since the VSX register set includes traditional floating point registers
15422 and altivec registers, just check for the size being different instead of
15423 trying to check whether the modes are vector modes. Otherwise it won't
15424 allow say DF and DI to change classes. For types like TFmode and TDmode
15425 that take 2 64-bit registers, rather than a single 128-bit register, don't
15426 allow subregs of those types to other 128 bit types. */
15427 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
15429 unsigned num_regs = (from_size + 15) / 16;
15430 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
15431 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
15432 return true;
15434 return (from_size != 8 && from_size != 16);
15437 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
15438 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
15439 return true;
15441 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
15442 && reg_classes_intersect_p (GENERAL_REGS, rclass))
15443 return true;
15445 return false;
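/* Editorial note (not in the original file): a worked case for the size
   checks above.  V2DImode occupies one 128-bit VSX register while DFmode
   occupies only 8 bytes of it, and the scalar sits in the upper 64 bits;
   so (subreg:DF (reg:V2DI ...) 0) cannot be a plain reinterpretation of
   the register, and the DF <-> V2DI change is reported invalid.  */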
15448 /* Debug version of rs6000_cannot_change_mode_class. */
15449 static bool
15450 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
15451 enum machine_mode to,
15452 enum reg_class rclass)
15454 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
15456 fprintf (stderr,
15457 "rs6000_cannot_change_mode_class, return %s, from = %s, "
15458 "to = %s, rclass = %s\n",
15459 ret ? "true" : "false",
15460 GET_MODE_NAME (from), GET_MODE_NAME (to),
15461 reg_class_names[rclass]);
15463 return ret;
15466 /* Return a string to do a move operation of 128 bits of data. */
15468 const char *
15469 rs6000_output_move_128bit (rtx operands[])
15471 rtx dest = operands[0];
15472 rtx src = operands[1];
15473 enum machine_mode mode = GET_MODE (dest);
15474 int dest_regno;
15475 int src_regno;
15476 bool dest_gpr_p, dest_fp_p, dest_av_p, dest_vsx_p;
15477 bool src_gpr_p, src_fp_p, src_av_p, src_vsx_p;
15479 if (REG_P (dest))
15481 dest_regno = REGNO (dest);
15482 dest_gpr_p = INT_REGNO_P (dest_regno);
15483 dest_fp_p = FP_REGNO_P (dest_regno);
15484 dest_av_p = ALTIVEC_REGNO_P (dest_regno);
15485 dest_vsx_p = dest_fp_p | dest_av_p;
15487 else
15489 dest_regno = -1;
15490 dest_gpr_p = dest_fp_p = dest_av_p = dest_vsx_p = false;
15493 if (REG_P (src))
15495 src_regno = REGNO (src);
15496 src_gpr_p = INT_REGNO_P (src_regno);
15497 src_fp_p = FP_REGNO_P (src_regno);
15498 src_av_p = ALTIVEC_REGNO_P (src_regno);
15499 src_vsx_p = src_fp_p | src_av_p;
15501 else
15503 src_regno = -1;
15504 src_gpr_p = src_fp_p = src_av_p = src_vsx_p = false;
15507 /* Register moves. */
15508 if (dest_regno >= 0 && src_regno >= 0)
15510 if (dest_gpr_p)
15512 if (src_gpr_p)
15513 return "#";
15515 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
15516 return "#";
15519 else if (TARGET_VSX && dest_vsx_p)
15521 if (src_vsx_p)
15522 return "xxlor %x0,%x1,%x1";
15524 else if (TARGET_DIRECT_MOVE && src_gpr_p)
15525 return "#";
15528 else if (TARGET_ALTIVEC && dest_av_p && src_av_p)
15529 return "vor %0,%1,%1";
15531 else if (dest_fp_p && src_fp_p)
15532 return "#";
15535 /* Loads. */
15536 else if (dest_regno >= 0 && MEM_P (src))
15538 if (dest_gpr_p)
15540 if (TARGET_QUAD_MEMORY && (dest_regno & 1) == 0
15541 && quad_memory_operand (src, mode)
15542 && !reg_overlap_mentioned_p (dest, src))
15544 /* lq/stq only has DQ-form, so avoid X-form that %y produces. */
15545 return REG_P (XEXP (src, 0)) ? "lq %0,%1" : "lq %0,%y1";
15547 else
15548 return "#";
15551 else if (TARGET_ALTIVEC && dest_av_p
15552 && altivec_indexed_or_indirect_operand (src, mode))
15553 return "lvx %0,%y1";
15555 else if (TARGET_VSX && dest_vsx_p)
15557 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
15558 return "lxvw4x %x0,%y1";
15559 else
15560 return "lxvd2x %x0,%y1";
15563 else if (TARGET_ALTIVEC && dest_av_p)
15564 return "lvx %0,%y1";
15566 else if (dest_fp_p)
15567 return "#";
15570 /* Stores. */
15571 else if (src_regno >= 0 && MEM_P (dest))
15573 if (src_gpr_p)
15575 if (TARGET_QUAD_MEMORY && (src_regno & 1) == 0
15576 && quad_memory_operand (dest, mode))
15578 /* lq/stq only has DQ-form, so avoid X-form that %y produces. */
15579 return REG_P (XEXP (dest, 0)) ? "stq %1,%0" : "stq %1,%y0";
15581 else
15582 return "#";
15585 else if (TARGET_ALTIVEC && src_av_p
15586 && altivec_indexed_or_indirect_operand (dest, mode))
15587 return "stvx %1,%y0";
15589 else if (TARGET_VSX && src_vsx_p)
15591 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
15592 return "stxvw4x %x1,%y0";
15593 else
15594 return "stxvd2x %x1,%y0";
15597 else if (TARGET_ALTIVEC && src_av_p)
15598 return "stvx %1,%y0";
15600 else if (src_fp_p)
15601 return "#";
15604 /* Constants. */
15605 else if (dest_regno >= 0
15606 && (GET_CODE (src) == CONST_INT
15607 || GET_CODE (src) == CONST_DOUBLE
15608 || GET_CODE (src) == CONST_VECTOR))
15610 if (dest_gpr_p)
15611 return "#";
15613 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
15614 return "xxlxor %x0,%x0,%x0";
15616 else if (TARGET_ALTIVEC && dest_av_p)
15617 return output_vec_const_move (operands);
15620 if (TARGET_DEBUG_ADDR)
15622 fprintf (stderr, "\n===== Bad 128 bit move:\n");
15623 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
15626 gcc_unreachable ();
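/* Editorial note (not in the original file): representative templates the
   function above returns for V4SImode operands, assuming the relevant ISA
   flags:

     VSX reg  <- VSX reg      "xxlor %x0,%x1,%x1"
     Altivec  <- Altivec      "vor %0,%1,%1"
     VSX reg  <- memory       "lxvw4x %x0,%y1"
     memory   <- VSX reg      "stxvw4x %x1,%y0"
     VSX reg  <- zero const   "xxlxor %x0,%x0,%x0"
     GPR      <- GPR          "#" (split into word-sized moves later)  */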
15630 /* Given a comparison operation, return the bit number in CCR to test. We
15631 know this is a valid comparison.
15633 SCC_P is 1 if this is for an scc. That means that %D will have been
15634 used instead of %C, so the bits will be in different places.
15636 Return -1 if OP isn't a valid comparison for some reason. */
15638 int
15639 ccr_bit (rtx op, int scc_p)
15641 enum rtx_code code = GET_CODE (op);
15642 enum machine_mode cc_mode;
15643 int cc_regnum;
15644 int base_bit;
15645 rtx reg;
15647 if (!COMPARISON_P (op))
15648 return -1;
15650 reg = XEXP (op, 0);
15652 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
15654 cc_mode = GET_MODE (reg);
15655 cc_regnum = REGNO (reg);
15656 base_bit = 4 * (cc_regnum - CR0_REGNO);
15658 validate_condition_mode (code, cc_mode);
15660 /* When generating a sCOND operation, only positive conditions are
15661 allowed. */
15662 gcc_assert (!scc_p
15663 || code == EQ || code == GT || code == LT || code == UNORDERED
15664 || code == GTU || code == LTU);
15666 switch (code)
15668 case NE:
15669 return scc_p ? base_bit + 3 : base_bit + 2;
15670 case EQ:
15671 return base_bit + 2;
15672 case GT: case GTU: case UNLE:
15673 return base_bit + 1;
15674 case LT: case LTU: case UNGE:
15675 return base_bit;
15676 case ORDERED: case UNORDERED:
15677 return base_bit + 3;
15679 case GE: case GEU:
15680 /* If scc, we will have done a cror to put the bit in the
15681 unordered position. So test that bit. For integer, this is ! LT
15682 unless this is an scc insn. */
15683 return scc_p ? base_bit + 3 : base_bit;
15685 case LE: case LEU:
15686 return scc_p ? base_bit + 3 : base_bit + 1;
15688 default:
15689 gcc_unreachable ();
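/* Illustrative sketch (editorial addition): each CR field holds four bits
   in the order LT, GT, EQ, SO/UN, so for a GT test against CR field 2
   (cc_regnum = CR0_REGNO + 2) the function above returns
   4 * 2 + 1 = 9 in the non-scc case.  The base computation:  */

static int
example_cr_base_bit (int cc_regnum, int cr0_regno)
{
  return 4 * (cc_regnum - cr0_regno);  /* first bit (LT) of the field */
}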
15693 /* Return the GOT register. */
15695 rtx
15696 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
15698 /* The second flow pass currently (June 1999) can't update
15699 regs_ever_live without disturbing other parts of the compiler, so
15700 update it here to make the prolog/epilogue code happy. */
15701 if (!can_create_pseudo_p ()
15702 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
15703 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
15705 crtl->uses_pic_offset_table = 1;
15707 return pic_offset_table_rtx;
15710 static rs6000_stack_t stack_info;
15712 /* Function to init struct machine_function.
15713 This will be called, via a pointer variable,
15714 from push_function_context. */
15716 static struct machine_function *
15717 rs6000_init_machine_status (void)
15719 stack_info.reload_completed = 0;
15720 return ggc_alloc_cleared_machine_function ();
15723 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
15725 int
15726 extract_MB (rtx op)
15728 int i;
15729 unsigned long val = INTVAL (op);
15731 /* If the high bit is zero, the value is the first 1 bit we find
15732 from the left. */
15733 if ((val & 0x80000000) == 0)
15735 gcc_assert (val & 0xffffffff);
15737 i = 1;
15738 while (((val <<= 1) & 0x80000000) == 0)
15739 ++i;
15740 return i;
15743 /* If the high bit is set and the low bit is not, or the mask is all
15744 1's, the value is zero. */
15745 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
15746 return 0;
15748 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
15749 from the right. */
15750 i = 31;
15751 while (((val >>= 1) & 1) != 0)
15752 --i;
15754 return i;
15757 int
15758 extract_ME (rtx op)
15760 int i;
15761 unsigned long val = INTVAL (op);
15763 /* If the low bit is zero, the value is the first 1 bit we find from
15764 the right. */
15765 if ((val & 1) == 0)
15767 gcc_assert (val & 0xffffffff);
15769 i = 30;
15770 while (((val >>= 1) & 1) == 0)
15771 --i;
15773 return i;
15776 /* If the low bit is set and the high bit is not, or the mask is all
15777 1's, the value is 31. */
15778 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
15779 return 31;
15781 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
15782 from the left. */
15783 i = 0;
15784 while (((val <<= 1) & 0x80000000) != 0)
15785 ++i;
15787 return i;
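/* Editorial note (not in the original file): worked cases for the two
   extractors above, in IBM bit numbering (bit 0 is the MSB):

     0x00ffff00  contiguous mask of bits 8..23:  MB = 8,  ME = 23
     0xff0000ff  wrap-around mask (bits 24..7):  MB = 24, ME = 7

   so an rlwinm with MB=24, ME=7 keeps only the top and bottom bytes.  */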
15790 /* Locate some local-dynamic symbol still in use by this function
15791 so that we can print its name in some tls_ld pattern. */
15793 static const char *
15794 rs6000_get_some_local_dynamic_name (void)
15796 rtx insn;
15798 if (cfun->machine->some_ld_name)
15799 return cfun->machine->some_ld_name;
15801 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
15802 if (INSN_P (insn)
15803 && for_each_rtx (&PATTERN (insn),
15804 rs6000_get_some_local_dynamic_name_1, 0))
15805 return cfun->machine->some_ld_name;
15807 gcc_unreachable ();
15810 /* Helper function for rs6000_get_some_local_dynamic_name. */
15812 static int
15813 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
15815 rtx x = *px;
15817 if (GET_CODE (x) == SYMBOL_REF)
15819 const char *str = XSTR (x, 0);
15820 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
15822 cfun->machine->some_ld_name = str;
15823 return 1;
15827 return 0;
15830 /* Write out a function code label. */
15832 void
15833 rs6000_output_function_entry (FILE *file, const char *fname)
15835 if (fname[0] != '.')
15837 switch (DEFAULT_ABI)
15839 default:
15840 gcc_unreachable ();
15842 case ABI_AIX:
15843 if (DOT_SYMBOLS)
15844 putc ('.', file);
15845 else
15846 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
15847 break;
15849 case ABI_V4:
15850 case ABI_DARWIN:
15851 break;
15855 RS6000_OUTPUT_BASENAME (file, fname);
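/* Editorial note (not in the original file): for a function "foo" under
   the AIX ABI this emits ".foo" when dot symbols are in use (the
   traditional code entry point) and the local label "L.foo" otherwise;
   the V4 and Darwin ABIs emit the name unchanged.  */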
15858 /* Print an operand. Recognize special options, documented below. */
15860 #if TARGET_ELF
15861 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
15862 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
15863 #else
15864 #define SMALL_DATA_RELOC "sda21"
15865 #define SMALL_DATA_REG 0
15866 #endif
15868 void
15869 print_operand (FILE *file, rtx x, int code)
15871 int i;
15872 unsigned HOST_WIDE_INT uval;
15874 switch (code)
15876 /* %a is output_address. */
15878 case 'b':
15879 /* If constant, low-order 16 bits of constant, unsigned.
15880 Otherwise, write normally. */
15881 if (INT_P (x))
15882 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
15883 else
15884 print_operand (file, x, 0);
15885 return;
15887 case 'B':
15888 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
15889 for 64-bit mask direction. */
15890 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
15891 return;
15893 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
15894 output_operand. */
15896 case 'D':
15897 /* Like 'J' but get to the GT bit only. */
15898 gcc_assert (REG_P (x));
15900 /* Bit 1 is GT bit. */
15901 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
15903 /* Add one for shift count in rlinm for scc. */
15904 fprintf (file, "%d", i + 1);
15905 return;
15907 case 'E':
15908 /* X is a CR register. Print the number of the EQ bit of the CR. */
15909 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15910 output_operand_lossage ("invalid %%E value");
15911 else
15912 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
15913 return;
15915 case 'f':
15916 /* X is a CR register. Print the shift count needed to move it
15917 to the high-order four bits. */
15918 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15919 output_operand_lossage ("invalid %%f value");
15920 else
15921 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
15922 return;
15924 case 'F':
15925 /* Similar, but print the count for the rotate in the opposite
15926 direction. */
15927 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15928 output_operand_lossage ("invalid %%F value");
15929 else
15930 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
15931 return;
15933 case 'G':
15934 /* X is a constant integer. If it is negative, print "m",
15935 otherwise print "z". This is to make an aze or ame insn. */
15936 if (GET_CODE (x) != CONST_INT)
15937 output_operand_lossage ("invalid %%G value");
15938 else if (INTVAL (x) >= 0)
15939 putc ('z', file);
15940 else
15941 putc ('m', file);
15942 return;
15944 case 'h':
15945 /* If constant, output low-order five bits. Otherwise, write
15946 normally. */
15947 if (INT_P (x))
15948 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
15949 else
15950 print_operand (file, x, 0);
15951 return;
15953 case 'H':
15954 /* If constant, output low-order six bits. Otherwise, write
15955 normally. */
15956 if (INT_P (x))
15957 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
15958 else
15959 print_operand (file, x, 0);
15960 return;
15962 case 'I':
15963 /* Print `i' if this is a constant, else nothing. */
15964 if (INT_P (x))
15965 putc ('i', file);
15966 return;
15968 case 'j':
15969 /* Write the bit number in CCR for jump. */
15970 i = ccr_bit (x, 0);
15971 if (i == -1)
15972 output_operand_lossage ("invalid %%j code");
15973 else
15974 fprintf (file, "%d", i);
15975 return;
15977 case 'J':
15978 /* Similar, but add one for shift count in rlinm for scc and pass
15979 scc flag to `ccr_bit'. */
15980 i = ccr_bit (x, 1);
15981 if (i == -1)
15982 output_operand_lossage ("invalid %%J code");
15983 else
15984 /* If we want bit 31, write a shift count of zero, not 32. */
15985 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15986 return;
15988 case 'k':
15989 /* X must be a constant. Write the 1's complement of the
15990 constant. */
15991 if (! INT_P (x))
15992 output_operand_lossage ("invalid %%k value");
15993 else
15994 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
15995 return;
15997 case 'K':
15998 /* X must be a symbolic constant on ELF. Write an
15999 expression suitable for an 'addi' that adds in the low 16
16000 bits of the MEM. */
16001 if (GET_CODE (x) == CONST)
16003 if (GET_CODE (XEXP (x, 0)) != PLUS
16004 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
16005 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
16006 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
16007 output_operand_lossage ("invalid %%K value");
16009 print_operand_address (file, x);
16010 fputs ("@l", file);
16011 return;
16013 /* %l is output_asm_label. */
16015 case 'L':
16016 /* Write second word of DImode or DFmode reference. Works on register
16017 or non-indexed memory only. */
16018 if (REG_P (x))
16019 fputs (reg_names[REGNO (x) + 1], file);
16020 else if (MEM_P (x))
16022 /* Handle possible auto-increment. Since it is pre-increment and
16023 we have already done it, we can just use an offset of word. */
16024 if (GET_CODE (XEXP (x, 0)) == PRE_INC
16025 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
16026 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
16027 UNITS_PER_WORD));
16028 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16029 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
16030 UNITS_PER_WORD));
16031 else
16032 output_address (XEXP (adjust_address_nv (x, SImode,
16033 UNITS_PER_WORD),
16034 0));
16036 if (small_data_operand (x, GET_MODE (x)))
16037 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16038 reg_names[SMALL_DATA_REG]);
16040 return;
16042 case 'm':
16043 /* MB value for a mask operand. */
16044 if (! mask_operand (x, SImode))
16045 output_operand_lossage ("invalid %%m value");
16047 fprintf (file, "%d", extract_MB (x));
16048 return;
16050 case 'M':
16051 /* ME value for a mask operand. */
16052 if (! mask_operand (x, SImode))
16053 output_operand_lossage ("invalid %%M value");
16055 fprintf (file, "%d", extract_ME (x));
16056 return;
16058 /* %n outputs the negative of its operand. */
16060 case 'N':
16061 /* Write the number of elements in the vector times 4. */
16062 if (GET_CODE (x) != PARALLEL)
16063 output_operand_lossage ("invalid %%N value");
16064 else
16065 fprintf (file, "%d", XVECLEN (x, 0) * 4);
16066 return;
16068 case 'O':
16069 /* Similar, but subtract 1 first. */
16070 if (GET_CODE (x) != PARALLEL)
16071 output_operand_lossage ("invalid %%O value");
16072 else
16073 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
16074 return;
16076 case 'p':
16077 /* X is a CONST_INT that is a power of two. Output the logarithm. */
16078 if (! INT_P (x)
16079 || INTVAL (x) < 0
16080 || (i = exact_log2 (INTVAL (x))) < 0)
16081 output_operand_lossage ("invalid %%p value");
16082 else
16083 fprintf (file, "%d", i);
16084 return;
16086 case 'P':
16087 /* The operand must be an indirect memory reference. The result
16088 is the register name. */
16089 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
16090 || REGNO (XEXP (x, 0)) >= 32)
16091 output_operand_lossage ("invalid %%P value");
16092 else
16093 fputs (reg_names[REGNO (XEXP (x, 0))], file);
16094 return;
16096 case 'q':
16097 /* This outputs the logical code corresponding to a boolean
16098 expression. The expression may have one or both operands
16099 negated (if one, only the first one). For condition register
16100 logical operations, it will also treat the negated
16101 CR codes as NOTs, but not handle NOTs of them. */
16103 const char *const *t = 0;
16104 const char *s;
16105 enum rtx_code code = GET_CODE (x);
16106 static const char * const tbl[3][3] = {
16107 { "and", "andc", "nor" },
16108 { "or", "orc", "nand" },
16109 { "xor", "eqv", "xor" } };
16111 if (code == AND)
16112 t = tbl[0];
16113 else if (code == IOR)
16114 t = tbl[1];
16115 else if (code == XOR)
16116 t = tbl[2];
16117 else
16118 output_operand_lossage ("invalid %%q value");
16120 if (GET_CODE (XEXP (x, 0)) != NOT)
16121 s = t[0];
16122 else
16124 if (GET_CODE (XEXP (x, 1)) == NOT)
16125 s = t[2];
16126 else
16127 s = t[1];
16130 fputs (s, file);
16132 return;
16134 case 'Q':
16135 if (! TARGET_MFCRF)
16136 return;
16137 fputc (',', file);
16138 /* FALLTHRU */
16140 case 'R':
16141 /* X is a CR register. Print the mask for `mtcrf'. */
16142 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
16143 output_operand_lossage ("invalid %%R value");
16144 else
16145 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
16146 return;
16148 case 's':
16149 /* Low 5 bits of 32 - value. */
16150 if (! INT_P (x))
16151 output_operand_lossage ("invalid %%s value");
16152 else
16153 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
16154 return;
16156 case 'S':
16157 /* PowerPC64 mask position. All 0's is excluded.
16158 CONST_INT 32-bit mask is considered sign-extended so any
16159 transition must occur within the CONST_INT, not on the boundary. */
16160 if (! mask64_operand (x, DImode))
16161 output_operand_lossage ("invalid %%S value");
16163 uval = INTVAL (x);
16165 if (uval & 1) /* Clear Left */
16167 #if HOST_BITS_PER_WIDE_INT > 64
16168 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
16169 #endif
16170 i = 64;
16172 else /* Clear Right */
16174 uval = ~uval;
16175 #if HOST_BITS_PER_WIDE_INT > 64
16176 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
16177 #endif
16178 i = 63;
16180 while (uval != 0)
16181 --i, uval >>= 1;
16182 gcc_assert (i >= 0);
16183 fprintf (file, "%d", i);
16184 return;
16186 case 't':
16187 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
16188 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
16190 /* Bit 3 is OV bit. */
16191 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
16193 /* If we want bit 31, write a shift count of zero, not 32. */
16194 fprintf (file, "%d", i == 31 ? 0 : i + 1);
16195 return;
16197 case 'T':
16198 /* Print the symbolic name of a branch target register. */
16199 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
16200 && REGNO (x) != CTR_REGNO))
16201 output_operand_lossage ("invalid %%T value");
16202 else if (REGNO (x) == LR_REGNO)
16203 fputs ("lr", file);
16204 else
16205 fputs ("ctr", file);
16206 return;
16208 case 'u':
16209 /* High-order 16 bits of constant for use in unsigned operand. */
16210 if (! INT_P (x))
16211 output_operand_lossage ("invalid %%u value");
16212 else
16213 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
16214 (INTVAL (x) >> 16) & 0xffff);
16215 return;
16217 case 'v':
16218 /* High-order 16 bits of constant for use in signed operand. */
16219 if (! INT_P (x))
16220 output_operand_lossage ("invalid %%v value");
16221 else
16222 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
16223 (INTVAL (x) >> 16) & 0xffff);
16224 return;
16226 case 'U':
16227 /* Print `u' if this has an auto-increment or auto-decrement. */
16228 if (MEM_P (x)
16229 && (GET_CODE (XEXP (x, 0)) == PRE_INC
16230 || GET_CODE (XEXP (x, 0)) == PRE_DEC
16231 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
16232 putc ('u', file);
16233 return;
16235 case 'V':
16236 /* Print the trap code for this operand. */
16237 switch (GET_CODE (x))
16239 case EQ:
16240 fputs ("eq", file); /* 4 */
16241 break;
16242 case NE:
16243 fputs ("ne", file); /* 24 */
16244 break;
16245 case LT:
16246 fputs ("lt", file); /* 16 */
16247 break;
16248 case LE:
16249 fputs ("le", file); /* 20 */
16250 break;
16251 case GT:
16252 fputs ("gt", file); /* 8 */
16253 break;
16254 case GE:
16255 fputs ("ge", file); /* 12 */
16256 break;
16257 case LTU:
16258 fputs ("llt", file); /* 2 */
16259 break;
16260 case LEU:
16261 fputs ("lle", file); /* 6 */
16262 break;
16263 case GTU:
16264 fputs ("lgt", file); /* 1 */
16265 break;
16266 case GEU:
16267 fputs ("lge", file); /* 5 */
16268 break;
16269 default:
16270 gcc_unreachable ();
16272 break;
16274 case 'w':
16275 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
16276 normally. */
16277 if (INT_P (x))
16278 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
16279 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
16280 else
16281 print_operand (file, x, 0);
16282 return;
16284 case 'W':
16285 /* MB value for a PowerPC64 rldic operand. */
16286 i = clz_hwi (INTVAL (x));
16288 fprintf (file, "%d", i);
16289 return;
16291 case 'x':
16292 /* X is a FPR or Altivec register used in a VSX context. */
16293 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
16294 output_operand_lossage ("invalid %%x value");
16295 else
16297 int reg = REGNO (x);
16298 int vsx_reg = (FP_REGNO_P (reg)
16299 ? reg - 32
16300 : reg - FIRST_ALTIVEC_REGNO + 32);
16302 #ifdef TARGET_REGNAMES
16303 if (TARGET_REGNAMES)
16304 fprintf (file, "%%vs%d", vsx_reg);
16305 else
16306 #endif
16307 fprintf (file, "%d", vsx_reg);
16309 return;
16311 case 'X':
16312 if (MEM_P (x)
16313 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
16314 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
16315 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
16316 putc ('x', file);
16317 return;
16319 case 'Y':
16320 /* Like 'L', for third word of TImode/PTImode */
16321 if (REG_P (x))
16322 fputs (reg_names[REGNO (x) + 2], file);
16323 else if (MEM_P (x))
16325 if (GET_CODE (XEXP (x, 0)) == PRE_INC
16326 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
16327 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
16328 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16329 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
16330 else
16331 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
16332 if (small_data_operand (x, GET_MODE (x)))
16333 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16334 reg_names[SMALL_DATA_REG]);
16336 return;
16338 case 'z':
16339 /* X is a SYMBOL_REF. Write out the name preceded by a
16340 period and without any trailing data in brackets. Used for function
16341 names. If we are configured for System V (or the embedded ABI) on
16342 the PowerPC, do not emit the period, since those systems do not use
16343 TOCs and the like. */
16344 gcc_assert (GET_CODE (x) == SYMBOL_REF);
16346 /* For macho, check to see if we need a stub. */
16347 if (TARGET_MACHO)
16349 const char *name = XSTR (x, 0);
16350 #if TARGET_MACHO
16351 if (darwin_emit_branch_islands
16352 && MACHOPIC_INDIRECT
16353 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
16354 name = machopic_indirection_name (x, /*stub_p=*/true);
16355 #endif
16356 assemble_name (file, name);
16358 else if (!DOT_SYMBOLS)
16359 assemble_name (file, XSTR (x, 0));
16360 else
16361 rs6000_output_function_entry (file, XSTR (x, 0));
16362 return;
16364 case 'Z':
16365 /* Like 'L', for last word of TImode/PTImode. */
16366 if (REG_P (x))
16367 fputs (reg_names[REGNO (x) + 3], file);
16368 else if (MEM_P (x))
16370 if (GET_CODE (XEXP (x, 0)) == PRE_INC
16371 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
16372 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
16373 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16374 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
16375 else
16376 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
16377 if (small_data_operand (x, GET_MODE (x)))
16378 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16379 reg_names[SMALL_DATA_REG]);
16381 return;
16383 /* Print AltiVec or SPE memory operand. */
16384 case 'y':
16386 rtx tmp;
16388 gcc_assert (MEM_P (x));
16390 tmp = XEXP (x, 0);
16392 /* Ugly hack because %y is overloaded. */
16393 if ((TARGET_SPE || TARGET_E500_DOUBLE)
16394 && (GET_MODE_SIZE (GET_MODE (x)) == 8
16395 || GET_MODE (x) == TFmode
16396 || GET_MODE (x) == TImode
16397 || GET_MODE (x) == PTImode))
16399 /* Handle [reg]. */
16400 if (REG_P (tmp))
16402 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
16403 break;
16405 /* Handle [reg+UIMM]. */
16406 else if (GET_CODE (tmp) == PLUS
16407 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
16409 int off;
16411 gcc_assert (REG_P (XEXP (tmp, 0)));
16413 off = INTVAL (XEXP (tmp, 1));
16414 fprintf (file, "%d(%s)", off, reg_names[REGNO (XEXP (tmp, 0))]);
16415 break;
16418 /* Fall through. Must be [reg+reg]. */
16420 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
16421 && GET_CODE (tmp) == AND
16422 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
16423 && INTVAL (XEXP (tmp, 1)) == -16)
16424 tmp = XEXP (tmp, 0);
16425 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
16426 && GET_CODE (tmp) == PRE_MODIFY)
16427 tmp = XEXP (tmp, 1);
16428 if (REG_P (tmp))
16429 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
16430 else
16432 if (GET_CODE (tmp) != PLUS
16433 || !REG_P (XEXP (tmp, 0))
16434 || !REG_P (XEXP (tmp, 1)))
16436 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
16437 break;
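/* In the RA slot of an indexed form, register 0 means a literal
   zero rather than r0, so if the first register is r0 it must be
   printed second.  */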
16440 if (REGNO (XEXP (tmp, 0)) == 0)
16441 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
16442 reg_names[ REGNO (XEXP (tmp, 0)) ]);
16443 else
16444 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
16445 reg_names[ REGNO (XEXP (tmp, 1)) ]);
16447 break;
16450 case 0:
16451 if (REG_P (x))
16452 fprintf (file, "%s", reg_names[REGNO (x)]);
16453 else if (MEM_P (x))
16455 /* We need to handle PRE_INC and PRE_DEC here, since we need to
16456 know the width from the mode. */
16457 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
16458 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
16459 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
16460 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
16461 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
16462 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
16463 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
16464 output_address (XEXP (XEXP (x, 0), 1));
16465 else
16466 output_address (XEXP (x, 0));
16468 else
16470 if (toc_relative_expr_p (x, false))
16471 /* This hack along with a corresponding hack in
16472 rs6000_output_addr_const_extra arranges to output addends
16473 where the assembler expects to find them. eg.
16474 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
16475 without this hack would be output as "x@toc+4". We
16476 want "x+4@toc". */
16477 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
16478 else
16479 output_addr_const (file, x);
16481 return;
16483 case '&':
16484 assemble_name (file, rs6000_get_some_local_dynamic_name ());
16485 return;
16487 default:
16488 output_operand_lossage ("invalid %%xn code");
16492 /* Print the address of an operand. */
16494 void
16495 print_operand_address (FILE *file, rtx x)
16497 if (REG_P (x))
16498 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
16499 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
16500 || GET_CODE (x) == LABEL_REF)
16502 output_addr_const (file, x);
16503 if (small_data_operand (x, GET_MODE (x)))
16504 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
16505 reg_names[SMALL_DATA_REG]);
16506 else
16507 gcc_assert (!TARGET_TOC);
16509 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
16510 && REG_P (XEXP (x, 1)))
16512 if (REGNO (XEXP (x, 0)) == 0)
16513 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
16514 reg_names[ REGNO (XEXP (x, 0)) ]);
16515 else
16516 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
16517 reg_names[ REGNO (XEXP (x, 1)) ]);
16519 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
16520 && GET_CODE (XEXP (x, 1)) == CONST_INT)
16521 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
16522 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
16523 #if TARGET_MACHO
16524 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
16525 && CONSTANT_P (XEXP (x, 1)))
16527 fprintf (file, "lo16(");
16528 output_addr_const (file, XEXP (x, 1));
16529 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
16531 #endif
16532 #if TARGET_ELF
16533 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
16534 && CONSTANT_P (XEXP (x, 1)))
16536 output_addr_const (file, XEXP (x, 1));
16537 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
16539 #endif
16540 else if (toc_relative_expr_p (x, false))
16542 /* This hack along with a corresponding hack in
16543 rs6000_output_addr_const_extra arranges to output addends
16544 where the assembler expects to find them. eg.
16545 (lo_sum (reg 9)
16546 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
16547 without this hack would be output as "x@toc+8@l(9)". We
16548 want "x+8@toc@l(9)". */
16549 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
16550 if (GET_CODE (x) == LO_SUM)
16551 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
16552 else
16553 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
16555 else
16556 gcc_unreachable ();
16559 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
16561 static bool
16562 rs6000_output_addr_const_extra (FILE *file, rtx x)
16564 if (GET_CODE (x) == UNSPEC)
16565 switch (XINT (x, 1))
16567 case UNSPEC_TOCREL:
16568 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
16569 && REG_P (XVECEXP (x, 0, 1))
16570 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
16571 output_addr_const (file, XVECEXP (x, 0, 0));
16572 if (x == tocrel_base && tocrel_offset != const0_rtx)
16574 if (INTVAL (tocrel_offset) >= 0)
16575 fprintf (file, "+");
16576 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
16578 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
16580 putc ('-', file);
16581 assemble_name (file, toc_label_name);
16583 else if (TARGET_ELF)
16584 fputs ("@toc", file);
16585 return true;
16587 #if TARGET_MACHO
16588 case UNSPEC_MACHOPIC_OFFSET:
16589 output_addr_const (file, XVECEXP (x, 0, 0));
16590 putc ('-', file);
16591 machopic_output_function_base_name (file);
16592 return true;
16593 #endif
16595 return false;
16598 /* Target hook for assembling integer objects. The PowerPC version has
16599 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
16600 is defined. It also needs to handle DI-mode objects on 64-bit
16601 targets. */
16603 static bool
16604 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
16606 #ifdef RELOCATABLE_NEEDS_FIXUP
16607 /* Special handling for SI values. */
16608 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
16610 static int recurse = 0;
16612 /* For -mrelocatable, we mark all addresses that need to be fixed up in
16613 the .fixup section. Since the TOC section is already relocated, we
16614 don't need to mark it here. We used to skip the text section, but it
16615 should never be valid for relocated addresses to be placed in the text
16616 section. */
16617 if (TARGET_RELOCATABLE
16618 && in_section != toc_section
16619 && !recurse
16620 && GET_CODE (x) != CONST_INT
16621 && GET_CODE (x) != CONST_DOUBLE
16622 && CONSTANT_P (x))
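/* The sequence emitted below looks roughly like:
   LCPn:
   .long (EXPR)@fixup
   .section ".fixup","aw"
   .align 2
   .long LCPn
   .previous
   where LCPn is a fresh internal label and EXPR is the constant
   needing relocation.  */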
16624 char buf[256];
16626 recurse = 1;
16627 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
16628 fixuplabelno++;
16629 ASM_OUTPUT_LABEL (asm_out_file, buf);
16630 fprintf (asm_out_file, "\t.long\t(");
16631 output_addr_const (asm_out_file, x);
16632 fprintf (asm_out_file, ")@fixup\n");
16633 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
16634 ASM_OUTPUT_ALIGN (asm_out_file, 2);
16635 fprintf (asm_out_file, "\t.long\t");
16636 assemble_name (asm_out_file, buf);
16637 fprintf (asm_out_file, "\n\t.previous\n");
16638 recurse = 0;
16639 return true;
16641 /* Remove initial .'s to turn a -mcall-aixdesc function
16642 address into the address of the descriptor, not the function
16643 itself. */
16644 else if (GET_CODE (x) == SYMBOL_REF
16645 && XSTR (x, 0)[0] == '.'
16646 && DEFAULT_ABI == ABI_AIX)
16648 const char *name = XSTR (x, 0);
16649 while (*name == '.')
16650 name++;
16652 fprintf (asm_out_file, "\t.long\t%s\n", name);
16653 return true;
16656 #endif /* RELOCATABLE_NEEDS_FIXUP */
16657 return default_assemble_integer (x, size, aligned_p);
16660 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
16661 /* Emit an assembler directive to set symbol visibility for DECL to
16662 VISIBILITY_TYPE. */
16664 static void
16665 rs6000_assemble_visibility (tree decl, int vis)
16667 if (TARGET_XCOFF)
16668 return;
16670 /* Functions need to have their entry point symbol visibility set as
16671 well as their descriptor symbol visibility. */
16672 if (DEFAULT_ABI == ABI_AIX
16673 && DOT_SYMBOLS
16674 && TREE_CODE (decl) == FUNCTION_DECL)
16676 static const char * const visibility_types[] = {
16677 NULL, "internal", "hidden", "protected"
16680 const char *name, *type;
16682 name = ((* targetm.strip_name_encoding)
16683 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
16684 type = visibility_types[vis];
16686 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
16687 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
16689 else
16690 default_assemble_visibility (decl, vis);
16692 #endif
16694 enum rtx_code
16695 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
16697 /* Reversal of FP compares needs care -- an ordered compare
16698 becomes an unordered compare and vice versa. */
16699 if (mode == CCFPmode
16700 && (!flag_finite_math_only
16701 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
16702 || code == UNEQ || code == LTGT))
16703 return reverse_condition_maybe_unordered (code);
16704 else
16705 return reverse_condition (code);
16708 /* Generate a compare for CODE. Return a brand-new rtx that
16709 represents the result of the compare. */
16711 static rtx
16712 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
16714 enum machine_mode comp_mode;
16715 rtx compare_result;
16716 enum rtx_code code = GET_CODE (cmp);
16717 rtx op0 = XEXP (cmp, 0);
16718 rtx op1 = XEXP (cmp, 1);
16720 if (FLOAT_MODE_P (mode))
16721 comp_mode = CCFPmode;
16722 else if (code == GTU || code == LTU
16723 || code == GEU || code == LEU)
16724 comp_mode = CCUNSmode;
16725 else if ((code == EQ || code == NE)
16726 && unsigned_reg_p (op0)
16727 && (unsigned_reg_p (op1)
16728 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
16729 /* These are unsigned values; perhaps there will be a later
16730 ordering compare that can be shared with this one. */
16731 comp_mode = CCUNSmode;
16732 else
16733 comp_mode = CCmode;
16735 /* If we have an unsigned compare, make sure we don't have a signed value as
16736 an immediate. */
16737 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
16738 && INTVAL (op1) < 0)
16740 op0 = copy_rtx_if_shared (op0);
16741 op1 = force_reg (GET_MODE (op0), op1);
16742 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
16745 /* First, the compare. */
16746 compare_result = gen_reg_rtx (comp_mode);
16748 /* E500 FP compare instructions on the GPRs. Yuck! */
16749 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
16750 && FLOAT_MODE_P (mode))
16752 rtx cmp, or_result, compare_result2;
16753 enum machine_mode op_mode = GET_MODE (op0);
16754 bool reverse_p;
16756 if (op_mode == VOIDmode)
16757 op_mode = GET_MODE (op1);
16759 /* First reverse the condition codes that aren't directly supported. */
16760 switch (code)
16762 case NE:
16763 case UNLT:
16764 case UNLE:
16765 case UNGT:
16766 case UNGE:
16767 code = reverse_condition_maybe_unordered (code);
16768 reverse_p = true;
16769 break;
16771 case EQ:
16772 case LT:
16773 case LE:
16774 case GT:
16775 case GE:
16776 reverse_p = false;
16777 break;
16779 default:
16780 gcc_unreachable ();
16783 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
16784 This explains the following mess. */
16786 switch (code)
16788 case EQ:
16789 switch (op_mode)
16791 case SFmode:
16792 cmp = (flag_finite_math_only && !flag_trapping_math)
16793 ? gen_tstsfeq_gpr (compare_result, op0, op1)
16794 : gen_cmpsfeq_gpr (compare_result, op0, op1);
16795 break;
16797 case DFmode:
16798 cmp = (flag_finite_math_only && !flag_trapping_math)
16799 ? gen_tstdfeq_gpr (compare_result, op0, op1)
16800 : gen_cmpdfeq_gpr (compare_result, op0, op1);
16801 break;
16803 case TFmode:
16804 cmp = (flag_finite_math_only && !flag_trapping_math)
16805 ? gen_tsttfeq_gpr (compare_result, op0, op1)
16806 : gen_cmptfeq_gpr (compare_result, op0, op1);
16807 break;
16809 default:
16810 gcc_unreachable ();
16812 break;
16814 case GT:
16815 case GE:
16816 switch (op_mode)
16818 case SFmode:
16819 cmp = (flag_finite_math_only && !flag_trapping_math)
16820 ? gen_tstsfgt_gpr (compare_result, op0, op1)
16821 : gen_cmpsfgt_gpr (compare_result, op0, op1);
16822 break;
16824 case DFmode:
16825 cmp = (flag_finite_math_only && !flag_trapping_math)
16826 ? gen_tstdfgt_gpr (compare_result, op0, op1)
16827 : gen_cmpdfgt_gpr (compare_result, op0, op1);
16828 break;
16830 case TFmode:
16831 cmp = (flag_finite_math_only && !flag_trapping_math)
16832 ? gen_tsttfgt_gpr (compare_result, op0, op1)
16833 : gen_cmptfgt_gpr (compare_result, op0, op1);
16834 break;
16836 default:
16837 gcc_unreachable ();
16839 break;
16841 case LT:
16842 case LE:
16843 switch (op_mode)
16845 case SFmode:
16846 cmp = (flag_finite_math_only && !flag_trapping_math)
16847 ? gen_tstsflt_gpr (compare_result, op0, op1)
16848 : gen_cmpsflt_gpr (compare_result, op0, op1);
16849 break;
16851 case DFmode:
16852 cmp = (flag_finite_math_only && !flag_trapping_math)
16853 ? gen_tstdflt_gpr (compare_result, op0, op1)
16854 : gen_cmpdflt_gpr (compare_result, op0, op1);
16855 break;
16857 case TFmode:
16858 cmp = (flag_finite_math_only && !flag_trapping_math)
16859 ? gen_tsttflt_gpr (compare_result, op0, op1)
16860 : gen_cmptflt_gpr (compare_result, op0, op1);
16861 break;
16863 default:
16864 gcc_unreachable ();
16866 break;
16868 default:
16869 gcc_unreachable ();
16872 /* Synthesize LE and GE from LT/GT || EQ. */
16873 if (code == LE || code == GE)
16875 emit_insn (cmp);
16877 compare_result2 = gen_reg_rtx (CCFPmode);
16879 /* Do the EQ. */
16880 switch (op_mode)
16882 case SFmode:
16883 cmp = (flag_finite_math_only && !flag_trapping_math)
16884 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
16885 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
16886 break;
16888 case DFmode:
16889 cmp = (flag_finite_math_only && !flag_trapping_math)
16890 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
16891 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
16892 break;
16894 case TFmode:
16895 cmp = (flag_finite_math_only && !flag_trapping_math)
16896 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
16897 : gen_cmptfeq_gpr (compare_result2, op0, op1);
16898 break;
16900 default:
16901 gcc_unreachable ();
16904 emit_insn (cmp);
16906 /* OR them together. */
16907 or_result = gen_reg_rtx (CCFPmode);
16908 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
16909 compare_result2);
16910 compare_result = or_result;
16913 code = reverse_p ? NE : EQ;
16915 emit_insn (cmp);
16917 else
16919 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
16920 CLOBBERs to match cmptf_internal2 pattern. */
16921 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
16922 && GET_MODE (op0) == TFmode
16923 && !TARGET_IEEEQUAD
16924 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
16925 emit_insn (gen_rtx_PARALLEL (VOIDmode,
16926 gen_rtvec (10,
16927 gen_rtx_SET (VOIDmode,
16928 compare_result,
16929 gen_rtx_COMPARE (comp_mode, op0, op1)),
16930 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16931 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16932 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16933 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16934 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16935 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16936 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16937 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16938 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
16939 else if (GET_CODE (op1) == UNSPEC
16940 && XINT (op1, 1) == UNSPEC_SP_TEST)
16942 rtx op1b = XVECEXP (op1, 0, 0);
16943 comp_mode = CCEQmode;
16944 compare_result = gen_reg_rtx (CCEQmode);
16945 if (TARGET_64BIT)
16946 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
16947 else
16948 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
16950 else
16951 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
16952 gen_rtx_COMPARE (comp_mode, op0, op1)));
16955 /* Some kinds of FP comparisons need an OR operation;
16956 under flag_finite_math_only we don't bother. */
16957 if (FLOAT_MODE_P (mode)
16958 && !flag_finite_math_only
16959 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
16960 && (code == LE || code == GE
16961 || code == UNEQ || code == LTGT
16962 || code == UNGT || code == UNLT))
16964 enum rtx_code or1, or2;
16965 rtx or1_rtx, or2_rtx, compare2_rtx;
16966 rtx or_result = gen_reg_rtx (CCEQmode);
16968 switch (code)
16970 case LE: or1 = LT; or2 = EQ; break;
16971 case GE: or1 = GT; or2 = EQ; break;
16972 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
16973 case LTGT: or1 = LT; or2 = GT; break;
16974 case UNGT: or1 = UNORDERED; or2 = GT; break;
16975 case UNLT: or1 = UNORDERED; or2 = LT; break;
16976 default: gcc_unreachable ();
16978 validate_condition_mode (or1, comp_mode);
16979 validate_condition_mode (or2, comp_mode);
16980 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
16981 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
16982 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
16983 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
16984 const_true_rtx);
16985 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
16987 compare_result = or_result;
16988 code = EQ;
16991 validate_condition_mode (code, GET_MODE (compare_result));
16993 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
16997 /* Emit the RTL for an sISEL pattern. */
16999 void
17000 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
17002 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
17005 void
17006 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
17008 rtx condition_rtx;
17009 enum machine_mode op_mode;
17010 enum rtx_code cond_code;
17011 rtx result = operands[0];
17013 if (TARGET_ISEL && (mode == SImode || mode == DImode))
17015 rs6000_emit_sISEL (mode, operands);
17016 return;
17019 condition_rtx = rs6000_generate_compare (operands[1], mode);
17020 cond_code = GET_CODE (condition_rtx);
17022 if (FLOAT_MODE_P (mode)
17023 && !TARGET_FPRS && TARGET_HARD_FLOAT)
17025 rtx t;
17027 PUT_MODE (condition_rtx, SImode);
17028 t = XEXP (condition_rtx, 0);
17030 gcc_assert (cond_code == NE || cond_code == EQ);
17032 if (cond_code == NE)
17033 emit_insn (gen_e500_flip_gt_bit (t, t));
17035 emit_insn (gen_move_from_CR_gt_bit (result, t));
17036 return;
17039 if (cond_code == NE
17040 || cond_code == GE || cond_code == LE
17041 || cond_code == GEU || cond_code == LEU
17042 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
17044 rtx not_result = gen_reg_rtx (CCEQmode);
17045 rtx not_op, rev_cond_rtx;
17046 enum machine_mode cc_mode;
17048 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
17050 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
17051 SImode, XEXP (condition_rtx, 0), const0_rtx);
17052 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
17053 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
17054 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
17057 op_mode = GET_MODE (XEXP (operands[1], 0));
17058 if (op_mode == VOIDmode)
17059 op_mode = GET_MODE (XEXP (operands[1], 1));
17061 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
17063 PUT_MODE (condition_rtx, DImode);
17064 convert_move (result, condition_rtx, 0);
17066 else
17068 PUT_MODE (condition_rtx, SImode);
17069 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
17073 /* Emit a branch of kind CODE to location LOC. */
17075 void
17076 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
17078 rtx condition_rtx, loc_ref;
17080 condition_rtx = rs6000_generate_compare (operands[0], mode);
17081 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
17082 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
17083 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
17084 loc_ref, pc_rtx)));
17087 /* Return the string to output a conditional branch to LABEL, which is
17088 the operand template of the label, or NULL if the branch is really a
17089 conditional return.
17091 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
17092 condition code register and its mode specifies what kind of
17093 comparison we made.
17095 REVERSED is nonzero if we should reverse the sense of the comparison.
17097 INSN is the insn. */
17099 char *
17100 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
17102 static char string[64];
17103 enum rtx_code code = GET_CODE (op);
17104 rtx cc_reg = XEXP (op, 0);
17105 enum machine_mode mode = GET_MODE (cc_reg);
17106 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
17107 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
17108 int really_reversed = reversed ^ need_longbranch;
17109 char *s = string;
17110 const char *ccode;
17111 const char *pred;
17112 rtx note;
17114 validate_condition_mode (code, mode);
17116 /* Work out which way this really branches. We could always use
17117 reverse_condition_maybe_unordered here, but using the plain
17118 reversal when possible makes the resulting assembler clearer. */
17119 if (really_reversed)
17121 /* Reversal of FP compares needs care -- an ordered compare
17122 becomes an unordered compare and vice versa. */
17123 if (mode == CCFPmode)
17124 code = reverse_condition_maybe_unordered (code);
17125 else
17126 code = reverse_condition (code);
17129 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
17131 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
17132 to the GT bit. */
17133 switch (code)
17135 case EQ:
17136 /* Opposite of GT. */
17137 code = GT;
17138 break;
17140 case NE:
17141 code = UNLE;
17142 break;
17144 default:
17145 gcc_unreachable ();
17149 switch (code)
17151 /* Not all of these are actually distinct opcodes, but
17152 we distinguish them for clarity of the resulting assembler. */
17153 case NE: case LTGT:
17154 ccode = "ne"; break;
17155 case EQ: case UNEQ:
17156 ccode = "eq"; break;
17157 case GE: case GEU:
17158 ccode = "ge"; break;
17159 case GT: case GTU: case UNGT:
17160 ccode = "gt"; break;
17161 case LE: case LEU:
17162 ccode = "le"; break;
17163 case LT: case LTU: case UNLT:
17164 ccode = "lt"; break;
17165 case UNORDERED: ccode = "un"; break;
17166 case ORDERED: ccode = "nu"; break;
17167 case UNGE: ccode = "nl"; break;
17168 case UNLE: ccode = "ng"; break;
17169 default:
17170 gcc_unreachable ();
17173 /* Maybe we have a guess as to how likely the branch is. */
17174 pred = "";
17175 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
17176 if (note != NULL_RTX)
17178 /* PROB is the difference from 50%. */
17179 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
17181 /* Only hint for highly probable/improbable branches on newer
17182 cpus as static prediction overrides processor dynamic
17183 prediction. For older cpus we may as well always hint, but
17184 assume not taken for branches that are very close to 50% as a
17185 mispredicted taken branch is more expensive than a
17186 mispredicted not-taken branch. */
17187 if (rs6000_always_hint
17188 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
17189 && br_prob_note_reliable_p (note)))
17191 if (abs (prob) > REG_BR_PROB_BASE / 20
17192 && ((prob > 0) ^ need_longbranch))
17193 pred = "+";
17194 else
17195 pred = "-";
17199 if (label == NULL)
17200 s += sprintf (s, "b%slr%s ", ccode, pred);
17201 else
17202 s += sprintf (s, "b%s%s ", ccode, pred);
17204 /* We need to escape any '%' characters in the reg_names string.
17205 Assume they'd only be the first character.... */
17206 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
17207 *s++ = '%';
17208 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
17210 if (label != NULL)
17212 /* If the branch distance was too far, we may have to use an
17213 unconditional branch to go the distance. */
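/* E.g., an out-of-range "bne 0,label" comes out as "beq 0,$+8"
   followed by "b label"; the condition was already reversed above
   via really_reversed.  */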
17214 if (need_longbranch)
17215 s += sprintf (s, ",$+8\n\tb %s", label);
17216 else
17217 s += sprintf (s, ",%s", label);
17220 return string;
17223 /* Return the string to flip the GT bit on a CR. */
17224 char *
17225 output_e500_flip_gt_bit (rtx dst, rtx src)
17227 static char string[64];
17228 int a, b;
17230 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
17231 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
17233 /* GT bit. */
17234 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
17235 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
17237 sprintf (string, "crnot %d,%d", a, b);
17238 return string;
17241 /* Return insn for VSX or Altivec comparisons. */
17243 static rtx
17244 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
17246 rtx mask;
17247 enum machine_mode mode = GET_MODE (op0);
17249 switch (code)
17251 default:
17252 break;
17254 case GE:
17255 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
17256 return NULL_RTX;
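/* FALLTHRU: floating-point GE is handled directly by the
   hardware below.  */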
17258 case EQ:
17259 case GT:
17260 case GTU:
17261 case ORDERED:
17262 case UNORDERED:
17263 case UNEQ:
17264 case LTGT:
17265 mask = gen_reg_rtx (mode);
17266 emit_insn (gen_rtx_SET (VOIDmode,
17267 mask,
17268 gen_rtx_fmt_ee (code, mode, op0, op1)));
17269 return mask;
17272 return NULL_RTX;
17275 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
17276 DMODE is expected destination mode. This is a recursive function. */
17278 static rtx
17279 rs6000_emit_vector_compare (enum rtx_code rcode,
17280 rtx op0, rtx op1,
17281 enum machine_mode dmode)
17283 rtx mask;
17284 bool swap_operands = false;
17285 bool try_again = false;
17287 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
17288 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
17290 /* See if the comparison works as is. */
17291 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
17292 if (mask)
17293 return mask;
17295 switch (rcode)
17297 case LT:
17298 rcode = GT;
17299 swap_operands = true;
17300 try_again = true;
17301 break;
17302 case LTU:
17303 rcode = GTU;
17304 swap_operands = true;
17305 try_again = true;
17306 break;
17307 case NE:
17308 case UNLE:
17309 case UNLT:
17310 case UNGE:
17311 case UNGT:
17312 /* Invert condition and try again.
17313 e.g., A != B becomes ~(A==B). */
17315 enum rtx_code rev_code;
17316 enum insn_code nor_code;
17317 rtx mask2;
17319 rev_code = reverse_condition_maybe_unordered (rcode);
17320 if (rev_code == UNKNOWN)
17321 return NULL_RTX;
17323 nor_code = optab_handler (one_cmpl_optab, dmode);
17324 if (nor_code == CODE_FOR_nothing)
17325 return NULL_RTX;
17327 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
17328 if (!mask2)
17329 return NULL_RTX;
17331 mask = gen_reg_rtx (dmode);
17332 emit_insn (GEN_FCN (nor_code) (mask, mask2));
17333 return mask;
17335 break;
17336 case GE:
17337 case GEU:
17338 case LE:
17339 case LEU:
17340 /* Try GT/GTU/LT/LTU OR EQ */
17342 rtx c_rtx, eq_rtx;
17343 enum insn_code ior_code;
17344 enum rtx_code new_code;
17346 switch (rcode)
17348 case GE:
17349 new_code = GT;
17350 break;
17352 case GEU:
17353 new_code = GTU;
17354 break;
17356 case LE:
17357 new_code = LT;
17358 break;
17360 case LEU:
17361 new_code = LTU;
17362 break;
17364 default:
17365 gcc_unreachable ();
17368 ior_code = optab_handler (ior_optab, dmode);
17369 if (ior_code == CODE_FOR_nothing)
17370 return NULL_RTX;
17372 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
17373 if (!c_rtx)
17374 return NULL_RTX;
17376 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
17377 if (!eq_rtx)
17378 return NULL_RTX;
17380 mask = gen_reg_rtx (dmode);
17381 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
17382 return mask;
17384 break;
17385 default:
17386 return NULL_RTX;
17389 if (try_again)
17391 if (swap_operands)
17393 rtx tmp;
17394 tmp = op0;
17395 op0 = op1;
17396 op1 = tmp;
17399 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
17400 if (mask)
17401 return mask;
17404 /* You only get two chances. */
17405 return NULL_RTX;
17408 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
17409 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
17410 operands for the relation operation COND. */
17412 int
17413 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
17414 rtx cond, rtx cc_op0, rtx cc_op1)
17416 enum machine_mode dest_mode = GET_MODE (dest);
17417 enum machine_mode mask_mode = GET_MODE (cc_op0);
17418 enum rtx_code rcode = GET_CODE (cond);
17419 enum machine_mode cc_mode = CCmode;
17420 rtx mask;
17421 rtx cond2;
17422 rtx tmp;
17423 bool invert_move = false;
17425 if (VECTOR_UNIT_NONE_P (dest_mode))
17426 return 0;
17428 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
17429 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
17431 switch (rcode)
17433 /* Swap operands if we can, and fall back to doing the operation as
17434 specified, and doing a NOR to invert the test. */
17435 case NE:
17436 case UNLE:
17437 case UNLT:
17438 case UNGE:
17439 case UNGT:
17440 /* Invert condition and try again.
17441 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
17442 invert_move = true;
17443 rcode = reverse_condition_maybe_unordered (rcode);
17444 if (rcode == UNKNOWN)
17445 return 0;
17446 break;
17448 /* Mark unsigned tests with CCUNSmode. */
17449 case GTU:
17450 case GEU:
17451 case LTU:
17452 case LEU:
17453 cc_mode = CCUNSmode;
17454 break;
17456 default:
17457 break;
17460 /* Get the vector mask for the given relational operations. */
17461 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
17463 if (!mask)
17464 return 0;
17466 if (invert_move)
17468 tmp = op_true;
17469 op_true = op_false;
17470 op_false = tmp;
17473 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
17474 CONST0_RTX (dest_mode));
17475 emit_insn (gen_rtx_SET (VOIDmode,
17476 dest,
17477 gen_rtx_IF_THEN_ELSE (dest_mode,
17478 cond2,
17479 op_true,
17480 op_false)));
17481 return 1;
17484 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
17485 operands of the last comparison is nonzero/true, FALSE_COND if it
17486 is zero/false. Return 0 if the hardware has no such operation. */
17488 int
17489 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
17491 enum rtx_code code = GET_CODE (op);
17492 rtx op0 = XEXP (op, 0);
17493 rtx op1 = XEXP (op, 1);
17494 REAL_VALUE_TYPE c1;
17495 enum machine_mode compare_mode = GET_MODE (op0);
17496 enum machine_mode result_mode = GET_MODE (dest);
17497 rtx temp;
17498 bool is_against_zero;
17500 /* These modes should always match. */
17501 if (GET_MODE (op1) != compare_mode
17502 /* In the isel case however, we can use a compare immediate, so
17503 op1 may be a small constant. */
17504 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
17505 return 0;
17506 if (GET_MODE (true_cond) != result_mode)
17507 return 0;
17508 if (GET_MODE (false_cond) != result_mode)
17509 return 0;
17511 /* Don't allow using floating point comparisons for integer results for
17512 now. */
17513 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
17514 return 0;
17516 /* First, work out if the hardware can do this at all, or
17517 if it's too slow.... */
17518 if (!FLOAT_MODE_P (compare_mode))
17520 if (TARGET_ISEL)
17521 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
17522 return 0;
17524 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
17525 && SCALAR_FLOAT_MODE_P (compare_mode))
17526 return 0;
17528 is_against_zero = op1 == CONST0_RTX (compare_mode);
17530 /* A floating-point subtract might overflow, underflow, or produce
17531 an inexact result, thus changing the floating-point flags, so it
17532 can't be generated if we care about that. It's safe if one side
17533 of the construct is zero, since then no subtract will be
17534 generated. */
17535 if (SCALAR_FLOAT_MODE_P (compare_mode)
17536 && flag_trapping_math && ! is_against_zero)
17537 return 0;
17539 /* Eliminate half of the comparisons by switching operands, this
17540 makes the remaining code simpler. */
17541 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
17542 || code == LTGT || code == LT || code == UNLE)
17544 code = reverse_condition_maybe_unordered (code);
17545 temp = true_cond;
17546 true_cond = false_cond;
17547 false_cond = temp;
17550 /* UNEQ and LTGT take four instructions for a comparison with zero,
17551 it'll probably be faster to use a branch here too. */
17552 if (code == UNEQ && HONOR_NANS (compare_mode))
17553 return 0;
17555 if (GET_CODE (op1) == CONST_DOUBLE)
17556 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
17558 /* We're going to try to implement comparisons by performing
17559 a subtract, then comparing against zero. Unfortunately,
17560 Inf - Inf is NaN, which is not zero, and so if we don't
17561 know that the operand is finite and the comparison
17562 would treat EQ differently from UNORDERED, we can't do it. */
17563 if (HONOR_INFINITIES (compare_mode)
17564 && code != GT && code != UNGE
17565 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
17566 /* Constructs of the form (a OP b ? a : b) are safe. */
17567 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
17568 || (! rtx_equal_p (op0, true_cond)
17569 && ! rtx_equal_p (op1, true_cond))))
17570 return 0;
17572 /* At this point we know we can use fsel. */
17574 /* Reduce the comparison to a comparison against zero. */
17575 if (! is_against_zero)
17577 temp = gen_reg_rtx (compare_mode);
17578 emit_insn (gen_rtx_SET (VOIDmode, temp,
17579 gen_rtx_MINUS (compare_mode, op0, op1)));
17580 op0 = temp;
17581 op1 = CONST0_RTX (compare_mode);
17584 /* If we don't care about NaNs we can reduce some of the comparisons
17585 down to faster ones. */
17586 if (! HONOR_NANS (compare_mode))
17587 switch (code)
17589 case GT:
17590 code = LE;
17591 temp = true_cond;
17592 true_cond = false_cond;
17593 false_cond = temp;
17594 break;
17595 case UNGE:
17596 code = GE;
17597 break;
17598 case UNEQ:
17599 code = EQ;
17600 break;
17601 default:
17602 break;
17605 /* Now, reduce everything down to a GE. */
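/* The reductions below rely on: a <= 0 <-> -a >= 0; ordered (a) <->
   abs (a) >= 0 (false only for NaN); and a == 0 <-> -abs (a) >= 0.  */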
17606 switch (code)
17608 case GE:
17609 break;
17611 case LE:
17612 temp = gen_reg_rtx (compare_mode);
17613 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
17614 op0 = temp;
17615 break;
17617 case ORDERED:
17618 temp = gen_reg_rtx (compare_mode);
17619 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
17620 op0 = temp;
17621 break;
17623 case EQ:
17624 temp = gen_reg_rtx (compare_mode);
17625 emit_insn (gen_rtx_SET (VOIDmode, temp,
17626 gen_rtx_NEG (compare_mode,
17627 gen_rtx_ABS (compare_mode, op0))));
17628 op0 = temp;
17629 break;
17631 case UNGE:
17632 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
17633 temp = gen_reg_rtx (result_mode);
17634 emit_insn (gen_rtx_SET (VOIDmode, temp,
17635 gen_rtx_IF_THEN_ELSE (result_mode,
17636 gen_rtx_GE (VOIDmode,
17637 op0, op1),
17638 true_cond, false_cond)));
17639 false_cond = true_cond;
17640 true_cond = temp;
17642 temp = gen_reg_rtx (compare_mode);
17643 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
17644 op0 = temp;
17645 break;
17647 case GT:
17648 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
17649 temp = gen_reg_rtx (result_mode);
17650 emit_insn (gen_rtx_SET (VOIDmode, temp,
17651 gen_rtx_IF_THEN_ELSE (result_mode,
17652 gen_rtx_GE (VOIDmode,
17653 op0, op1),
17654 true_cond, false_cond)));
17655 true_cond = false_cond;
17656 false_cond = temp;
17658 temp = gen_reg_rtx (compare_mode);
17659 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
17660 op0 = temp;
17661 break;
17663 default:
17664 gcc_unreachable ();
17667 emit_insn (gen_rtx_SET (VOIDmode, dest,
17668 gen_rtx_IF_THEN_ELSE (result_mode,
17669 gen_rtx_GE (VOIDmode,
17670 op0, op1),
17671 true_cond, false_cond)));
17672 return 1;
17675 /* Same as above, but for ints (isel). */
17677 static int
17678 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
17680 rtx condition_rtx, cr;
17681 enum machine_mode mode = GET_MODE (dest);
17682 enum rtx_code cond_code;
17683 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
17684 bool signedp;
17686 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
17687 return 0;
17689 /* We still have to do the compare, because isel doesn't do a
17690 compare; it just looks at the CRx bits set by a previous compare
17691 instruction. */
17692 condition_rtx = rs6000_generate_compare (op, mode);
17693 cond_code = GET_CODE (condition_rtx);
17694 cr = XEXP (condition_rtx, 0);
17695 signedp = GET_MODE (cr) == CCmode;
17697 isel_func = (mode == SImode
17698 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
17699 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
17701 switch (cond_code)
17703 case LT: case GT: case LTU: case GTU: case EQ:
17704 /* isel handles these directly. */
17705 break;
17707 default:
17708 /* We need to swap the sense of the comparison. */
17710 rtx t = true_cond;
17711 true_cond = false_cond;
17712 false_cond = t;
17713 PUT_CODE (condition_rtx, reverse_condition (cond_code));
17715 break;
17718 false_cond = force_reg (mode, false_cond);
17719 if (true_cond != const0_rtx)
17720 true_cond = force_reg (mode, true_cond);
17722 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
17724 return 1;
17727 const char *
17728 output_isel (rtx *operands)
17730 enum rtx_code code;
17732 code = GET_CODE (operands[1]);
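/* isel tests a single CR bit, and a compare sets only the LT, GT and
   EQ bits directly; for the complementary codes, reverse the
   condition and swap the two source operands instead.  */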
17734 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
17736 gcc_assert (GET_CODE (operands[2]) == REG
17737 && GET_CODE (operands[3]) == REG);
17738 PUT_CODE (operands[1], reverse_condition (code));
17739 return "isel %0,%3,%2,%j1";
17742 return "isel %0,%2,%3,%j1";
17745 void
17746 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
17748 enum machine_mode mode = GET_MODE (op0);
17749 enum rtx_code c;
17750 rtx target;
17752 /* VSX/altivec have direct min/max insns. */
17753 if ((code == SMAX || code == SMIN)
17754 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
17755 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
17757 emit_insn (gen_rtx_SET (VOIDmode,
17758 dest,
17759 gen_rtx_fmt_ee (code, mode, op0, op1)));
17760 return;
17763 if (code == SMAX || code == SMIN)
17764 c = GE;
17765 else
17766 c = GEU;
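/* max (a, b) is a >= b ? a : b and min (a, b) is a >= b ? b : a,
   using GE for the signed forms and GEU for the unsigned ones.  */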
17768 if (code == SMAX || code == UMAX)
17769 target = emit_conditional_move (dest, c, op0, op1, mode,
17770 op0, op1, mode, 0);
17771 else
17772 target = emit_conditional_move (dest, c, op0, op1, mode,
17773 op1, op0, mode, 0);
17774 gcc_assert (target);
17775 if (target != dest)
17776 emit_move_insn (dest, target);
17779 /* A subroutine of the atomic operation splitters. Jump to LABEL if
17780 COND is true. Mark the jump as unlikely to be taken. */
17782 static void
17783 emit_unlikely_jump (rtx cond, rtx label)
17785 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
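/* With REG_BR_PROB_BASE == 10000 this is a taken probability just
   under 1%.  */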
17786 rtx x;
17788 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
17789 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
17790 add_reg_note (x, REG_BR_PROB, very_unlikely);
17793 /* A subroutine of the atomic operation splitters. Emit a load-locked
17794 instruction in MODE. For QI/HImode, possibly use a pattern that includes
17795 the zero_extend operation. */
17797 static void
17798 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
17800 rtx (*fn) (rtx, rtx) = NULL;
17802 switch (mode)
17804 case QImode:
17805 fn = gen_load_lockedqi;
17806 break;
17807 case HImode:
17808 fn = gen_load_lockedhi;
17809 break;
17810 case SImode:
17811 if (GET_MODE (mem) == QImode)
17812 fn = gen_load_lockedqi_si;
17813 else if (GET_MODE (mem) == HImode)
17814 fn = gen_load_lockedhi_si;
17815 else
17816 fn = gen_load_lockedsi;
17817 break;
17818 case DImode:
17819 fn = gen_load_lockeddi;
17820 break;
17821 case TImode:
17822 fn = gen_load_lockedti;
17823 break;
17824 default:
17825 gcc_unreachable ();
17827 emit_insn (fn (reg, mem));
17830 /* A subroutine of the atomic operation splitters. Emit a store-conditional
17831 instruction in MODE. */
17833 static void
17834 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
17836 rtx (*fn) (rtx, rtx, rtx) = NULL;
17838 switch (mode)
17840 case QImode:
17841 fn = gen_store_conditionalqi;
17842 break;
17843 case HImode:
17844 fn = gen_store_conditionalhi;
17845 break;
17846 case SImode:
17847 fn = gen_store_conditionalsi;
17848 break;
17849 case DImode:
17850 fn = gen_store_conditionaldi;
17851 break;
17852 case TImode:
17853 fn = gen_store_conditionalti;
17854 break;
17855 default:
17856 gcc_unreachable ();
17859 /* Emit sync before stwcx. to address PPC405 Erratum. */
17860 if (PPC405_ERRATUM77)
17861 emit_insn (gen_hwsync ());
17863 emit_insn (fn (res, mem, val));
17866 /* Expand barriers before and after a load_locked/store_cond sequence. */
17868 static rtx
17869 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
17871 rtx addr = XEXP (mem, 0);
17872 int strict_p = (reload_in_progress || reload_completed);
17874 if (!legitimate_indirect_address_p (addr, strict_p)
17875 && !legitimate_indexed_address_p (addr, strict_p))
17877 addr = force_reg (Pmode, addr);
17878 mem = replace_equiv_address_nv (mem, addr);
17881 switch (model)
17883 case MEMMODEL_RELAXED:
17884 case MEMMODEL_CONSUME:
17885 case MEMMODEL_ACQUIRE:
17886 break;
17887 case MEMMODEL_RELEASE:
17888 case MEMMODEL_ACQ_REL:
17889 emit_insn (gen_lwsync ());
17890 break;
17891 case MEMMODEL_SEQ_CST:
17892 emit_insn (gen_hwsync ());
17893 break;
17894 default:
17895 gcc_unreachable ();
17897 return mem;
17900 static void
17901 rs6000_post_atomic_barrier (enum memmodel model)
17903 switch (model)
17905 case MEMMODEL_RELAXED:
17906 case MEMMODEL_CONSUME:
17907 case MEMMODEL_RELEASE:
17908 break;
17909 case MEMMODEL_ACQUIRE:
17910 case MEMMODEL_ACQ_REL:
17911 case MEMMODEL_SEQ_CST:
17912 emit_insn (gen_isync ());
17913 break;
17914 default:
17915 gcc_unreachable ();
17919 /* A subroutine of the various atomic expanders. For sub-word operations,
17920 we must adjust things to operate on SImode. Given the original MEM,
17921 return a new aligned memory. Also build and return the quantities by
17922 which to shift and mask. */
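/* E.g., on a big-endian target a QImode access at an address congruent
   to 1 mod 4 gets shift = (1 * 8) ^ 0x18 = 16 and mask = 0xff << 16,
   selecting the second-highest byte of the aligned SImode word.  */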
17924 static rtx
17925 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
17927 rtx addr, align, shift, mask, mem;
17928 HOST_WIDE_INT shift_mask;
17929 enum machine_mode mode = GET_MODE (orig_mem);
17931 /* For smaller modes, we have to implement this via SImode. */
17932 shift_mask = (mode == QImode ? 0x18 : 0x10);
17934 addr = XEXP (orig_mem, 0);
17935 addr = force_reg (GET_MODE (addr), addr);
17937 /* Aligned memory containing subword. Generate a new memory. We
17938 do not want any of the existing MEM_ATTR data, as we're now
17939 accessing memory outside the original object. */
17940 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
17941 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17942 mem = gen_rtx_MEM (SImode, align);
17943 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
17944 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
17945 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
17947 /* Shift amount for subword relative to aligned word. */
17948 shift = gen_reg_rtx (SImode);
17949 addr = gen_lowpart (SImode, addr);
17950 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
17951 if (WORDS_BIG_ENDIAN)
17952 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
17953 shift, 1, OPTAB_LIB_WIDEN);
17954 *pshift = shift;
17956 /* Mask for insertion. */
17957 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
17958 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
17959 *pmask = mask;
17961 return mem;
17964 /* A subroutine of the various atomic expanders. For sub-word operands,
17965 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
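/* I.e., this computes (OLDVAL & ~MASK) | NEWVAL, assuming NEWVAL has
   already been shifted into position and has no bits outside the
   field.  */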
17967 static rtx
17968 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
17970 rtx x;
17972 x = gen_reg_rtx (SImode);
17973 emit_insn (gen_rtx_SET (VOIDmode, x,
17974 gen_rtx_AND (SImode,
17975 gen_rtx_NOT (SImode, mask),
17976 oldval)));
17978 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
17980 return x;
17983 /* A subroutine of the various atomic expanders. For sub-word operands,
17984 extract WIDE to NARROW via SHIFT. */
17986 static void
17987 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
17989 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
17990 wide, 1, OPTAB_LIB_WIDEN);
17991 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
17994 /* Expand an atomic compare and swap operation. */
17996 void
17997 rs6000_expand_atomic_compare_and_swap (rtx operands[])
17999 rtx boolval, retval, mem, oldval, newval, cond;
18000 rtx label1, label2, x, mask, shift;
18001 enum machine_mode mode, orig_mode;
18002 enum memmodel mod_s, mod_f;
18003 bool is_weak;
18005 boolval = operands[0];
18006 retval = operands[1];
18007 mem = operands[2];
18008 oldval = operands[3];
18009 newval = operands[4];
18010 is_weak = (INTVAL (operands[5]) != 0);
18011 mod_s = (enum memmodel) INTVAL (operands[6]);
18012 mod_f = (enum memmodel) INTVAL (operands[7]);
18013 orig_mode = mode = GET_MODE (mem);
18015 mask = shift = NULL_RTX;
18016 if (mode == QImode || mode == HImode)
18018 /* Before power8, we didn't have access to lbarx/lharx, so generate
18019 lwarx and shift/mask operations. With power8, we need to do the
18020 comparison in SImode, but the store is still done in QI/HImode. */
18021 oldval = convert_modes (SImode, mode, oldval, 1);
18023 if (!TARGET_SYNC_HI_QI)
18025 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
18027 /* Shift and mask OLDVAL into position within the word. */
18028 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
18029 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18031 /* Shift and mask NEWVAL into position within the word. */
18032 newval = convert_modes (SImode, mode, newval, 1);
18033 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
18034 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18037 /* Prepare to adjust the return value. */
18038 retval = gen_reg_rtx (SImode);
18039 mode = SImode;
18041 else if (reg_overlap_mentioned_p (retval, oldval))
18042 oldval = copy_to_reg (oldval);
18044 mem = rs6000_pre_atomic_barrier (mem, mod_s);
18046 label1 = NULL_RTX;
18047 if (!is_weak)
18049 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
18050 emit_label (XEXP (label1, 0));
18052 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
18054 emit_load_locked (mode, retval, mem);
18056 x = retval;
18057 if (mask)
18059 x = expand_simple_binop (SImode, AND, retval, mask,
18060 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18063 cond = gen_reg_rtx (CCmode);
18064 /* If we have TImode, synthesize a comparison. */
18065 if (mode != TImode)
18066 x = gen_rtx_COMPARE (CCmode, x, oldval);
18067 else
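/* Synthesize the 128-bit comparison as
   ((new0 ^ old0) | (new1 ^ old1)) == 0 on the two 64-bit halves.  */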
18069 rtx xor1_result = gen_reg_rtx (DImode);
18070 rtx xor2_result = gen_reg_rtx (DImode);
18071 rtx or_result = gen_reg_rtx (DImode);
18072 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
18073 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
18074 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
18075 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
18077 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
18078 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
18079 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
18080 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
18083 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
18085 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18086 emit_unlikely_jump (x, label2);
18088 x = newval;
18089 if (mask)
18090 x = rs6000_mask_atomic_subword (retval, newval, mask);
18092 emit_store_conditional (orig_mode, cond, mem, x);
18094 if (!is_weak)
18096 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18097 emit_unlikely_jump (x, label1);
18100 if (mod_f != MEMMODEL_RELAXED)
18101 emit_label (XEXP (label2, 0));
18103 rs6000_post_atomic_barrier (mod_s);
18105 if (mod_f == MEMMODEL_RELAXED)
18106 emit_label (XEXP (label2, 0));
18108 if (shift)
18109 rs6000_finish_atomic_subword (operands[1], retval, shift);
18110 else if (mode != GET_MODE (operands[1]))
18111 convert_move (operands[1], retval, 1);
18113 /* In all cases, CR0 contains EQ on success, and NE on failure. */
18114 x = gen_rtx_EQ (SImode, cond, const0_rtx);
18115 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
18118 /* Expand an atomic exchange operation. */
18120 void
18121 rs6000_expand_atomic_exchange (rtx operands[])
18123 rtx retval, mem, val, cond;
18124 enum machine_mode mode;
18125 enum memmodel model;
18126 rtx label, x, mask, shift;
18128 retval = operands[0];
18129 mem = operands[1];
18130 val = operands[2];
18131 model = (enum memmodel) INTVAL (operands[3]);
18132 mode = GET_MODE (mem);
18134 mask = shift = NULL_RTX;
18135 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
18137 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
18139 /* Shift and mask VAL into position within the word. */
18140 val = convert_modes (SImode, mode, val, 1);
18141 val = expand_simple_binop (SImode, ASHIFT, val, shift,
18142 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18144 /* Prepare to adjust the return value. */
18145 retval = gen_reg_rtx (SImode);
18146 mode = SImode;
18149 mem = rs6000_pre_atomic_barrier (mem, model);
18151 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
18152 emit_label (XEXP (label, 0));
18154 emit_load_locked (mode, retval, mem);
18156 x = val;
18157 if (mask)
18158 x = rs6000_mask_atomic_subword (retval, val, mask);
18160 cond = gen_reg_rtx (CCmode);
18161 emit_store_conditional (mode, cond, mem, x);
18163 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18164 emit_unlikely_jump (x, label);
18166 rs6000_post_atomic_barrier (model);
18168 if (shift)
18169 rs6000_finish_atomic_subword (operands[0], retval, shift);
18172 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
18173 to perform. MEM is the memory on which to operate. VAL is the second
18174 operand of the binary operator. BEFORE and AFTER are optional locations to
18175 return the value of MEM either before or after the operation. MODEL_RTX
18176 is a CONST_INT containing the memory model to use. */
18178 void
18179 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
18180 rtx orig_before, rtx orig_after, rtx model_rtx)
18182 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
18183 enum machine_mode mode = GET_MODE (mem);
18184 enum machine_mode store_mode = mode;
18185 rtx label, x, cond, mask, shift;
18186 rtx before = orig_before, after = orig_after;
18188 mask = shift = NULL_RTX;
18189 /* On power8, we want to use SImode for the operation. On previous systems,
18190 do the operation in a full word and shift/mask to get the proper byte or
18191 halfword. */
18192 if (mode == QImode || mode == HImode)
18194 if (TARGET_SYNC_HI_QI)
18196 val = convert_modes (SImode, mode, val, 1);
18198 /* Prepare to adjust the return value. */
18199 before = gen_reg_rtx (SImode);
18200 if (after)
18201 after = gen_reg_rtx (SImode);
18202 mode = SImode;
18204 else
18206 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
18208 /* Shift and mask VAL into position within the word. */
18209 val = convert_modes (SImode, mode, val, 1);
18210 val = expand_simple_binop (SImode, ASHIFT, val, shift,
18211 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18213 switch (code)
18215 case IOR:
18216 case XOR:
18217 /* We've already zero-extended VAL. That is sufficient to
18218 make certain that it does not affect other bits. */
18219 mask = NULL;
18220 break;
18222 case AND:
18223 /* If we make certain that all of the other bits in VAL are
18224 set, that will be sufficient to not affect other bits. */
18225 x = gen_rtx_NOT (SImode, mask);
18226 x = gen_rtx_IOR (SImode, x, val);
18227 emit_insn (gen_rtx_SET (VOIDmode, val, x));
18228 mask = NULL;
18229 break;
18231 case NOT:
18232 case PLUS:
18233 case MINUS:
18234 /* These will all affect bits outside the field and need
18235 adjustment via MASK within the loop. */
18236 break;
18238 default:
18239 gcc_unreachable ();
18242 /* Prepare to adjust the return value. */
18243 before = gen_reg_rtx (SImode);
18244 if (after)
18245 after = gen_reg_rtx (SImode);
18246 store_mode = mode = SImode;
18250 mem = rs6000_pre_atomic_barrier (mem, model);
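/* The generated sequence looks roughly like:
   <hwsync or lwsync, per MODEL>
   1: <larx> before,0,mem
   <CODE> after,before,val
   <stcx.> after,0,mem
   bne- 1b
   <isync, per MODEL>  */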
18252 label = gen_label_rtx ();
18253 emit_label (label);
18254 label = gen_rtx_LABEL_REF (VOIDmode, label);
18256 if (before == NULL_RTX)
18257 before = gen_reg_rtx (mode);
18259 emit_load_locked (mode, before, mem);
18261 if (code == NOT)
18263 x = expand_simple_binop (mode, AND, before, val,
18264 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18265 after = expand_simple_unop (mode, NOT, x, after, 1);
18267 else
18269 after = expand_simple_binop (mode, code, before, val,
18270 after, 1, OPTAB_LIB_WIDEN);
18273 x = after;
18274 if (mask)
18276 x = expand_simple_binop (SImode, AND, after, mask,
18277 NULL_RTX, 1, OPTAB_LIB_WIDEN);
18278 x = rs6000_mask_atomic_subword (before, x, mask);
18280 else if (store_mode != mode)
18281 x = convert_modes (store_mode, mode, x, 1);
18283 cond = gen_reg_rtx (CCmode);
18284 emit_store_conditional (store_mode, cond, mem, x);
18286 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
18287 emit_unlikely_jump (x, label);
18289 rs6000_post_atomic_barrier (model);
18291 if (shift)
18293 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
18294 then do the calculations in a SImode register. */
18295 if (orig_before)
18296 rs6000_finish_atomic_subword (orig_before, before, shift);
18297 if (orig_after)
18298 rs6000_finish_atomic_subword (orig_after, after, shift);
18300 else if (store_mode != mode)
18302 /* QImode/HImode on machines with lbarx/lharx where we do the native
18303 operation and then do the calculations in a SImode register. */
18304 if (orig_before)
18305 convert_move (orig_before, before, 1);
18306 if (orig_after)
18307 convert_move (orig_after, after, 1);
18309 else if (orig_after && after != orig_after)
18310 emit_move_insn (orig_after, after);
18313 /* Emit instructions to move SRC to DST. Called by splitters for
18314 multi-register moves. It will emit at most one instruction for
18315 each register that is accessed; that is, it won't emit li/lis pairs
18316 (or equivalent for 64-bit code). One of SRC or DST must be a hard
18317 register. */
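/* For example (an illustration, not taken from the machine
   description): splitting a TImode register-to-register move in
   32-bit code yields four SImode moves.  The backwards copy in the
   loop below is what keeps an overlapping destination from
   clobbering source words that have not yet been read.  */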
18319 void
18320 rs6000_split_multireg_move (rtx dst, rtx src)
18322 /* The register number of the first register being moved. */
18323 int reg;
18324 /* The mode that is to be moved. */
18325 enum machine_mode mode;
18326 /* The mode that the move is being done in, and its size. */
18327 enum machine_mode reg_mode;
18328 int reg_mode_size;
18329 /* The number of registers that will be moved. */
18330 int nregs;
18332 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
18333 mode = GET_MODE (dst);
18334 nregs = hard_regno_nregs[reg][mode];
18335 if (FP_REGNO_P (reg))
18336 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
18337 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
18338 else if (ALTIVEC_REGNO_P (reg))
18339 reg_mode = V16QImode;
18340 else if (TARGET_E500_DOUBLE && mode == TFmode)
18341 reg_mode = DFmode;
18342 else
18343 reg_mode = word_mode;
18344 reg_mode_size = GET_MODE_SIZE (reg_mode);
18346 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
18348 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
18350 /* Move register range backwards, if we might have destructive
18351 overlap. */
18352 int i;
18353 for (i = nregs - 1; i >= 0; i--)
18354 emit_insn (gen_rtx_SET (VOIDmode,
18355 simplify_gen_subreg (reg_mode, dst, mode,
18356 i * reg_mode_size),
18357 simplify_gen_subreg (reg_mode, src, mode,
18358 i * reg_mode_size)));
18360 else
18362 int i;
18363 int j = -1;
18364 bool used_update = false;
18365 rtx restore_basereg = NULL_RTX;
18367 if (MEM_P (src) && INT_REGNO_P (reg))
18369 rtx breg;
18371 if (GET_CODE (XEXP (src, 0)) == PRE_INC
18372 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
18374 rtx delta_rtx;
18375 breg = XEXP (XEXP (src, 0), 0);
18376 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
18377 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
18378 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
18379 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
18380 src = replace_equiv_address (src, breg);
18382 else if (! rs6000_offsettable_memref_p (src, reg_mode))
18384 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
18386 rtx basereg = XEXP (XEXP (src, 0), 0);
18387 if (TARGET_UPDATE)
18389 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
18390 emit_insn (gen_rtx_SET (VOIDmode, ndst,
18391 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
18392 used_update = true;
18394 else
18395 emit_insn (gen_rtx_SET (VOIDmode, basereg,
18396 XEXP (XEXP (src, 0), 1)));
18397 src = replace_equiv_address (src, basereg);
18399 else
18401 rtx basereg = gen_rtx_REG (Pmode, reg);
18402 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
18403 src = replace_equiv_address (src, basereg);
18407 breg = XEXP (src, 0);
18408 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
18409 breg = XEXP (breg, 0);
18411 /* If the base register we are using to address memory is
18412 also a destination reg, then change that register last. */
18413 if (REG_P (breg)
18414 && REGNO (breg) >= REGNO (dst)
18415 && REGNO (breg) < REGNO (dst) + nregs)
18416 j = REGNO (breg) - REGNO (dst);
18418 else if (MEM_P (dst) && INT_REGNO_P (reg))
18420 rtx breg;
18422 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
18423 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
18425 rtx delta_rtx;
18426 breg = XEXP (XEXP (dst, 0), 0);
18427 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
18428 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
18429 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
18431 /* We have to update the breg before doing the store.
18432 Use store with update, if available. */
18434 if (TARGET_UPDATE)
18436 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
18437 emit_insn (TARGET_32BIT
18438 ? (TARGET_POWERPC64
18439 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
18440 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
18441 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
18442 used_update = true;
18444 else
18445 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
18446 dst = replace_equiv_address (dst, breg);
18448 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
18449 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
18451 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
18453 rtx basereg = XEXP (XEXP (dst, 0), 0);
18454 if (TARGET_UPDATE)
18456 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
18457 emit_insn (gen_rtx_SET (VOIDmode,
18458 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
18459 used_update = true;
18461 else
18462 emit_insn (gen_rtx_SET (VOIDmode, basereg,
18463 XEXP (XEXP (dst, 0), 1)));
18464 dst = replace_equiv_address (dst, basereg);
18466 else
18468 rtx basereg = XEXP (XEXP (dst, 0), 0);
18469 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
18470 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
18471 && REG_P (basereg)
18472 && REG_P (offsetreg)
18473 && REGNO (basereg) != REGNO (offsetreg));
18474 if (REGNO (basereg) == 0)
18476 rtx tmp = offsetreg;
18477 offsetreg = basereg;
18478 basereg = tmp;
18480 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
18481 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
18482 dst = replace_equiv_address (dst, basereg);
18485 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
18486 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
18489 for (i = 0; i < nregs; i++)
18491 /* Calculate index to next subword. */
18492 ++j;
18493 if (j == nregs)
18494 j = 0;
18496 /* If compiler already emitted move of first word by
18497 store with update, no need to do anything. */
18498 if (j == 0 && used_update)
18499 continue;
18501 emit_insn (gen_rtx_SET (VOIDmode,
18502 simplify_gen_subreg (reg_mode, dst, mode,
18503 j * reg_mode_size),
18504 simplify_gen_subreg (reg_mode, src, mode,
18505 j * reg_mode_size)));
18507 if (restore_basereg != NULL_RTX)
18508 emit_insn (restore_basereg);
18513 /* This page contains routines that are used to determine what the
18514 function prologue and epilogue code will do and write them out. */
18516 static inline bool
18517 save_reg_p (int r)
18519 return !call_used_regs[r] && df_regs_ever_live_p (r);
18522 /* Return the first fixed-point register that is required to be
18523 saved. 32 if none. */
18525 static int
18526 first_reg_to_save (void)
18528 int first_reg;
18530 /* Find lowest numbered live register. */
18531 for (first_reg = 13; first_reg <= 31; first_reg++)
18532 if (save_reg_p (first_reg))
18533 break;
18535 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
18536 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
18537 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
18538 || (TARGET_TOC && TARGET_MINIMAL_TOC))
18539 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
18540 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
18542 #if TARGET_MACHO
18543 if (flag_pic
18544 && crtl->uses_pic_offset_table
18545 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
18546 return RS6000_PIC_OFFSET_TABLE_REGNUM;
18547 #endif
18549 return first_reg;
18552 /* Similar, for FP regs. */
18554 static int
18555 first_fp_reg_to_save (void)
18557 int first_reg;
18559 /* Find lowest numbered live register. */
18560 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
18561 if (save_reg_p (first_reg))
18562 break;
18564 return first_reg;
18567 /* Similar, for AltiVec regs. */
18569 static int
18570 first_altivec_reg_to_save (void)
18572 int i;
18574 /* Stack frame remains as is unless we are in AltiVec ABI. */
18575 if (! TARGET_ALTIVEC_ABI)
18576 return LAST_ALTIVEC_REGNO + 1;
18578 /* On Darwin, the unwind routines are compiled without
18579 TARGET_ALTIVEC, and use save_world to save/restore the
18580 altivec registers when necessary. */
18581 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
18582 && ! TARGET_ALTIVEC)
18583 return FIRST_ALTIVEC_REGNO + 20;
18585 /* Find lowest numbered live register. */
18586 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
18587 if (save_reg_p (i))
18588 break;
18590 return i;
18593 /* Return a 32-bit mask of the AltiVec registers we need to set in
18594 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
18595 the 32-bit word is 0. */
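/* For instance, given that ALTIVEC_REG_BIT numbers bits from the MSB
   as described above, a function in which only V20 and V21 are live
   would yield a mask of 0x00000C00.  */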
18597 static unsigned int
18598 compute_vrsave_mask (void)
18600 unsigned int i, mask = 0;
18602 /* On Darwin, the unwind routines are compiled without
18603 TARGET_ALTIVEC, and use save_world to save/restore the
18604 call-saved altivec registers when necessary. */
18605 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
18606 && ! TARGET_ALTIVEC)
18607 mask |= 0xFFF;
18609 /* First, find out if we use _any_ altivec registers. */
18610 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
18611 if (df_regs_ever_live_p (i))
18612 mask |= ALTIVEC_REG_BIT (i);
18614 if (mask == 0)
18615 return mask;
18617 /* Next, remove the argument registers from the set. These must
18618 be in the VRSAVE mask set by the caller, so we don't need to add
18619 them in again. More importantly, the mask we compute here is
18620 used to generate CLOBBERs in the set_vrsave insn, and we do not
18621 wish the argument registers to die. */
18622 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
18623 mask &= ~ALTIVEC_REG_BIT (i);
18625 /* Similarly, remove the return value from the set. */
18627 bool yes = false;
18628 diddle_return_value (is_altivec_return_reg, &yes);
18629 if (yes)
18630 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
18633 return mask;
18636 /* For a very restricted set of circumstances, we can cut down the
18637 size of prologues/epilogues by calling our own save/restore-the-world
18638 routines. */
18640 static void
18641 compute_save_world_info (rs6000_stack_t *info_ptr)
18643 info_ptr->world_save_p = 1;
18644 info_ptr->world_save_p
18645 = (WORLD_SAVE_P (info_ptr)
18646 && DEFAULT_ABI == ABI_DARWIN
18647 && !cfun->has_nonlocal_label
18648 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
18649 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
18650 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
18651 && info_ptr->cr_save_p);
18653 /* This will not work in conjunction with sibcalls. Make sure there
18654 are none. (This check is expensive, but seldom executed.) */
18655 if (WORLD_SAVE_P (info_ptr))
18657 rtx insn;
18658 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
18659 if (CALL_P (insn) && SIBLING_CALL_P (insn))
18661 info_ptr->world_save_p = 0;
18662 break;
18666 if (WORLD_SAVE_P (info_ptr))
18668 /* Even if we're not touching VRsave, make sure there's room on the
18669 stack for it, if it looks like we're calling SAVE_WORLD, which
18670 will attempt to save it. */
18671 info_ptr->vrsave_size = 4;
18673 /* If we are going to save the world, we need to save the link register too. */
18674 info_ptr->lr_save_p = 1;
18676 /* "Save" the VRsave register too if we're saving the world. */
18677 if (info_ptr->vrsave_mask == 0)
18678 info_ptr->vrsave_mask = compute_vrsave_mask ();
18680 /* Because the Darwin register save/restore routines only handle
18681 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
18682 check. */
18683 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
18684 && (info_ptr->first_altivec_reg_save
18685 >= FIRST_SAVED_ALTIVEC_REGNO));
18687 return;
18691 static void
18692 is_altivec_return_reg (rtx reg, void *xyes)
18694 bool *yes = (bool *) xyes;
18695 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
18696 *yes = true;
18700 /* Look for user-defined global regs in the range FIRST to LAST-1.
18701 We should not restore these, and so cannot use lmw or out-of-line
18702 restore functions if there are any. We also can't save them
18703 (well, emit frame notes for them), because frame unwinding during
18704 exception handling will restore saved registers. */
18706 static bool
18707 global_regs_p (unsigned first, unsigned last)
18709 while (first < last)
18710 if (global_regs[first++])
18711 return true;
18712 return false;
18715 /* Determine the strategy for saving/restoring registers. */
18717 enum {
18718 SAVRES_MULTIPLE = 0x1,
18719 SAVE_INLINE_FPRS = 0x2,
18720 SAVE_INLINE_GPRS = 0x4,
18721 REST_INLINE_FPRS = 0x8,
18722 REST_INLINE_GPRS = 0x10,
18723 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
18724 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
18725 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
18726 SAVE_INLINE_VRS = 0x100,
18727 REST_INLINE_VRS = 0x200
18730 static int
18731 rs6000_savres_strategy (rs6000_stack_t *info,
18732 bool using_static_chain_p)
18734 int strategy = 0;
18735 bool lr_save_p;
18737 if (TARGET_MULTIPLE
18738 && !TARGET_POWERPC64
18739 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
18740 && info->first_gp_reg_save < 31
18741 && !global_regs_p (info->first_gp_reg_save, 32))
18742 strategy |= SAVRES_MULTIPLE;
18744 if (crtl->calls_eh_return
18745 || cfun->machine->ra_need_lr)
18746 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
18747 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
18748 | SAVE_INLINE_VRS | REST_INLINE_VRS);
18750 if (info->first_fp_reg_save == 64
18751 /* The out-of-line FP routines use double-precision stores;
18752 we can't use those routines if we don't have such stores. */
18753 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
18754 || global_regs_p (info->first_fp_reg_save, 64))
18755 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18757 if (info->first_gp_reg_save == 32
18758 || (!(strategy & SAVRES_MULTIPLE)
18759 && global_regs_p (info->first_gp_reg_save, 32)))
18760 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18762 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
18763 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
18764 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18766 /* Define cutoff for using out-of-line functions to save registers. */
18767 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
18769 if (!optimize_size)
18771 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18772 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18773 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18775 else
18777 /* Prefer out-of-line restore if it will exit. */
18778 if (info->first_fp_reg_save > 61)
18779 strategy |= SAVE_INLINE_FPRS;
18780 if (info->first_gp_reg_save > 29)
18782 if (info->first_fp_reg_save == 64)
18783 strategy |= SAVE_INLINE_GPRS;
18784 else
18785 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18787 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
18788 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18791 else if (DEFAULT_ABI == ABI_DARWIN)
18793 if (info->first_fp_reg_save > 60)
18794 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18795 if (info->first_gp_reg_save > 29)
18796 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18797 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18799 else
18801 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
18802 if (info->first_fp_reg_save > 61)
18803 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18804 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18805 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18808 /* Don't bother to try to save things out-of-line if r11 is occupied
18809 by the static chain. It would require too much fiddling and the
18810 static chain is rarely used anyway. FPRs are saved w.r.t the stack
18811 pointer on Darwin, and AIX uses r1 or r12. */
18812 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
18813 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
18814 | SAVE_INLINE_GPRS
18815 | SAVE_INLINE_VRS | REST_INLINE_VRS);
18817 /* We can only use the out-of-line routines to restore if we've
18818 saved all the registers from first_fp_reg_save in the prologue.
18819 Otherwise, we risk loading garbage. */
18820 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
18822 int i;
18824 for (i = info->first_fp_reg_save; i < 64; i++)
18825 if (!save_reg_p (i))
18827 strategy |= REST_INLINE_FPRS;
18828 break;
18832 /* If we are going to use store multiple, then don't even bother
18833 with the out-of-line routines, since the store-multiple
18834 instruction will always be smaller. */
18835 if ((strategy & SAVRES_MULTIPLE))
18836 strategy |= SAVE_INLINE_GPRS;
18838 /* info->lr_save_p isn't yet set if the only reason lr needs to be
18839 saved is an out-of-line save or restore. Set up the value for
18840 the next test (excluding out-of-line gpr restore). */
18841 lr_save_p = (info->lr_save_p
18842 || !(strategy & SAVE_INLINE_GPRS)
18843 || !(strategy & SAVE_INLINE_FPRS)
18844 || !(strategy & SAVE_INLINE_VRS)
18845 || !(strategy & REST_INLINE_FPRS)
18846 || !(strategy & REST_INLINE_VRS));
18848 /* The situation is more complicated with load multiple. We'd
18849 prefer to use the out-of-line routines for restores, since the
18850 "exit" out-of-line routines can handle the restore of LR and the
18851 frame teardown. However, it doesn't make sense to use the
18852 out-of-line routine if that is the only reason we'd need to save
18853 LR, and we can't use the "exit" out-of-line gpr restore if we
18854 have saved some fprs; in those cases it is advantageous to use
18855 load multiple when available. */
18856 if ((strategy & SAVRES_MULTIPLE)
18857 && (!lr_save_p
18858 || info->first_fp_reg_save != 64))
18859 strategy |= REST_INLINE_GPRS;
18861 /* Saving CR interferes with the exit routines used on the SPE, so
18862 just punt here. */
18863 if (TARGET_SPE_ABI
18864 && info->spe_64bit_regs_used
18865 && info->cr_save_p)
18866 strategy |= REST_INLINE_GPRS;
18868 /* We can only use load multiple or the out-of-line routines to
18869 restore if we've used store multiple or out-of-line routines
18870 in the prologue, i.e. if we've saved all the registers from
18871 first_gp_reg_save. Otherwise, we risk loading garbage. */
18872 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
18873 == SAVE_INLINE_GPRS)
18875 int i;
18877 for (i = info->first_gp_reg_save; i < 32; i++)
18878 if (!save_reg_p (i))
18880 strategy |= REST_INLINE_GPRS;
18881 break;
18885 if (TARGET_ELF && TARGET_64BIT)
18887 if (!(strategy & SAVE_INLINE_FPRS))
18888 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
18889 else if (!(strategy & SAVE_INLINE_GPRS)
18890 && info->first_fp_reg_save == 64)
18891 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
18893 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
18894 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
18896 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
18897 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
18899 return strategy;
18902 /* Calculate the stack information for the current function. This is
18903 complicated by having two separate calling sequences, the AIX calling
18904 sequence and the V.4 calling sequence.
18906 AIX (and Darwin/Mac OS X) stack frames look like:
18907 32-bit 64-bit
18908 SP----> +---------------------------------------+
18909 | back chain to caller | 0 0
18910 +---------------------------------------+
18911 | saved CR | 4 8 (8-11)
18912 +---------------------------------------+
18913 | saved LR | 8 16
18914 +---------------------------------------+
18915 | reserved for compilers | 12 24
18916 +---------------------------------------+
18917 | reserved for binders | 16 32
18918 +---------------------------------------+
18919 | saved TOC pointer | 20 40
18920 +---------------------------------------+
18921 | Parameter save area (P) | 24 48
18922 +---------------------------------------+
18923 | Alloca space (A) | 24+P etc.
18924 +---------------------------------------+
18925 | Local variable space (L) | 24+P+A
18926 +---------------------------------------+
18927 | Float/int conversion temporary (X) | 24+P+A+L
18928 +---------------------------------------+
18929 | Save area for AltiVec registers (W) | 24+P+A+L+X
18930 +---------------------------------------+
18931 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
18932 +---------------------------------------+
18933 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
18934 +---------------------------------------+
18935 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
18936 +---------------------------------------+
18937 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
18938 +---------------------------------------+
18939 old SP->| back chain to caller's caller |
18940 +---------------------------------------+
18942 The required alignment for AIX configurations is two words (i.e., 8
18943 or 16 bytes).
18946 V.4 stack frames look like:
18948 SP----> +---------------------------------------+
18949 | back chain to caller | 0
18950 +---------------------------------------+
18951 | caller's saved LR | 4
18952 +---------------------------------------+
18953 | Parameter save area (P) | 8
18954 +---------------------------------------+
18955 | Alloca space (A) | 8+P
18956 +---------------------------------------+
18957 | Varargs save area (V) | 8+P+A
18958 +---------------------------------------+
18959 | Local variable space (L) | 8+P+A+V
18960 +---------------------------------------+
18961 | Float/int conversion temporary (X) | 8+P+A+V+L
18962 +---------------------------------------+
18963 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
18964 +---------------------------------------+
18965 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
18966 +---------------------------------------+
18967 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
18968 +---------------------------------------+
18969 | SPE: area for 64-bit GP registers |
18970 +---------------------------------------+
18971 | SPE alignment padding |
18972 +---------------------------------------+
18973 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
18974 +---------------------------------------+
18975 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
18976 +---------------------------------------+
18977 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
18978 +---------------------------------------+
18979 old SP->| back chain to caller's caller |
18980 +---------------------------------------+
18982 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
18983 given. (But note below and in sysv4.h that we require only 8 and
18984 may round up the size of our stack frame anyway. The historical
18985 reason is early versions of powerpc-linux which didn't properly
18986 align the stack at program startup. A happy side-effect is that
18987 -mno-eabi libraries can be used with -meabi programs.)
18989 The EABI configuration defaults to the V.4 layout. However,
18990 the stack alignment requirements may differ. If -mno-eabi is not
18991 given, the required stack alignment is 8 bytes; if -mno-eabi is
18992 given, the required alignment is 16 bytes. (But see V.4 comment
18993 above.) */
18995 #ifndef ABI_STACK_BOUNDARY
18996 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
18997 #endif
18999 static rs6000_stack_t *
19000 rs6000_stack_info (void)
19002 rs6000_stack_t *info_ptr = &stack_info;
19003 int reg_size = TARGET_32BIT ? 4 : 8;
19004 int ehrd_size;
19005 int save_align;
19006 int first_gp;
19007 HOST_WIDE_INT non_fixed_size;
19008 bool using_static_chain_p;
19010 if (reload_completed && info_ptr->reload_completed)
19011 return info_ptr;
19013 memset (info_ptr, 0, sizeof (*info_ptr));
19014 info_ptr->reload_completed = reload_completed;
19016 if (TARGET_SPE)
19018 /* Cache value so we don't rescan instruction chain over and over. */
19019 if (cfun->machine->insn_chain_scanned_p == 0)
19020 cfun->machine->insn_chain_scanned_p
19021 = spe_func_has_64bit_regs_p () + 1;
19022 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
19025 /* Select which calling sequence. */
19026 info_ptr->abi = DEFAULT_ABI;
19028 /* Calculate which registers need to be saved & save area size. */
19029 info_ptr->first_gp_reg_save = first_reg_to_save ();
19030 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
19031 even if it currently looks like we won't. Reload may need it to
19032 get at a constant; if so, it will have already created a constant
19033 pool entry for it. */
19034 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
19035 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
19036 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
19037 && crtl->uses_const_pool
19038 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
19039 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
19040 else
19041 first_gp = info_ptr->first_gp_reg_save;
19043 info_ptr->gp_size = reg_size * (32 - first_gp);
19045 /* For the SPE, we have an additional upper 32-bits on each GPR.
19046 Ideally we should save the entire 64-bits only when the upper
19047 half is used in SIMD instructions. Since we only record
19048 registers live (not the size they are used in), this proves
19049 difficult because we'd have to traverse the instruction chain at
19050 the right time, taking reload into account. This is a real pain,
19051 so we opt to save the GPRs in 64-bits whenever even one register
19052 gets used in 64-bits. Otherwise, all the registers in the frame
19053 get saved in 32-bits.
19055 So... when we save all GPRs (except the SP) in 64-bits, the
19056 traditional GP save area will be empty. */
19057 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
19058 info_ptr->gp_size = 0;
19060 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
19061 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
19063 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
19064 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
19065 - info_ptr->first_altivec_reg_save);
19067 /* Does this function call anything? */
19068 info_ptr->calls_p = (! crtl->is_leaf
19069 || cfun->machine->ra_needs_full_frame);
19071 /* Determine if we need to save the condition code registers. */
19072 if (df_regs_ever_live_p (CR2_REGNO)
19073 || df_regs_ever_live_p (CR3_REGNO)
19074 || df_regs_ever_live_p (CR4_REGNO))
19076 info_ptr->cr_save_p = 1;
19077 if (DEFAULT_ABI == ABI_V4)
19078 info_ptr->cr_size = reg_size;
19081 /* If the current function calls __builtin_eh_return, then we need
19082 to allocate stack space for registers that will hold data for
19083 the exception handler. */
19084 if (crtl->calls_eh_return)
19086 unsigned int i;
19087 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
19088 continue;
19090 /* SPE saves EH registers in 64-bits. */
19091 ehrd_size = i * (TARGET_SPE_ABI
19092 && info_ptr->spe_64bit_regs_used != 0
19093 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
19095 else
19096 ehrd_size = 0;
19098 /* Determine various sizes. */
19099 info_ptr->reg_size = reg_size;
19100 info_ptr->fixed_size = RS6000_SAVE_AREA;
19101 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
19102 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
19103 TARGET_ALTIVEC ? 16 : 8);
19104 if (FRAME_GROWS_DOWNWARD)
19105 info_ptr->vars_size
19106 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
19107 + info_ptr->parm_size,
19108 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
19109 - (info_ptr->fixed_size + info_ptr->vars_size
19110 + info_ptr->parm_size);
19112 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
19113 info_ptr->spe_gp_size = 8 * (32 - first_gp);
19114 else
19115 info_ptr->spe_gp_size = 0;
19117 if (TARGET_ALTIVEC_ABI)
19118 info_ptr->vrsave_mask = compute_vrsave_mask ();
19119 else
19120 info_ptr->vrsave_mask = 0;
19122 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
19123 info_ptr->vrsave_size = 4;
19124 else
19125 info_ptr->vrsave_size = 0;
19127 compute_save_world_info (info_ptr);
19129 /* Calculate the offsets. */
19130 switch (DEFAULT_ABI)
19132 case ABI_NONE:
19133 default:
19134 gcc_unreachable ();
19136 case ABI_AIX:
19137 case ABI_DARWIN:
19138 info_ptr->fp_save_offset = - info_ptr->fp_size;
19139 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
19141 if (TARGET_ALTIVEC_ABI)
19143 info_ptr->vrsave_save_offset
19144 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
19146 /* Align stack so vector save area is on a quadword boundary.
19147 The padding goes above the vectors. */
19148 if (info_ptr->altivec_size != 0)
19149 info_ptr->altivec_padding_size
19150 = info_ptr->vrsave_save_offset & 0xF;
19151 else
19152 info_ptr->altivec_padding_size = 0;
19154 info_ptr->altivec_save_offset
19155 = info_ptr->vrsave_save_offset
19156 - info_ptr->altivec_padding_size
19157 - info_ptr->altivec_size;
19158 gcc_assert (info_ptr->altivec_size == 0
19159 || info_ptr->altivec_save_offset % 16 == 0);
19161 /* Adjust for AltiVec case. */
19162 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
19164 else
19165 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
19166 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
19167 info_ptr->lr_save_offset = 2*reg_size;
19168 break;
19170 case ABI_V4:
19171 info_ptr->fp_save_offset = - info_ptr->fp_size;
19172 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
19173 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
19175 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
19177 /* Align stack so SPE GPR save area is aligned on a
19178 double-word boundary. */
19179 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
19180 info_ptr->spe_padding_size
19181 = 8 - (-info_ptr->cr_save_offset % 8);
19182 else
19183 info_ptr->spe_padding_size = 0;
19185 info_ptr->spe_gp_save_offset
19186 = info_ptr->cr_save_offset
19187 - info_ptr->spe_padding_size
19188 - info_ptr->spe_gp_size;
19190 /* Adjust for SPE case. */
19191 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
19193 else if (TARGET_ALTIVEC_ABI)
19195 info_ptr->vrsave_save_offset
19196 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
19198 /* Align stack so vector save area is on a quadword boundary. */
19199 if (info_ptr->altivec_size != 0)
19200 info_ptr->altivec_padding_size
19201 = 16 - (-info_ptr->vrsave_save_offset % 16);
19202 else
19203 info_ptr->altivec_padding_size = 0;
19205 info_ptr->altivec_save_offset
19206 = info_ptr->vrsave_save_offset
19207 - info_ptr->altivec_padding_size
19208 - info_ptr->altivec_size;
19210 /* Adjust for AltiVec case. */
19211 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
19213 else
19214 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
19215 info_ptr->ehrd_offset -= ehrd_size;
19216 info_ptr->lr_save_offset = reg_size;
19217 break;
19220 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
19221 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
19222 + info_ptr->gp_size
19223 + info_ptr->altivec_size
19224 + info_ptr->altivec_padding_size
19225 + info_ptr->spe_gp_size
19226 + info_ptr->spe_padding_size
19227 + ehrd_size
19228 + info_ptr->cr_size
19229 + info_ptr->vrsave_size,
19230 save_align);
19232 non_fixed_size = (info_ptr->vars_size
19233 + info_ptr->parm_size
19234 + info_ptr->save_size);
19236 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
19237 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
19239 /* Determine if we need to save the link register. */
19240 if (info_ptr->calls_p
19241 || (DEFAULT_ABI == ABI_AIX
19242 && crtl->profile
19243 && !TARGET_PROFILE_KERNEL)
19244 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
19245 #ifdef TARGET_RELOCATABLE
19246 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
19247 #endif
19248 || rs6000_ra_ever_killed ())
19249 info_ptr->lr_save_p = 1;
19251 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19252 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19253 && call_used_regs[STATIC_CHAIN_REGNUM]);
19254 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
19255 using_static_chain_p);
19257 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
19258 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
19259 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
19260 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
19261 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
19262 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
19263 info_ptr->lr_save_p = 1;
19265 if (info_ptr->lr_save_p)
19266 df_set_regs_ever_live (LR_REGNO, true);
19268 /* Determine if we need to allocate any stack frame:
19270 For AIX we need to push the stack if a frame pointer is needed
19271 (because the stack might be dynamically adjusted), if we are
19272 debugging, if we make calls, or if the sum of fp_save, gp_save,
19273 and local variables are more than the space needed to save all
19274 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
19275 + 18*8 = 288 (GPR13 reserved).
19277 For V.4 we don't have the stack cushion that AIX uses, but assume
19278 that the debugger can handle stackless frames. */
19280 if (info_ptr->calls_p)
19281 info_ptr->push_p = 1;
19283 else if (DEFAULT_ABI == ABI_V4)
19284 info_ptr->push_p = non_fixed_size != 0;
19286 else if (frame_pointer_needed)
19287 info_ptr->push_p = 1;
19289 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
19290 info_ptr->push_p = 1;
19292 else
19293 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
19295 /* Zero offsets if we're not saving those registers. */
19296 if (info_ptr->fp_size == 0)
19297 info_ptr->fp_save_offset = 0;
19299 if (info_ptr->gp_size == 0)
19300 info_ptr->gp_save_offset = 0;
19302 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
19303 info_ptr->altivec_save_offset = 0;
19305 /* Zero VRSAVE offset if not saved and restored. */
19306 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
19307 info_ptr->vrsave_save_offset = 0;
19309 if (! TARGET_SPE_ABI
19310 || info_ptr->spe_64bit_regs_used == 0
19311 || info_ptr->spe_gp_size == 0)
19312 info_ptr->spe_gp_save_offset = 0;
19314 if (! info_ptr->lr_save_p)
19315 info_ptr->lr_save_offset = 0;
19317 if (! info_ptr->cr_save_p)
19318 info_ptr->cr_save_offset = 0;
19320 return info_ptr;
19323 /* Return true if the current function uses any GPRs in 64-bit SIMD
19324 mode. */
19326 static bool
19327 spe_func_has_64bit_regs_p (void)
19329 rtx insns, insn;
19331 /* Functions that save and restore all the call-saved registers will
19332 need to save/restore the registers in 64-bits. */
19333 if (crtl->calls_eh_return
19334 || cfun->calls_setjmp
19335 || crtl->has_nonlocal_goto)
19336 return true;
19338 insns = get_insns ();
19340 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
19342 if (INSN_P (insn))
19344 rtx i;
19346 /* FIXME: This should be implemented with attributes...
19348 (set_attr "spe64" "true")....then,
19349 if (get_spe64(insn)) return true;
19351 It's the only reliable way to do the stuff below. */
19353 i = PATTERN (insn);
19354 if (GET_CODE (i) == SET)
19356 enum machine_mode mode = GET_MODE (SET_SRC (i));
19358 if (SPE_VECTOR_MODE (mode))
19359 return true;
19360 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
19361 return true;
19366 return false;
19369 static void
19370 debug_stack_info (rs6000_stack_t *info)
19372 const char *abi_string;
19374 if (! info)
19375 info = rs6000_stack_info ();
19377 fprintf (stderr, "\nStack information for function %s:\n",
19378 ((current_function_decl && DECL_NAME (current_function_decl))
19379 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
19380 : "<unknown>"));
19382 switch (info->abi)
19384 default: abi_string = "Unknown"; break;
19385 case ABI_NONE: abi_string = "NONE"; break;
19386 case ABI_AIX: abi_string = "AIX"; break;
19387 case ABI_DARWIN: abi_string = "Darwin"; break;
19388 case ABI_V4: abi_string = "V.4"; break;
19391 fprintf (stderr, "\tABI = %5s\n", abi_string);
19393 if (TARGET_ALTIVEC_ABI)
19394 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
19396 if (TARGET_SPE_ABI)
19397 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
19399 if (info->first_gp_reg_save != 32)
19400 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
19402 if (info->first_fp_reg_save != 64)
19403 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
19405 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
19406 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
19407 info->first_altivec_reg_save);
19409 if (info->lr_save_p)
19410 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
19412 if (info->cr_save_p)
19413 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
19415 if (info->vrsave_mask)
19416 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
19418 if (info->push_p)
19419 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
19421 if (info->calls_p)
19422 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
19424 if (info->gp_save_offset)
19425 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
19427 if (info->fp_save_offset)
19428 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
19430 if (info->altivec_save_offset)
19431 fprintf (stderr, "\taltivec_save_offset = %5d\n",
19432 info->altivec_save_offset);
19434 if (info->spe_gp_save_offset)
19435 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
19436 info->spe_gp_save_offset);
19438 if (info->vrsave_save_offset)
19439 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
19440 info->vrsave_save_offset);
19442 if (info->lr_save_offset)
19443 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
19445 if (info->cr_save_offset)
19446 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
19448 if (info->varargs_save_offset)
19449 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
19451 if (info->total_size)
19452 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
19453 info->total_size);
19455 if (info->vars_size)
19456 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
19457 info->vars_size);
19459 if (info->parm_size)
19460 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
19462 if (info->fixed_size)
19463 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
19465 if (info->gp_size)
19466 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
19468 if (info->spe_gp_size)
19469 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
19471 if (info->fp_size)
19472 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
19474 if (info->altivec_size)
19475 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
19477 if (info->vrsave_size)
19478 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
19480 if (info->altivec_padding_size)
19481 fprintf (stderr, "\taltivec_padding_size= %5d\n",
19482 info->altivec_padding_size);
19484 if (info->spe_padding_size)
19485 fprintf (stderr, "\tspe_padding_size = %5d\n",
19486 info->spe_padding_size);
19488 if (info->cr_size)
19489 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
19491 if (info->save_size)
19492 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
19494 if (info->reg_size != 4)
19495 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
19497 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
19499 fprintf (stderr, "\n");
19502 rtx
19503 rs6000_return_addr (int count, rtx frame)
19505 /* Currently we don't optimize very well between prolog and body
19506 code, and for PIC code the result can actually be quite bad, so
19507 don't try to be too clever here. */
19508 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
19510 cfun->machine->ra_needs_full_frame = 1;
19512 return
19513 gen_rtx_MEM
19514 (Pmode,
19515 memory_address
19516 (Pmode,
19517 plus_constant (Pmode,
19518 copy_to_reg
19519 (gen_rtx_MEM (Pmode,
19520 memory_address (Pmode, frame))),
19521 RETURN_ADDRESS_OFFSET)));
19524 cfun->machine->ra_need_lr = 1;
19525 return get_hard_reg_initial_val (Pmode, LR_REGNO);
19528 /* Say whether a function is a candidate for sibcall handling or not. */
19530 static bool
19531 rs6000_function_ok_for_sibcall (tree decl, tree exp)
19533 tree fntype;
19535 if (decl)
19536 fntype = TREE_TYPE (decl);
19537 else
19538 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
19540 /* We can't do it if the called function has more vector parameters
19541 than the current function; there's nowhere to put the VRsave code. */
19542 if (TARGET_ALTIVEC_ABI
19543 && TARGET_ALTIVEC_VRSAVE
19544 && !(decl && decl == current_function_decl))
19546 function_args_iterator args_iter;
19547 tree type;
19548 int nvreg = 0;
19550 /* Functions with vector parameters are required to have a
19551 prototype, so the argument type info must be available
19552 here. */
19553 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
19554 if (TREE_CODE (type) == VECTOR_TYPE
19555 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
19556 nvreg++;
19558 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
19559 if (TREE_CODE (type) == VECTOR_TYPE
19560 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
19561 nvreg--;
19563 if (nvreg > 0)
19564 return false;
19567 /* Under the AIX ABI we can't allow calls to non-local functions,
19568 because the callee may have a different TOC pointer to the
19569 caller and there's no way to ensure we restore the TOC when we
19570 return. With the secure-plt SYSV ABI we can't make non-local
19571 calls when -fpic/PIC because the plt call stubs use r30. */
19572 if (DEFAULT_ABI == ABI_DARWIN
19573 || (DEFAULT_ABI == ABI_AIX
19574 && decl
19575 && !DECL_EXTERNAL (decl)
19576 && (*targetm.binds_local_p) (decl))
19577 || (DEFAULT_ABI == ABI_V4
19578 && (!TARGET_SECURE_PLT
19579 || !flag_pic
19580 || (decl
19581 && (*targetm.binds_local_p) (decl)))))
19583 tree attr_list = TYPE_ATTRIBUTES (fntype);
19585 if (!lookup_attribute ("longcall", attr_list)
19586 || lookup_attribute ("shortcall", attr_list))
19587 return true;
19590 return false;
19593 static int
19594 rs6000_ra_ever_killed (void)
19596 rtx top;
19597 rtx reg;
19598 rtx insn;
19600 if (cfun->is_thunk)
19601 return 0;
19603 if (cfun->machine->lr_save_state)
19604 return cfun->machine->lr_save_state - 1;
19606 /* regs_ever_live has LR marked as used if any sibcalls are present,
19607 but this should not force saving and restoring in the
19608 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
19609 clobbers LR, so that is inappropriate. */
19611 /* Also, the prologue can generate a store into LR that
19612 doesn't really count, like this:
19614 move LR->R0
19615 bcl to set PIC register
19616 move LR->R31
19617 move R0->LR
19619 When we're called from the epilogue, we need to avoid counting
19620 this as a store. */
19622 push_topmost_sequence ();
19623 top = get_insns ();
19624 pop_topmost_sequence ();
19625 reg = gen_rtx_REG (Pmode, LR_REGNO);
19627 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
19629 if (INSN_P (insn))
19631 if (CALL_P (insn))
19633 if (!SIBLING_CALL_P (insn))
19634 return 1;
19636 else if (find_regno_note (insn, REG_INC, LR_REGNO))
19637 return 1;
19638 else if (set_of (reg, insn) != NULL_RTX
19639 && !prologue_epilogue_contains (insn))
19640 return 1;
19643 return 0;
19646 /* Emit instructions needed to load the TOC register.
19647 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
19648 a constant pool; or for SVR4 -fpic. */
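/* As a rough sketch (labels illustrative; the real ones come from the
   load_toc_v4_PIC_* patterns), the 32-bit SVR4 -fPIC case emits
   something like:

	bcl	20,31,.L1
   .L1:	mflr	r30
	addis	r30,r30,.LCTOC1-.L1@ha
	addi	r30,r30,.LCTOC1-.L1@l  */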
19650 void
19651 rs6000_emit_load_toc_table (int fromprolog)
19653 rtx dest;
19654 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
19656 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
19658 char buf[30];
19659 rtx lab, tmp1, tmp2, got;
19661 lab = gen_label_rtx ();
19662 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
19663 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
19664 if (flag_pic == 2)
19665 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
19666 else
19667 got = rs6000_got_sym ();
19668 tmp1 = tmp2 = dest;
19669 if (!fromprolog)
19671 tmp1 = gen_reg_rtx (Pmode);
19672 tmp2 = gen_reg_rtx (Pmode);
19674 emit_insn (gen_load_toc_v4_PIC_1 (lab));
19675 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
19676 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
19677 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
19679 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
19681 emit_insn (gen_load_toc_v4_pic_si ());
19682 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
19684 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
19686 char buf[30];
19687 rtx temp0 = (fromprolog
19688 ? gen_rtx_REG (Pmode, 0)
19689 : gen_reg_rtx (Pmode));
19691 if (fromprolog)
19693 rtx symF, symL;
19695 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
19696 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
19698 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
19699 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
19701 emit_insn (gen_load_toc_v4_PIC_1 (symF));
19702 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
19703 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
19705 else
19707 rtx tocsym, lab;
19709 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
19710 lab = gen_label_rtx ();
19711 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
19712 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
19713 if (TARGET_LINK_STACK)
19714 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
19715 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
19717 emit_insn (gen_addsi3 (dest, temp0, dest));
19719 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
19721 /* This is for AIX code running in non-PIC ELF32. */
19722 char buf[30];
19723 rtx realsym;
19724 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
19725 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
19727 emit_insn (gen_elf_high (dest, realsym));
19728 emit_insn (gen_elf_low (dest, dest, realsym));
19730 else
19732 gcc_assert (DEFAULT_ABI == ABI_AIX);
19734 if (TARGET_32BIT)
19735 emit_insn (gen_load_toc_aix_si (dest));
19736 else
19737 emit_insn (gen_load_toc_aix_di (dest));
19741 /* Emit instructions to restore the link register after determining where
19742 its value has been stored. */
19744 void
19745 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
19747 rs6000_stack_t *info = rs6000_stack_info ();
19748 rtx operands[2];
19750 operands[0] = source;
19751 operands[1] = scratch;
19753 if (info->lr_save_p)
19755 rtx frame_rtx = stack_pointer_rtx;
19756 HOST_WIDE_INT sp_offset = 0;
19757 rtx tmp;
19759 if (frame_pointer_needed
19760 || cfun->calls_alloca
19761 || info->total_size > 32767)
19763 tmp = gen_frame_mem (Pmode, frame_rtx);
19764 emit_move_insn (operands[1], tmp);
19765 frame_rtx = operands[1];
19767 else if (info->push_p)
19768 sp_offset = info->total_size;
19770 tmp = plus_constant (Pmode, frame_rtx,
19771 info->lr_save_offset + sp_offset);
19772 tmp = gen_frame_mem (Pmode, tmp);
19773 emit_move_insn (tmp, operands[0]);
19775 else
19776 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
19778 /* Freeze lr_save_p. We've just emitted rtl that depends on the
19779 state of lr_save_p so any change from here on would be a bug. In
19780 particular, stop rs6000_ra_ever_killed from considering the SET
19781 of lr we may have added just above. */
19782 cfun->machine->lr_save_state = info->lr_save_p + 1;
19785 static GTY(()) alias_set_type set = -1;
19787 alias_set_type
19788 get_TOC_alias_set (void)
19790 if (set == -1)
19791 set = new_alias_set ();
19792 return set;
19795 /* This returns nonzero if the current function uses the TOC. This is
19796 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
19797 is generated by the ABI_V4 load_toc_* patterns. */
19798 #if TARGET_ELF
19799 static int
19800 uses_TOC (void)
19802 rtx insn;
19804 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
19805 if (INSN_P (insn))
19807 rtx pat = PATTERN (insn);
19808 int i;
19810 if (GET_CODE (pat) == PARALLEL)
19811 for (i = 0; i < XVECLEN (pat, 0); i++)
19813 rtx sub = XVECEXP (pat, 0, i);
19814 if (GET_CODE (sub) == USE)
19816 sub = XEXP (sub, 0);
19817 if (GET_CODE (sub) == UNSPEC
19818 && XINT (sub, 1) == UNSPEC_TOC)
19819 return 1;
19823 return 0;
19825 #endif
19827 rtx
19828 create_TOC_reference (rtx symbol, rtx largetoc_reg)
19830 rtx tocrel, tocreg, hi;
19832 if (TARGET_DEBUG_ADDR)
19834 if (GET_CODE (symbol) == SYMBOL_REF)
19835 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
19836 XSTR (symbol, 0));
19837 else
19839 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
19840 GET_RTX_NAME (GET_CODE (symbol)));
19841 debug_rtx (symbol);
19845 if (!can_create_pseudo_p ())
19846 df_set_regs_ever_live (TOC_REGISTER, true);
19848 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
19849 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
19850 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
19851 return tocrel;
19853 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
19854 if (largetoc_reg != NULL)
19856 emit_move_insn (largetoc_reg, hi);
19857 hi = largetoc_reg;
19859 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
19862 /* Issue assembly directives that create a reference to the given DWARF
19863 FRAME_TABLE_LABEL from the current function section. */
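/* For a frame table label such as "LSFDE1" (name illustrative), the
   output is simply:

	.ref LSFDE1  */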
19864 void
19865 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
19867 fprintf (asm_out_file, "\t.ref %s\n",
19868 (* targetm.strip_name_encoding) (frame_table_label));
19871 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
19872 and the change to the stack pointer. */
19874 static void
19875 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
19877 rtvec p;
19878 int i;
19879 rtx regs[3];
19881 i = 0;
19882 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19883 if (hard_frame_needed)
19884 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
19885 if (!(REGNO (fp) == STACK_POINTER_REGNUM
19886 || (hard_frame_needed
19887 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
19888 regs[i++] = fp;
19890 p = rtvec_alloc (i);
19891 while (--i >= 0)
19893 rtx mem = gen_frame_mem (BLKmode, regs[i]);
19894 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
19897 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
19900 /* Emit the correct code for allocating stack space, as insns.
19901 If COPY_REG, make sure a copy of the old frame is left there.
19902 The generated code may use hard register 0 as a temporary. */
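/* For frames of 32767 bytes or less this typically comes out as a
   single store-with-update, e.g. on 32-bit targets roughly:

	stwu	r1,-SIZE(r1)

   while larger frames first load the negated size into r0 (see the
   size > 32767 path below).  */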
19904 static void
19905 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
19907 rtx insn;
19908 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19909 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
19910 rtx todec = gen_int_mode (-size, Pmode);
19911 rtx par, set, mem;
19913 if (INTVAL (todec) != -size)
19915 warning (0, "stack frame too large");
19916 emit_insn (gen_trap ());
19917 return;
19920 if (crtl->limit_stack)
19922 if (REG_P (stack_limit_rtx)
19923 && REGNO (stack_limit_rtx) > 1
19924 && REGNO (stack_limit_rtx) <= 31)
19926 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
19927 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
19928 const0_rtx));
19930 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
19931 && TARGET_32BIT
19932 && DEFAULT_ABI == ABI_V4)
19934 rtx toload = gen_rtx_CONST (VOIDmode,
19935 gen_rtx_PLUS (Pmode,
19936 stack_limit_rtx,
19937 GEN_INT (size)));
19939 emit_insn (gen_elf_high (tmp_reg, toload));
19940 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
19941 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
19942 const0_rtx));
19944 else
19945 warning (0, "stack limit expression is not supported");
19948 if (copy_reg)
19950 if (copy_off != 0)
19951 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
19952 else
19953 emit_move_insn (copy_reg, stack_reg);
19956 if (size > 32767)
19958 /* Need a note here so that try_split doesn't get confused. */
19959 if (get_last_insn () == NULL_RTX)
19960 emit_note (NOTE_INSN_DELETED);
19961 insn = emit_move_insn (tmp_reg, todec);
19962 try_split (PATTERN (insn), insn, 0);
19963 todec = tmp_reg;
19966 insn = emit_insn (TARGET_32BIT
19967 ? gen_movsi_update_stack (stack_reg, stack_reg,
19968 todec, stack_reg)
19969 : gen_movdi_di_update_stack (stack_reg, stack_reg,
19970 todec, stack_reg));
19971 /* Since we didn't use gen_frame_mem to generate the MEM, grab
19972 it now and set the alias set/attributes. The above gen_*_update
19973 calls will generate a PARALLEL with the MEM set being the first
19974 operation. */
19975 par = PATTERN (insn);
19976 gcc_assert (GET_CODE (par) == PARALLEL);
19977 set = XVECEXP (par, 0, 0);
19978 gcc_assert (GET_CODE (set) == SET);
19979 mem = SET_DEST (set);
19980 gcc_assert (MEM_P (mem));
19981 MEM_NOTRAP_P (mem) = 1;
19982 set_mem_alias_set (mem, get_frame_alias_set ());
19984 RTX_FRAME_RELATED_P (insn) = 1;
19985 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
19986 gen_rtx_SET (VOIDmode, stack_reg,
19987 gen_rtx_PLUS (Pmode, stack_reg,
19988 GEN_INT (-size))));
19991 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
19993 #if PROBE_INTERVAL > 32768
19994 #error Cannot use indexed addressing mode for stack probing
19995 #endif
19997 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
19998 inclusive. These are offsets from the current stack pointer. */
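/* For example, assuming the default 4096-byte PROBE_INTERVAL, a call
   with FIRST = 0 and SIZE = 16384 emits probes at offsets -4096,
   -8192, -12288 and -16384 from the incoming stack pointer.  */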
20000 static void
20001 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
20003 /* See if we have a constant small number of probes to generate. If so,
20004 that's the easy case. */
20005 if (first + size <= 32768)
20007 HOST_WIDE_INT i;
20009 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
20010 it exceeds SIZE. If only one probe is needed, this will not
20011 generate any code. Then probe at FIRST + SIZE. */
20012 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
20013 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
20014 -(first + i)));
20016 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
20017 -(first + size)));
20020 /* Otherwise, do the same as above, but in a loop. Note that we must be
20021 extra careful with variables wrapping around because we might be at
20022 the very top (or the very bottom) of the address space and we have
20023 to be able to handle this case properly; in particular, we use an
20024 equality test for the loop condition. */
20025 else
20027 HOST_WIDE_INT rounded_size;
20028 rtx r12 = gen_rtx_REG (Pmode, 12);
20029 rtx r0 = gen_rtx_REG (Pmode, 0);
20031 /* Sanity check for the addressing mode we're going to use. */
20032 gcc_assert (first <= 32768);
20034 /* Step 1: round SIZE to the previous multiple of the interval. */
20036 rounded_size = size & -PROBE_INTERVAL;
20039 /* Step 2: compute initial and final value of the loop counter. */
20041 /* TEST_ADDR = SP + FIRST. */
20042 emit_insn (gen_rtx_SET (VOIDmode, r12,
20043 plus_constant (Pmode, stack_pointer_rtx,
20044 -first)));
20046 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
20047 if (rounded_size > 32768)
20049 emit_move_insn (r0, GEN_INT (-rounded_size));
20050 emit_insn (gen_rtx_SET (VOIDmode, r0,
20051 gen_rtx_PLUS (Pmode, r12, r0)));
20053 else
20054 emit_insn (gen_rtx_SET (VOIDmode, r0,
20055 plus_constant (Pmode, r12, -rounded_size)));
20058 /* Step 3: the loop
20060 while (TEST_ADDR != LAST_ADDR)
20062 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
20063 probe at TEST_ADDR
20066 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
20067 until it is equal to ROUNDED_SIZE. */
20069 if (TARGET_64BIT)
20070 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
20071 else
20072 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
20075 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
20076 that SIZE is equal to ROUNDED_SIZE. */
20078 if (size != rounded_size)
20079 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
20083 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
20084 absolute addresses. */
20086 const char *
20087 output_probe_stack_range (rtx reg1, rtx reg2)
20089 static int labelno = 0;
20090 char loop_lab[32], end_lab[32];
20091 rtx xops[2];
20093 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
20094 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
20096 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
20098 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
20099 xops[0] = reg1;
20100 xops[1] = reg2;
20101 if (TARGET_64BIT)
20102 output_asm_insn ("cmpd 0,%0,%1", xops);
20103 else
20104 output_asm_insn ("cmpw 0,%0,%1", xops);
20106 fputs ("\tbeq 0,", asm_out_file);
20107 assemble_name_raw (asm_out_file, end_lab);
20108 fputc ('\n', asm_out_file);
20110 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
20111 xops[1] = GEN_INT (-PROBE_INTERVAL);
20112 output_asm_insn ("addi %0,%0,%1", xops);
20114 /* Probe at TEST_ADDR and branch. */
20115 xops[1] = gen_rtx_REG (Pmode, 0);
20116 output_asm_insn ("stw %1,0(%0)", xops);
20117 fprintf (asm_out_file, "\tb ");
20118 assemble_name_raw (asm_out_file, loop_lab);
20119 fputc ('\n', asm_out_file);
20121 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
20123 return "";
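/* On a 64-bit target the code emitted above looks roughly like this
   (editor's sketch; LPSRL/LPSRE are the internal labels generated
   above, and 4096 stands in for PROBE_INTERVAL):

   .LPSRL0:
	cmpd 0,r12,r0
	beq 0,.LPSRE0
	addi r12,r12,-4096
	stw r0,0(r12)
	b .LPSRL0
   .LPSRE0:
*/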
20126 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
20127 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
20128 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
20129 deduce these equivalences by itself so it wasn't necessary to hold
20130 its hand so much. Don't be tempted to always supply d2_f_d_e with
20131 the actual cfa register, i.e. r31 when we are using a hard frame
20132 pointer. That fails when saving regs off r1, and sched moves the
20133 r31 setup past the reg saves. */
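/* Example of the effect (editor's note): if INSN stores a register at
   an offset from frame register r11, and VAL is r11's distance below
   the incoming stack pointer, the note attached below re-expresses
   addresses based on REG as based on (plus r1 VAL), which is the form
   the DWARF unwinder can consume.  */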
20135 static rtx
20136 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
20137 rtx reg2, rtx rreg)
20139 rtx real, temp;
20141 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
20143 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
20144 int i;
20146 gcc_checking_assert (val == 0);
20147 real = PATTERN (insn);
20148 if (GET_CODE (real) == PARALLEL)
20149 for (i = 0; i < XVECLEN (real, 0); i++)
20150 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
20152 rtx set = XVECEXP (real, 0, i);
20154 RTX_FRAME_RELATED_P (set) = 1;
20156 RTX_FRAME_RELATED_P (insn) = 1;
20157 return insn;
20160 /* copy_rtx will not make unique copies of registers, so we need to
20161 ensure we don't have unwanted sharing here. */
20162 if (reg == reg2)
20163 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
20165 if (reg == rreg)
20166 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
20168 real = copy_rtx (PATTERN (insn));
20170 if (reg2 != NULL_RTX)
20171 real = replace_rtx (real, reg2, rreg);
20173 if (REGNO (reg) == STACK_POINTER_REGNUM)
20174 gcc_checking_assert (val == 0);
20175 else
20176 real = replace_rtx (real, reg,
20177 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
20178 STACK_POINTER_REGNUM),
20179 GEN_INT (val)));
20181 /* We expect that 'real' is either a SET or a PARALLEL containing
20182 SETs (and possibly other stuff). In a PARALLEL, all the SETs
20183 are important so they all have to be marked RTX_FRAME_RELATED_P. */
20185 if (GET_CODE (real) == SET)
20187 rtx set = real;
20189 temp = simplify_rtx (SET_SRC (set));
20190 if (temp)
20191 SET_SRC (set) = temp;
20192 temp = simplify_rtx (SET_DEST (set));
20193 if (temp)
20194 SET_DEST (set) = temp;
20195 if (GET_CODE (SET_DEST (set)) == MEM)
20197 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
20198 if (temp)
20199 XEXP (SET_DEST (set), 0) = temp;
20202 else
20204 int i;
20206 gcc_assert (GET_CODE (real) == PARALLEL);
20207 for (i = 0; i < XVECLEN (real, 0); i++)
20208 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
20210 rtx set = XVECEXP (real, 0, i);
20212 temp = simplify_rtx (SET_SRC (set));
20213 if (temp)
20214 SET_SRC (set) = temp;
20215 temp = simplify_rtx (SET_DEST (set));
20216 if (temp)
20217 SET_DEST (set) = temp;
20218 if (GET_CODE (SET_DEST (set)) == MEM)
20220 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
20221 if (temp)
20222 XEXP (SET_DEST (set), 0) = temp;
20224 RTX_FRAME_RELATED_P (set) = 1;
20228 RTX_FRAME_RELATED_P (insn) = 1;
20229 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
20231 return insn;
20234 /* Returns an insn that has a vrsave set operation with the
20235 appropriate CLOBBERs. */
20237 static rtx
20238 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
20240 int nclobs, i;
20241 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
20242 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20244 clobs[0]
20245 = gen_rtx_SET (VOIDmode,
20246 vrsave,
20247 gen_rtx_UNSPEC_VOLATILE (SImode,
20248 gen_rtvec (2, reg, vrsave),
20249 UNSPECV_SET_VRSAVE));
20251 nclobs = 1;
20253 /* We need to clobber the registers in the mask so the scheduler
20254 does not move sets to VRSAVE before sets of AltiVec registers.
20256 However, if the function receives nonlocal gotos, reload will set
20257 all call saved registers live. We will end up with:
20259 (set (reg 999) (mem))
20260 (parallel [ (set (reg vrsave) (unspec blah))
20261 (clobber (reg 999))])
20263 The clobber will cause the store into reg 999 to be dead, and
20264 flow will attempt to delete an epilogue insn. In this case, we
20265 need an unspec use/set of the register. */
20267 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20268 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20270 if (!epiloguep || call_used_regs [i])
20271 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
20272 gen_rtx_REG (V4SImode, i));
20273 else
20275 rtx reg = gen_rtx_REG (V4SImode, i);
20277 clobs[nclobs++]
20278 = gen_rtx_SET (VOIDmode,
20279 reg,
20280 gen_rtx_UNSPEC (V4SImode,
20281 gen_rtvec (1, reg), 27));
20285 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
20287 for (i = 0; i < nclobs; ++i)
20288 XVECEXP (insn, 0, i) = clobs[i];
20290 return insn;
20293 static rtx
20294 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
20296 rtx addr, mem;
20298 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
20299 mem = gen_frame_mem (GET_MODE (reg), addr);
20300 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
20303 static rtx
20304 gen_frame_load (rtx reg, rtx frame_reg, int offset)
20306 return gen_frame_set (reg, frame_reg, offset, false);
20309 static rtx
20310 gen_frame_store (rtx reg, rtx frame_reg, int offset)
20312 return gen_frame_set (reg, frame_reg, offset, true);
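/* Usage sketch (editor's illustration): on a 64-bit target,
   gen_frame_store (gen_rtx_REG (Pmode, 0), sp_reg_rtx, 16) builds
   roughly (set (mem:DI (plus:DI (reg 1) (const_int 16))) (reg 0)),
   and gen_frame_load builds the mirror-image SET.  */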
20315 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
20316 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
20318 static rtx
20319 emit_frame_save (rtx frame_reg, enum machine_mode mode,
20320 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
20322 rtx reg, insn;
20324 /* Some cases that need register indexed addressing. */
20325 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
20326 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
20327 || (TARGET_E500_DOUBLE && mode == DFmode)
20328 || (TARGET_SPE_ABI
20329 && SPE_VECTOR_MODE (mode)
20330 && !SPE_CONST_OFFSET_OK (offset))));
20332 reg = gen_rtx_REG (mode, regno);
20333 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
20334 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
20335 NULL_RTX, NULL_RTX);
20338 /* Emit an offset memory reference suitable for a frame store, while
20339 converting to a valid addressing mode. */
20341 static rtx
20342 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
20344 rtx int_rtx, offset_rtx;
20346 int_rtx = GEN_INT (offset);
20348 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
20349 || (TARGET_E500_DOUBLE && mode == DFmode))
20351 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
20352 emit_move_insn (offset_rtx, int_rtx);
20354 else
20355 offset_rtx = int_rtx;
20357 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
20360 #ifndef TARGET_FIX_AND_CONTINUE
20361 #define TARGET_FIX_AND_CONTINUE 0
20362 #endif
20364 /* The first out-of-line-saved register is really GPR 13 or 14, FPR 14, or VR 20. We need the smallest of these. */
20365 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
20366 #define LAST_SAVRES_REGISTER 31
20367 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
20369 enum {
20370 SAVRES_LR = 0x1,
20371 SAVRES_SAVE = 0x2,
20372 SAVRES_REG = 0x0c,
20373 SAVRES_GPR = 0,
20374 SAVRES_FPR = 4,
20375 SAVRES_VR = 8
20378 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
20380 /* Temporary holding space for an out-of-line register save/restore
20381 routine name. */
20382 static char savres_routine_name[30];
20384 /* Return the name for an out-of-line register save/restore routine.
20385 The register class, save vs. restore, and LR handling are selected by SEL. */
20387 static char *
20388 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
20390 const char *prefix = "";
20391 const char *suffix = "";
20393 /* Different targets are supposed to define
20394 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
20395 routine name could be defined with:
20397 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
20399 This is a nice idea in theory, but in practice, things are
20400 complicated in several ways:
20402 - ELF targets have save/restore routines for GPRs.
20404 - SPE targets use different prefixes for 32/64-bit registers, and
20405 neither of them fits neatly in the FOO_{PREFIX,SUFFIX} regimen.
20407 - PPC64 ELF targets have routines for save/restore of GPRs that
20408 differ in what they do with the link register, so having a set
20409 prefix doesn't work. (We only use one of the save routines at
20410 the moment, though.)
20412 - PPC32 ELF targets have "exit" versions of the restore routines
20413 that restore the link register and can save some extra space.
20414 These require an extra suffix. (There are also "tail" versions
20415 of the restore routines and "GOT" versions of the save routines,
20416 but we don't generate those at present. Same problems apply,
20417 though.)
20419 We deal with all this by synthesizing our own prefix/suffix and
20420 using that for the simple sprintf call shown above. */
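/* For example (editor's note): 32-bit SVR4 names a GPR save starting
   at r29 that also handles LR "_savegpr_29_x", while the AIX-style
   naming below uses "_savegpr0_29" for the LR-handling variant and
   "_savegpr1_29" otherwise.  */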
20421 if (TARGET_SPE)
20423 /* No floating point saves on the SPE. */
20424 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
20426 if ((sel & SAVRES_SAVE))
20427 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
20428 else
20429 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
20431 if ((sel & SAVRES_LR))
20432 suffix = "_x";
20434 else if (DEFAULT_ABI == ABI_V4)
20436 if (TARGET_64BIT)
20437 goto aix_names;
20439 if ((sel & SAVRES_REG) == SAVRES_GPR)
20440 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
20441 else if ((sel & SAVRES_REG) == SAVRES_FPR)
20442 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
20443 else if ((sel & SAVRES_REG) == SAVRES_VR)
20444 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
20445 else
20446 abort ();
20448 if ((sel & SAVRES_LR))
20449 suffix = "_x";
20451 else if (DEFAULT_ABI == ABI_AIX)
20453 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
20454 /* No out-of-line save/restore routines for GPRs on AIX. */
20455 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
20456 #endif
20458 aix_names:
20459 if ((sel & SAVRES_REG) == SAVRES_GPR)
20460 prefix = ((sel & SAVRES_SAVE)
20461 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
20462 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
20463 else if ((sel & SAVRES_REG) == SAVRES_FPR)
20465 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
20466 if ((sel & SAVRES_LR))
20467 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
20468 else
20469 #endif
20471 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
20472 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
20475 else if ((sel & SAVRES_REG) == SAVRES_VR)
20476 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
20477 else
20478 abort ();
20481 if (DEFAULT_ABI == ABI_DARWIN)
20483 /* The Darwin approach is (slightly) different, in order to be
20484 compatible with code generated by the system toolchain. There is a
20485 single symbol for the start of save sequence, and the code here
20486 embeds an offset into that code on the basis of the first register
20487 to be saved. */
20488 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
20489 if ((sel & SAVRES_REG) == SAVRES_GPR)
20490 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
20491 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
20492 (regno - 13) * 4, prefix, regno);
20493 else if ((sel & SAVRES_REG) == SAVRES_FPR)
20494 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
20495 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
20496 else if ((sel & SAVRES_REG) == SAVRES_VR)
20497 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
20498 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
20499 else
20500 abort ();
20502 else
20503 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
20505 return savres_routine_name;
20508 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
20509 The routine is selected by SEL, as for rs6000_savres_routine_name. */
20511 static rtx
20512 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
20514 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
20515 ? info->first_gp_reg_save
20516 : (sel & SAVRES_REG) == SAVRES_FPR
20517 ? info->first_fp_reg_save - 32
20518 : (sel & SAVRES_REG) == SAVRES_VR
20519 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
20520 : -1);
20521 rtx sym;
20522 int select = sel;
20524 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
20525 versions of the gpr routines. */
20526 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
20527 && info->spe_64bit_regs_used)
20528 select ^= SAVRES_FPR ^ SAVRES_GPR;
20530 /* Don't generate bogus routine names. */
20531 gcc_assert (FIRST_SAVRES_REGISTER <= regno
20532 && regno <= LAST_SAVRES_REGISTER
20533 && select >= 0 && select <= 12);
20535 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
20537 if (sym == NULL)
20539 char *name;
20541 name = rs6000_savres_routine_name (info, regno, sel);
20543 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
20544 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
20545 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
20548 return sym;
20551 /* Emit a sequence of insns, including a stack tie if needed, for
20552 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
20553 reset the stack pointer, but move the base of the frame into
20554 reg UPDT_REGNO for use by out-of-line register restore routines. */
20556 static rtx
20557 rs6000_emit_stack_reset (rs6000_stack_t *info,
20558 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
20559 unsigned updt_regno)
20561 rtx updt_reg_rtx;
20563 /* This blockage is needed so that sched doesn't decide to move
20564 the sp change before the register restores. */
20565 if (DEFAULT_ABI == ABI_V4
20566 || (TARGET_SPE_ABI
20567 && info->spe_64bit_regs_used != 0
20568 && info->first_gp_reg_save != 32))
20569 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
20571 /* If we are restoring registers out-of-line, we will be using the
20572 "exit" variants of the restore routines, which will reset the
20573 stack for us. But we do need to point updt_reg at the
20574 right place for those routines. */
20575 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
20577 if (frame_off != 0)
20578 return emit_insn (gen_add3_insn (updt_reg_rtx,
20579 frame_reg_rtx, GEN_INT (frame_off)));
20580 else if (REGNO (frame_reg_rtx) != updt_regno)
20581 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
20583 return NULL_RTX;
20586 /* Return the register number used as a pointer by out-of-line
20587 save/restore functions. */
20589 static inline unsigned
20590 ptr_regno_for_savres (int sel)
20592 if (DEFAULT_ABI == ABI_AIX)
20593 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
20594 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
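/* In other words (editor's summary): AIX uses r1 for FPR routines and
   for any routine that handles LR, and r12 otherwise; Darwin uses r1
   for FPR routines; all remaining cases use r11.  */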
20597 /* Construct a parallel rtx describing the effect of a call to an
20598 out-of-line register save/restore routine, and emit the insn
20599 or jump_insn as appropriate. */
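/* Shape of the PARALLEL built here for, e.g., an out-of-line GPR
   restore that also restores LR (editor's sketch):

     (parallel [(return)
                (clobber (reg LR))
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg 11))
                (set (reg 29) (mem (plus (reg 11) (const_int ...))))
                ...])
*/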
20601 static rtx
20602 rs6000_emit_savres_rtx (rs6000_stack_t *info,
20603 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
20604 enum machine_mode reg_mode, int sel)
20606 int i;
20607 int offset, start_reg, end_reg, n_regs, use_reg;
20608 int reg_size = GET_MODE_SIZE (reg_mode);
20609 rtx sym;
20610 rtvec p;
20611 rtx par, insn;
20613 offset = 0;
20614 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
20615 ? info->first_gp_reg_save
20616 : (sel & SAVRES_REG) == SAVRES_FPR
20617 ? info->first_fp_reg_save
20618 : (sel & SAVRES_REG) == SAVRES_VR
20619 ? info->first_altivec_reg_save
20620 : -1);
20621 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
20622 ? 32
20623 : (sel & SAVRES_REG) == SAVRES_FPR
20624 ? 64
20625 : (sel & SAVRES_REG) == SAVRES_VR
20626 ? LAST_ALTIVEC_REGNO + 1
20627 : -1);
20628 n_regs = end_reg - start_reg;
20629 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
20630 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
20631 + n_regs);
20633 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
20634 RTVEC_ELT (p, offset++) = ret_rtx;
20636 RTVEC_ELT (p, offset++)
20637 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
20639 sym = rs6000_savres_routine_sym (info, sel);
20640 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
20642 use_reg = ptr_regno_for_savres (sel);
20643 if ((sel & SAVRES_REG) == SAVRES_VR)
20645 /* Vector regs are saved/restored using [reg+reg] addressing. */
20646 RTVEC_ELT (p, offset++)
20647 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
20648 RTVEC_ELT (p, offset++)
20649 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
20651 else
20652 RTVEC_ELT (p, offset++)
20653 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
20655 for (i = 0; i < end_reg - start_reg; i++)
20656 RTVEC_ELT (p, i + offset)
20657 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
20658 frame_reg_rtx, save_area_offset + reg_size * i,
20659 (sel & SAVRES_SAVE) != 0);
20661 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
20662 RTVEC_ELT (p, i + offset)
20663 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
20665 par = gen_rtx_PARALLEL (VOIDmode, p);
20667 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
20669 insn = emit_jump_insn (par);
20670 JUMP_LABEL (insn) = ret_rtx;
20672 else
20673 insn = emit_insn (par);
20674 return insn;
20677 /* Determine whether the GP register REG is really used. */
20679 static bool
20680 rs6000_reg_live_or_pic_offset_p (int reg)
20682 /* If the function calls eh_return, claim used all the registers that would
20683 be checked for liveness otherwise. This is required for the PIC offset
20684 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
20685 register allocation purposes in this case. */
20687 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
20688 && (!call_used_regs[reg]
20689 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
20690 && !TARGET_SINGLE_PIC_BASE
20691 && TARGET_TOC && TARGET_MINIMAL_TOC)))
20692 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
20693 && !TARGET_SINGLE_PIC_BASE
20694 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
20695 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
20698 /* Emit function prologue as insns. */
20700 void
20701 rs6000_emit_prologue (void)
20703 rs6000_stack_t *info = rs6000_stack_info ();
20704 enum machine_mode reg_mode = Pmode;
20705 int reg_size = TARGET_32BIT ? 4 : 8;
20706 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
20707 rtx frame_reg_rtx = sp_reg_rtx;
20708 unsigned int cr_save_regno;
20709 rtx cr_save_rtx = NULL_RTX;
20710 rtx insn;
20711 int strategy;
20712 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
20713 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
20714 && call_used_regs[STATIC_CHAIN_REGNUM]);
20715 /* Offset to top of frame for frame_reg and sp respectively. */
20716 HOST_WIDE_INT frame_off = 0;
20717 HOST_WIDE_INT sp_off = 0;
20719 #ifdef ENABLE_CHECKING
20720 /* Track and check usage of r0, r11, r12. */
20721 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
20722 #define START_USE(R) do \
20724 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
20725 reg_inuse |= 1 << (R); \
20726 } while (0)
20727 #define END_USE(R) do \
20729 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
20730 reg_inuse &= ~(1 << (R)); \
20731 } while (0)
20732 #define NOT_INUSE(R) do \
20734 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
20735 } while (0)
20736 #else
20737 #define START_USE(R) do {} while (0)
20738 #define END_USE(R) do {} while (0)
20739 #define NOT_INUSE(R) do {} while (0)
20740 #endif
20742 if (flag_stack_usage_info)
20743 current_function_static_stack_size = info->total_size;
20745 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
20746 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
20748 if (TARGET_FIX_AND_CONTINUE)
20750 /* gdb on darwin arranges to forward a function from the old
20751 address by modifying the first 5 instructions of the function
20752 to branch to the overriding function. This is necessary to
20753 permit function pointers that point to the old function to
20754 actually forward to the new function. */
20755 emit_insn (gen_nop ());
20756 emit_insn (gen_nop ());
20757 emit_insn (gen_nop ());
20758 emit_insn (gen_nop ());
20759 emit_insn (gen_nop ());
20762 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20764 reg_mode = V2SImode;
20765 reg_size = 8;
20768 /* Handle world saves specially here. */
20769 if (WORLD_SAVE_P (info))
20771 int i, j, sz;
20772 rtx treg;
20773 rtvec p;
20774 rtx reg0;
20776 /* save_world expects lr in r0. */
20777 reg0 = gen_rtx_REG (Pmode, 0);
20778 if (info->lr_save_p)
20780 insn = emit_move_insn (reg0,
20781 gen_rtx_REG (Pmode, LR_REGNO));
20782 RTX_FRAME_RELATED_P (insn) = 1;
20785 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
20786 assumptions about the offsets of various bits of the stack
20787 frame. */
20788 gcc_assert (info->gp_save_offset == -220
20789 && info->fp_save_offset == -144
20790 && info->lr_save_offset == 8
20791 && info->cr_save_offset == 4
20792 && info->push_p
20793 && info->lr_save_p
20794 && (!crtl->calls_eh_return
20795 || info->ehrd_offset == -432)
20796 && info->vrsave_save_offset == -224
20797 && info->altivec_save_offset == -416);
20799 treg = gen_rtx_REG (SImode, 11);
20800 emit_move_insn (treg, GEN_INT (-info->total_size));
20802 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
20803 in R11. It also clobbers R12, so beware! */
20805 /* Preserve CR2 for save_world prologues. */
20806 sz = 5;
20807 sz += 32 - info->first_gp_reg_save;
20808 sz += 64 - info->first_fp_reg_save;
20809 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
20810 p = rtvec_alloc (sz);
20811 j = 0;
20812 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
20813 gen_rtx_REG (SImode,
20814 LR_REGNO));
20815 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20816 gen_rtx_SYMBOL_REF (Pmode,
20817 "*save_world"));
20818 /* We do floats first so that the instruction pattern matches
20819 properly. */
20820 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20821 RTVEC_ELT (p, j++)
20822 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20823 ? DFmode : SFmode,
20824 info->first_fp_reg_save + i),
20825 frame_reg_rtx,
20826 info->fp_save_offset + frame_off + 8 * i);
20827 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20828 RTVEC_ELT (p, j++)
20829 = gen_frame_store (gen_rtx_REG (V4SImode,
20830 info->first_altivec_reg_save + i),
20831 frame_reg_rtx,
20832 info->altivec_save_offset + frame_off + 16 * i);
20833 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20834 RTVEC_ELT (p, j++)
20835 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
20836 frame_reg_rtx,
20837 info->gp_save_offset + frame_off + reg_size * i);
20839 /* CR register traditionally saved as CR2. */
20840 RTVEC_ELT (p, j++)
20841 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
20842 frame_reg_rtx, info->cr_save_offset + frame_off);
20843 /* Explain about use of R0. */
20844 if (info->lr_save_p)
20845 RTVEC_ELT (p, j++)
20846 = gen_frame_store (reg0,
20847 frame_reg_rtx, info->lr_save_offset + frame_off);
20848 /* Explain what happens to the stack pointer. */
20850 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
20851 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
20854 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20855 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20856 treg, GEN_INT (-info->total_size));
20857 sp_off = frame_off = info->total_size;
20860 strategy = info->savres_strategy;
20862 /* For V.4, update stack before we do any saving and set back pointer. */
20863 if (! WORLD_SAVE_P (info)
20864 && info->push_p
20865 && (DEFAULT_ABI == ABI_V4
20866 || crtl->calls_eh_return))
20868 bool need_r11 = (TARGET_SPE
20869 ? (!(strategy & SAVE_INLINE_GPRS)
20870 && info->spe_64bit_regs_used == 0)
20871 : (!(strategy & SAVE_INLINE_FPRS)
20872 || !(strategy & SAVE_INLINE_GPRS)
20873 || !(strategy & SAVE_INLINE_VRS)));
20874 int ptr_regno = -1;
20875 rtx ptr_reg = NULL_RTX;
20876 int ptr_off = 0;
20878 if (info->total_size < 32767)
20879 frame_off = info->total_size;
20880 else if (need_r11)
20881 ptr_regno = 11;
20882 else if (info->cr_save_p
20883 || info->lr_save_p
20884 || info->first_fp_reg_save < 64
20885 || info->first_gp_reg_save < 32
20886 || info->altivec_size != 0
20887 || info->vrsave_mask != 0
20888 || crtl->calls_eh_return)
20889 ptr_regno = 12;
20890 else
20892 /* The prologue won't be saving any regs so there is no need
20893 to set up a frame register to access any frame save area.
20894 We also won't be using frame_off anywhere below, but set
20895 the correct value anyway to protect against future
20896 changes to this function. */
20897 frame_off = info->total_size;
20899 if (ptr_regno != -1)
20901 /* Set up the frame offset to that needed by the first
20902 out-of-line save function. */
20903 START_USE (ptr_regno);
20904 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20905 frame_reg_rtx = ptr_reg;
20906 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
20907 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
20908 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
20909 ptr_off = info->gp_save_offset + info->gp_size;
20910 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
20911 ptr_off = info->altivec_save_offset + info->altivec_size;
20912 frame_off = -ptr_off;
20914 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20915 sp_off = info->total_size;
20916 if (frame_reg_rtx != sp_reg_rtx)
20917 rs6000_emit_stack_tie (frame_reg_rtx, false);
20920 /* If we use the link register, get it into r0. */
20921 if (!WORLD_SAVE_P (info) && info->lr_save_p)
20923 rtx addr, reg, mem;
20925 reg = gen_rtx_REG (Pmode, 0);
20926 START_USE (0);
20927 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
20928 RTX_FRAME_RELATED_P (insn) = 1;
20930 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
20931 | SAVE_NOINLINE_FPRS_SAVES_LR)))
20933 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20934 GEN_INT (info->lr_save_offset + frame_off));
20935 mem = gen_rtx_MEM (Pmode, addr);
20936 /* This should not be of rs6000_sr_alias_set, because of
20937 __builtin_return_address. */
20939 insn = emit_move_insn (mem, reg);
20940 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20941 NULL_RTX, NULL_RTX);
20942 END_USE (0);
20946 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
20947 r12 will be needed by out-of-line gpr restore. */
20948 cr_save_regno = (DEFAULT_ABI == ABI_AIX
20949 && !(strategy & (SAVE_INLINE_GPRS
20950 | SAVE_NOINLINE_GPRS_SAVES_LR))
20951 ? 11 : 12);
20952 if (!WORLD_SAVE_P (info)
20953 && info->cr_save_p
20954 && REGNO (frame_reg_rtx) != cr_save_regno
20955 && !(using_static_chain_p && cr_save_regno == 11))
20957 rtx set;
20959 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
20960 START_USE (cr_save_regno);
20961 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20962 RTX_FRAME_RELATED_P (insn) = 1;
20963 /* Now, there's no way that dwarf2out_frame_debug_expr is going
20964 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
20965 But that's OK. All we have to do is specify that _one_ condition
20966 code register is saved in this stack slot. The thrower's epilogue
20967 will then restore all the call-saved registers.
20968 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
20969 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
20970 gen_rtx_REG (SImode, CR2_REGNO));
20971 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20974 /* Do any required saving of fpr's. If only one or two to save, do
20975 it ourselves. Otherwise, call function. */
20976 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
20978 int i;
20979 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20980 if (save_reg_p (info->first_fp_reg_save + i))
20981 emit_frame_save (frame_reg_rtx,
20982 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20983 ? DFmode : SFmode),
20984 info->first_fp_reg_save + i,
20985 info->fp_save_offset + frame_off + 8 * i,
20986 sp_off - frame_off);
20988 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
20990 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20991 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20992 unsigned ptr_regno = ptr_regno_for_savres (sel);
20993 rtx ptr_reg = frame_reg_rtx;
20995 if (REGNO (frame_reg_rtx) == ptr_regno)
20996 gcc_checking_assert (frame_off == 0);
20997 else
20999 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21000 NOT_INUSE (ptr_regno);
21001 emit_insn (gen_add3_insn (ptr_reg,
21002 frame_reg_rtx, GEN_INT (frame_off)));
21004 insn = rs6000_emit_savres_rtx (info, ptr_reg,
21005 info->fp_save_offset,
21006 info->lr_save_offset,
21007 DFmode, sel);
21008 rs6000_frame_related (insn, ptr_reg, sp_off,
21009 NULL_RTX, NULL_RTX);
21010 if (lr)
21011 END_USE (0);
21014 /* Save GPRs. This is done as a PARALLEL if we are using
21015 the store-multiple instructions. */
21016 if (!WORLD_SAVE_P (info)
21017 && TARGET_SPE_ABI
21018 && info->spe_64bit_regs_used != 0
21019 && info->first_gp_reg_save != 32)
21021 int i;
21022 rtx spe_save_area_ptr;
21023 HOST_WIDE_INT save_off;
21024 int ool_adjust = 0;
21026 /* Determine whether we can address all of the registers that need
21027 to be saved with an offset from frame_reg_rtx that fits in
21028 the small const field for SPE memory instructions. */
21029 int spe_regs_addressable
21030 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21031 + reg_size * (32 - info->first_gp_reg_save - 1))
21032 && (strategy & SAVE_INLINE_GPRS));
21034 if (spe_regs_addressable)
21036 spe_save_area_ptr = frame_reg_rtx;
21037 save_off = frame_off;
21039 else
21041 /* Make r11 point to the start of the SPE save area. We need
21042 to be careful here if r11 is holding the static chain. If
21043 it is, then temporarily save it in r0. */
21044 HOST_WIDE_INT offset;
21046 if (!(strategy & SAVE_INLINE_GPRS))
21047 ool_adjust = 8 * (info->first_gp_reg_save
21048 - (FIRST_SAVRES_REGISTER + 1));
21049 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
21050 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
21051 save_off = frame_off - offset;
21053 if (using_static_chain_p)
21055 rtx r0 = gen_rtx_REG (Pmode, 0);
21057 START_USE (0);
21058 gcc_assert (info->first_gp_reg_save > 11);
21060 emit_move_insn (r0, spe_save_area_ptr);
21062 else if (REGNO (frame_reg_rtx) != 11)
21063 START_USE (11);
21065 emit_insn (gen_addsi3 (spe_save_area_ptr,
21066 frame_reg_rtx, GEN_INT (offset)));
21067 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
21068 frame_off = -info->spe_gp_save_offset + ool_adjust;
21071 if ((strategy & SAVE_INLINE_GPRS))
21073 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21074 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21075 emit_frame_save (spe_save_area_ptr, reg_mode,
21076 info->first_gp_reg_save + i,
21077 (info->spe_gp_save_offset + save_off
21078 + reg_size * i),
21079 sp_off - save_off);
21081 else
21083 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
21084 info->spe_gp_save_offset + save_off,
21085 0, reg_mode,
21086 SAVRES_SAVE | SAVRES_GPR);
21088 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
21089 NULL_RTX, NULL_RTX);
21092 /* Move the static chain pointer back. */
21093 if (!spe_regs_addressable)
21095 if (using_static_chain_p)
21097 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
21098 END_USE (0);
21100 else if (REGNO (frame_reg_rtx) != 11)
21101 END_USE (11);
21104 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
21106 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
21107 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
21108 unsigned ptr_regno = ptr_regno_for_savres (sel);
21109 rtx ptr_reg = frame_reg_rtx;
21110 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
21111 int end_save = info->gp_save_offset + info->gp_size;
21112 int ptr_off;
21114 if (!ptr_set_up)
21115 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21117 /* Need to adjust r11 (r12) if we saved any FPRs. */
21118 if (end_save + frame_off != 0)
21120 rtx offset = GEN_INT (end_save + frame_off);
21122 if (ptr_set_up)
21123 frame_off = -end_save;
21124 else
21125 NOT_INUSE (ptr_regno);
21126 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21128 else if (!ptr_set_up)
21130 NOT_INUSE (ptr_regno);
21131 emit_move_insn (ptr_reg, frame_reg_rtx);
21133 ptr_off = -end_save;
21134 insn = rs6000_emit_savres_rtx (info, ptr_reg,
21135 info->gp_save_offset + ptr_off,
21136 info->lr_save_offset + ptr_off,
21137 reg_mode, sel);
21138 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
21139 NULL_RTX, NULL_RTX);
21140 if (lr)
21141 END_USE (0);
21143 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
21145 rtvec p;
21146 int i;
21147 p = rtvec_alloc (32 - info->first_gp_reg_save);
21148 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21149 RTVEC_ELT (p, i)
21150 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21151 frame_reg_rtx,
21152 info->gp_save_offset + frame_off + reg_size * i);
21153 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21154 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21155 NULL_RTX, NULL_RTX);
21157 else if (!WORLD_SAVE_P (info))
21159 int i;
21160 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21161 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21162 emit_frame_save (frame_reg_rtx, reg_mode,
21163 info->first_gp_reg_save + i,
21164 info->gp_save_offset + frame_off + reg_size * i,
21165 sp_off - frame_off);
21168 if (crtl->calls_eh_return)
21170 unsigned int i;
21171 rtvec p;
21173 for (i = 0; ; ++i)
21175 unsigned int regno = EH_RETURN_DATA_REGNO (i);
21176 if (regno == INVALID_REGNUM)
21177 break;
21180 p = rtvec_alloc (i);
21182 for (i = 0; ; ++i)
21184 unsigned int regno = EH_RETURN_DATA_REGNO (i);
21185 if (regno == INVALID_REGNUM)
21186 break;
21188 insn
21189 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
21190 sp_reg_rtx,
21191 info->ehrd_offset + sp_off + reg_size * (int) i);
21192 RTVEC_ELT (p, i) = insn;
21193 RTX_FRAME_RELATED_P (insn) = 1;
21196 insn = emit_insn (gen_blockage ());
21197 RTX_FRAME_RELATED_P (insn) = 1;
21198 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
21201 /* In AIX ABI we need to make sure r2 is really saved. */
21202 if (TARGET_AIX && crtl->calls_eh_return)
21204 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
21205 rtx save_insn, join_insn, note;
21206 long toc_restore_insn;
21208 tmp_reg = gen_rtx_REG (Pmode, 11);
21209 tmp_reg_si = gen_rtx_REG (SImode, 11);
21210 if (using_static_chain_p)
21212 START_USE (0);
21213 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
21215 else
21216 START_USE (11);
21217 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
21218 /* Peek at instruction to which this function returns. If it's
21219 restoring r2, then we know we've already saved r2. We can't
21220 unconditionally save r2 because the value we have will already
21221 be updated if we arrived at this function via a plt call or
21222 toc adjusting stub. */
21223 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
21224 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
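/* Editor's note: 0x80410014 is "lwz r2,20(r1)" and 0xE8410028 is
   "ld r2,40(r1)", the r2 (TOC) restores placed after indirect or
   cross-module calls in the 32- and 64-bit AIX-style ABIs.  */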
21225 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
21226 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
21227 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
21228 validate_condition_mode (EQ, CCUNSmode);
21229 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
21230 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
21231 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
21232 toc_save_done = gen_label_rtx ();
21233 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
21234 gen_rtx_EQ (VOIDmode, compare_result,
21235 const0_rtx),
21236 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
21237 pc_rtx);
21238 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
21239 JUMP_LABEL (jump) = toc_save_done;
21240 LABEL_NUSES (toc_save_done) += 1;
21242 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
21243 TOC_REGNUM, frame_off + 5 * reg_size,
21244 sp_off - frame_off);
21246 emit_label (toc_save_done);
21248 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
21249 have a CFG that has different saves along different paths.
21250 Move the note to a dummy blockage insn, which describes that
21251 R2 is unconditionally saved after the label. */
21252 /* ??? An alternate representation might be a special insn pattern
21253 containing both the branch and the store. That might give the
21254 code that minimizes the number of DW_CFA_advance opcodes more
21255 freedom in placing the annotations. */
21256 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
21257 if (note)
21258 remove_note (save_insn, note);
21259 else
21260 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
21261 copy_rtx (PATTERN (save_insn)), NULL_RTX);
21262 RTX_FRAME_RELATED_P (save_insn) = 0;
21264 join_insn = emit_insn (gen_blockage ());
21265 REG_NOTES (join_insn) = note;
21266 RTX_FRAME_RELATED_P (join_insn) = 1;
21268 if (using_static_chain_p)
21270 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
21271 END_USE (0);
21273 else
21274 END_USE (11);
21277 /* Save CR if we use any that must be preserved. */
21278 if (!WORLD_SAVE_P (info) && info->cr_save_p)
21280 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
21281 GEN_INT (info->cr_save_offset + frame_off));
21282 rtx mem = gen_frame_mem (SImode, addr);
21283 /* See the large comment above about why CR2_REGNO is used. */
21284 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
21286 /* If we didn't copy cr before, do so now using r0. */
21287 if (cr_save_rtx == NULL_RTX)
21289 rtx set;
21291 START_USE (0);
21292 cr_save_rtx = gen_rtx_REG (SImode, 0);
21293 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
21294 RTX_FRAME_RELATED_P (insn) = 1;
21295 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
21296 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
21298 insn = emit_move_insn (mem, cr_save_rtx);
21299 END_USE (REGNO (cr_save_rtx));
21301 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21302 NULL_RTX, NULL_RTX);
21305 /* Update stack and set back pointer unless this is V.4,
21306 for which it was done previously. */
21307 if (!WORLD_SAVE_P (info) && info->push_p
21308 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
21310 rtx ptr_reg = NULL;
21311 int ptr_off = 0;
21313 /* If saving altivec regs we need to be able to address all save
21314 locations using a 16-bit offset. */
21315 if ((strategy & SAVE_INLINE_VRS) == 0
21316 || (info->altivec_size != 0
21317 && (info->altivec_save_offset + info->altivec_size - 16
21318 + info->total_size - frame_off) > 32767)
21319 || (info->vrsave_size != 0
21320 && (info->vrsave_save_offset
21321 + info->total_size - frame_off) > 32767))
21323 int sel = SAVRES_SAVE | SAVRES_VR;
21324 unsigned ptr_regno = ptr_regno_for_savres (sel);
21326 if (using_static_chain_p
21327 && ptr_regno == STATIC_CHAIN_REGNUM)
21328 ptr_regno = 12;
21329 if (REGNO (frame_reg_rtx) != ptr_regno)
21330 START_USE (ptr_regno);
21331 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21332 frame_reg_rtx = ptr_reg;
21333 ptr_off = info->altivec_save_offset + info->altivec_size;
21334 frame_off = -ptr_off;
21336 else if (REGNO (frame_reg_rtx) == 1)
21337 frame_off = info->total_size;
21338 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
21339 sp_off = info->total_size;
21340 if (frame_reg_rtx != sp_reg_rtx)
21341 rs6000_emit_stack_tie (frame_reg_rtx, false);
21344 /* Set frame pointer, if needed. */
21345 if (frame_pointer_needed)
21347 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
21348 sp_reg_rtx);
21349 RTX_FRAME_RELATED_P (insn) = 1;
21352 /* Save AltiVec registers if needed. Save here because the red zone does
21353 not always include AltiVec registers. */
21354 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
21355 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
21357 int end_save = info->altivec_save_offset + info->altivec_size;
21358 int ptr_off;
21359 /* Oddly, the vector save/restore functions point r0 at the end
21360 of the save area, then use r11 or r12 to load offsets for
21361 [reg+reg] addressing. */
21362 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
21363 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
21364 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
21366 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
21367 NOT_INUSE (0);
21368 if (end_save + frame_off != 0)
21370 rtx offset = GEN_INT (end_save + frame_off);
21372 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21374 else
21375 emit_move_insn (ptr_reg, frame_reg_rtx);
21377 ptr_off = -end_save;
21378 insn = rs6000_emit_savres_rtx (info, scratch_reg,
21379 info->altivec_save_offset + ptr_off,
21380 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
21381 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
21382 NULL_RTX, NULL_RTX);
21383 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
21385 /* The oddity mentioned above clobbered our frame reg. */
21386 emit_move_insn (frame_reg_rtx, ptr_reg);
21387 frame_off = ptr_off;
21390 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
21391 && info->altivec_size != 0)
21393 int i;
21395 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21396 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
21398 rtx areg, savereg, mem;
21399 int offset;
21401 offset = (info->altivec_save_offset + frame_off
21402 + 16 * (i - info->first_altivec_reg_save));
21404 savereg = gen_rtx_REG (V4SImode, i);
21406 NOT_INUSE (0);
21407 areg = gen_rtx_REG (Pmode, 0);
21408 emit_move_insn (areg, GEN_INT (offset));
21410 /* AltiVec addressing mode is [reg+reg]. */
21411 mem = gen_frame_mem (V4SImode,
21412 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
21414 insn = emit_move_insn (mem, savereg);
21416 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
21417 areg, GEN_INT (offset));
21421 /* VRSAVE is a bit vector representing which AltiVec registers
21422 are used. The OS uses this to determine which vector
21423 registers to save on a context switch. We need to save
21424 VRSAVE on the stack frame, add whatever AltiVec registers we
21425 used in this function, and do the corresponding magic in the
21426 epilogue. */
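/* For instance (editor's illustration): a function using only v20 and
   v31 has just those two ALTIVEC_REG_BIT bits set in
   info->vrsave_mask; the code below saves the old VRSAVE value, ORs
   the mask into it, and moves the result back to VRSAVE.  */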
21428 if (!WORLD_SAVE_P (info)
21429 && TARGET_ALTIVEC
21430 && TARGET_ALTIVEC_VRSAVE
21431 && info->vrsave_mask != 0)
21433 rtx reg, vrsave;
21434 int offset;
21435 int save_regno;
21437 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
21438 be using r12 as frame_reg_rtx and r11 as the static chain
21439 pointer for nested functions. */
21440 save_regno = 12;
21441 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
21442 save_regno = 11;
21443 else if (REGNO (frame_reg_rtx) == 12)
21445 save_regno = 11;
21446 if (using_static_chain_p)
21447 save_regno = 0;
21450 NOT_INUSE (save_regno);
21451 reg = gen_rtx_REG (SImode, save_regno);
21452 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
21453 if (TARGET_MACHO)
21454 emit_insn (gen_get_vrsave_internal (reg));
21455 else
21456 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
21458 /* Save VRSAVE. */
21459 offset = info->vrsave_save_offset + frame_off;
21460 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
21462 /* Include the registers in the mask. */
21463 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
21465 insn = emit_insn (generate_set_vrsave (reg, info, 0));
21468 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
21469 if (!TARGET_SINGLE_PIC_BASE
21470 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
21471 || (DEFAULT_ABI == ABI_V4
21472 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
21473 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
21475 /* If emit_load_toc_table will use the link register, we need to save
21476 it. We use R12 for this purpose because emit_load_toc_table
21477 can use register 0. This allows us to use a plain 'blr' to return
21478 from the procedure more often. */
21479 int save_LR_around_toc_setup = (TARGET_ELF
21480 && DEFAULT_ABI != ABI_AIX
21481 && flag_pic
21482 && ! info->lr_save_p
21483 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
21484 if (save_LR_around_toc_setup)
21486 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
21487 rtx tmp = gen_rtx_REG (Pmode, 12);
21489 insn = emit_move_insn (tmp, lr);
21490 RTX_FRAME_RELATED_P (insn) = 1;
21492 rs6000_emit_load_toc_table (TRUE);
21494 insn = emit_move_insn (lr, tmp);
21495 add_reg_note (insn, REG_CFA_RESTORE, lr);
21496 RTX_FRAME_RELATED_P (insn) = 1;
21498 else
21499 rs6000_emit_load_toc_table (TRUE);
21502 #if TARGET_MACHO
21503 if (!TARGET_SINGLE_PIC_BASE
21504 && DEFAULT_ABI == ABI_DARWIN
21505 && flag_pic && crtl->uses_pic_offset_table)
21507 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
21508 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
21510 /* Save and restore LR locally around this call (in R0). */
21511 if (!info->lr_save_p)
21512 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
21514 emit_insn (gen_load_macho_picbase (src));
21516 emit_move_insn (gen_rtx_REG (Pmode,
21517 RS6000_PIC_OFFSET_TABLE_REGNUM),
21518 lr);
21520 if (!info->lr_save_p)
21521 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
21523 #endif
21525 /* If we need to, save the TOC register after doing the stack setup.
21526 Do not emit eh frame info for this save. The unwinder wants info,
21527 conceptually attached to instructions in this function, about
21528 register values in the caller of this function. This R2 may have
21529 already been changed from the value in the caller.
21530 We don't attempt to write accurate DWARF EH frame info for R2
21531 because code emitted by gcc for a (non-pointer) function call
21532 doesn't save and restore R2. Instead, R2 is managed out-of-line
21533 by a linker generated plt call stub when the function resides in
21534 a shared library. This behaviour is costly to describe in DWARF,
21535 both in terms of the size of DWARF info and the time taken in the
21536 unwinder to interpret it. R2 changes, apart from the
21537 calls_eh_return case earlier in this function, are handled by
21538 linux-unwind.h frob_update_context. */
21539 if (rs6000_save_toc_in_prologue_p ())
21541 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
21542 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
21546 /* Write function prologue. */
21548 static void
21549 rs6000_output_function_prologue (FILE *file,
21550 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21552 rs6000_stack_t *info = rs6000_stack_info ();
21554 if (TARGET_DEBUG_STACK)
21555 debug_stack_info (info);
21557 /* Write .extern for any function we will call to save and restore
21558 fp values. */
21559 if (info->first_fp_reg_save < 64
21560 && !TARGET_MACHO
21561 && !TARGET_ELF)
21563 char *name;
21564 int regno = info->first_fp_reg_save - 32;
21566 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
21568 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
21569 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
21570 name = rs6000_savres_routine_name (info, regno, sel);
21571 fprintf (file, "\t.extern %s\n", name);
21573 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
21575 bool lr = (info->savres_strategy
21576 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21577 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21578 name = rs6000_savres_routine_name (info, regno, sel);
21579 fprintf (file, "\t.extern %s\n", name);
21583 rs6000_pic_labelno++;
21586 /* Non-zero if VMX regs are restored before the frame pop, zero if
21587 we restore after the pop when possible. */
21588 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
21590 /* Restoring cr is a two step process: loading a reg from the frame
21591 save, then moving the reg to cr. For ABI_V4 we must let the
21592 unwinder know that the stack location is no longer valid at or
21593 before the stack deallocation, but we can't emit a cfa_restore for
21594 cr at the stack deallocation like we do for other registers.
21595 The trouble is that it is possible for the move to cr to be
21596 scheduled after the stack deallocation. So say exactly where cr
21597 is located on each of the two insns. */
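/* Concretely (editor's note): the load emitted here carries a
   REG_CFA_REGISTER note saying CR now lives in the GPR, and the later
   move to CR in restore_saved_cr carries the REG_CFA_RESTORE, so the
   unwind info stays correct however the scheduler orders the two
   insns around the stack deallocation.  */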
21599 static rtx
21600 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
21602 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
21603 rtx reg = gen_rtx_REG (SImode, regno);
21604 rtx insn = emit_move_insn (reg, mem);
21606 if (!exit_func && DEFAULT_ABI == ABI_V4)
21608 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
21609 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
21611 add_reg_note (insn, REG_CFA_REGISTER, set);
21612 RTX_FRAME_RELATED_P (insn) = 1;
21614 return reg;
21617 /* Reload CR from REG. */
21619 static void
21620 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
21622 int count = 0;
21623 int i;
21625 if (using_mfcr_multiple)
21627 for (i = 0; i < 8; i++)
21628 if (save_reg_p (CR0_REGNO + i))
21629 count++;
21630 gcc_assert (count);
21633 if (using_mfcr_multiple && count > 1)
21635 rtvec p;
21636 int ndx;
21638 p = rtvec_alloc (count);
21640 ndx = 0;
21641 for (i = 0; i < 8; i++)
21642 if (save_reg_p (CR0_REGNO + i))
21644 rtvec r = rtvec_alloc (2);
21645 RTVEC_ELT (r, 0) = reg;
21646 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
21647 RTVEC_ELT (p, ndx) =
21648 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
21649 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
21650 ndx++;
21652 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21653 gcc_assert (ndx == count);
21655 else
21656 for (i = 0; i < 8; i++)
21657 if (save_reg_p (CR0_REGNO + i))
21658 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
21659 reg));
21661 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
21663 rtx insn = get_last_insn ();
21664 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
21666 add_reg_note (insn, REG_CFA_RESTORE, cr);
21667 RTX_FRAME_RELATED_P (insn) = 1;
21671 /* Like cr, the move to lr instruction can be scheduled after the
21672 stack deallocation, but unlike cr, its stack frame save is still
21673 valid. So we only need to emit the cfa_restore on the correct
21674 instruction. */
21676 static void
21677 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
21679 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
21680 rtx reg = gen_rtx_REG (Pmode, regno);
21682 emit_move_insn (reg, mem);
21685 static void
21686 restore_saved_lr (int regno, bool exit_func)
21688 rtx reg = gen_rtx_REG (Pmode, regno);
21689 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
21690 rtx insn = emit_move_insn (lr, reg);
21692 if (!exit_func && flag_shrink_wrap)
21694 add_reg_note (insn, REG_CFA_RESTORE, lr);
21695 RTX_FRAME_RELATED_P (insn) = 1;
21699 static rtx
21700 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
21702 if (info->cr_save_p)
21703 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
21704 gen_rtx_REG (SImode, CR2_REGNO),
21705 cfa_restores);
21706 if (info->lr_save_p)
21707 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
21708 gen_rtx_REG (Pmode, LR_REGNO),
21709 cfa_restores);
21710 return cfa_restores;
21713 /* Return true if OFFSET from stack pointer can be clobbered by signals.
21714 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
21715 below the stack pointer that are not clobbered by signals. */
21717 static inline bool
21718 offset_below_red_zone_p (HOST_WIDE_INT offset)
21720 return offset < (DEFAULT_ABI == ABI_V4
21721 ? 0
21722 : TARGET_32BIT ? -220 : -288);
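/* Example (editor's note): under the 64-bit AIX-style ABI the 288
   bytes just below the stack pointer form the red zone, so this
   returns false for an offset of -200 but true for -300.  */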
21725 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
21727 static void
21728 emit_cfa_restores (rtx cfa_restores)
21730 rtx insn = get_last_insn ();
21731 rtx *loc = &REG_NOTES (insn);
21733 while (*loc)
21734 loc = &XEXP (*loc, 1);
21735 *loc = cfa_restores;
21736 RTX_FRAME_RELATED_P (insn) = 1;
21739 /* Emit function epilogue as insns. */
21741 void
21742 rs6000_emit_epilogue (int sibcall)
21744 rs6000_stack_t *info;
21745 int restoring_GPRs_inline;
21746 int restoring_FPRs_inline;
21747 int using_load_multiple;
21748 int using_mtcr_multiple;
21749 int use_backchain_to_restore_sp;
21750 int restore_lr;
21751 int strategy;
21752 HOST_WIDE_INT frame_off = 0;
21753 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
21754 rtx frame_reg_rtx = sp_reg_rtx;
21755 rtx cfa_restores = NULL_RTX;
21756 rtx insn;
21757 rtx cr_save_reg = NULL_RTX;
21758 enum machine_mode reg_mode = Pmode;
21759 int reg_size = TARGET_32BIT ? 4 : 8;
21760 int i;
21761 bool exit_func;
21762 unsigned ptr_regno;
21764 info = rs6000_stack_info ();
21766 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
21768 reg_mode = V2SImode;
21769 reg_size = 8;
21772 strategy = info->savres_strategy;
21773 using_load_multiple = strategy & SAVRES_MULTIPLE;
21774 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
21775 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
21776 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
21777 || rs6000_cpu == PROCESSOR_PPC603
21778 || rs6000_cpu == PROCESSOR_PPC750
21779 || optimize_size);
21780 /* Restore via the backchain when we have a large frame, since this
21781 is more efficient than an addis, addi pair. The second condition
21782 here will not trigger at the moment; we don't actually need a
21783 frame pointer for alloca, but the generic parts of the compiler
21784 give us one anyway. */
21785 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
21786 || (cfun->calls_alloca
21787 && !frame_pointer_needed));
21788 restore_lr = (info->lr_save_p
21789 && (restoring_FPRs_inline
21790 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
21791 && (restoring_GPRs_inline
21792 || info->first_fp_reg_save < 64));
21794 if (WORLD_SAVE_P (info))
21796 int i, j;
21797 char rname[30];
21798 const char *alloc_rname;
21799 rtvec p;
21801 /* eh_rest_world_r10 will return to the location saved in the LR
21802 stack slot (which is not likely to be our caller).
21803 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
21804 rest_world is similar, except any R10 parameter is ignored.
21805 The exception-handling stuff that was here in 2.95 is no
21806 longer necessary. */
21808 p = rtvec_alloc (9
21810 + 32 - info->first_gp_reg_save
21811 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
21812 + 63 + 1 - info->first_fp_reg_save);
21814 strcpy (rname, ((crtl->calls_eh_return) ?
21815 "*eh_rest_world_r10" : "*rest_world"));
21816 alloc_rname = ggc_strdup (rname);
21818 j = 0;
21819 RTVEC_ELT (p, j++) = ret_rtx;
21820 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
21821 gen_rtx_REG (Pmode,
21822 LR_REGNO));
21823 RTVEC_ELT (p, j++)
21824 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
21825 /* The instruction pattern requires a clobber here;
21826 it is shared with the restVEC helper. */
21827 RTVEC_ELT (p, j++)
21828 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
21831 /* CR register traditionally saved as CR2. */
21832 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
21833 RTVEC_ELT (p, j++)
21834 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
21835 if (flag_shrink_wrap)
21837 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
21838 gen_rtx_REG (Pmode, LR_REGNO),
21839 cfa_restores);
21840 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21844 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21846 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21847 RTVEC_ELT (p, j++)
21848 = gen_frame_load (reg,
21849 frame_reg_rtx, info->gp_save_offset + reg_size * i);
21850 if (flag_shrink_wrap)
21851 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21853 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
21855 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
21856 RTVEC_ELT (p, j++)
21857 = gen_frame_load (reg,
21858 frame_reg_rtx, info->altivec_save_offset + 16 * i);
21859 if (flag_shrink_wrap)
21860 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21862 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
21864 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21865 ? DFmode : SFmode),
21866 info->first_fp_reg_save + i);
21867 RTVEC_ELT (p, j++)
21868 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
21869 if (flag_shrink_wrap)
21870 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21872 RTVEC_ELT (p, j++)
21873 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
21874 RTVEC_ELT (p, j++)
21875 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
21876 RTVEC_ELT (p, j++)
21877 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
21878 RTVEC_ELT (p, j++)
21879 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
21880 RTVEC_ELT (p, j++)
21881 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
21882 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21884 if (flag_shrink_wrap)
21886 REG_NOTES (insn) = cfa_restores;
21887 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21888 RTX_FRAME_RELATED_P (insn) = 1;
21890 return;
21893 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
21894 if (info->push_p)
21895 frame_off = info->total_size;
21897 /* Restore AltiVec registers if we must do so before adjusting the
21898 stack. */
21899 if (TARGET_ALTIVEC_ABI
21900 && info->altivec_size != 0
21901 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21902 || (DEFAULT_ABI != ABI_V4
21903 && offset_below_red_zone_p (info->altivec_save_offset))))
21905 int i;
21906 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
21908 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
21909 if (use_backchain_to_restore_sp)
21911 int frame_regno = 11;
21913 if ((strategy & REST_INLINE_VRS) == 0)
21915 /* Of r11 and r12, select the one not clobbered by an
21916 out-of-line restore function for the frame register. */
21917 frame_regno = 11 + 12 - scratch_regno;
21919 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
21920 emit_move_insn (frame_reg_rtx,
21921 gen_rtx_MEM (Pmode, sp_reg_rtx));
21922 frame_off = 0;
21924 else if (frame_pointer_needed)
21925 frame_reg_rtx = hard_frame_pointer_rtx;
21927 if ((strategy & REST_INLINE_VRS) == 0)
21929 int end_save = info->altivec_save_offset + info->altivec_size;
21930 int ptr_off;
21931 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
21932 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
21934 if (end_save + frame_off != 0)
21936 rtx offset = GEN_INT (end_save + frame_off);
21938 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21940 else
21941 emit_move_insn (ptr_reg, frame_reg_rtx);
21943 ptr_off = -end_save;
21944 insn = rs6000_emit_savres_rtx (info, scratch_reg,
21945 info->altivec_save_offset + ptr_off,
21946 0, V4SImode, SAVRES_VR);
21948 else
21950 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21951 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
21953 rtx addr, areg, mem, reg;
21955 areg = gen_rtx_REG (Pmode, 0);
21956 emit_move_insn
21957 (areg, GEN_INT (info->altivec_save_offset
21958 + frame_off
21959 + 16 * (i - info->first_altivec_reg_save)));
21961 /* AltiVec addressing mode is [reg+reg]. */
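/* E.g. the load built here comes out roughly as "lvx v20,r11,r0",
   with the byte offset materialized in r0 and the frame register as
   base; lvx has no reg+offset form.  */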
21962 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
21963 mem = gen_frame_mem (V4SImode, addr);
21965 reg = gen_rtx_REG (V4SImode, i);
21966 emit_move_insn (reg, mem);
21970 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21971 if (((strategy & REST_INLINE_VRS) == 0
21972 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
21973 && (flag_shrink_wrap
21974 || (offset_below_red_zone_p
21975 (info->altivec_save_offset
21976 + 16 * (i - info->first_altivec_reg_save)))))
21978 rtx reg = gen_rtx_REG (V4SImode, i);
21979 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21983 /* Restore VRSAVE if we must do so before adjusting the stack. */
21984 if (TARGET_ALTIVEC
21985 && TARGET_ALTIVEC_VRSAVE
21986 && info->vrsave_mask != 0
21987 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21988 || (DEFAULT_ABI != ABI_V4
21989 && offset_below_red_zone_p (info->vrsave_save_offset))))
21991 rtx reg;
21993 if (frame_reg_rtx == sp_reg_rtx)
21995 if (use_backchain_to_restore_sp)
21997 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21998 emit_move_insn (frame_reg_rtx,
21999 gen_rtx_MEM (Pmode, sp_reg_rtx));
22000 frame_off = 0;
22002 else if (frame_pointer_needed)
22003 frame_reg_rtx = hard_frame_pointer_rtx;
22006 reg = gen_rtx_REG (SImode, 12);
22007 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22008 info->vrsave_save_offset + frame_off));
22010 emit_insn (generate_set_vrsave (reg, info, 1));
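/* generate_set_vrsave ultimately moves r12 into the VRSAVE SPR
   (SPR 256), i.e. roughly "mtspr 256,r12" (mtvrsave).  */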
22013 insn = NULL_RTX;
22014 /* If we have a large stack frame, restore the old stack pointer
22015 using the backchain. */
22016 if (use_backchain_to_restore_sp)
22018 if (frame_reg_rtx == sp_reg_rtx)
22020 /* Under V.4, don't reset the stack pointer until after we're done
22021 loading the saved registers. */
22022 if (DEFAULT_ABI == ABI_V4)
22023 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22025 insn = emit_move_insn (frame_reg_rtx,
22026 gen_rtx_MEM (Pmode, sp_reg_rtx));
22027 frame_off = 0;
22029 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22030 && DEFAULT_ABI == ABI_V4)
22031 /* frame_reg_rtx has been set up by the altivec restore. */
22032 ;
22033 else
22035 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
22036 frame_reg_rtx = sp_reg_rtx;
22039 /* If we have a frame pointer, we can restore the old stack pointer
22040 from it. */
22041 else if (frame_pointer_needed)
22043 frame_reg_rtx = sp_reg_rtx;
22044 if (DEFAULT_ABI == ABI_V4)
22045 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22046 /* Prevent reordering memory accesses against stack pointer restore. */
22047 else if (cfun->calls_alloca
22048 || offset_below_red_zone_p (-info->total_size))
22049 rs6000_emit_stack_tie (frame_reg_rtx, true);
22051 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
22052 GEN_INT (info->total_size)));
22053 frame_off = 0;
22055 else if (info->push_p
22056 && DEFAULT_ABI != ABI_V4
22057 && !crtl->calls_eh_return)
22059 /* Prevent reordering memory accesses against stack pointer restore. */
22060 if (cfun->calls_alloca
22061 || offset_below_red_zone_p (-info->total_size))
22062 rs6000_emit_stack_tie (frame_reg_rtx, false);
22063 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
22064 GEN_INT (info->total_size)));
22065 frame_off = 0;
22067 if (insn && frame_reg_rtx == sp_reg_rtx)
22069 if (cfa_restores)
22071 REG_NOTES (insn) = cfa_restores;
22072 cfa_restores = NULL_RTX;
22074 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
22075 RTX_FRAME_RELATED_P (insn) = 1;
22078 /* Restore AltiVec registers if we have not done so already. */
22079 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22080 && TARGET_ALTIVEC_ABI
22081 && info->altivec_size != 0
22082 && (DEFAULT_ABI == ABI_V4
22083 || !offset_below_red_zone_p (info->altivec_save_offset)))
22085 int i;
22087 if ((strategy & REST_INLINE_VRS) == 0)
22089 int end_save = info->altivec_save_offset + info->altivec_size;
22090 int ptr_off;
22091 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
22092 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
22093 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
22095 if (end_save + frame_off != 0)
22097 rtx offset = GEN_INT (end_save + frame_off);
22099 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
22101 else
22102 emit_move_insn (ptr_reg, frame_reg_rtx);
22104 ptr_off = -end_save;
22105 insn = rs6000_emit_savres_rtx (info, scratch_reg,
22106 info->altivec_save_offset + ptr_off,
22107 0, V4SImode, SAVRES_VR);
22108 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
22110 /* Frame reg was clobbered by out-of-line save. Restore it
22111 from ptr_reg, and if we are calling an out-of-line gpr or
22112 fpr restore, set up the correct pointer and offset. */
22113 unsigned newptr_regno = 1;
22114 if (!restoring_GPRs_inline)
22116 bool lr = info->gp_save_offset + info->gp_size == 0;
22117 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
22118 newptr_regno = ptr_regno_for_savres (sel);
22119 end_save = info->gp_save_offset + info->gp_size;
22121 else if (!restoring_FPRs_inline)
22123 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
22124 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
22125 newptr_regno = ptr_regno_for_savres (sel);
22126 end_save = info->fp_save_offset + info->fp_size;
22129 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
22130 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
22132 if (end_save + ptr_off != 0)
22134 rtx offset = GEN_INT (end_save + ptr_off);
22136 frame_off = -end_save;
22137 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
22139 else
22141 frame_off = ptr_off;
22142 emit_move_insn (frame_reg_rtx, ptr_reg);
22146 else
22148 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
22149 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22151 rtx addr, areg, mem, reg;
22153 areg = gen_rtx_REG (Pmode, 0);
22154 emit_move_insn
22155 (areg, GEN_INT (info->altivec_save_offset
22156 + frame_off
22157 + 16 * (i - info->first_altivec_reg_save)));
22159 /* AltiVec addressing mode is [reg+reg]. */
22160 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
22161 mem = gen_frame_mem (V4SImode, addr);
22163 reg = gen_rtx_REG (V4SImode, i);
22164 emit_move_insn (reg, mem);
22168 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
22169 if (((strategy & REST_INLINE_VRS) == 0
22170 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
22171 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
22173 rtx reg = gen_rtx_REG (V4SImode, i);
22174 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22178 /* Restore VRSAVE if we have not done so already. */
22179 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
22180 && TARGET_ALTIVEC
22181 && TARGET_ALTIVEC_VRSAVE
22182 && info->vrsave_mask != 0
22183 && (DEFAULT_ABI == ABI_V4
22184 || !offset_below_red_zone_p (info->vrsave_save_offset)))
22186 rtx reg;
22188 reg = gen_rtx_REG (SImode, 12);
22189 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22190 info->vrsave_save_offset + frame_off));
22192 emit_insn (generate_set_vrsave (reg, info, 1));
22195 /* If we exit by an out-of-line restore function on ABI_V4 then that
22196 function will deallocate the stack, so we don't need to worry
22197 about the unwinder restoring cr from an invalid stack frame
22198 location. */
22199 exit_func = (!restoring_FPRs_inline
22200 || (!restoring_GPRs_inline
22201 && info->first_fp_reg_save == 64));
22203 /* Get the old lr if we saved it. If we are restoring registers
22204 out-of-line, then the out-of-line routines can do this for us. */
22205 if (restore_lr && restoring_GPRs_inline)
22206 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
22208 /* Get the old cr if we saved it. */
22209 if (info->cr_save_p)
22211 unsigned cr_save_regno = 12;
22213 if (!restoring_GPRs_inline)
22215 /* Ensure we don't use the register used by the out-of-line
22216 gpr register restore below. */
22217 bool lr = info->gp_save_offset + info->gp_size == 0;
22218 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
22219 int gpr_ptr_regno = ptr_regno_for_savres (sel);
22221 if (gpr_ptr_regno == 12)
22222 cr_save_regno = 11;
22223 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
22225 else if (REGNO (frame_reg_rtx) == 12)
22226 cr_save_regno = 11;
22228 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
22229 info->cr_save_offset + frame_off,
22230 exit_func);
22233 /* Set LR here to try to overlap restores below. */
22234 if (restore_lr && restoring_GPRs_inline)
22235 restore_saved_lr (0, exit_func);
22237 /* Load exception handler data registers, if needed. */
22238 if (crtl->calls_eh_return)
22240 unsigned int i, regno;
22242 if (TARGET_AIX)
22244 rtx reg = gen_rtx_REG (reg_mode, 2);
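/* The load below reloads r2, the TOC pointer, from the standard
   AIX linkage-area TOC slot: frame top + 20 on 32-bit, + 40 on
   64-bit (5 * reg_size).  */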
22245 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22246 frame_off + 5 * reg_size));
22249 for (i = 0; ; ++i)
22251 rtx mem;
22253 regno = EH_RETURN_DATA_REGNO (i);
22254 if (regno == INVALID_REGNUM)
22255 break;
22257 /* Note: possible use of r0 here to address SPE regs. */
22258 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
22259 info->ehrd_offset + frame_off
22260 + reg_size * (int) i);
22262 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
22266 /* Restore GPRs. This is done as a PARALLEL if we are using
22267 the load-multiple instructions. */
22268 if (TARGET_SPE_ABI
22269 && info->spe_64bit_regs_used
22270 && info->first_gp_reg_save != 32)
22272 /* Determine whether we can address all of the registers that need
22273 to be saved with an offset from frame_reg_rtx that fits in
22274 the small const field for SPE memory instructions. */
22275 int spe_regs_addressable
22276 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
22277 + reg_size * (32 - info->first_gp_reg_save - 1))
22278 && restoring_GPRs_inline);
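/* Assumed encoding detail: SPE_CONST_OFFSET_OK used above checks
   the 5-bit offset field of evldd/evstdd, which is scaled by 8, so
   only offsets 0..248 (multiples of 8) encode directly.  */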
22280 if (!spe_regs_addressable)
22282 int ool_adjust = 0;
22283 rtx old_frame_reg_rtx = frame_reg_rtx;
22284 /* Make r11 point to the start of the SPE save area. We worried about
22285 not clobbering it when we were saving registers in the prologue.
22286 There's no need to worry here because the static chain is passed
22287 anew to every function. */
22289 if (!restoring_GPRs_inline)
22290 ool_adjust = 8 * (info->first_gp_reg_save
22291 - (FIRST_SAVRES_REGISTER + 1));
22292 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
22293 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
22294 GEN_INT (info->spe_gp_save_offset
22295 + frame_off
22296 - ool_adjust)));
22297 /* Keep the invariant that frame_reg_rtx + frame_off points
22298 at the top of the stack frame. */
22299 frame_off = -info->spe_gp_save_offset + ool_adjust;
22302 if (restoring_GPRs_inline)
22304 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
22306 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22307 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
22309 rtx offset, addr, mem, reg;
22311 /* We're doing all this to ensure that the immediate offset
22312 fits into the immediate field of 'evldd'. */
22313 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
22315 offset = GEN_INT (spe_offset + reg_size * i);
22316 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
22317 mem = gen_rtx_MEM (V2SImode, addr);
22318 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
22320 emit_move_insn (reg, mem);
22323 else
22324 rs6000_emit_savres_rtx (info, frame_reg_rtx,
22325 info->spe_gp_save_offset + frame_off,
22326 info->lr_save_offset + frame_off,
22327 reg_mode,
22328 SAVRES_GPR | SAVRES_LR);
22330 else if (!restoring_GPRs_inline)
22332 /* We are jumping to an out-of-line function. */
22333 rtx ptr_reg;
22334 int end_save = info->gp_save_offset + info->gp_size;
22335 bool can_use_exit = end_save == 0;
22336 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
22337 int ptr_off;
22339 /* Emit stack reset code if we need it. */
22340 ptr_regno = ptr_regno_for_savres (sel);
22341 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
22342 if (can_use_exit)
22343 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
22344 else if (end_save + frame_off != 0)
22345 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
22346 GEN_INT (end_save + frame_off)));
22347 else if (REGNO (frame_reg_rtx) != ptr_regno)
22348 emit_move_insn (ptr_reg, frame_reg_rtx);
22349 if (REGNO (frame_reg_rtx) == ptr_regno)
22350 frame_off = -end_save;
22352 if (can_use_exit && info->cr_save_p)
22353 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
22355 ptr_off = -end_save;
22356 rs6000_emit_savres_rtx (info, ptr_reg,
22357 info->gp_save_offset + ptr_off,
22358 info->lr_save_offset + ptr_off,
22359 reg_mode, sel);
22361 else if (using_load_multiple)
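/* The PARALLEL of loads built here is meant to match the
   load-multiple pattern and become a single instruction, roughly
	lmw rN,OFF(r11)
   restoring rN..r31 at once (32-bit only).  */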
22363 rtvec p;
22364 p = rtvec_alloc (32 - info->first_gp_reg_save);
22365 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22366 RTVEC_ELT (p, i)
22367 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
22368 frame_reg_rtx,
22369 info->gp_save_offset + frame_off + reg_size * i);
22370 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22372 else
22374 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22375 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
22376 emit_insn (gen_frame_load
22377 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
22378 frame_reg_rtx,
22379 info->gp_save_offset + frame_off + reg_size * i));
22382 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
22384 /* If the frame pointer was used then we can't delay emitting
22385 a REG_CFA_DEF_CFA note. This must happen on the insn that
22386 restores the frame pointer, r31. We may have already emitted
22387 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
22388 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
22389 be harmless if emitted. */
22390 if (frame_pointer_needed)
22392 insn = get_last_insn ();
22393 add_reg_note (insn, REG_CFA_DEF_CFA,
22394 plus_constant (Pmode, frame_reg_rtx, frame_off));
22395 RTX_FRAME_RELATED_P (insn) = 1;
22398 /* Set up cfa_restores. We always need these when
22399 shrink-wrapping. If not shrink-wrapping then we only need
22400 the cfa_restore when the stack location is no longer valid.
22401 The cfa_restores must be emitted on or before the insn that
22402 invalidates the stack, and of course must not be emitted
22403 before the insn that actually does the restore. The latter
22404 is why it is a bad idea to emit the cfa_restores as a group
22405 on the last instruction here that actually does a restore:
22406 That insn may be reordered with respect to others doing
22407 restores. */
22408 if (flag_shrink_wrap
22409 && !restoring_GPRs_inline
22410 && info->first_fp_reg_save == 64)
22411 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
22413 for (i = info->first_gp_reg_save; i < 32; i++)
22414 if (!restoring_GPRs_inline
22415 || using_load_multiple
22416 || rs6000_reg_live_or_pic_offset_p (i))
22418 rtx reg = gen_rtx_REG (reg_mode, i);
22420 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22424 if (!restoring_GPRs_inline
22425 && info->first_fp_reg_save == 64)
22427 /* We are jumping to an out-of-line function. */
22428 if (cfa_restores)
22429 emit_cfa_restores (cfa_restores);
22430 return;
22433 if (restore_lr && !restoring_GPRs_inline)
22435 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
22436 restore_saved_lr (0, exit_func);
22439 /* Restore FPRs if we need to do it without calling a function. */
22440 if (restoring_FPRs_inline)
22441 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
22442 if (save_reg_p (info->first_fp_reg_save + i))
22444 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
22445 ? DFmode : SFmode),
22446 info->first_fp_reg_save + i);
22447 emit_insn (gen_frame_load (reg, frame_reg_rtx,
22448 info->fp_save_offset + frame_off + 8 * i));
22449 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
22450 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
22453 /* If we saved cr, restore it here. Just those that were used. */
22454 if (info->cr_save_p)
22455 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
22457 /* If this is V.4, unwind the stack pointer after all of the loads
22458 have been done, or set up r11 if we are restoring fp out of line. */
22459 ptr_regno = 1;
22460 if (!restoring_FPRs_inline)
22462 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
22463 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
22464 ptr_regno = ptr_regno_for_savres (sel);
22467 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
22468 if (REGNO (frame_reg_rtx) == ptr_regno)
22469 frame_off = 0;
22471 if (insn && restoring_FPRs_inline)
22473 if (cfa_restores)
22475 REG_NOTES (insn) = cfa_restores;
22476 cfa_restores = NULL_RTX;
22478 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
22479 RTX_FRAME_RELATED_P (insn) = 1;
22482 if (crtl->calls_eh_return)
22484 rtx sa = EH_RETURN_STACKADJ_RTX;
22485 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
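/* EH_RETURN_STACKADJ_RTX is the extra stack adjustment computed by
   __builtin_eh_return; adding it unwinds the stack the rest of the
   way to the exception handler's frame before we return.  */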
22488 if (!sibcall)
22490 rtvec p;
22491 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
22492 if (! restoring_FPRs_inline)
22494 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
22495 RTVEC_ELT (p, 0) = ret_rtx;
22497 else
22499 if (cfa_restores)
22501 /* We can't hang the cfa_restores off a simple return,
22502 since the shrink-wrap code sometimes uses an existing
22503 return. This means there might be a path from
22504 pre-prologue code to this return, and dwarf2cfi code
22505 wants the eh_frame unwinder state to be the same on
22506 all paths to any point. So we need to emit the
22507 cfa_restores before the return. For -m64 we really
22508 don't need epilogue cfa_restores at all, except for
22509 this irritating dwarf2cfi requirement when
22510 shrink-wrapping; the stack red-zone means eh_frame info
22511 from the prologue telling the unwinder to restore
22512 from the stack is perfectly good right to the end of
22513 the function. */
22514 emit_insn (gen_blockage ());
22515 emit_cfa_restores (cfa_restores);
22516 cfa_restores = NULL_RTX;
22518 p = rtvec_alloc (2);
22519 RTVEC_ELT (p, 0) = simple_return_rtx;
22522 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
22523 ? gen_rtx_USE (VOIDmode,
22524 gen_rtx_REG (Pmode, LR_REGNO))
22525 : gen_rtx_CLOBBER (VOIDmode,
22526 gen_rtx_REG (Pmode, LR_REGNO)));
22528 /* If we have to restore more than two FP registers, branch to the
22529 restore function. It will return to our caller. */
22530 if (! restoring_FPRs_inline)
22532 int i;
22533 rtx sym;
22535 if (flag_shrink_wrap)
22536 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
22538 sym = rs6000_savres_routine_sym (info,
22539 SAVRES_FPR | (lr ? SAVRES_LR : 0));
22540 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
22541 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
22542 gen_rtx_REG (Pmode,
22543 DEFAULT_ABI == ABI_AIX
22544 ? 1 : 11));
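/* The out-of-line restore routines address the FPR save area from
   r1 on AIX and from r11 on V.4/eabi; the USE above keeps that
   register live across the jump.  On ELF these are the _restfpr_*
   family in libgcc (exact names vary with the ABI and with whether
   the routine also restores LR and returns).  */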
22545 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
22547 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
22549 RTVEC_ELT (p, i + 4)
22550 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
22551 if (flag_shrink_wrap)
22552 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
22553 cfa_restores);
22557 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
22560 if (cfa_restores)
22562 if (sibcall)
22563 /* Ensure the cfa_restores are hung off an insn that won't
22564 be reordered above other restores. */
22565 emit_insn (gen_blockage ());
22567 emit_cfa_restores (cfa_restores);
22571 /* Write function epilogue. */
22573 static void
22574 rs6000_output_function_epilogue (FILE *file,
22575 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
22577 #if TARGET_MACHO
22578 macho_branch_islands ();
22579 /* Mach-O doesn't support labels at the end of objects, so if
22580 it looks like we might want one, insert a NOP. */
22582 rtx insn = get_last_insn ();
22583 rtx deleted_debug_label = NULL_RTX;
22584 while (insn
22585 && NOTE_P (insn)
22586 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
22588 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
22589 a nop; instead set their CODE_LABEL_NUMBER to -1, otherwise
22590 there would be code generation differences between
22591 -g and -g0. */
22592 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
22593 deleted_debug_label = insn;
22594 insn = PREV_INSN (insn);
22596 if (insn
22597 && (LABEL_P (insn)
22598 || (NOTE_P (insn)
22599 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
22600 fputs ("\tnop\n", file);
22601 else if (deleted_debug_label)
22602 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
22603 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
22604 CODE_LABEL_NUMBER (insn) = -1;
22606 #endif
22608 /* Output a traceback table here. See /usr/include/sys/debug.h for info
22609 on its format.
22611 We don't output a traceback table if -finhibit-size-directive was
22612 used. The documentation for -finhibit-size-directive reads
22613 ``don't output a @code{.size} assembler directive, or anything
22614 else that would cause trouble if the function is split in the
22615 middle, and the two halves are placed at locations far apart in
22616 memory.'' The traceback table has this property, since it
22617 includes the offset from the start of the function to the
22618 traceback table itself.
22620 System V.4 PowerPC (and the embedded ABI derived from it) uses a
22621 different traceback table. */
22622 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
22623 && rs6000_traceback != traceback_none && !cfun->is_thunk)
22625 const char *fname = NULL;
22626 const char *language_string = lang_hooks.name;
22627 int fixed_parms = 0, float_parms = 0, parm_info = 0;
22628 int i;
22629 int optional_tbtab;
22630 rs6000_stack_t *info = rs6000_stack_info ();
22632 if (rs6000_traceback == traceback_full)
22633 optional_tbtab = 1;
22634 else if (rs6000_traceback == traceback_part)
22635 optional_tbtab = 0;
22636 else
22637 optional_tbtab = !optimize_size && !TARGET_ELF;
22639 if (optional_tbtab)
22641 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
22642 while (*fname == '.') /* V.4 encodes . in the name */
22643 fname++;
22645 /* Need label immediately before tbtab, so we can compute
22646 its offset from the function start. */
22647 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
22648 ASM_OUTPUT_LABEL (file, fname);
22651 /* The .tbtab pseudo-op can only be used for the first eight
22652 expressions, since it can't handle the possibly variable
22653 length fields that follow. However, if you omit the optional
22654 fields, the assembler outputs zeros for all optional fields
22655 anyway, giving each variable-length field its minimum length
22656 (as defined in sys/debug.h). Thus we cannot use the .tbtab
22657 pseudo-op at all. */
22659 /* An all-zero word flags the start of the tbtab, for debuggers
22660 that have to find it by searching forward from the entry
22661 point or from the current pc. */
22662 fputs ("\t.long 0\n", file);
22664 /* Tbtab format type. Use format type 0. */
22665 fputs ("\t.byte 0,", file);
22667 /* Language type. Unfortunately, there does not seem to be any
22668 official way to discover the language being compiled, so we
22669 use language_string.
22670 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
22671 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
22672 a number, so for now use 9. LTO and Go aren't assigned numbers
22673 either, so for now use 0. */
22674 if (! strcmp (language_string, "GNU C")
22675 || ! strcmp (language_string, "GNU GIMPLE")
22676 || ! strcmp (language_string, "GNU Go"))
22677 i = 0;
22678 else if (! strcmp (language_string, "GNU F77")
22679 || ! strcmp (language_string, "GNU Fortran"))
22680 i = 1;
22681 else if (! strcmp (language_string, "GNU Pascal"))
22682 i = 2;
22683 else if (! strcmp (language_string, "GNU Ada"))
22684 i = 3;
22685 else if (! strcmp (language_string, "GNU C++")
22686 || ! strcmp (language_string, "GNU Objective-C++"))
22687 i = 9;
22688 else if (! strcmp (language_string, "GNU Java"))
22689 i = 13;
22690 else if (! strcmp (language_string, "GNU Objective-C"))
22691 i = 14;
22692 else
22693 gcc_unreachable ();
22694 fprintf (file, "%d,", i);
22696 /* 8 single bit fields: global linkage (not set for C extern linkage,
22697 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
22698 from start of procedure stored in tbtab, internal function, function
22699 has controlled storage, function has no toc, function uses fp,
22700 function logs/aborts fp operations. */
22701 /* Assume that fp operations are used if any fp reg must be saved. */
22702 fprintf (file, "%d,",
22703 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
22705 /* 6 bitfields: function is interrupt handler, name present in
22706 proc table, function calls alloca, on condition directives
22707 (controls stack walks, 3 bits), saves condition reg, saves
22708 link reg. */
22709 /* The `function calls alloca' bit seems to be set whenever reg 31 is
22710 set up as a frame pointer, even when there is no alloca call. */
22711 fprintf (file, "%d,",
22712 ((optional_tbtab << 6)
22713 | ((optional_tbtab & frame_pointer_needed) << 5)
22714 | (info->cr_save_p << 1)
22715 | (info->lr_save_p)));
22717 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
22718 (6 bits). */
22719 fprintf (file, "%d,",
22720 (info->push_p << 7) | (64 - info->first_fp_reg_save));
22722 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
22723 fprintf (file, "%d,", (32 - first_reg_to_save ()));
22725 if (optional_tbtab)
22727 /* Compute the parameter info from the function decl argument
22728 list. */
22729 tree decl;
22730 int next_parm_info_bit = 31;
22732 for (decl = DECL_ARGUMENTS (current_function_decl);
22733 decl; decl = DECL_CHAIN (decl))
22735 rtx parameter = DECL_INCOMING_RTL (decl);
22736 enum machine_mode mode = GET_MODE (parameter);
22738 if (GET_CODE (parameter) == REG)
22740 if (SCALAR_FLOAT_MODE_P (mode))
22742 int bits;
22744 float_parms++;
22746 switch (mode)
22748 case SFmode:
22749 case SDmode:
22750 bits = 0x2;
22751 break;
22753 case DFmode:
22754 case DDmode:
22755 case TFmode:
22756 case TDmode:
22757 bits = 0x3;
22758 break;
22760 default:
22761 gcc_unreachable ();
22764 /* If only one bit will fit, don't or in this entry. */
22765 if (next_parm_info_bit > 0)
22766 parm_info |= (bits << (next_parm_info_bit - 1));
22767 next_parm_info_bit -= 2;
22769 else
22771 fixed_parms += ((GET_MODE_SIZE (mode)
22772 + (UNITS_PER_WORD - 1))
22773 / UNITS_PER_WORD);
22774 next_parm_info_bit -= 1;
22780 /* Number of fixed point parameters. */
22781 /* This is actually the number of words of fixed point parameters; thus
22782 an 8 byte struct counts as 2; and thus the maximum value is 8. */
22783 fprintf (file, "%d,", fixed_parms);
22785 /* 2 bitfields: number of floating point parameters (7 bits), parameters
22786 all on stack. */
22787 /* This is actually the number of fp registers that hold parameters;
22788 and thus the maximum value is 13. */
22789 /* Set parameters on stack bit if parameters are not in their original
22790 registers, regardless of whether they are on the stack? Xlc
22791 seems to set the bit when not optimizing. */
22792 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
22794 if (! optional_tbtab)
22795 return;
22797 /* Optional fields follow. Some are variable length. */
22799 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
22800 11 double float. */
22801 /* There is an entry for each parameter in a register, in the order that
22802 they occur in the parameter list. Any intervening arguments on the
22803 stack are ignored. If the list overflows a long (max possible length
22804 34 bits) then completely leave off all elements that don't fit. */
22805 /* Only emit this long if there was at least one parameter. */
22806 if (fixed_parms || float_parms)
22807 fprintf (file, "\t.long %d\n", parm_info);
22809 /* Offset from start of code to tb table. */
22810 fputs ("\t.long ", file);
22811 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
22812 RS6000_OUTPUT_BASENAME (file, fname);
22813 putc ('-', file);
22814 rs6000_output_function_entry (file, fname);
22815 putc ('\n', file);
22817 /* Interrupt handler mask. */
22818 /* Omit this long, since we never set the interrupt handler bit
22819 above. */
22821 /* Number of CTL (controlled storage) anchors. */
22822 /* Omit this long, since the has_ctl bit is never set above. */
22824 /* Displacement into stack of each CTL anchor. */
22825 /* Omit this list of longs, because there are no CTL anchors. */
22827 /* Length of function name. */
22828 if (*fname == '*')
22829 ++fname;
22830 fprintf (file, "\t.short %d\n", (int) strlen (fname));
22832 /* Function name. */
22833 assemble_string (fname, strlen (fname));
22835 /* Register for alloca automatic storage; this is always reg 31.
22836 Only emit this if the alloca bit was set above. */
22837 if (frame_pointer_needed)
22838 fputs ("\t.byte 31\n", file);
22840 fputs ("\t.align 2\n", file);
22844 /* A C compound statement that outputs the assembler code for a thunk
22845 function, used to implement C++ virtual function calls with
22846 multiple inheritance. The thunk acts as a wrapper around a virtual
22847 function, adjusting the implicit object parameter before handing
22848 control off to the real function.
22850 First, emit code to add the integer DELTA to the location that
22851 contains the incoming first argument. Assume that this argument
22852 contains a pointer, and is the one used to pass the `this' pointer
22853 in C++. This is the incoming argument *before* the function
22854 prologue, e.g. `%o0' on a sparc. The addition must preserve the
22855 values of all other incoming arguments.
22857 After the addition, emit code to jump to FUNCTION, which is a
22858 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
22859 not touch the return address. Hence returning from FUNCTION will
22860 return to whoever called the current `thunk'.
22862 The effect must be as if FUNCTION had been called directly with the
22863 adjusted first argument. This macro is responsible for emitting
22864 all of the code for a thunk function; output_function_prologue()
22865 and output_function_epilogue() are not invoked.
22867 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
22868 been extracted from it.) It might possibly be useful on some
22869 targets, but probably not.
22871 If you do not define this macro, the target-independent code in the
22872 C++ frontend will generate a less efficient heavyweight thunk that
22873 calls FUNCTION instead of jumping to it. The generic approach does
22874 not support varargs. */
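/* For a concrete (illustrative) 32-bit case with DELTA == 8 and
   VCALL_OFFSET == 0, the emitted thunk is essentially just
	addi r3,r3,8
	b target
   i.e. bump the incoming "this" pointer and tail-jump to
   FUNCTION.  */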
22876 static void
22877 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
22878 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
22879 tree function)
22881 rtx this_rtx, insn, funexp;
22883 reload_completed = 1;
22884 epilogue_completed = 1;
22886 /* Mark the end of the (empty) prologue. */
22887 emit_note (NOTE_INSN_PROLOGUE_END);
22889 /* Find the "this" pointer. If the function returns a structure,
22890 the structure return pointer is in r3. */
22891 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
22892 this_rtx = gen_rtx_REG (Pmode, 4);
22893 else
22894 this_rtx = gen_rtx_REG (Pmode, 3);
22896 /* Apply the constant offset, if required. */
22897 if (delta)
22898 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
22900 /* Apply the offset from the vtable, if required. */
22901 if (vcall_offset)
22903 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
22904 rtx tmp = gen_rtx_REG (Pmode, 12);
22906 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
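/* The test below checks whether VCALL_OFFSET fits in a signed
   16-bit displacement; if not, add it into TMP first and load
   through a plain [reg] address instead of reg+offset.  */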
22907 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
22909 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
22910 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
22912 else
22914 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
22916 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
22918 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
22921 /* Generate a tail call to the target function. */
22922 if (!TREE_USED (function))
22924 assemble_external (function);
22925 TREE_USED (function) = 1;
22927 funexp = XEXP (DECL_RTL (function), 0);
22928 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
22930 #if TARGET_MACHO
22931 if (MACHOPIC_INDIRECT)
22932 funexp = machopic_indirect_call_target (funexp);
22933 #endif
22935 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
22936 generate sibcall RTL explicitly. */
22937 insn = emit_call_insn (
22938 gen_rtx_PARALLEL (VOIDmode,
22939 gen_rtvec (4,
22940 gen_rtx_CALL (VOIDmode,
22941 funexp, const0_rtx),
22942 gen_rtx_USE (VOIDmode, const0_rtx),
22943 gen_rtx_USE (VOIDmode,
22944 gen_rtx_REG (SImode,
22945 LR_REGNO)),
22946 simple_return_rtx)));
22947 SIBLING_CALL_P (insn) = 1;
22948 emit_barrier ();
22950 /* Run just enough of rest_of_compilation to get the insns emitted.
22951 There's not really enough bulk here to make other passes such as
22952 instruction scheduling worthwhile. Note that use_thunk calls
22953 assemble_start_function and assemble_end_function. */
22954 insn = get_insns ();
22955 shorten_branches (insn);
22956 final_start_function (insn, file, 1);
22957 final (insn, file, 1);
22958 final_end_function ();
22960 reload_completed = 0;
22961 epilogue_completed = 0;
22964 /* A quick summary of the various types of 'constant-pool tables'
22965 under PowerPC:
22967 Target       Flags           Name             One table per
22968 AIX          (none)          AIX TOC          object file
22969 AIX          -mfull-toc      AIX TOC          object file
22970 AIX          -mminimal-toc   AIX minimal TOC  translation unit
22971 SVR4/EABI    (none)          SVR4 SDATA       object file
22972 SVR4/EABI    -fpic           SVR4 pic         object file
22973 SVR4/EABI    -fPIC           SVR4 PIC         translation unit
22974 SVR4/EABI    -mrelocatable   EABI TOC         function
22975 SVR4/EABI    -maix           AIX TOC          object file
22976 SVR4/EABI    -maix -mminimal-toc
22977                              AIX minimal TOC  translation unit
22979 Name             Reg.  Set by  entries        contains:
22980                                made by  addrs?  fp?      sum?
22982 AIX TOC           2    crt0    as       Y       option   option
22983 AIX minimal TOC  30    prolog  gcc      Y       Y        option
22984 SVR4 SDATA       13    crt0    gcc      N       Y        N
22985 SVR4 pic         30    prolog  ld       Y       not yet  N
22986 SVR4 PIC         30    prolog  gcc      Y       option   option
22987 EABI TOC         30    prolog  gcc      Y       option   option  */
22991 /* Hash functions for the hash table. */
22993 static unsigned
22994 rs6000_hash_constant (rtx k)
22996 enum rtx_code code = GET_CODE (k);
22997 enum machine_mode mode = GET_MODE (k);
22998 unsigned result = (code << 3) ^ mode;
22999 const char *format;
23000 int flen, fidx;
23002 format = GET_RTX_FORMAT (code);
23003 flen = strlen (format);
23004 fidx = 0;
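/* The multipliers 613 and 1231 used below are (apparently
   arbitrary) primes; the hash simply mixes each rtx field into
   RESULT.  */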
23006 switch (code)
23008 case LABEL_REF:
23009 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
23011 case CONST_DOUBLE:
23012 if (mode != VOIDmode)
23013 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
23014 flen = 2;
23015 break;
23017 case CODE_LABEL:
23018 fidx = 3;
23019 break;
23021 default:
23022 break;
23025 for (; fidx < flen; fidx++)
23026 switch (format[fidx])
23028 case 's':
23030 unsigned i, len;
23031 const char *str = XSTR (k, fidx);
23032 len = strlen (str);
23033 result = result * 613 + len;
23034 for (i = 0; i < len; i++)
23035 result = result * 613 + (unsigned) str[i];
23036 break;
23038 case 'u':
23039 case 'e':
23040 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
23041 break;
23042 case 'i':
23043 case 'n':
23044 result = result * 613 + (unsigned) XINT (k, fidx);
23045 break;
23046 case 'w':
23047 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
23048 result = result * 613 + (unsigned) XWINT (k, fidx);
23049 else
23051 size_t i;
23052 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
23053 result = result * 613 + (unsigned) (XWINT (k, fidx)
23054 >> CHAR_BIT * i);
23056 break;
23057 case '0':
23058 break;
23059 default:
23060 gcc_unreachable ();
23063 return result;
23066 static unsigned
23067 toc_hash_function (const void *hash_entry)
23069 const struct toc_hash_struct *thc =
23070 (const struct toc_hash_struct *) hash_entry;
23071 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
23074 /* Compare H1 and H2 for equivalence. */
23076 static int
23077 toc_hash_eq (const void *h1, const void *h2)
23079 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
23080 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
23082 if (((const struct toc_hash_struct *) h1)->key_mode
23083 != ((const struct toc_hash_struct *) h2)->key_mode)
23084 return 0;
23086 return rtx_equal_p (r1, r2);
23089 /* These are the names given by the C++ front-end to vtables, and
23090 vtable-like objects. Ideally, this logic should not be here;
23091 instead, there should be some programmatic way of inquiring as
23092 to whether or not an object is a vtable. */
23094 #define VTABLE_NAME_P(NAME) \
23095 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
23096 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
23097 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
23098 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
23099 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
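/* "_vt." is the old GNU (pre-3.0) vtable prefix; the others are
   Itanium C++ ABI manglings: _ZTV vtable, _ZTT VTT, _ZTI typeinfo
   object, _ZTC construction vtable.  */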
23101 #ifdef NO_DOLLAR_IN_LABEL
23102 /* Return a GGC-allocated character string translating dollar signs in
23103 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
23105 const char *
23106 rs6000_xcoff_strip_dollar (const char *name)
23108 char *strip, *p;
23109 const char *q;
23110 size_t len;
23112 q = (const char *) strchr (name, '$');
23114 if (q == 0 || q == name)
23115 return name;
23117 len = strlen (name);
23118 strip = XALLOCAVEC (char, len + 1);
23119 strcpy (strip, name);
23120 p = strip + (q - name);
23121 while (p)
23123 *p = '_';
23124 p = strchr (p + 1, '$');
23127 return ggc_alloc_string (strip, len);
23129 #endif
23131 void
23132 rs6000_output_symbol_ref (FILE *file, rtx x)
23134 /* Currently C++ toc references to vtables can be emitted before it
23135 is decided whether the vtable is public or private. If this is
23136 the case, then the linker will eventually complain that there is
23137 a reference to an unknown section. Thus, for vtables only,
23138 we emit the TOC reference to reference the symbol and not the
23139 section. */
23140 const char *name = XSTR (x, 0);
23142 if (VTABLE_NAME_P (name))
23144 RS6000_OUTPUT_BASENAME (file, name);
23146 else
23147 assemble_name (file, name);
23150 /* Output a TOC entry. We derive the entry name from what is being
23151 written. */
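/* For illustration (label number arbitrary): a DFmode 1.0 on
   64-bit AIX comes out roughly as
	LC..5:
		.tc FD_3ff00000_0[TC],0x3ff0000000000000
   while 64-bit ELF instead emits a .quad directly after the "LC"
   internal label.  */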
23153 void
23154 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
23156 char buf[256];
23157 const char *name = buf;
23158 rtx base = x;
23159 HOST_WIDE_INT offset = 0;
23161 gcc_assert (!TARGET_NO_TOC);
23163 /* When the linker won't eliminate them, don't output duplicate
23164 TOC entries (this happens on AIX if there is any kind of TOC,
23165 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
23166 CODE_LABELs. */
23167 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
23169 struct toc_hash_struct *h;
23170 void * * found;
23172 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
23173 time because GGC is not initialized at that point. */
23174 if (toc_hash_table == NULL)
23175 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
23176 toc_hash_eq, NULL);
23178 h = ggc_alloc_toc_hash_struct ();
23179 h->key = x;
23180 h->key_mode = mode;
23181 h->labelno = labelno;
23183 found = htab_find_slot (toc_hash_table, h, INSERT);
23184 if (*found == NULL)
23185 *found = h;
23186 else /* This is indeed a duplicate.
23187 Set this label equal to that label. */
23189 fputs ("\t.set ", file);
23190 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
23191 fprintf (file, "%d,", labelno);
23192 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
23193 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
23194 found)->labelno));
23196 #ifdef HAVE_AS_TLS
23197 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
23198 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
23199 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
23201 fputs ("\t.set ", file);
23202 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
23203 fprintf (file, "%d,", labelno);
23204 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
23205 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
23206 found)->labelno));
23208 #endif
23209 return;
23213 /* If we're going to put a double constant in the TOC, make sure it's
23214 aligned properly when strict alignment is on. */
23215 if (GET_CODE (x) == CONST_DOUBLE
23216 && STRICT_ALIGNMENT
23217 && GET_MODE_BITSIZE (mode) >= 64
23218 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
23219 ASM_OUTPUT_ALIGN (file, 3);
23222 (*targetm.asm_out.internal_label) (file, "LC", labelno);
23224 /* Handle FP constants specially. Note that if we have a minimal
23225 TOC, things we put here aren't actually in the TOC, so we can allow
23226 FP constants. */
23227 if (GET_CODE (x) == CONST_DOUBLE &&
23228 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
23230 REAL_VALUE_TYPE rv;
23231 long k[4];
23233 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
23234 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
23235 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
23236 else
23237 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
23239 if (TARGET_64BIT)
23241 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23242 fputs (DOUBLE_INT_ASM_OP, file);
23243 else
23244 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
23245 k[0] & 0xffffffff, k[1] & 0xffffffff,
23246 k[2] & 0xffffffff, k[3] & 0xffffffff);
23247 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
23248 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
23249 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
23250 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
23251 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
23252 return;
23254 else
23256 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23257 fputs ("\t.long ", file);
23258 else
23259 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
23260 k[0] & 0xffffffff, k[1] & 0xffffffff,
23261 k[2] & 0xffffffff, k[3] & 0xffffffff);
23262 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
23263 k[0] & 0xffffffff, k[1] & 0xffffffff,
23264 k[2] & 0xffffffff, k[3] & 0xffffffff);
23265 return;
23268 else if (GET_CODE (x) == CONST_DOUBLE &&
23269 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
23271 REAL_VALUE_TYPE rv;
23272 long k[2];
23274 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
23276 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
23277 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
23278 else
23279 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
23281 if (TARGET_64BIT)
23283 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23284 fputs (DOUBLE_INT_ASM_OP, file);
23285 else
23286 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
23287 k[0] & 0xffffffff, k[1] & 0xffffffff);
23288 fprintf (file, "0x%lx%08lx\n",
23289 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
23290 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
23291 return;
23293 else
23295 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23296 fputs ("\t.long ", file);
23297 else
23298 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
23299 k[0] & 0xffffffff, k[1] & 0xffffffff);
23300 fprintf (file, "0x%lx,0x%lx\n",
23301 k[0] & 0xffffffff, k[1] & 0xffffffff);
23302 return;
23305 else if (GET_CODE (x) == CONST_DOUBLE &&
23306 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
23308 REAL_VALUE_TYPE rv;
23309 long l;
23311 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
23312 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
23313 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
23314 else
23315 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
23317 if (TARGET_64BIT)
23319 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23320 fputs (DOUBLE_INT_ASM_OP, file);
23321 else
23322 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
23323 if (WORDS_BIG_ENDIAN)
23324 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
23325 else
23326 fprintf (file, "0x%lx\n", l & 0xffffffff);
23327 return;
23329 else
23331 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23332 fputs ("\t.long ", file);
23333 else
23334 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
23335 fprintf (file, "0x%lx\n", l & 0xffffffff);
23336 return;
23339 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
23341 unsigned HOST_WIDE_INT low;
23342 HOST_WIDE_INT high;
23344 low = INTVAL (x) & 0xffffffff;
23345 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
23347 /* TOC entries are always Pmode-sized, so when big-endian,
23348 smaller integer constants in the TOC need to be padded.
23349 (This is still a win over putting the constants in
23350 a separate constant pool, because then we'd have
23351 to have both a TOC entry _and_ the actual constant.)
23353 For a 32-bit target, CONST_INT values are loaded and shifted
23354 entirely within `low' and can be stored in one TOC entry. */
23356 /* It would be easy to make this work, but it doesn't now. */
23357 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
23359 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
23361 low |= high << 32;
23362 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
23363 high = (HOST_WIDE_INT) low >> 32;
23364 low &= 0xffffffff;
23367 if (TARGET_64BIT)
23369 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23370 fputs (DOUBLE_INT_ASM_OP, file);
23371 else
23372 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
23373 (long) high & 0xffffffff, (long) low & 0xffffffff);
23374 fprintf (file, "0x%lx%08lx\n",
23375 (long) high & 0xffffffff, (long) low & 0xffffffff);
23376 return;
23378 else
23380 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
23382 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23383 fputs ("\t.long ", file);
23384 else
23385 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
23386 (long) high & 0xffffffff, (long) low & 0xffffffff);
23387 fprintf (file, "0x%lx,0x%lx\n",
23388 (long) high & 0xffffffff, (long) low & 0xffffffff);
23390 else
23392 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23393 fputs ("\t.long ", file);
23394 else
23395 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
23396 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
23398 return;
23402 if (GET_CODE (x) == CONST)
23404 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
23405 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
23407 base = XEXP (XEXP (x, 0), 0);
23408 offset = INTVAL (XEXP (XEXP (x, 0), 1));
23411 switch (GET_CODE (base))
23413 case SYMBOL_REF:
23414 name = XSTR (base, 0);
23415 break;
23417 case LABEL_REF:
23418 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
23419 CODE_LABEL_NUMBER (XEXP (base, 0)));
23420 break;
23422 case CODE_LABEL:
23423 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
23424 break;
23426 default:
23427 gcc_unreachable ();
23430 if (TARGET_ELF || TARGET_MINIMAL_TOC)
23431 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
23432 else
23434 fputs ("\t.tc ", file);
23435 RS6000_OUTPUT_BASENAME (file, name);
23437 if (offset < 0)
23438 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
23439 else if (offset)
23440 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
23442 /* Mark large TOC symbols on AIX with [TE] so they are mapped
23443 after other TOC symbols, reducing overflow of small TOC access
23444 to [TC] symbols. */
23445 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
23446 ? "[TE]," : "[TC],", file);
23449 /* Currently C++ toc references to vtables can be emitted before it
23450 is decided whether the vtable is public or private. If this is
23451 the case, then the linker will eventually complain that there is
23452 a TOC reference to an unknown section. Thus, for vtables only,
23453 we emit the TOC reference to reference the symbol and not the
23454 section. */
23455 if (VTABLE_NAME_P (name))
23457 RS6000_OUTPUT_BASENAME (file, name);
23458 if (offset < 0)
23459 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
23460 else if (offset > 0)
23461 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
23463 else
23464 output_addr_const (file, x);
23466 #if HAVE_AS_TLS
23467 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
23468 && SYMBOL_REF_TLS_MODEL (base) != 0)
23470 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
23471 fputs ("@le", file);
23472 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
23473 fputs ("@ie", file);
23474 /* Use global-dynamic for local-dynamic. */
23475 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
23476 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
23478 putc ('\n', file);
23479 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
23480 fputs ("\t.tc .", file);
23481 RS6000_OUTPUT_BASENAME (file, name);
23482 fputs ("[TC],", file);
23483 output_addr_const (file, x);
23484 fputs ("@m", file);
23487 #endif
23489 putc ('\n', file);
23492 /* Output an assembler pseudo-op to write an ASCII string of N characters
23493 starting at P to FILE.
23495 On the RS/6000, we have to do this using the .byte operation and
23496 write out special characters outside the quoted string.
23497 Also, the assembler is broken; very long strings are truncated,
23498 so we must artificially break them up early. */
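/* E.g. output_ascii (file, "A\nB", 3) emits
	.byte "A"
	.byte 10
	.byte "B"
   printable runs go inside quotes, everything else as decimal.  */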
23500 void
23501 output_ascii (FILE *file, const char *p, int n)
23503 char c;
23504 int i, count_string;
23505 const char *for_string = "\t.byte \"";
23506 const char *for_decimal = "\t.byte ";
23507 const char *to_close = NULL;
23509 count_string = 0;
23510 for (i = 0; i < n; i++)
23512 c = *p++;
23513 if (c >= ' ' && c < 0177)
23515 if (for_string)
23516 fputs (for_string, file);
23517 putc (c, file);
23519 /* Write two quotes to get one. */
23520 if (c == '"')
23522 putc (c, file);
23523 ++count_string;
23526 for_string = NULL;
23527 for_decimal = "\"\n\t.byte ";
23528 to_close = "\"\n";
23529 ++count_string;
23531 if (count_string >= 512)
23533 fputs (to_close, file);
23535 for_string = "\t.byte \"";
23536 for_decimal = "\t.byte ";
23537 to_close = NULL;
23538 count_string = 0;
23541 else
23543 if (for_decimal)
23544 fputs (for_decimal, file);
23545 fprintf (file, "%d", c);
23547 for_string = "\n\t.byte \"";
23548 for_decimal = ", ";
23549 to_close = "\n";
23550 count_string = 0;
23554 /* Now close the string if we have written one. Then end the line. */
23555 if (to_close)
23556 fputs (to_close, file);
23559 /* Generate a unique section name for FILENAME for a section type
23560 represented by SECTION_DESC. Output goes into BUF.
23562 SECTION_DESC can be any string, as long as it is different for each
23563 possible section type.
23565 We name the section in the same manner as xlc. The name begins with an
23566 underscore followed by the filename (after stripping any leading directory
23567 names) with the last period replaced by the string SECTION_DESC. If
23568 FILENAME does not contain a period, SECTION_DESC is appended to the end of
23569 the name. */
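/* E.g. rs6000_gen_section_name (&buf, "dir/foo.c", "bss") produces
   "_foobss": leading directories are stripped, non-alphanumeric
   characters dropped, and the final ".c" replaced by "bss".  */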
23571 void
23572 rs6000_gen_section_name (char **buf, const char *filename,
23573 const char *section_desc)
23575 const char *q, *after_last_slash, *last_period = 0;
23576 char *p;
23577 int len;
23579 after_last_slash = filename;
23580 for (q = filename; *q; q++)
23582 if (*q == '/')
23583 after_last_slash = q + 1;
23584 else if (*q == '.')
23585 last_period = q;
23588 len = strlen (after_last_slash) + strlen (section_desc) + 2;
23589 *buf = (char *) xmalloc (len);
23591 p = *buf;
23592 *p++ = '_';
23594 for (q = after_last_slash; *q; q++)
23596 if (q == last_period)
23598 strcpy (p, section_desc);
23599 p += strlen (section_desc);
23600 break;
23603 else if (ISALNUM (*q))
23604 *p++ = *q;
23607 if (last_period == 0)
23608 strcpy (p, section_desc);
23609 else
23610 *p = '\0';
23613 /* Emit profile function. */
23615 void
23616 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
23618 /* Non-standard profiling for kernels, which just saves LR then calls
23619 _mcount without worrying about arg saves. The idea is to change
23620 the function prologue as little as possible as it isn't easy to
23621 account for arg save/restore code added just for _mcount. */
23622 if (TARGET_PROFILE_KERNEL)
23623 return;
23625 if (DEFAULT_ABI == ABI_AIX)
23627 #ifndef NO_PROFILE_COUNTERS
23628 # define NO_PROFILE_COUNTERS 0
23629 #endif
23630 if (NO_PROFILE_COUNTERS)
23631 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
23632 LCT_NORMAL, VOIDmode, 0);
23633 else
23635 char buf[30];
23636 const char *label_name;
23637 rtx fun;
23639 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
23640 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
23641 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
23643 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
23644 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
23647 else if (DEFAULT_ABI == ABI_DARWIN)
23649 const char *mcount_name = RS6000_MCOUNT;
23650 int caller_addr_regno = LR_REGNO;
23652 /* Be conservative and always set this, at least for now. */
23653 crtl->uses_pic_offset_table = 1;
23655 #if TARGET_MACHO
23656 /* For PIC code, set up a stub and collect the caller's address
23657 from r0, which is where the prologue puts it. */
23658 if (MACHOPIC_INDIRECT
23659 && crtl->uses_pic_offset_table)
23660 caller_addr_regno = 0;
23661 #endif
23662 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
23663 LCT_NORMAL, VOIDmode, 1,
23664 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
23668 /* Write function profiler code. */
23670 void
23671 output_function_profiler (FILE *file, int labelno)
23673 char buf[100];
23675 switch (DEFAULT_ABI)
23677 default:
23678 gcc_unreachable ();
23680 case ABI_V4:
23681 if (!TARGET_32BIT)
23683 warning (0, "no profiling of 64-bit code for this ABI");
23684 return;
23686 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
23687 fprintf (file, "\tmflr %s\n", reg_names[0]);
23688 if (NO_PROFILE_COUNTERS)
23690 asm_fprintf (file, "\tstw %s,4(%s)\n",
23691 reg_names[0], reg_names[1]);
23693 else if (TARGET_SECURE_PLT && flag_pic)
23695 if (TARGET_LINK_STACK)
23697 char name[32];
23698 get_ppc476_thunk_name (name);
23699 asm_fprintf (file, "\tbl %s\n", name);
23701 else
23702 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
23703 asm_fprintf (file, "\tstw %s,4(%s)\n",
23704 reg_names[0], reg_names[1]);
23705 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
23706 asm_fprintf (file, "\taddis %s,%s,",
23707 reg_names[12], reg_names[12]);
23708 assemble_name (file, buf);
23709 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
23710 assemble_name (file, buf);
23711 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
23713 else if (flag_pic == 1)
23715 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
23716 asm_fprintf (file, "\tstw %s,4(%s)\n",
23717 reg_names[0], reg_names[1]);
23718 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
23719 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
23720 assemble_name (file, buf);
23721 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
23723 else if (flag_pic > 1)
23725 asm_fprintf (file, "\tstw %s,4(%s)\n",
23726 reg_names[0], reg_names[1]);
23727 /* Now, we need to get the address of the label. */
23728 if (TARGET_LINK_STACK)
23730 char name[32];
23731 get_ppc476_thunk_name (name);
23732 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
23733 assemble_name (file, buf);
23734 fputs ("-.\n1:", file);
23735 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
23736 asm_fprintf (file, "\taddi %s,%s,4\n",
23737 reg_names[11], reg_names[11]);
23739 else
23741 fputs ("\tbcl 20,31,1f\n\t.long ", file);
23742 assemble_name (file, buf);
23743 fputs ("-.\n1:", file);
23744 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
23746 asm_fprintf (file, "\tlwz %s,0(%s)\n",
23747 reg_names[0], reg_names[11]);
23748 asm_fprintf (file, "\tadd %s,%s,%s\n",
23749 reg_names[0], reg_names[0], reg_names[11]);
23751 else
23753 asm_fprintf (file, "\tlis %s,", reg_names[12]);
23754 assemble_name (file, buf);
23755 fputs ("@ha\n", file);
23756 asm_fprintf (file, "\tstw %s,4(%s)\n",
23757 reg_names[0], reg_names[1]);
23758 asm_fprintf (file, "\tla %s,", reg_names[0]);
23759 assemble_name (file, buf);
23760 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
23763 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
23764 fprintf (file, "\tbl %s%s\n",
23765 RS6000_MCOUNT, flag_pic ? "@plt" : "");
23766 break;
23768 case ABI_AIX:
23769 case ABI_DARWIN:
23770 if (!TARGET_PROFILE_KERNEL)
23772 /* Don't do anything, done in output_profile_hook (). */
23774 else
23776 gcc_assert (!TARGET_32BIT);
23778 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
23779 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
23781 if (cfun->static_chain_decl != NULL)
23783 asm_fprintf (file, "\tstd %s,24(%s)\n",
23784 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23785 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23786 asm_fprintf (file, "\tld %s,24(%s)\n",
23787 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23789 else
23790 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23792 break;
23798 /* The following variable holds the last scheduled insn. */
23800 static rtx last_scheduled_insn;
23802 /* The following variable helps to balance issuing of load and
23803 store instructions. */
23805 static int load_store_pendulum;
23807 /* Power4 load update and store update instructions are cracked into a
23808 load or store and an integer insn which are executed in the same cycle.
23809 Branches have their own dispatch slot which does not count against the
23810 GCC issue rate, but it changes the program flow so there are no other
23811 instructions to issue in this cycle. */
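/* For example, on the dispatch-group targets (rs6000_sched_groups), a
   cracked insn consumes two issue slots, so the code below returns
   MORE - 2 for it, while a microcoded insn ends the group entirely. */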
23813 static int
23814 rs6000_variable_issue_1 (rtx insn, int more)
23816 last_scheduled_insn = insn;
23817 if (GET_CODE (PATTERN (insn)) == USE
23818 || GET_CODE (PATTERN (insn)) == CLOBBER)
23820 cached_can_issue_more = more;
23821 return cached_can_issue_more;
23824 if (insn_terminates_group_p (insn, current_group))
23826 cached_can_issue_more = 0;
23827 return cached_can_issue_more;
23830 /* If the insn has no reservation but we reach here, return MORE unchanged. */
23831 if (recog_memoized (insn) < 0)
23832 return more;
23834 if (rs6000_sched_groups)
23836 if (is_microcoded_insn (insn))
23837 cached_can_issue_more = 0;
23838 else if (is_cracked_insn (insn))
23839 cached_can_issue_more = more > 2 ? more - 2 : 0;
23840 else
23841 cached_can_issue_more = more - 1;
23843 return cached_can_issue_more;
23846 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
23847 return 0;
23849 cached_can_issue_more = more - 1;
23850 return cached_can_issue_more;
23853 static int
23854 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
23856 int r = rs6000_variable_issue_1 (insn, more);
23857 if (verbose)
23858 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
23859 return r;
23862 /* Adjust the cost of a scheduling dependency. Return the new cost of
23863 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
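/* For example, on POWER6 a store whose address is generated by a
   dependent load is given a cost of 4 below, unless the loaded value
   actually feeds the store data and store_data_bypass_p allows the
   bypass. */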
23865 static int
23866 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
23868 enum attr_type attr_type;
23870 if (! recog_memoized (insn))
23871 return 0;
23873 switch (REG_NOTE_KIND (link))
23875 case REG_DEP_TRUE:
23877 /* Data dependency; DEP_INSN writes a register that INSN reads
23878 some cycles later. */
23880 /* Separate a load from a narrower, dependent store. */
23881 if (rs6000_sched_groups
23882 && GET_CODE (PATTERN (insn)) == SET
23883 && GET_CODE (PATTERN (dep_insn)) == SET
23884 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
23885 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
23886 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
23887 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
23888 return cost + 14;
23890 attr_type = get_attr_type (insn);
23892 switch (attr_type)
23894 case TYPE_JMPREG:
23895 /* Tell the first scheduling pass about the latency between
23896 a mtctr and bctr (and mtlr and br/blr). The first
23897 scheduling pass will not know about this latency since
23898 the mtctr instruction, which has the latency associated
23899 to it, will be generated by reload. */
23900 return 4;
23901 case TYPE_BRANCH:
23902 /* Leave some extra cycles between a compare and its
23903 dependent branch, to inhibit expensive mispredicts. */
23904 if ((rs6000_cpu_attr == CPU_PPC603
23905 || rs6000_cpu_attr == CPU_PPC604
23906 || rs6000_cpu_attr == CPU_PPC604E
23907 || rs6000_cpu_attr == CPU_PPC620
23908 || rs6000_cpu_attr == CPU_PPC630
23909 || rs6000_cpu_attr == CPU_PPC750
23910 || rs6000_cpu_attr == CPU_PPC7400
23911 || rs6000_cpu_attr == CPU_PPC7450
23912 || rs6000_cpu_attr == CPU_PPCE5500
23913 || rs6000_cpu_attr == CPU_PPCE6500
23914 || rs6000_cpu_attr == CPU_POWER4
23915 || rs6000_cpu_attr == CPU_POWER5
23916 || rs6000_cpu_attr == CPU_POWER7
23917 || rs6000_cpu_attr == CPU_POWER8
23918 || rs6000_cpu_attr == CPU_CELL)
23919 && recog_memoized (dep_insn)
23920 && (INSN_CODE (dep_insn) >= 0))
23922 switch (get_attr_type (dep_insn))
23924 case TYPE_CMP:
23925 case TYPE_COMPARE:
23926 case TYPE_DELAYED_COMPARE:
23927 case TYPE_IMUL_COMPARE:
23928 case TYPE_LMUL_COMPARE:
23929 case TYPE_FPCOMPARE:
23930 case TYPE_CR_LOGICAL:
23931 case TYPE_DELAYED_CR:
23932 return cost + 2;
23933 default:
23934 break;
23936 break;
23938 case TYPE_STORE:
23939 case TYPE_STORE_U:
23940 case TYPE_STORE_UX:
23941 case TYPE_FPSTORE:
23942 case TYPE_FPSTORE_U:
23943 case TYPE_FPSTORE_UX:
23944 if ((rs6000_cpu == PROCESSOR_POWER6)
23945 && recog_memoized (dep_insn)
23946 && (INSN_CODE (dep_insn) >= 0))
23949 if (GET_CODE (PATTERN (insn)) != SET)
23950 /* If this happens, we have to extend this to schedule
23951 optimally. Return default for now. */
23952 return cost;
23954 /* Adjust the cost for the case where the value written
23955 by a fixed point operation is used as the address
23956 gen value on a store. */
23957 switch (get_attr_type (dep_insn))
23959 case TYPE_LOAD:
23960 case TYPE_LOAD_U:
23961 case TYPE_LOAD_UX:
23962 case TYPE_CNTLZ:
23964 if (! store_data_bypass_p (dep_insn, insn))
23965 return 4;
23966 break;
23968 case TYPE_LOAD_EXT:
23969 case TYPE_LOAD_EXT_U:
23970 case TYPE_LOAD_EXT_UX:
23971 case TYPE_VAR_SHIFT_ROTATE:
23972 case TYPE_VAR_DELAYED_COMPARE:
23974 if (! store_data_bypass_p (dep_insn, insn))
23975 return 6;
23976 break;
23978 case TYPE_INTEGER:
23979 case TYPE_COMPARE:
23980 case TYPE_FAST_COMPARE:
23981 case TYPE_EXTS:
23982 case TYPE_SHIFT:
23983 case TYPE_INSERT_WORD:
23984 case TYPE_INSERT_DWORD:
23985 case TYPE_FPLOAD_U:
23986 case TYPE_FPLOAD_UX:
23987 case TYPE_STORE_U:
23988 case TYPE_STORE_UX:
23989 case TYPE_FPSTORE_U:
23990 case TYPE_FPSTORE_UX:
23992 if (! store_data_bypass_p (dep_insn, insn))
23993 return 3;
23994 break;
23996 case TYPE_IMUL:
23997 case TYPE_IMUL2:
23998 case TYPE_IMUL3:
23999 case TYPE_LMUL:
24000 case TYPE_IMUL_COMPARE:
24001 case TYPE_LMUL_COMPARE:
24003 if (! store_data_bypass_p (dep_insn, insn))
24004 return 17;
24005 break;
24007 case TYPE_IDIV:
24009 if (! store_data_bypass_p (dep_insn, insn))
24010 return 45;
24011 break;
24013 case TYPE_LDIV:
24015 if (! store_data_bypass_p (dep_insn, insn))
24016 return 57;
24017 break;
24019 default:
24020 break;
24023 break;
24025 case TYPE_LOAD:
24026 case TYPE_LOAD_U:
24027 case TYPE_LOAD_UX:
24028 case TYPE_LOAD_EXT:
24029 case TYPE_LOAD_EXT_U:
24030 case TYPE_LOAD_EXT_UX:
24031 if ((rs6000_cpu == PROCESSOR_POWER6)
24032 && recog_memoized (dep_insn)
24033 && (INSN_CODE (dep_insn) >= 0))
24036 /* Adjust the cost for the case where the value written
24037 by a fixed point instruction is used within the address
24038 gen portion of a subsequent load(u)(x) */
24039 switch (get_attr_type (dep_insn))
24041 case TYPE_LOAD:
24042 case TYPE_LOAD_U:
24043 case TYPE_LOAD_UX:
24044 case TYPE_CNTLZ:
24046 if (set_to_load_agen (dep_insn, insn))
24047 return 4;
24048 break;
24050 case TYPE_LOAD_EXT:
24051 case TYPE_LOAD_EXT_U:
24052 case TYPE_LOAD_EXT_UX:
24053 case TYPE_VAR_SHIFT_ROTATE:
24054 case TYPE_VAR_DELAYED_COMPARE:
24056 if (set_to_load_agen (dep_insn, insn))
24057 return 6;
24058 break;
24060 case TYPE_INTEGER:
24061 case TYPE_COMPARE:
24062 case TYPE_FAST_COMPARE:
24063 case TYPE_EXTS:
24064 case TYPE_SHIFT:
24065 case TYPE_INSERT_WORD:
24066 case TYPE_INSERT_DWORD:
24067 case TYPE_FPLOAD_U:
24068 case TYPE_FPLOAD_UX:
24069 case TYPE_STORE_U:
24070 case TYPE_STORE_UX:
24071 case TYPE_FPSTORE_U:
24072 case TYPE_FPSTORE_UX:
24074 if (set_to_load_agen (dep_insn, insn))
24075 return 3;
24076 break;
24078 case TYPE_IMUL:
24079 case TYPE_IMUL2:
24080 case TYPE_IMUL3:
24081 case TYPE_LMUL:
24082 case TYPE_IMUL_COMPARE:
24083 case TYPE_LMUL_COMPARE:
24085 if (set_to_load_agen (dep_insn, insn))
24086 return 17;
24087 break;
24089 case TYPE_IDIV:
24091 if (set_to_load_agen (dep_insn, insn))
24092 return 45;
24093 break;
24095 case TYPE_LDIV:
24097 if (set_to_load_agen (dep_insn, insn))
24098 return 57;
24099 break;
24101 default:
24102 break;
24105 break;
24107 case TYPE_FPLOAD:
24108 if ((rs6000_cpu == PROCESSOR_POWER6)
24109 && recog_memoized (dep_insn)
24110 && (INSN_CODE (dep_insn) >= 0)
24111 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
24112 return 2;
24114 default:
24115 break;
24118 /* Fall out to return default cost. */
24120 break;
24122 case REG_DEP_OUTPUT:
24123 /* Output dependency; DEP_INSN writes a register that INSN writes some
24124 cycles later. */
24125 if ((rs6000_cpu == PROCESSOR_POWER6)
24126 && recog_memoized (dep_insn)
24127 && (INSN_CODE (dep_insn) >= 0))
24129 attr_type = get_attr_type (insn);
24131 switch (attr_type)
24133 case TYPE_FP:
24134 if (get_attr_type (dep_insn) == TYPE_FP)
24135 return 1;
24136 break;
24137 case TYPE_FPLOAD:
24138 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
24139 return 2;
24140 break;
24141 default:
24142 break;
24145 case REG_DEP_ANTI:
24146 /* Anti dependency; DEP_INSN reads a register that INSN writes some
24147 cycles later. */
24148 return 0;
24150 default:
24151 gcc_unreachable ();
24154 return cost;
24157 /* Debug version of rs6000_adjust_cost. */
24159 static int
24160 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
24162 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
24164 if (ret != cost)
24166 const char *dep;
24168 switch (REG_NOTE_KIND (link))
24170 default: dep = "unknown dependency"; break;
24171 case REG_DEP_TRUE: dep = "data dependency"; break;
24172 case REG_DEP_OUTPUT: dep = "output dependency"; break;
24173 case REG_DEP_ANTI: dep = "anti dependency"; break;
24176 fprintf (stderr,
24177 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
24178 "%s, insn:\n", ret, cost, dep);
24180 debug_rtx (insn);
24183 return ret;
24186 /* The function returns true if INSN is microcoded.
24187 Return false otherwise. */
24189 static bool
24190 is_microcoded_insn (rtx insn)
24192 if (!insn || !NONDEBUG_INSN_P (insn)
24193 || GET_CODE (PATTERN (insn)) == USE
24194 || GET_CODE (PATTERN (insn)) == CLOBBER)
24195 return false;
24197 if (rs6000_cpu_attr == CPU_CELL)
24198 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
24200 if (rs6000_sched_groups
24201 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
24203 enum attr_type type = get_attr_type (insn);
24204 if (type == TYPE_LOAD_EXT_U
24205 || type == TYPE_LOAD_EXT_UX
24206 || type == TYPE_LOAD_UX
24207 || type == TYPE_STORE_UX
24208 || type == TYPE_MFCR)
24209 return true;
24212 return false;
24215 /* The function returns true if INSN is cracked into 2 instructions
24216 by the processor (and therefore occupies 2 issue slots). */
24218 static bool
24219 is_cracked_insn (rtx insn)
24221 if (!insn || !NONDEBUG_INSN_P (insn)
24222 || GET_CODE (PATTERN (insn)) == USE
24223 || GET_CODE (PATTERN (insn)) == CLOBBER)
24224 return false;
24226 if (rs6000_sched_groups
24227 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
24229 enum attr_type type = get_attr_type (insn);
24230 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
24231 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
24232 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
24233 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
24234 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
24235 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
24236 || type == TYPE_IDIV || type == TYPE_LDIV
24237 || type == TYPE_INSERT_WORD)
24238 return true;
24241 return false;
24244 /* The function returns true if INSN can be issued only from
24245 the branch slot. */
24247 static bool
24248 is_branch_slot_insn (rtx insn)
24250 if (!insn || !NONDEBUG_INSN_P (insn)
24251 || GET_CODE (PATTERN (insn)) == USE
24252 || GET_CODE (PATTERN (insn)) == CLOBBER)
24253 return false;
24255 if (rs6000_sched_groups)
24257 enum attr_type type = get_attr_type (insn);
24258 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
24259 return true;
24260 return false;
24263 return false;
24266 /* Return true if OUT_INSN sets a value that is
24267 used in the address generation computation of IN_INSN. */
24268 static bool
24269 set_to_load_agen (rtx out_insn, rtx in_insn)
24271 rtx out_set, in_set;
24273 /* For performance reasons, only handle the simple case where
24274 both insns are a single_set. */
24275 out_set = single_set (out_insn);
24276 if (out_set)
24278 in_set = single_set (in_insn);
24279 if (in_set)
24280 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
24283 return false;
24286 /* Try to determine base/offset/size parts of the given MEM.
24287 Return true if successful, false if any of the values couldn't
24288 be determined.
24290 This function only looks for REG or REG+CONST address forms.
24291 REG+REG address form will return false. */
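/* For example, a 4-byte MEM whose address is (plus (reg 9)
   (const_int 16)) yields *BASE = (reg 9), *OFFSET = 16 and *SIZE = 4. */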
24293 static bool
24294 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
24295 HOST_WIDE_INT *size)
24297 rtx addr_rtx;
24298 if (MEM_SIZE_KNOWN_P (mem))
24299 *size = MEM_SIZE (mem);
24300 else
24301 return false;
24303 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
24304 addr_rtx = XEXP (XEXP (mem, 0), 1);
24305 else
24306 addr_rtx = XEXP (mem, 0);
24308 if (GET_CODE (addr_rtx) == REG)
24310 *base = addr_rtx;
24311 *offset = 0;
24313 else if (GET_CODE (addr_rtx) == PLUS
24314 && CONST_INT_P (XEXP (addr_rtx, 1)))
24316 *base = XEXP (addr_rtx, 0);
24317 *offset = INTVAL (XEXP (addr_rtx, 1));
24319 else
24320 return false;
24322 return true;
24325 /* Return true if the target storage location of MEM1 is adjacent
24326 to the target storage location of MEM2. */
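/* For example, two 4-byte accesses at offsets 0 and 4 from the same
   base register are adjacent; accesses at offsets 0 and 8 are not. */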
24329 static bool
24330 adjacent_mem_locations (rtx mem1, rtx mem2)
24332 rtx reg1, reg2;
24333 HOST_WIDE_INT off1, size1, off2, size2;
24335 if (get_memref_parts (mem1, &reg1, &off1, &size1)
24336 && get_memref_parts (mem2, &reg2, &off2, &size2))
24337 return ((REGNO (reg1) == REGNO (reg2))
24338 && ((off1 + size1 == off2)
24339 || (off2 + size2 == off1)));
24341 return false;
24344 /* This function returns true if it can be determined that the two MEM
24345 locations overlap by at least 1 byte based on base reg/offset/size. */
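/* For example, byte ranges [8,12) and [10,14) from the same base
   register overlap, while [8,12) and [12,16) merely abut and do not. */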
24347 static bool
24348 mem_locations_overlap (rtx mem1, rtx mem2)
24350 rtx reg1, reg2;
24351 HOST_WIDE_INT off1, size1, off2, size2;
24353 if (get_memref_parts (mem1, &reg1, &off1, &size1)
24354 && get_memref_parts (mem2, &reg2, &off2, &size2))
24355 return ((REGNO (reg1) == REGNO (reg2))
24356 && (((off1 <= off2) && (off1 + size1 > off2))
24357 || ((off2 <= off1) && (off2 + size2 > off1))));
24359 return false;
24362 /* A C statement (sans semicolon) to update the integer scheduling
24363 priority INSN_PRIORITY (INSN). Increase the priority to execute
24364 INSN earlier, reduce the priority to execute INSN later. Do not
24365 define this macro if you do not need to adjust the scheduling
24366 priorities of insns. */
24368 static int
24369 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
24371 rtx load_mem, str_mem;
24372 /* On machines (like the 750) which have asymmetric integer units,
24373 where one integer unit can do multiply and divides and the other
24374 can't, reduce the priority of multiply/divide so it is scheduled
24375 later than other integer operations. */
24377 #if 0
24378 if (! INSN_P (insn))
24379 return priority;
24381 if (GET_CODE (PATTERN (insn)) == USE)
24382 return priority;
24384 switch (rs6000_cpu_attr) {
24385 case CPU_PPC750:
24386 switch (get_attr_type (insn))
24388 default:
24389 break;
24391 case TYPE_IMUL:
24392 case TYPE_IDIV:
24393 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
24394 priority, priority);
24395 if (priority >= 0 && priority < 0x01000000)
24396 priority >>= 3;
24397 break;
24400 #endif
24402 if (insn_must_be_first_in_group (insn)
24403 && reload_completed
24404 && current_sched_info->sched_max_insns_priority
24405 && rs6000_sched_restricted_insns_priority)
24408 /* Prioritize insns that can be dispatched only in the first
24409 dispatch slot. */
24410 if (rs6000_sched_restricted_insns_priority == 1)
24411 /* Attach highest priority to insn. This means that in
24412 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
24413 precede 'priority' (critical path) considerations. */
24414 return current_sched_info->sched_max_insns_priority;
24415 else if (rs6000_sched_restricted_insns_priority == 2)
24416 /* Increase priority of insn by a minimal amount. This means that in
24417 haifa-sched.c:ready_sort(), only 'priority' (critical path)
24418 considerations precede dispatch-slot restriction considerations. */
24419 return (priority + 1);
24422 if (rs6000_cpu == PROCESSOR_POWER6
24423 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
24424 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
24425 /* Attach highest priority to insn if the scheduler has just issued two
24426 stores and this instruction is a load, or two loads and this instruction
24427 is a store. Power6 wants loads and stores scheduled alternately
24428 when possible */
24429 return current_sched_info->sched_max_insns_priority;
24431 return priority;
24434 /* Return true if the instruction is nonpipelined on the Cell. */
24435 static bool
24436 is_nonpipeline_insn (rtx insn)
24438 enum attr_type type;
24439 if (!insn || !NONDEBUG_INSN_P (insn)
24440 || GET_CODE (PATTERN (insn)) == USE
24441 || GET_CODE (PATTERN (insn)) == CLOBBER)
24442 return false;
24444 type = get_attr_type (insn);
24445 if (type == TYPE_IMUL
24446 || type == TYPE_IMUL2
24447 || type == TYPE_IMUL3
24448 || type == TYPE_LMUL
24449 || type == TYPE_IDIV
24450 || type == TYPE_LDIV
24451 || type == TYPE_SDIV
24452 || type == TYPE_DDIV
24453 || type == TYPE_SSQRT
24454 || type == TYPE_DSQRT
24455 || type == TYPE_MFCR
24456 || type == TYPE_MFCRF
24457 || type == TYPE_MFJMPR)
24459 return true;
24461 return false;
24465 /* Return how many instructions the machine can issue per cycle. */
24467 static int
24468 rs6000_issue_rate (void)
24470 /* Unless scheduling for register pressure, use an issue rate of 1 for
24471 the first scheduling pass to decrease degradation. */
24472 if (!reload_completed && !flag_sched_pressure)
24473 return 1;
24475 switch (rs6000_cpu_attr) {
24476 case CPU_RS64A:
24477 case CPU_PPC601: /* ? */
24478 case CPU_PPC7450:
24479 return 3;
24480 case CPU_PPC440:
24481 case CPU_PPC603:
24482 case CPU_PPC750:
24483 case CPU_PPC7400:
24484 case CPU_PPC8540:
24485 case CPU_PPC8548:
24486 case CPU_CELL:
24487 case CPU_PPCE300C2:
24488 case CPU_PPCE300C3:
24489 case CPU_PPCE500MC:
24490 case CPU_PPCE500MC64:
24491 case CPU_PPCE5500:
24492 case CPU_PPCE6500:
24493 case CPU_TITAN:
24494 return 2;
24495 case CPU_PPC476:
24496 case CPU_PPC604:
24497 case CPU_PPC604E:
24498 case CPU_PPC620:
24499 case CPU_PPC630:
24500 return 4;
24501 case CPU_POWER4:
24502 case CPU_POWER5:
24503 case CPU_POWER6:
24504 case CPU_POWER7:
24505 return 5;
24506 case CPU_POWER8:
24507 return 7;
24508 default:
24509 return 1;
24513 /* Return how many instructions to look ahead for better insn
24514 scheduling. */
24516 static int
24517 rs6000_use_sched_lookahead (void)
24519 switch (rs6000_cpu_attr)
24521 case CPU_PPC8540:
24522 case CPU_PPC8548:
24523 return 4;
24525 case CPU_CELL:
24526 return (reload_completed ? 8 : 0);
24528 default:
24529 return 0;
24533 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
24534 static int
24535 rs6000_use_sched_lookahead_guard (rtx insn)
24537 if (rs6000_cpu_attr != CPU_CELL)
24538 return 1;
24540 if (insn == NULL_RTX || !INSN_P (insn))
24541 abort ();
24543 if (!reload_completed
24544 || is_nonpipeline_insn (insn)
24545 || is_microcoded_insn (insn))
24546 return 0;
24548 return 1;
24551 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
24552 and return true. */
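/* For example, is_load_insn1 below passes the SET_SRC of a load
   pattern here, and the recursion locates the MEM nested inside it. */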
24554 static bool
24555 find_mem_ref (rtx pat, rtx *mem_ref)
24557 const char * fmt;
24558 int i, j;
24560 /* stack_tie does not produce any real memory traffic. */
24561 if (tie_operand (pat, VOIDmode))
24562 return false;
24564 if (GET_CODE (pat) == MEM)
24566 *mem_ref = pat;
24567 return true;
24570 /* Recursively process the pattern. */
24571 fmt = GET_RTX_FORMAT (GET_CODE (pat));
24573 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
24575 if (fmt[i] == 'e')
24577 if (find_mem_ref (XEXP (pat, i), mem_ref))
24578 return true;
24580 else if (fmt[i] == 'E')
24581 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
24583 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
24584 return true;
24588 return false;
24591 /* Determine if PAT is a PATTERN of a load insn. */
24593 static bool
24594 is_load_insn1 (rtx pat, rtx *load_mem)
24596 if (!pat)
24597 return false;
24599 if (GET_CODE (pat) == SET)
24600 return find_mem_ref (SET_SRC (pat), load_mem);
24602 if (GET_CODE (pat) == PARALLEL)
24604 int i;
24606 for (i = 0; i < XVECLEN (pat, 0); i++)
24607 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
24608 return true;
24611 return false;
24614 /* Determine if INSN loads from memory. */
24616 static bool
24617 is_load_insn (rtx insn, rtx *load_mem)
24619 if (!insn || !INSN_P (insn))
24620 return false;
24622 if (CALL_P (insn))
24623 return false;
24625 return is_load_insn1 (PATTERN (insn), load_mem);
24628 /* Determine if PAT is a PATTERN of a store insn. */
24630 static bool
24631 is_store_insn1 (rtx pat, rtx *str_mem)
24633 if (!pat)
24634 return false;
24636 if (GET_CODE (pat) == SET)
24637 return find_mem_ref (SET_DEST (pat), str_mem);
24639 if (GET_CODE (pat) == PARALLEL)
24641 int i;
24643 for (i = 0; i < XVECLEN (pat, 0); i++)
24644 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
24645 return true;
24648 return false;
24651 /* Determine if INSN stores to memory. */
24653 static bool
24654 is_store_insn (rtx insn, rtx *str_mem)
24656 if (!insn || !INSN_P (insn))
24657 return false;
24659 return is_store_insn1 (PATTERN (insn), str_mem);
24662 /* Returns whether the dependence between INSN and NEXT is considered
24663 costly by the given target. */
24665 static bool
24666 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
24668 rtx insn;
24669 rtx next;
24670 rtx load_mem, str_mem;
24672 /* If the flag is not enabled - no dependence is considered costly;
24673 allow all dependent insns in the same group.
24674 This is the most aggressive option. */
24675 if (rs6000_sched_costly_dep == no_dep_costly)
24676 return false;
24678 /* If the flag is set to 1 - a dependence is always considered costly;
24679 do not allow dependent instructions in the same group.
24680 This is the most conservative option. */
24681 if (rs6000_sched_costly_dep == all_deps_costly)
24682 return true;
24684 insn = DEP_PRO (dep);
24685 next = DEP_CON (dep);
24687 if (rs6000_sched_costly_dep == store_to_load_dep_costly
24688 && is_load_insn (next, &load_mem)
24689 && is_store_insn (insn, &str_mem))
24690 /* Prevent load after store in the same group. */
24691 return true;
24693 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
24694 && is_load_insn (next, &load_mem)
24695 && is_store_insn (insn, &str_mem)
24696 && DEP_TYPE (dep) == REG_DEP_TRUE
24697 && mem_locations_overlap(str_mem, load_mem))
24698 /* Prevent load after store in the same group if it is a true
24699 dependence. */
24700 return true;
24702 /* The flag is set to X; dependences with latency >= X are considered costly,
24703 and will not be scheduled in the same group. */
24704 if (rs6000_sched_costly_dep <= max_dep_latency
24705 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
24706 return true;
24708 return false;
24711 /* Return the next insn after INSN that is found before TAIL is reached,
24712 skipping any "non-active" insns - insns that will not actually occupy
24713 an issue slot. Return NULL_RTX if such an insn is not found. */
24715 static rtx
24716 get_next_active_insn (rtx insn, rtx tail)
24718 if (insn == NULL_RTX || insn == tail)
24719 return NULL_RTX;
24721 while (1)
24723 insn = NEXT_INSN (insn);
24724 if (insn == NULL_RTX || insn == tail)
24725 return NULL_RTX;
24727 if (CALL_P (insn)
24728 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
24729 || (NONJUMP_INSN_P (insn)
24730 && GET_CODE (PATTERN (insn)) != USE
24731 && GET_CODE (PATTERN (insn)) != CLOBBER
24732 && INSN_CODE (insn) != CODE_FOR_stack_tie))
24733 break;
24735 return insn;
24738 /* We are about to begin issuing insns for this clock cycle. */
24740 static int
24741 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
24742 rtx *ready ATTRIBUTE_UNUSED,
24743 int *pn_ready ATTRIBUTE_UNUSED,
24744 int clock_var ATTRIBUTE_UNUSED)
24746 int n_ready = *pn_ready;
24748 if (sched_verbose)
24749 fprintf (dump, "// rs6000_sched_reorder :\n");
24751 /* Reorder the ready list, if the second to last ready insn
24752 is a nonpipeline insn. */
24753 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
24755 if (is_nonpipeline_insn (ready[n_ready - 1])
24756 && (recog_memoized (ready[n_ready - 2]) > 0))
24757 /* Simply swap first two insns. */
24759 rtx tmp = ready[n_ready - 1];
24760 ready[n_ready - 1] = ready[n_ready - 2];
24761 ready[n_ready - 2] = tmp;
24765 if (rs6000_cpu == PROCESSOR_POWER6)
24766 load_store_pendulum = 0;
24768 return rs6000_issue_rate ();
24771 /* Like rs6000_sched_reorder, but called after issuing each insn. */
24773 static int
24774 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
24775 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
24777 if (sched_verbose)
24778 fprintf (dump, "// rs6000_sched_reorder2 :\n");
24780 /* For Power6, we need to handle some special cases to try and keep the
24781 store queue from overflowing and triggering expensive flushes.
24783 This code monitors how load and store instructions are being issued
24784 and skews the ready list one way or the other to increase the likelihood
24785 that a desired instruction is issued at the proper time.
24787 A couple of things are done. First, we maintain a "load_store_pendulum"
24788 to track the current state of load/store issue.
24790 - If the pendulum is at zero, then no loads or stores have been
24791 issued in the current cycle so we do nothing.
24793 - If the pendulum is 1, then a single load has been issued in this
24794 cycle and we attempt to locate another load in the ready list to
24795 issue with it.
24797 - If the pendulum is -2, then two stores have already been
24798 issued in this cycle, so we increase the priority of the first load
24799 in the ready list to increase its likelihood of being chosen first
24800 in the next cycle.
24802 - If the pendulum is -1, then a single store has been issued in this
24803 cycle and we attempt to locate another store in the ready list to
24804 issue with it, preferring a store to an adjacent memory location to
24805 facilitate store pairing in the store queue.
24807 - If the pendulum is 2, then two loads have already been
24808 issued in this cycle, so we increase the priority of the first store
24809 in the ready list to increase its likelihood of being chosen first
24810 in the next cycle.
24812 - If the pendulum < -2 or > 2, then do nothing.
24814 Note: This code covers the most common scenarios. There exist non
24815 load/store instructions which make use of the LSU and which
24816 would need to be accounted for to strictly model the behavior
24817 of the machine. Those instructions are currently unaccounted
24818 for to help minimize compile time overhead of this code. */
24820 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
24822 int pos;
24823 int i;
24824 rtx tmp, load_mem, str_mem;
24826 if (is_store_insn (last_scheduled_insn, &str_mem))
24827 /* Issuing a store, swing the load_store_pendulum to the left */
24828 load_store_pendulum--;
24829 else if (is_load_insn (last_scheduled_insn, &load_mem))
24830 /* Issuing a load, swing the load_store_pendulum to the right */
24831 load_store_pendulum++;
24832 else
24833 return cached_can_issue_more;
24835 /* If the pendulum is balanced, or there is only one instruction on
24836 the ready list, then all is well, so return. */
24837 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
24838 return cached_can_issue_more;
24840 if (load_store_pendulum == 1)
24842 /* A load has been issued in this cycle. Scan the ready list
24843 for another load to issue with it */
24844 pos = *pn_ready-1;
24846 while (pos >= 0)
24848 if (is_load_insn (ready[pos], &load_mem))
24850 /* Found a load. Move it to the head of the ready list,
24851 and adjust its priority so that it is more likely to
24852 stay there */
24853 tmp = ready[pos];
24854 for (i=pos; i<*pn_ready-1; i++)
24855 ready[i] = ready[i + 1];
24856 ready[*pn_ready-1] = tmp;
24858 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
24859 INSN_PRIORITY (tmp)++;
24860 break;
24862 pos--;
24865 else if (load_store_pendulum == -2)
24867 /* Two stores have been issued in this cycle. Increase the
24868 priority of the first load in the ready list to favor it for
24869 issuing in the next cycle. */
24870 pos = *pn_ready-1;
24872 while (pos >= 0)
24874 if (is_load_insn (ready[pos], &load_mem)
24875 && !sel_sched_p ()
24876 && INSN_PRIORITY_KNOWN (ready[pos]))
24878 INSN_PRIORITY (ready[pos])++;
24880 /* Adjust the pendulum to account for the fact that a load
24881 was found and increased in priority. This is to prevent
24882 increasing the priority of multiple loads */
24883 load_store_pendulum--;
24885 break;
24887 pos--;
24890 else if (load_store_pendulum == -1)
24892 /* A store has been issued in this cycle. Scan the ready list for
24893 another store to issue with it, preferring a store to an adjacent
24894 memory location */
24895 int first_store_pos = -1;
24897 pos = *pn_ready-1;
24899 while (pos >= 0)
24901 if (is_store_insn (ready[pos], &str_mem))
24903 rtx str_mem2;
24904 /* Maintain the index of the first store found on the
24905 list */
24906 if (first_store_pos == -1)
24907 first_store_pos = pos;
24909 if (is_store_insn (last_scheduled_insn, &str_mem2)
24910 && adjacent_mem_locations (str_mem, str_mem2))
24912 /* Found an adjacent store. Move it to the head of the
24913 ready list, and adjust its priority so that it is
24914 more likely to stay there */
24915 tmp = ready[pos];
24916 for (i=pos; i<*pn_ready-1; i++)
24917 ready[i] = ready[i + 1];
24918 ready[*pn_ready-1] = tmp;
24920 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
24921 INSN_PRIORITY (tmp)++;
24923 first_store_pos = -1;
24925 break;
24928 pos--;
24931 if (first_store_pos >= 0)
24933 /* An adjacent store wasn't found, but a non-adjacent store was,
24934 so move the non-adjacent store to the front of the ready
24935 list, and adjust its priority so that it is more likely to
24936 stay there. */
24937 tmp = ready[first_store_pos];
24938 for (i=first_store_pos; i<*pn_ready-1; i++)
24939 ready[i] = ready[i + 1];
24940 ready[*pn_ready-1] = tmp;
24941 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
24942 INSN_PRIORITY (tmp)++;
24945 else if (load_store_pendulum == 2)
24947 /* Two loads have been issued in this cycle. Increase the priority
24948 of the first store in the ready list to favor it for issuing in
24949 the next cycle. */
24950 pos = *pn_ready-1;
24952 while (pos >= 0)
24954 if (is_store_insn (ready[pos], &str_mem)
24955 && !sel_sched_p ()
24956 && INSN_PRIORITY_KNOWN (ready[pos]))
24958 INSN_PRIORITY (ready[pos])++;
24960 /* Adjust the pendulum to account for the fact that a store
24961 was found and increased in priority. This is to prevent
24962 increasing the priority of multiple stores */
24963 load_store_pendulum++;
24965 break;
24967 pos--;
24972 return cached_can_issue_more;
24975 /* Return whether the presence of INSN causes a dispatch group termination
24976 of group WHICH_GROUP.
24978 If WHICH_GROUP == current_group, this function will return true if INSN
24979 causes the termination of the current group (i.e, the dispatch group to
24980 which INSN belongs). This means that INSN will be the last insn in the
24981 group it belongs to.
24983 If WHICH_GROUP == previous_group, this function will return true if INSN
24984 causes the termination of the previous group (i.e, the dispatch group that
24985 precedes the group to which INSN belongs). This means that INSN will be
24986 the first insn in the group it belongs to. */
24988 static bool
24989 insn_terminates_group_p (rtx insn, enum group_termination which_group)
24991 bool first, last;
24993 if (! insn)
24994 return false;
24996 first = insn_must_be_first_in_group (insn);
24997 last = insn_must_be_last_in_group (insn);
24999 if (first && last)
25000 return true;
25002 if (which_group == current_group)
25003 return last;
25004 else if (which_group == previous_group)
25005 return first;
25007 return false;
25011 static bool
25012 insn_must_be_first_in_group (rtx insn)
25014 enum attr_type type;
25016 if (!insn
25017 || NOTE_P (insn)
25018 || DEBUG_INSN_P (insn)
25019 || GET_CODE (PATTERN (insn)) == USE
25020 || GET_CODE (PATTERN (insn)) == CLOBBER)
25021 return false;
25023 switch (rs6000_cpu)
25025 case PROCESSOR_POWER5:
25026 if (is_cracked_insn (insn))
25027 return true;
25028 case PROCESSOR_POWER4:
25029 if (is_microcoded_insn (insn))
25030 return true;
25032 if (!rs6000_sched_groups)
25033 return false;
25035 type = get_attr_type (insn);
25037 switch (type)
25039 case TYPE_MFCR:
25040 case TYPE_MFCRF:
25041 case TYPE_MTCR:
25042 case TYPE_DELAYED_CR:
25043 case TYPE_CR_LOGICAL:
25044 case TYPE_MTJMPR:
25045 case TYPE_MFJMPR:
25046 case TYPE_IDIV:
25047 case TYPE_LDIV:
25048 case TYPE_LOAD_L:
25049 case TYPE_STORE_C:
25050 case TYPE_ISYNC:
25051 case TYPE_SYNC:
25052 return true;
25053 default:
25054 break;
25056 break;
25057 case PROCESSOR_POWER6:
25058 type = get_attr_type (insn);
25060 switch (type)
25062 case TYPE_INSERT_DWORD:
25063 case TYPE_EXTS:
25064 case TYPE_CNTLZ:
25065 case TYPE_SHIFT:
25066 case TYPE_VAR_SHIFT_ROTATE:
25067 case TYPE_TRAP:
25068 case TYPE_IMUL:
25069 case TYPE_IMUL2:
25070 case TYPE_IMUL3:
25071 case TYPE_LMUL:
25072 case TYPE_IDIV:
25073 case TYPE_INSERT_WORD:
25074 case TYPE_DELAYED_COMPARE:
25075 case TYPE_IMUL_COMPARE:
25076 case TYPE_LMUL_COMPARE:
25077 case TYPE_FPCOMPARE:
25078 case TYPE_MFCR:
25079 case TYPE_MTCR:
25080 case TYPE_MFJMPR:
25081 case TYPE_MTJMPR:
25082 case TYPE_ISYNC:
25083 case TYPE_SYNC:
25084 case TYPE_LOAD_L:
25085 case TYPE_STORE_C:
25086 case TYPE_LOAD_U:
25087 case TYPE_LOAD_UX:
25088 case TYPE_LOAD_EXT_UX:
25089 case TYPE_STORE_U:
25090 case TYPE_STORE_UX:
25091 case TYPE_FPLOAD_U:
25092 case TYPE_FPLOAD_UX:
25093 case TYPE_FPSTORE_U:
25094 case TYPE_FPSTORE_UX:
25095 return true;
25096 default:
25097 break;
25099 break;
25100 case PROCESSOR_POWER7:
25101 type = get_attr_type (insn);
25103 switch (type)
25105 case TYPE_CR_LOGICAL:
25106 case TYPE_MFCR:
25107 case TYPE_MFCRF:
25108 case TYPE_MTCR:
25109 case TYPE_IDIV:
25110 case TYPE_LDIV:
25111 case TYPE_COMPARE:
25112 case TYPE_DELAYED_COMPARE:
25113 case TYPE_VAR_DELAYED_COMPARE:
25114 case TYPE_ISYNC:
25115 case TYPE_LOAD_L:
25116 case TYPE_STORE_C:
25117 case TYPE_LOAD_U:
25118 case TYPE_LOAD_UX:
25119 case TYPE_LOAD_EXT:
25120 case TYPE_LOAD_EXT_U:
25121 case TYPE_LOAD_EXT_UX:
25122 case TYPE_STORE_U:
25123 case TYPE_STORE_UX:
25124 case TYPE_FPLOAD_U:
25125 case TYPE_FPLOAD_UX:
25126 case TYPE_FPSTORE_U:
25127 case TYPE_FPSTORE_UX:
25128 case TYPE_MFJMPR:
25129 case TYPE_MTJMPR:
25130 return true;
25131 default:
25132 break;
25134 break;
25135 case PROCESSOR_POWER8:
25136 type = get_attr_type (insn);
25138 switch (type)
25140 case TYPE_CR_LOGICAL:
25141 case TYPE_DELAYED_CR:
25142 case TYPE_MFCR:
25143 case TYPE_MFCRF:
25144 case TYPE_MTCR:
25145 case TYPE_COMPARE:
25146 case TYPE_DELAYED_COMPARE:
25147 case TYPE_VAR_DELAYED_COMPARE:
25148 case TYPE_IMUL_COMPARE:
25149 case TYPE_LMUL_COMPARE:
25150 case TYPE_SYNC:
25151 case TYPE_ISYNC:
25152 case TYPE_LOAD_L:
25153 case TYPE_STORE_C:
25154 case TYPE_LOAD_U:
25155 case TYPE_LOAD_UX:
25156 case TYPE_LOAD_EXT:
25157 case TYPE_LOAD_EXT_U:
25158 case TYPE_LOAD_EXT_UX:
25159 case TYPE_STORE_UX:
25160 case TYPE_VECSTORE:
25161 case TYPE_MFJMPR:
25162 case TYPE_MTJMPR:
25163 return true;
25164 default:
25165 break;
25167 break;
25168 default:
25169 break;
25172 return false;
25175 static bool
25176 insn_must_be_last_in_group (rtx insn)
25178 enum attr_type type;
25180 if (!insn
25181 || NOTE_P (insn)
25182 || DEBUG_INSN_P (insn)
25183 || GET_CODE (PATTERN (insn)) == USE
25184 || GET_CODE (PATTERN (insn)) == CLOBBER)
25185 return false;
25187 switch (rs6000_cpu) {
25188 case PROCESSOR_POWER4:
25189 case PROCESSOR_POWER5:
25190 if (is_microcoded_insn (insn))
25191 return true;
25193 if (is_branch_slot_insn (insn))
25194 return true;
25196 break;
25197 case PROCESSOR_POWER6:
25198 type = get_attr_type (insn);
25200 switch (type)
25202 case TYPE_EXTS:
25203 case TYPE_CNTLZ:
25204 case TYPE_SHIFT:
25205 case TYPE_VAR_SHIFT_ROTATE:
25206 case TYPE_TRAP:
25207 case TYPE_IMUL:
25208 case TYPE_IMUL2:
25209 case TYPE_IMUL3:
25210 case TYPE_LMUL:
25211 case TYPE_IDIV:
25212 case TYPE_DELAYED_COMPARE:
25213 case TYPE_IMUL_COMPARE:
25214 case TYPE_LMUL_COMPARE:
25215 case TYPE_FPCOMPARE:
25216 case TYPE_MFCR:
25217 case TYPE_MTCR:
25218 case TYPE_MFJMPR:
25219 case TYPE_MTJMPR:
25220 case TYPE_ISYNC:
25221 case TYPE_SYNC:
25222 case TYPE_LOAD_L:
25223 case TYPE_STORE_C:
25224 return true;
25225 default:
25226 break;
25228 break;
25229 case PROCESSOR_POWER7:
25230 type = get_attr_type (insn);
25232 switch (type)
25234 case TYPE_ISYNC:
25235 case TYPE_SYNC:
25236 case TYPE_LOAD_L:
25237 case TYPE_STORE_C:
25238 case TYPE_LOAD_EXT_U:
25239 case TYPE_LOAD_EXT_UX:
25240 case TYPE_STORE_UX:
25241 return true;
25242 default:
25243 break;
25245 break;
25246 case PROCESSOR_POWER8:
25247 type = get_attr_type (insn);
25249 switch (type)
25251 case TYPE_MFCR:
25252 case TYPE_MTCR:
25253 case TYPE_ISYNC:
25254 case TYPE_SYNC:
25255 case TYPE_LOAD_L:
25256 case TYPE_STORE_C:
25257 case TYPE_LOAD_EXT_U:
25258 case TYPE_LOAD_EXT_UX:
25259 case TYPE_STORE_UX:
25260 return true;
25261 default:
25262 break;
25264 break;
25265 default:
25266 break;
25269 return false;
25272 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
25273 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
25275 static bool
25276 is_costly_group (rtx *group_insns, rtx next_insn)
25278 int i;
25279 int issue_rate = rs6000_issue_rate ();
25281 for (i = 0; i < issue_rate; i++)
25283 sd_iterator_def sd_it;
25284 dep_t dep;
25285 rtx insn = group_insns[i];
25287 if (!insn)
25288 continue;
25290 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
25292 rtx next = DEP_CON (dep);
25294 if (next == next_insn
25295 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
25296 return true;
25300 return false;
25303 /* Utility function used by redefine_groups.
25304 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
25305 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
25306 to keep it "far" (in a separate group) from GROUP_INSNS, following
25307 one of the following schemes, depending on the value of the flag
25308 -minsert-sched-nops = X:
25309 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
25310 in order to force NEXT_INSN into a separate group.
25311 (2) X < sched_finish_regroup_exact: insert exactly X nops.
25312 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
25313 insertion (has a group just ended, how many vacant issue slots remain in the
25314 last group, and how many dispatch groups were encountered so far). */
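/* For example, with -minsert-sched-nops=2 (scheme 2 above), exactly two
   nops are emitted before NEXT_INSN regardless of how many issue slots
   are still vacant. */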
25316 static int
25317 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
25318 rtx next_insn, bool *group_end, int can_issue_more,
25319 int *group_count)
25321 rtx nop;
25322 bool force;
25323 int issue_rate = rs6000_issue_rate ();
25324 bool end = *group_end;
25325 int i;
25327 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
25328 return can_issue_more;
25330 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
25331 return can_issue_more;
25333 force = is_costly_group (group_insns, next_insn);
25334 if (!force)
25335 return can_issue_more;
25337 if (sched_verbose > 6)
25338 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
25339 *group_count ,can_issue_more);
25341 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
25343 if (*group_end)
25344 can_issue_more = 0;
25346 /* Since only a branch can be issued in the last issue_slot, it is
25347 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
25348 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
25349 in this case the last nop will start a new group and the branch
25350 will be forced to the new group. */
25351 if (can_issue_more && !is_branch_slot_insn (next_insn))
25352 can_issue_more--;
25354 /* Do we have a special group ending nop? */
25355 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
25356 || rs6000_cpu_attr == CPU_POWER8)
25358 nop = gen_group_ending_nop ();
25359 emit_insn_before (nop, next_insn);
25360 can_issue_more = 0;
25362 else
25363 while (can_issue_more > 0)
25365 nop = gen_nop ();
25366 emit_insn_before (nop, next_insn);
25367 can_issue_more--;
25370 *group_end = true;
25371 return 0;
25374 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
25376 int n_nops = rs6000_sched_insert_nops;
25378 /* Nops can't be issued from the branch slot, so the effective
25379 issue_rate for nops is 'issue_rate - 1'. */
25380 if (can_issue_more == 0)
25381 can_issue_more = issue_rate;
25382 can_issue_more--;
25383 if (can_issue_more == 0)
25385 can_issue_more = issue_rate - 1;
25386 (*group_count)++;
25387 end = true;
25388 for (i = 0; i < issue_rate; i++)
25390 group_insns[i] = 0;
25394 while (n_nops > 0)
25396 nop = gen_nop ();
25397 emit_insn_before (nop, next_insn);
25398 if (can_issue_more == issue_rate - 1) /* new group begins */
25399 end = false;
25400 can_issue_more--;
25401 if (can_issue_more == 0)
25403 can_issue_more = issue_rate - 1;
25404 (*group_count)++;
25405 end = true;
25406 for (i = 0; i < issue_rate; i++)
25408 group_insns[i] = 0;
25411 n_nops--;
25414 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
25415 can_issue_more++;
25417 /* Is next_insn going to start a new group? */
25418 *group_end
25419 = (end
25420 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
25421 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
25422 || (can_issue_more < issue_rate &&
25423 insn_terminates_group_p (next_insn, previous_group)));
25424 if (*group_end && end)
25425 (*group_count)--;
25427 if (sched_verbose > 6)
25428 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
25429 *group_count, can_issue_more);
25430 return can_issue_more;
25433 return can_issue_more;
25436 /* This function tries to synch the dispatch groups that the compiler "sees"
25437 with the dispatch groups that the processor dispatcher is expected to
25438 form in practice. It tries to achieve this synchronization by forcing the
25439 estimated processor grouping on the compiler (as opposed to the function
25440 'pad_groups' which tries to force the scheduler's grouping on the processor).
25442 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
25443 examines the (estimated) dispatch groups that will be formed by the processor
25444 dispatcher. It marks these group boundaries to reflect the estimated
25445 processor grouping, overriding the grouping that the scheduler had marked.
25446 Depending on the value of the flag '-minsert-sched-nops' this function can
25447 force certain insns into separate groups or force a certain distance between
25448 them by inserting nops, for example, if there exists a "costly dependence"
25449 between the insns.
25451 The function estimates the group boundaries that the processor will form as
25452 follows: It keeps track of how many vacant issue slots are available after
25453 each insn. A subsequent insn will start a new group if one of the following
25454 4 cases applies:
25455 - no more vacant issue slots remain in the current dispatch group.
25456 - only the last issue slot, which is the branch slot, is vacant, but the next
25457 insn is not a branch.
25458 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
25459 which means that a cracked insn (which occupies two issue slots) can't be
25460 issued in this group.
25461 - less than 'issue_rate' slots are vacant, and the next insn always needs to
25462 start a new group. */
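/* Group boundaries are recorded by putting the first insn of each
   estimated group in TImode; a stale TImode mark on an insn that no
   longer starts a group is cleared back to VOIDmode (see the PUT_MODE
   calls below). */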
25464 static int
25465 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
25467 rtx insn, next_insn;
25468 int issue_rate;
25469 int can_issue_more;
25470 int slot, i;
25471 bool group_end;
25472 int group_count = 0;
25473 rtx *group_insns;
25475 /* Initialize. */
25476 issue_rate = rs6000_issue_rate ();
25477 group_insns = XALLOCAVEC (rtx, issue_rate);
25478 for (i = 0; i < issue_rate; i++)
25480 group_insns[i] = 0;
25482 can_issue_more = issue_rate;
25483 slot = 0;
25484 insn = get_next_active_insn (prev_head_insn, tail);
25485 group_end = false;
25487 while (insn != NULL_RTX)
25489 slot = (issue_rate - can_issue_more);
25490 group_insns[slot] = insn;
25491 can_issue_more =
25492 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
25493 if (insn_terminates_group_p (insn, current_group))
25494 can_issue_more = 0;
25496 next_insn = get_next_active_insn (insn, tail);
25497 if (next_insn == NULL_RTX)
25498 return group_count + 1;
25500 /* Is next_insn going to start a new group? */
25501 group_end
25502 = (can_issue_more == 0
25503 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
25504 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
25505 || (can_issue_more < issue_rate &&
25506 insn_terminates_group_p (next_insn, previous_group)));
25508 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
25509 next_insn, &group_end, can_issue_more,
25510 &group_count);
25512 if (group_end)
25514 group_count++;
25515 can_issue_more = 0;
25516 for (i = 0; i < issue_rate; i++)
25518 group_insns[i] = 0;
25522 if (GET_MODE (next_insn) == TImode && can_issue_more)
25523 PUT_MODE (next_insn, VOIDmode);
25524 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
25525 PUT_MODE (next_insn, TImode);
25527 insn = next_insn;
25528 if (can_issue_more == 0)
25529 can_issue_more = issue_rate;
25530 } /* while */
25532 return group_count;
25535 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
25536 dispatch group boundaries that the scheduler had marked. Pad with nops
25537 any dispatch groups which have vacant issue slots, in order to force the
25538 scheduler's grouping on the processor dispatcher. The function
25539 returns the number of dispatch groups found. */
25541 static int
25542 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
25544 rtx insn, next_insn;
25545 rtx nop;
25546 int issue_rate;
25547 int can_issue_more;
25548 int group_end;
25549 int group_count = 0;
25551 /* Initialize issue_rate. */
25552 issue_rate = rs6000_issue_rate ();
25553 can_issue_more = issue_rate;
25555 insn = get_next_active_insn (prev_head_insn, tail);
25556 next_insn = get_next_active_insn (insn, tail);
25558 while (insn != NULL_RTX)
25560 can_issue_more =
25561 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
25563 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
25565 if (next_insn == NULL_RTX)
25566 break;
25568 if (group_end)
25570 /* If the scheduler had marked group termination at this location
25571 (between insn and next_insn), and neither insn nor next_insn will
25572 force group termination, pad the group with nops to force group
25573 termination. */
25574 if (can_issue_more
25575 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
25576 && !insn_terminates_group_p (insn, current_group)
25577 && !insn_terminates_group_p (next_insn, previous_group))
25579 if (!is_branch_slot_insn (next_insn))
25580 can_issue_more--;
25582 while (can_issue_more)
25584 nop = gen_nop ();
25585 emit_insn_before (nop, next_insn);
25586 can_issue_more--;
25590 can_issue_more = issue_rate;
25591 group_count++;
25594 insn = next_insn;
25595 next_insn = get_next_active_insn (insn, tail);
25598 return group_count;
25601 /* We're beginning a new block. Initialize data structures as necessary. */
25603 static void
25604 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
25605 int sched_verbose ATTRIBUTE_UNUSED,
25606 int max_ready ATTRIBUTE_UNUSED)
25608 last_scheduled_insn = NULL_RTX;
25609 load_store_pendulum = 0;
25612 /* The following function is called at the end of scheduling BB.
25613 After reload, it inserts nops to enforce insn group bundling. */
25615 static void
25616 rs6000_sched_finish (FILE *dump, int sched_verbose)
25618 int n_groups;
25620 if (sched_verbose)
25621 fprintf (dump, "=== Finishing schedule.\n");
25623 if (reload_completed && rs6000_sched_groups)
25626 /* Do not run the sched_finish hook when selective scheduling is enabled. */
25626 if (sel_sched_p ())
25627 return;
25629 if (rs6000_sched_insert_nops == sched_finish_none)
25630 return;
25632 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
25633 n_groups = pad_groups (dump, sched_verbose,
25634 current_sched_info->prev_head,
25635 current_sched_info->next_tail);
25636 else
25637 n_groups = redefine_groups (dump, sched_verbose,
25638 current_sched_info->prev_head,
25639 current_sched_info->next_tail);
25641 if (sched_verbose >= 6)
25643 fprintf (dump, "ngroups = %d\n", n_groups);
25644 print_rtl (dump, current_sched_info->prev_head);
25645 fprintf (dump, "Done finish_sched\n");
25650 struct _rs6000_sched_context
25652 short cached_can_issue_more;
25653 rtx last_scheduled_insn;
25654 int load_store_pendulum;
25657 typedef struct _rs6000_sched_context rs6000_sched_context_def;
25658 typedef rs6000_sched_context_def *rs6000_sched_context_t;
25660 /* Allocate storage for a new scheduling context. */
25661 static void *
25662 rs6000_alloc_sched_context (void)
25664 return xmalloc (sizeof (rs6000_sched_context_def));
25667 /* If CLEAN_P is true, initialize _SC with clean data;
25668 otherwise initialize it from the global context. */
25669 static void
25670 rs6000_init_sched_context (void *_sc, bool clean_p)
25672 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
25674 if (clean_p)
25676 sc->cached_can_issue_more = 0;
25677 sc->last_scheduled_insn = NULL_RTX;
25678 sc->load_store_pendulum = 0;
25680 else
25682 sc->cached_can_issue_more = cached_can_issue_more;
25683 sc->last_scheduled_insn = last_scheduled_insn;
25684 sc->load_store_pendulum = load_store_pendulum;
25688 /* Sets the global scheduling context to the one pointed to by _SC. */
25689 static void
25690 rs6000_set_sched_context (void *_sc)
25692 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
25694 gcc_assert (sc != NULL);
25696 cached_can_issue_more = sc->cached_can_issue_more;
25697 last_scheduled_insn = sc->last_scheduled_insn;
25698 load_store_pendulum = sc->load_store_pendulum;
25701 /* Free _SC. */
25702 static void
25703 rs6000_free_sched_context (void *_sc)
25705 gcc_assert (_sc != NULL);
25707 free (_sc);
25711 /* Length in units of the trampoline for entering a nested function. */
25713 int
25714 rs6000_trampoline_size (void)
25716 int ret = 0;
25718 switch (DEFAULT_ABI)
25720 default:
25721 gcc_unreachable ();
25723 case ABI_AIX:
25724 ret = (TARGET_32BIT) ? 12 : 24;
25725 break;
25727 case ABI_DARWIN:
25728 case ABI_V4:
25729 ret = (TARGET_32BIT) ? 40 : 48;
25730 break;
25733 return ret;
25736 /* Emit RTL insns to initialize the variable parts of a trampoline.
25737 FNADDR is an RTX for the address of the function's pure code.
25738 CXT is an RTX for the static chain value for the function. */
25740 static void
25741 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
25743 int regsize = (TARGET_32BIT) ? 4 : 8;
25744 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
25745 rtx ctx_reg = force_reg (Pmode, cxt);
25746 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
25748 switch (DEFAULT_ABI)
25750 default:
25751 gcc_unreachable ();
25753 /* Under AIX, just build the 3-word function descriptor. */
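/* The descriptor layout built below is: word 0 = code address,
   word 1 = TOC pointer, word 2 = static chain, each word being
   REGSIZE bytes wide. */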
25754 case ABI_AIX:
25756 rtx fnmem, fn_reg, toc_reg;
25758 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
25759 error ("You cannot take the address of a nested function if you use "
25760 "the -mno-pointers-to-nested-functions option.");
25762 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
25763 fn_reg = gen_reg_rtx (Pmode);
25764 toc_reg = gen_reg_rtx (Pmode);
25766 /* Macro to shorten the code expansions below. */
25767 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
25769 m_tramp = replace_equiv_address (m_tramp, addr);
25771 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
25772 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
25773 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
25774 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
25775 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
25777 # undef MEM_PLUS
25779 break;
25781 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
25782 case ABI_DARWIN:
25783 case ABI_V4:
25784 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
25785 LCT_NORMAL, VOIDmode, 4,
25786 addr, Pmode,
25787 GEN_INT (rs6000_trampoline_size ()), SImode,
25788 fnaddr, Pmode,
25789 ctx_reg, Pmode);
25790 break;
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

       __attribute__((altivec(vector__)))
       __attribute__((altivec(pixel__)))   (always followed by 'unsigned short')
       __attribute__((altivec(bool__)))    (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
        {
          if (TARGET_64BIT)
            error ("use of %<long%> in AltiVec types is invalid for "
                   "64-bit code without -mvsx");
          else if (rs6000_warn_altivec_long)
            warning (0, "use of %<long%> in AltiVec types is deprecated; "
                     "use %<int%>");
        }
      else if (type == long_long_unsigned_type_node
               || type == long_long_integer_type_node)
        error ("use of %<long long%> in AltiVec types is invalid without "
               "-mvsx");
      else if (type == double_type_node)
        error ("use of %<double%> in AltiVec types is invalid without -mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case DImode:
          result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
          break;
        case SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case SFmode: result = V4SF_type_node; break;
        case DFmode: result = V2DF_type_node; break;
          /* If the user says 'vector int bool', we may be handed the 'bool'
             attribute _before_ the 'vector' attribute, and so select the
             proper type in the 'b' case below.  */
        case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
        case V2DImode: case V2DFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case DImode: case V2DImode: result = bool_V2DI_type_node; break;
        case SImode: case V4SImode: result = bool_V4SI_type_node; break;
        case HImode: case V8HImode: result = bool_V8HI_type_node; break;
        case QImode: case V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
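
/* Illustrative usage sketch (not part of the original file): the
   AltiVec keywords are lowered onto this attribute, e.g.

       __attribute__ ((altivec (vector__))) int v4si_var;
       __attribute__ ((altivec (bool__))) unsigned int bool_v4si_var;

   which reach the 'v' and 'b' cases above with SImode and yield
   V4SI_type_node and bool_V4SI_type_node respectively.  A declaration
   such as 'vector bool char' arrives here once per keyword.  */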
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
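
/* For illustration (not part of the original file): with these rules,
   a C++ parameter of the AltiVec scalar type __pixel incorporates
   u7__pixel into the mangled name, keeping overloads on the AltiVec
   scalar types distinct from overloads on the plain integer types they
   are built from.  */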
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
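
/* Illustrative usage (not part of the original file):

       extern void far_away (void) __attribute__ ((longcall));

   forces the long-call sequence for that declaration, matching what
   -mlongcall does for every call; __attribute__ ((shortcall)) opts an
   individual declaration back out.  */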
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip any leading dots.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }
  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
        {
          toc_initialized = 1;
          fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
          (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
          fprintf (asm_out_file, "\t.tc ");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, "\n");

          fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
        }
      else
        fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}

/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
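
/* For illustration (not part of the original file): the template also
   matches its sub-sections, so compare_section_name (".sdata.x", ".sdata")
   is true while compare_section_name (".sdata2", ".sdata") is false,
   because '2' is neither NUL nor '.'.  */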
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbol refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;


static vec<branch_island, va_gc> *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}

/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */
static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!vec_safe_is_empty (branch_islands))
    {
      branch_island *bi = &branch_islands->last ();
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
        strcpy (name_buf, name+1);
      else
        {
          name_buf[0] = '_';
          strcpy (name_buf+1, name);
        }
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
              strcat (tmp_buf, name);
              strcat (tmp_buf, "\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }
          else
            {
              strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }

          strcat (tmp_buf, "\taddis r11,r11,ha16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtlr r0\n");

          strcat (tmp_buf, "\taddi r12,r11,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
        }
      else
        {
          strcat (tmp_buf, ":\nlis r12,hi16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
        }
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      branch_islands->pop ();
    }
}
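
/* Illustrative sketch (not part of the original file): for a non-PIC
   far call to _foo through island L42, the loop above emits

       L42:
            lis r12,hi16(_foo)
            ori r12,r12,lo16(_foo)
            mtctr r12
            bctr

   i.e. an absolute jump through the count register; the PIC variant
   materializes the address relative to a local label instead.  */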
/* NO_PREVIOUS_DEF checks whether the function name is already in the
   branch-island list.  */

static int
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return 0;
  return 1;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
             int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
        {
          rtx label_rtx = gen_label_rtx ();
          char *label_buf, temp_buf[256];
          ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
                                       CODE_LABEL_NUMBER (label_rtx));
          label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
          labelname = get_identifier (label_buf);
          add_compiler_branch_island (labelname, funname, insn_line (insn));
        }
      else
        labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
         instruction will reach 'foo', otherwise link as 'bl L42'".
         "L42" should be a 'branch island', that will do a far jump to
         'foo'.  Branch islands are generated in
         macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
               dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
        {
          char name[32];
          get_ppc476_thunk_name (name);
          fprintf (file, "\tbl %s\n", name);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      else
        {
          fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
           (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
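
/* Illustrative note (not part of the original file): the unsigned add
   above is a branch-free check for a signed 16-bit displacement;
   values -0x8000 through 0x7fff map to 0x0000 through 0xffff and pass,
   while anything else wraps outside [0, 0x10000).  */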
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
                                        rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
         it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
                                                     Pmode, reg_temp);
      offset =
        rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
                                                Pmode, reg);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (Pmode, base, INTVAL (offset));
          else if (! reload_in_progress && ! reload_completed)
            offset = force_reg (Pmode, offset);
          else
            {
              rtx mem = force_const_mem (Pmode, orig);
              return machopic_legitimize_pic_address (mem, Pmode, reg);
            }
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    HOST_WIDE_INT if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
         && strcmp (mapping[i].arg, cpu_id) != 0
         && (mapping[i].if_set & rs6000_isa_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}

static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; destructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
        {
          fputs ("\t.size\t", file);
          assemble_name (file, name);
          fputs (",24\n\t.type\t.", file);
          assemble_name (file, name);
          fputs (",@function\n", file);
          if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
            {
              fputs ("\t.globl\t.", file);
              assemble_name (file, name);
              putc ('\n', file);
            }
        }
      else
        ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
        desc_name++;

      if (TREE_PUBLIC (decl))
        fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
        fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_float)
        fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
                 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
                  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
                  : 2));
      if (rs6000_passes_vector)
        fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
                 (TARGET_ALTIVEC_ABI ? 2
                  : TARGET_SPE_ABI ? 3
                  : 1));
      if (rs6000_returns_struct)
        fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
                 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
#endif
}
#endif
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
           SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}

/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
         rs6000_xcoff_file_start, so this is guaranteed to
         always be defined once and only once in each file.  */
      if (!toc_initialized)
        {
          fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
          fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
          toc_initialized = 1;
        }
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
               (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
                           rs6000_xcoff_output_readwrite_section_asm_op,
                           &xcoff_private_data_section_name);

  tls_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_tls_data_section_name);

  tls_private_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
                                tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
           (flags & SECTION_CODE) ? "." : "",
           name, suffix[smclass], flags & SECTION_ENTSIZE);
}
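
/* For illustration (not part of the original file): with the mapping
   above, a writable named section "foo" is emitted roughly as

       .csect foo[RW],3

   where the trailing operand is the log2 alignment stored in the
   SECTION_ENTSIZE bits, and code sections gain a leading '.' plus the
   PR storage-mapping class.  */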
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
                             unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
        return read_only_data_section;
      else
        return read_only_private_data_section;
    }
  else
    {
#if HAVE_AS_TLS
      if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
        {
          if (TREE_PUBLIC (decl))
            return tls_data_section;
          else if (bss_initializer_p (decl))
            {
              /* Convert to COMMON to emit in BSS.  */
              DECL_COMMON (decl) = 1;
              return tls_comm_section;
            }
          else
            return tls_private_data_section;
        }
      else
#endif
        if (TREE_PUBLIC (decl))
          return data_section;
        else
          return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
          && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}

/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
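
/* For illustration (not part of the original file): the bracketed
   XCOFF storage-mapping suffix handled here is four characters long,
   so "foo[DS]" strips to "foo"; a leading '*' is skipped first.  */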
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
                 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
                 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
                           main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
                           main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
                           main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
                           main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
                           main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
         ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
         asm_out_file);
}
#ifdef HAVE_AS_TLS
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (!MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
            || outer_code == PLUS
            || outer_code == MINUS)
           && (satisfies_constraint_I (x)
               || satisfies_constraint_L (x)))
          || (outer_code == AND
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))
                  || mask_operand (x, mode)
                  || (mode == DImode
                      && mask64_operand (x, DImode))))
          || ((outer_code == IOR || outer_code == XOR)
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || outer_code == ASHIFT
          || outer_code == ASHIFTRT
          || outer_code == LSHIFTRT
          || outer_code == ROTATE
          || outer_code == ROTATERT
          || outer_code == ZERO_EXTRACT
          || (outer_code == MULT
              && satisfies_constraint_I (x))
          || ((outer_code == DIV || outer_code == UDIV
               || outer_code == MOD || outer_code == UMOD)
              && exact_log2 (INTVAL (x)) >= 0)
          || (outer_code == COMPARE
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)))
          || ((outer_code == EQ || outer_code == NE)
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || (outer_code == GTU
              && satisfies_constraint_I (x))
          || (outer_code == LTU
              && satisfies_constraint_P (x)))
        {
          *total = 0;
          return true;
        }
      else if ((outer_code == PLUS
                && reg_or_add_cint_operand (x, VOIDmode))
               || (outer_code == MINUS
                   && reg_or_sub_cint_operand (x, VOIDmode))
               || ((outer_code == SET
                    || outer_code == IOR
                    || outer_code == XOR)
                   && (INTVAL (x)
                       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
         than generating the address, e.g., (plus (reg) (const)).
         L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->fp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && satisfies_constraint_I (XEXP (x, 1)))
        {
          if (INTVAL (XEXP (x, 1)) >= -256
              && INTVAL (XEXP (x, 1)) <= 255)
            *total = rs6000_cost->mulsi_const9;
          else
            *total = rs6000_cost->mulsi_const;
        }
      else if (mode == SFmode)
        *total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->dmul;
      else if (mode == DImode)
        *total = rs6000_cost->muldi;
      else
        *total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
        *total = rs6000_cost->fp;
      else
        *total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
        {
          *total = mode == DFmode ? rs6000_cost->ddiv
                                  : rs6000_cost->sdiv;
          return false;
        }
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
        {
          if (code == DIV || code == MOD)
            /* Shift, addze */
            *total = COSTS_N_INSNS (2);
          else
            /* Shift */
            *total = COSTS_N_INSNS (1);
        }
      else
        {
          if (GET_MODE (XEXP (x, 1)) == DImode)
            *total = rs6000_cost->divdi;
          else
            *total = rs6000_cost->divsi;
        }
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
        *total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
        {
          *total = 0;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
          && GET_CODE (XEXP (x, 0)) == MULT)
        {
          if (mode == DImode)
            *total = rs6000_cost->muldi;
          else
            *total = rs6000_cost->mulsi;
          return true;
        }
      else if (outer_code == AND)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
        *total = 0;
      else
        *total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
        {
        case UNSPEC_FRSP:
          *total = rs6000_cost->fp;
          return true;

        default:
          break;
        }
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else if (FLOAT_MODE_P (mode)
               && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
        {
          *total = rs6000_cost->fp;
          return false;
        }
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
         NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
          && (outer_code == NEG || outer_code == PLUS))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      if (outer_code == SET)
        {
          if (XEXP (x, 1) == const0_rtx)
            {
              if (TARGET_ISEL && !TARGET_MFCRF)
                *total = COSTS_N_INSNS (8);
              else
                *total = COSTS_N_INSNS (2);
              return true;
            }
          else if (mode == Pmode)
            {
              *total = COSTS_N_INSNS (3);
              return false;
            }
        }
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
        {
          if (TARGET_ISEL && !TARGET_MFCRF)
            *total = COSTS_N_INSNS (8);
          else
            *total = COSTS_N_INSNS (2);
          return true;
        }
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
        {
          *total = 0;
          return true;
        }
      break;

    default:
      break;
    }

  return false;
}
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
                        bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
           "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
           "opno = %d, total = %d, speed = %s, x:\n",
           ret ? "complete" : "scan inner",
           GET_RTX_NAME (code),
           GET_RTX_NAME (outer_code),
           opno,
           *total,
           speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}

/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, enum machine_mode mode,
                           addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
           ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
                           reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
        rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
        ret = (rs6000_memory_move_cost (mode, rclass, false)
               + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
         shift.  */
      else if (rclass == CR_REGS)
        ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
         expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
                || rs6000_cpu == PROCESSOR_POWER7
                || rs6000_cpu == PROCESSOR_POWER8)
               && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
        ret = 6 * hard_regno_nregs[0][mode];

      else
        /* A move will cost one instruction per GPR moved.  */
        ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
           && reg_classes_intersect_p (to, VSX_REGS)
           && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
           + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[from],
                 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
                         bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
            || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Return the decl of a target-specific builtin that implements the
   reciprocal of the function FN, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
                           bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
        return NULL_TREE;
      }
  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate an FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported, FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}
27752 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
27753 add a reg_note saying that this was a division. Support both scalar and
27754 vector divide. Assumes no trapping math and finite arguments. */
27756 void
27757 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
27759 enum machine_mode mode = GET_MODE (dst);
27760 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
27761 int i;
27763 /* Low precision estimates guarantee 5 bits of accuracy. High
27764 precision estimates guarantee 14 bits of accuracy. SFmode
27765 requires 23 bits of accuracy. DFmode requires 52 bits of
27766 accuracy. Each pass at least doubles the accuracy, leading
27767 to the following. */
27768 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
27769 if (mode == DFmode || mode == V2DFmode)
27770 passes++;
27772 enum insn_code code = optab_handler (smul_optab, mode);
27773 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
27775 gcc_assert (code != CODE_FOR_nothing);
27777 one = rs6000_load_constant_and_splat (mode, dconst1);
27779 /* x0 = 1./d estimate */
27780 x0 = gen_reg_rtx (mode);
27781 emit_insn (gen_rtx_SET (VOIDmode, x0,
27782 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
27783 UNSPEC_FRES)));
27785 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
27786 if (passes > 1) {
27788 /* e0 = 1. - d * x0 */
27789 e0 = gen_reg_rtx (mode);
27790 rs6000_emit_nmsub (e0, d, x0, one);
27792 /* x1 = x0 + e0 * x0 */
27793 x1 = gen_reg_rtx (mode);
27794 rs6000_emit_madd (x1, e0, x0, x0);
27796 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
27797 ++i, xprev = xnext, eprev = enext) {
27799 /* enext = eprev * eprev */
27800 enext = gen_reg_rtx (mode);
27801 emit_insn (gen_mul (enext, eprev, eprev));
27803 /* xnext = xprev + enext * xprev */
27804 xnext = gen_reg_rtx (mode);
27805 rs6000_emit_madd (xnext, enext, xprev, xprev);
27808 } else
27809 xprev = x0;
27811 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
27813 /* u = n * xprev */
27814 u = gen_reg_rtx (mode);
27815 emit_insn (gen_mul (u, n, xprev));
27817 /* v = n - (d * u) */
27818 v = gen_reg_rtx (mode);
27819 rs6000_emit_nmsub (v, d, u, n);
27821 /* dst = (v * xprev) + u */
27822 rs6000_emit_madd (dst, v, xprev, u);
27824 if (note_p)
27825 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
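/* A worked sketch of the convergence argument above.  Write
   x0 = (1 - e)/d for the initial relative error e, |e| < 1; then from
   the code:

       e0 = 1 - d*x0          =  e
       x1 = x0*(1 + e0)       =  (1 - e*e)/d
       e_(k+1) = e_k * e_k,   x_(k+1) = x_k * (1 + e_(k+1))

   so the error squares on every pass (5 -> 10 -> 20 -> 40 bits, or
   14 -> 28 -> 56 bits), matching the pass counts chosen above.  A
   hypothetical scalar model of the final step, illustration only:

       double last_step (double n, double d, double x)
       { return n*x * (2.0 - d*x); }  */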
27828 /* Newton-Raphson approximation of single/double-precision floating point
27829 rsqrt. Assumes no trapping math and finite arguments. */
27831 void
27832 rs6000_emit_swrsqrt (rtx dst, rtx src)
27834 enum machine_mode mode = GET_MODE (src);
27835 rtx x0 = gen_reg_rtx (mode);
27836 rtx y = gen_reg_rtx (mode);
27838 /* Low precision estimates guarantee 5 bits of accuracy. High
27839 precision estimates guarantee 14 bits of accuracy. SFmode
27840 requires 23 bits of accuracy. DFmode requires 52 bits of
27841 accuracy. Each pass at least doubles the accuracy, leading
27842 to the following. */
27843 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
27844 if (mode == DFmode || mode == V2DFmode)
27845 passes++;
27847 REAL_VALUE_TYPE dconst3_2;
27848 int i;
27849 rtx halfthree;
27850 enum insn_code code = optab_handler (smul_optab, mode);
27851 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
27853 gcc_assert (code != CODE_FOR_nothing);
27855 /* Load up the constant 1.5 either as a scalar, or as a vector. */
27856 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
27857 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
27859 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
27861 /* x0 = rsqrt estimate */
27862 emit_insn (gen_rtx_SET (VOIDmode, x0,
27863 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
27864 UNSPEC_RSQRT)));
27866 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
27867 rs6000_emit_msub (y, src, halfthree, src);
27869 for (i = 0; i < passes; i++)
27871 rtx x1 = gen_reg_rtx (mode);
27872 rtx u = gen_reg_rtx (mode);
27873 rtx v = gen_reg_rtx (mode);
27875 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
27876 emit_insn (gen_mul (u, x0, x0));
27877 rs6000_emit_nmsub (v, y, u, halfthree);
27878 emit_insn (gen_mul (x1, x0, v));
27879 x0 = x1;
27882 emit_move_insn (dst, x0);
27883 return;
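/* The loop above is the standard Newton-Raphson step for 1/sqrt(s):
   with y = s/2 precomputed, each pass computes

       x1 = x0 * (1.5 - y * x0 * x0)

   which is Newton's method applied to f(x) = 1/(x*x) - s.  A
   hypothetical scalar model of one pass, illustration only:

       double rsqrt_step (double x, double half_s)
       { return x * (1.5 - half_s * x * x); }  */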
27886 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
27887 (Power7) targets. DST is the target, and SRC is the argument operand. */
27889 void
27890 rs6000_emit_popcount (rtx dst, rtx src)
27892 enum machine_mode mode = GET_MODE (dst);
27893 rtx tmp1, tmp2;
27895 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
27896 if (TARGET_POPCNTD)
27898 if (mode == SImode)
27899 emit_insn (gen_popcntdsi2 (dst, src));
27900 else
27901 emit_insn (gen_popcntddi2 (dst, src));
27902 return;
27905 tmp1 = gen_reg_rtx (mode);
27907 if (mode == SImode)
27909 emit_insn (gen_popcntbsi2 (tmp1, src));
27910 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
27911 NULL_RTX, 0);
27912 tmp2 = force_reg (SImode, tmp2);
27913 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
27915 else
27917 emit_insn (gen_popcntbdi2 (tmp1, src));
27918 tmp2 = expand_mult (DImode, tmp1,
27919 GEN_INT ((HOST_WIDE_INT)
27920 0x01010101 << 32 | 0x01010101),
27921 NULL_RTX, 0);
27922 tmp2 = force_reg (DImode, tmp2);
27923 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
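/* A sketch of the multiply trick used when only popcntb is available:
   popcntb leaves each byte holding that byte's population count, and
   multiplying by 0x01010101 accumulates every byte count into the most
   significant byte, which the final shift extracts.  A hypothetical
   host-side model of the SImode path, illustration only:

       #include <stdint.h>

       uint32_t popcount32_from_bytes (uint32_t per_byte_counts)
       {
         return (per_byte_counts * 0x01010101u) >> 24;
       }

   The DImode path is the same with a 0x0101010101010101 multiplier and
   a shift of 56.  */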
27928 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
27929 target, and SRC is the argument operand. */
27931 void
27932 rs6000_emit_parity (rtx dst, rtx src)
27934 enum machine_mode mode = GET_MODE (dst);
27935 rtx tmp;
27937 tmp = gen_reg_rtx (mode);
27939 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
27940 if (TARGET_CMPB)
27942 if (mode == SImode)
27944 emit_insn (gen_popcntbsi2 (tmp, src));
27945 emit_insn (gen_paritysi2_cmpb (dst, tmp));
27947 else
27949 emit_insn (gen_popcntbdi2 (tmp, src));
27950 emit_insn (gen_paritydi2_cmpb (dst, tmp));
27952 return;
27955 if (mode == SImode)
27957 /* Is mult+shift >= shift+xor+shift+xor? */
27958 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
27960 rtx tmp1, tmp2, tmp3, tmp4;
27962 tmp1 = gen_reg_rtx (SImode);
27963 emit_insn (gen_popcntbsi2 (tmp1, src));
27965 tmp2 = gen_reg_rtx (SImode);
27966 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
27967 tmp3 = gen_reg_rtx (SImode);
27968 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
27970 tmp4 = gen_reg_rtx (SImode);
27971 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
27972 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
27974 else
27975 rs6000_emit_popcount (tmp, src);
27976 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
27978 else
27980 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
27981 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
27983 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
27985 tmp1 = gen_reg_rtx (DImode);
27986 emit_insn (gen_popcntbdi2 (tmp1, src));
27988 tmp2 = gen_reg_rtx (DImode);
27989 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
27990 tmp3 = gen_reg_rtx (DImode);
27991 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
27993 tmp4 = gen_reg_rtx (DImode);
27994 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
27995 tmp5 = gen_reg_rtx (DImode);
27996 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
27998 tmp6 = gen_reg_rtx (DImode);
27999 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
28000 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
28002 else
28003 rs6000_emit_popcount (tmp, src);
28004 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
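/* Only bit 0 of each intermediate matters in the shift+xor sequences
   above, since parity(a + b) == (a ^ b) & 1 for the byte counts that
   popcntb produces.  A hypothetical model of the SImode fold,
   illustration only:

       #include <stdint.h>

       uint32_t parity32_from_bytes (uint32_t per_byte_counts)
       {
         uint32_t t = per_byte_counts;
         t ^= t >> 16;      // fold upper half onto lower
         t ^= t >> 8;       // fold remaining byte
         return t & 1;      // low bit is the parity
       }  */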
28008 /* Expand an Altivec constant permutation. Return true if we match
28009 an efficient implementation; false to fall back to VPERM. */
28011 bool
28012 altivec_expand_vec_perm_const (rtx operands[4])
28014 struct altivec_perm_insn {
28015 HOST_WIDE_INT mask;
28016 enum insn_code impl;
28017 unsigned char perm[16];
28019 static const struct altivec_perm_insn patterns[] = {
28020 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum,
28021 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
28022 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum,
28023 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
28024 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrghb,
28025 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
28026 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrghh,
28027 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
28028 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrghw,
28029 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
28030 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrglb,
28031 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
28032 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrglh,
28033 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
28034 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vmrglw,
28035 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
28036 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
28037 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
28038 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
28039 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
28042 unsigned int i, j, elt, which;
28043 unsigned char perm[16];
28044 rtx target, op0, op1, sel, x;
28045 bool one_vec;
28047 target = operands[0];
28048 op0 = operands[1];
28049 op1 = operands[2];
28050 sel = operands[3];
28052 /* Unpack the constant selector. */
28053 for (i = which = 0; i < 16; ++i)
28055 rtx e = XVECEXP (sel, 0, i);
28056 elt = INTVAL (e) & 31;
28057 which |= (elt < 16 ? 1 : 2);
28058 perm[i] = elt;
28061 /* Simplify the constant selector based on operands. */
28062 switch (which)
28064 default:
28065 gcc_unreachable ();
28067 case 3:
28068 one_vec = false;
28069 if (!rtx_equal_p (op0, op1))
28070 break;
28071 /* FALLTHRU */
28073 case 2:
28074 for (i = 0; i < 16; ++i)
28075 perm[i] &= 15;
28076 op0 = op1;
28077 one_vec = true;
28078 break;
28080 case 1:
28081 op1 = op0;
28082 one_vec = true;
28083 break;
28086 /* Look for splat patterns. */
28087 if (one_vec)
28089 elt = perm[0];
28091 for (i = 0; i < 16; ++i)
28092 if (perm[i] != elt)
28093 break;
28094 if (i == 16)
28096 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
28097 return true;
28100 if (elt % 2 == 0)
28102 for (i = 0; i < 16; i += 2)
28103 if (perm[i] != elt || perm[i + 1] != elt + 1)
28104 break;
28105 if (i == 16)
28107 x = gen_reg_rtx (V8HImode);
28108 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
28109 GEN_INT (elt / 2)));
28110 emit_move_insn (target, gen_lowpart (V16QImode, x));
28111 return true;
28115 if (elt % 4 == 0)
28117 for (i = 0; i < 16; i += 4)
28118 if (perm[i] != elt
28119 || perm[i + 1] != elt + 1
28120 || perm[i + 2] != elt + 2
28121 || perm[i + 3] != elt + 3)
28122 break;
28123 if (i == 16)
28125 x = gen_reg_rtx (V4SImode);
28126 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
28127 GEN_INT (elt / 4)));
28128 emit_move_insn (target, gen_lowpart (V16QImode, x));
28129 return true;
28134 /* Look for merge and pack patterns. */
28135 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
28137 bool swapped;
28139 if ((patterns[j].mask & rs6000_isa_flags) == 0)
28140 continue;
28142 elt = patterns[j].perm[0];
28143 if (perm[0] == elt)
28144 swapped = false;
28145 else if (perm[0] == elt + 16)
28146 swapped = true;
28147 else
28148 continue;
28149 for (i = 1; i < 16; ++i)
28151 elt = patterns[j].perm[i];
28152 if (swapped)
28153 elt = (elt >= 16 ? elt - 16 : elt + 16);
28154 else if (one_vec && elt >= 16)
28155 elt -= 16;
28156 if (perm[i] != elt)
28157 break;
28159 if (i == 16)
28161 enum insn_code icode = patterns[j].impl;
28162 enum machine_mode omode = insn_data[icode].operand[0].mode;
28163 enum machine_mode imode = insn_data[icode].operand[1].mode;
28165 if (swapped)
28166 x = op0, op0 = op1, op1 = x;
28167 if (imode != V16QImode)
28169 op0 = gen_lowpart (imode, op0);
28170 op1 = gen_lowpart (imode, op1);
28172 if (omode == V16QImode)
28173 x = target;
28174 else
28175 x = gen_reg_rtx (omode);
28176 emit_insn (GEN_FCN (icode) (x, op0, op1));
28177 if (omode != V16QImode)
28178 emit_move_insn (target, gen_lowpart (V16QImode, x));
28179 return true;
28183 return false;
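/* A few concrete selectors the matcher above recognizes, assuming
   V16QImode operands (illustration only):

       { 5,5,5,5, 5,5,5,5, 5,5,5,5, 5,5,5,5 }   -> vspltb op0, 5
       { 2,3,2,3, 2,3,2,3, 2,3,2,3, 2,3,2,3 }   -> vsplth element 1 of
                                                   the V8HImode view
       { 0,16,1,17, 2,18,3,19, 4,20,5,21, 6,22,7,23 }
                                                -> vmrghb op0, op1

   Selectors drawn entirely from one operand are first reduced modulo
   16, so splats are found no matter which input they index.  */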
28186 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
28187 Return true if we match an efficient implementation. */
28189 static bool
28190 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
28191 unsigned char perm0, unsigned char perm1)
28193 rtx x;
28195 /* If both selectors come from the same operand, fold to single op. */
28196 if ((perm0 & 2) == (perm1 & 2))
28198 if (perm0 & 2)
28199 op0 = op1;
28200 else
28201 op1 = op0;
28203 /* If both operands are equal, fold to simpler permutation. */
28204 if (rtx_equal_p (op0, op1))
28206 perm0 = perm0 & 1;
28207 perm1 = (perm1 & 1) + 2;
28209 /* If the first selector comes from the second operand, swap. */
28210 else if (perm0 & 2)
28212 if (perm1 & 2)
28213 return false;
28214 perm0 -= 2;
28215 perm1 += 2;
28216 x = op0, op0 = op1, op1 = x;
28218 /* If the second selector does not come from the second operand, fail. */
28219 else if ((perm1 & 2) == 0)
28220 return false;
28222 /* Success! */
28223 if (target != NULL)
28225 enum machine_mode vmode, dmode;
28226 rtvec v;
28228 vmode = GET_MODE (target);
28229 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
28230 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
28232 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
28233 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
28234 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
28235 emit_insn (gen_rtx_SET (VOIDmode, target, x));
28237 return true;
28240 bool
28241 rs6000_expand_vec_perm_const (rtx operands[4])
28243 rtx target, op0, op1, sel;
28244 unsigned char perm0, perm1;
28246 target = operands[0];
28247 op0 = operands[1];
28248 op1 = operands[2];
28249 sel = operands[3];
28251 /* Unpack the constant selector. */
28252 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
28253 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
28255 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
28258 /* Test whether a constant permutation is supported. */
28260 static bool
28261 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
28262 const unsigned char *sel)
28264 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
28265 if (TARGET_ALTIVEC)
28266 return true;
28268 /* Check for ps_merge* or evmerge* insns. */
28269 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
28270 || (TARGET_SPE && vmode == V2SImode))
28272 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
28273 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
28274 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
28277 return false;
28280 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
28282 static void
28283 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
28284 enum machine_mode vmode, unsigned nelt, rtx perm[])
28286 enum machine_mode imode;
28287 rtx x;
28289 imode = vmode;
28290 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
28292 imode = GET_MODE_INNER (vmode);
28293 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
28294 imode = mode_for_vector (imode, nelt);
28297 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
28298 x = expand_vec_perm (vmode, op0, op1, x, target);
28299 if (x != target)
28300 emit_move_insn (target, x);
28303 /* Expand an extract even operation. */
28305 void
28306 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
28308 enum machine_mode vmode = GET_MODE (target);
28309 unsigned i, nelt = GET_MODE_NUNITS (vmode);
28310 rtx perm[16];
28312 for (i = 0; i < nelt; i++)
28313 perm[i] = GEN_INT (i * 2);
28315 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
28318 /* Expand a vector interleave operation. */
28320 void
28321 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
28323 enum machine_mode vmode = GET_MODE (target);
28324 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
28325 rtx perm[16];
28327 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
28328 for (i = 0; i < nelt / 2; i++)
28330 perm[i * 2] = GEN_INT (i + high);
28331 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
28334 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
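/* The index vectors the two expanders above build, shown for a
   4-element vector (nelt == 4) on a big-endian target, where indices
   0..3 select from op0 and 4..7 from op1 (illustration only):

       extract_even:            { 0, 2, 4, 6 }
       interleave, highp:       { 0, 4, 1, 5 }
       interleave, !highp:      { 2, 6, 3, 7 }  */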
28337 /* Return an RTX representing where to find the function value of a
28338 function returning MODE. */
28339 static rtx
28340 rs6000_complex_function_value (enum machine_mode mode)
28342 unsigned int regno;
28343 rtx r1, r2;
28344 enum machine_mode inner = GET_MODE_INNER (mode);
28345 unsigned int inner_bytes = GET_MODE_SIZE (inner);
28347 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
28348 regno = FP_ARG_RETURN;
28349 else
28351 regno = GP_ARG_RETURN;
28353 /* 32-bit is OK since it'll go in r3/r4. */
28354 if (TARGET_32BIT && inner_bytes >= 4)
28355 return gen_rtx_REG (mode, regno);
28358 if (inner_bytes >= 8)
28359 return gen_rtx_REG (mode, regno);
28361 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
28362 const0_rtx);
28363 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
28364 GEN_INT (inner_bytes));
28365 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
28368 /* Target hook for TARGET_FUNCTION_VALUE.
28370 On the SPE, both FPs and vectors are returned in r3.
28372 On RS/6000 an integer value is in r3 and a floating-point value is in
28373 fp1, unless -msoft-float. */
28375 static rtx
28376 rs6000_function_value (const_tree valtype,
28377 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
28378 bool outgoing ATTRIBUTE_UNUSED)
28380 enum machine_mode mode;
28381 unsigned int regno;
28383 /* Special handling for structs in darwin64. */
28384 if (TARGET_MACHO
28385 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
28387 CUMULATIVE_ARGS valcum;
28388 rtx valret;
28390 valcum.words = 0;
28391 valcum.fregno = FP_ARG_MIN_REG;
28392 valcum.vregno = ALTIVEC_ARG_MIN_REG;
28393 /* Do a trial code generation as if this were going to be passed as
28394 an argument; if any part goes in memory, we return NULL. */
28395 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
28396 if (valret)
28397 return valret;
28398 /* Otherwise fall through to standard ABI rules. */
28401 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
28403 /* A long long return value needs to be split under -mpowerpc64 with the 32-bit ABI. */
28404 return gen_rtx_PARALLEL (DImode,
28405 gen_rtvec (2,
28406 gen_rtx_EXPR_LIST (VOIDmode,
28407 gen_rtx_REG (SImode, GP_ARG_RETURN),
28408 const0_rtx),
28409 gen_rtx_EXPR_LIST (VOIDmode,
28410 gen_rtx_REG (SImode,
28411 GP_ARG_RETURN + 1),
28412 GEN_INT (4))));
28414 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
28416 return gen_rtx_PARALLEL (DCmode,
28417 gen_rtvec (4,
28418 gen_rtx_EXPR_LIST (VOIDmode,
28419 gen_rtx_REG (SImode, GP_ARG_RETURN),
28420 const0_rtx),
28421 gen_rtx_EXPR_LIST (VOIDmode,
28422 gen_rtx_REG (SImode,
28423 GP_ARG_RETURN + 1),
28424 GEN_INT (4)),
28425 gen_rtx_EXPR_LIST (VOIDmode,
28426 gen_rtx_REG (SImode,
28427 GP_ARG_RETURN + 2),
28428 GEN_INT (8)),
28429 gen_rtx_EXPR_LIST (VOIDmode,
28430 gen_rtx_REG (SImode,
28431 GP_ARG_RETURN + 3),
28432 GEN_INT (12))));
28435 mode = TYPE_MODE (valtype);
28436 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
28437 || POINTER_TYPE_P (valtype))
28438 mode = TARGET_32BIT ? SImode : DImode;
28440 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
28441 /* _Decimal128 must use an even/odd register pair. */
28442 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
28443 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
28444 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
28445 regno = FP_ARG_RETURN;
28446 else if (TREE_CODE (valtype) == COMPLEX_TYPE
28447 && targetm.calls.split_complex_arg)
28448 return rs6000_complex_function_value (mode);
28449 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
28450 return register is used in both cases, and we won't see V2DImode/V2DFmode
28451 for pure altivec, combine the two cases. */
28452 else if (TREE_CODE (valtype) == VECTOR_TYPE
28453 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
28454 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
28455 regno = ALTIVEC_ARG_RETURN;
28456 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
28457 && (mode == DFmode || mode == DCmode
28458 || mode == TFmode || mode == TCmode))
28459 return spe_build_register_parallel (mode, GP_ARG_RETURN);
28460 else
28461 regno = GP_ARG_RETURN;
28463 return gen_rtx_REG (mode, regno);
28466 /* Define how to find the value returned by a library function
28467 assuming the value has mode MODE. */
28468 rtx
28469 rs6000_libcall_value (enum machine_mode mode)
28471 unsigned int regno;
28473 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
28475 /* A long long return value needs to be split under -mpowerpc64 with the 32-bit ABI. */
28476 return gen_rtx_PARALLEL (DImode,
28477 gen_rtvec (2,
28478 gen_rtx_EXPR_LIST (VOIDmode,
28479 gen_rtx_REG (SImode, GP_ARG_RETURN),
28480 const0_rtx),
28481 gen_rtx_EXPR_LIST (VOIDmode,
28482 gen_rtx_REG (SImode,
28483 GP_ARG_RETURN + 1),
28484 GEN_INT (4))));
28487 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
28488 /* _Decimal128 must use an even/odd register pair. */
28489 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
28490 else if (SCALAR_FLOAT_MODE_P (mode)
28491 && TARGET_HARD_FLOAT && TARGET_FPRS
28492 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
28493 regno = FP_ARG_RETURN;
28494 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
28495 return register is used in both cases, and we won't see V2DImode/V2DFmode
28496 for pure altivec, combine the two cases. */
28497 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
28498 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
28499 regno = ALTIVEC_ARG_RETURN;
28500 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
28501 return rs6000_complex_function_value (mode);
28502 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
28503 && (mode == DFmode || mode == DCmode
28504 || mode == TFmode || mode == TCmode))
28505 return spe_build_register_parallel (mode, GP_ARG_RETURN);
28506 else
28507 regno = GP_ARG_RETURN;
28509 return gen_rtx_REG (mode, regno);
28513 /* Given FROM and TO register numbers, say whether this elimination is allowed.
28514 Frame pointer elimination is automatically handled.
28516 For the RS/6000, if frame pointer elimination is being done, we would like
28517 to convert ap into fp, not sp.
28519 We need r30 if -mminimal-toc was specified, and there are constant pool
28520 references. */
28522 static bool
28523 rs6000_can_eliminate (const int from, const int to)
28525 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
28526 ? ! frame_pointer_needed
28527 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
28528 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
28529 : true);
28532 /* Define the offset between two registers, FROM to be eliminated and its
28533 replacement TO, at the start of a routine. */
28534 HOST_WIDE_INT
28535 rs6000_initial_elimination_offset (int from, int to)
28537 rs6000_stack_t *info = rs6000_stack_info ();
28538 HOST_WIDE_INT offset;
28540 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
28541 offset = info->push_p ? 0 : -info->total_size;
28542 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
28544 offset = info->push_p ? 0 : -info->total_size;
28545 if (FRAME_GROWS_DOWNWARD)
28546 offset += info->fixed_size + info->vars_size + info->parm_size;
28548 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
28549 offset = FRAME_GROWS_DOWNWARD
28550 ? info->fixed_size + info->vars_size + info->parm_size
28551 : 0;
28552 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
28553 offset = info->total_size;
28554 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
28555 offset = info->push_p ? info->total_size : 0;
28556 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
28557 offset = 0;
28558 else
28559 gcc_unreachable ();
28561 return offset;
28564 static rtx
28565 rs6000_dwarf_register_span (rtx reg)
28567 rtx parts[8];
28568 int i, words;
28569 unsigned regno = REGNO (reg);
28570 enum machine_mode mode = GET_MODE (reg);
28572 if (TARGET_SPE
28573 && regno < 32
28574 && (SPE_VECTOR_MODE (GET_MODE (reg))
28575 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
28576 && mode != SFmode && mode != SDmode && mode != SCmode)))
28577 ;
28578 else
28579 return NULL_RTX;
28581 regno = REGNO (reg);
28583 /* The duality of the SPE register size wreaks all kinds of havoc.
28584 This is a way of distinguishing r0 in 32-bits from r0 in
28585 64-bits. */
28586 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
28587 gcc_assert (words <= 4);
28588 for (i = 0; i < words; i++, regno++)
28590 if (BYTES_BIG_ENDIAN)
28592 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
28593 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
28595 else
28597 parts[2 * i] = gen_rtx_REG (SImode, regno);
28598 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
28602 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
28605 /* Fill in sizes for SPE register high parts in table used by unwinder. */
28607 static void
28608 rs6000_init_dwarf_reg_sizes_extra (tree address)
28610 if (TARGET_SPE)
28612 int i;
28613 enum machine_mode mode = TYPE_MODE (char_type_node);
28614 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
28615 rtx mem = gen_rtx_MEM (BLKmode, addr);
28616 rtx value = gen_int_mode (4, mode);
28618 for (i = 1201; i < 1232; i++)
28620 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
28621 HOST_WIDE_INT offset
28622 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
28624 emit_move_insn (adjust_address (mem, mode, offset), value);
28629 /* Map internal gcc register numbers to DWARF2 register numbers. */
28631 unsigned int
28632 rs6000_dbx_register_number (unsigned int regno)
28634 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
28635 return regno;
28636 if (regno == LR_REGNO)
28637 return 108;
28638 if (regno == CTR_REGNO)
28639 return 109;
28640 if (CR_REGNO_P (regno))
28641 return regno - CR0_REGNO + 86;
28642 if (regno == CA_REGNO)
28643 return 101; /* XER */
28644 if (ALTIVEC_REGNO_P (regno))
28645 return regno - FIRST_ALTIVEC_REGNO + 1124;
28646 if (regno == VRSAVE_REGNO)
28647 return 356;
28648 if (regno == VSCR_REGNO)
28649 return 67;
28650 if (regno == SPE_ACC_REGNO)
28651 return 99;
28652 if (regno == SPEFSCR_REGNO)
28653 return 612;
28654 /* SPE high reg number. We get these values of regno from
28655 rs6000_dwarf_register_span. */
28656 gcc_assert (regno >= 1200 && regno < 1232);
28657 return regno;
28660 /* Target hook for eh_return_filter_mode. */
28661 static enum machine_mode
28662 rs6000_eh_return_filter_mode (void)
28664 return TARGET_32BIT ? SImode : word_mode;
28667 /* Target hook for scalar_mode_supported_p. */
28668 static bool
28669 rs6000_scalar_mode_supported_p (enum machine_mode mode)
28671 if (DECIMAL_FLOAT_MODE_P (mode))
28672 return default_decimal_float_supported_p ();
28673 else
28674 return default_scalar_mode_supported_p (mode);
28677 /* Target hook for vector_mode_supported_p. */
28678 static bool
28679 rs6000_vector_mode_supported_p (enum machine_mode mode)
28682 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
28683 return true;
28685 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
28686 return true;
28688 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
28689 return true;
28691 else
28692 return false;
28695 /* Target hook for invalid_arg_for_unprototyped_fn. */
28696 static const char *
28697 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
28699 return (!rs6000_darwin64_abi
28700 && typelist == 0
28701 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
28702 && (funcdecl == NULL_TREE
28703 || (TREE_CODE (funcdecl) == FUNCTION_DECL
28704 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
28705 ? N_("AltiVec argument passed to unprototyped function")
28706 : NULL;
28709 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
28710 setup by using __stack_chk_fail_local hidden function instead of
28711 calling __stack_chk_fail directly. Otherwise it is better to call
28712 __stack_chk_fail directly. */
28714 static tree ATTRIBUTE_UNUSED
28715 rs6000_stack_protect_fail (void)
28717 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
28718 ? default_hidden_stack_protect_fail ()
28719 : default_external_stack_protect_fail ();
28722 void
28723 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
28724 int num_operands ATTRIBUTE_UNUSED)
28726 if (rs6000_warn_cell_microcode)
28728 const char *temp;
28729 int insn_code_number = recog_memoized (insn);
28730 location_t location = INSN_LOCATION (insn);
28732 /* Punt on insns we cannot recognize. */
28733 if (insn_code_number < 0)
28734 return;
28736 temp = get_insn_template (insn_code_number, insn);
28738 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
28739 warning_at (location, OPT_mwarn_cell_microcode,
28740 "emitting microcode insn %s\t[%s] #%d",
28741 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
28742 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
28743 warning_at (location, OPT_mwarn_cell_microcode,
28744 "emitting conditional microcode insn %s\t[%s] #%d",
28745 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
28749 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
28751 #if TARGET_ELF
28752 static unsigned HOST_WIDE_INT
28753 rs6000_asan_shadow_offset (void)
28755 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
28757 #endif
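/* ASan maps an address to its shadow byte as (addr >> 3) + offset, so
   the offsets above place shadow memory at 1<<41 (64-bit) or 1<<29
   (32-bit).  A hypothetical sketch of the mapping, illustration only:

       unsigned long long shadow_of (unsigned long long addr, int is64)
       {
         unsigned long long off = 1ULL << (is64 ? 41 : 29);
         return (addr >> 3) + off;   // one shadow byte per 8 app bytes
       }  */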
28759 /* Mask options that we want to support inside of attribute((target)) and
28760 #pragma GCC target operations. Note, we do not include things like
28761 64/32-bit, endianness, hard/soft floating point, etc. that would have
28762 different calling sequences. */
28764 struct rs6000_opt_mask {
28765 const char *name; /* option name */
28766 HOST_WIDE_INT mask; /* mask to set */
28767 bool invert; /* invert sense of mask */
28768 bool valid_target; /* option is a target option */
28771 static struct rs6000_opt_mask const rs6000_opt_masks[] =
28773 { "altivec", OPTION_MASK_ALTIVEC, false, true },
28774 { "cmpb", OPTION_MASK_CMPB, false, true },
28775 { "crypto", OPTION_MASK_CRYPTO, false, true },
28776 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
28777 { "dlmzb", OPTION_MASK_DLMZB, false, true },
28778 { "fprnd", OPTION_MASK_FPRND, false, true },
28779 { "hard-dfp", OPTION_MASK_DFP, false, true },
28780 { "isel", OPTION_MASK_ISEL, false, true },
28781 { "mfcrf", OPTION_MASK_MFCRF, false, true },
28782 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
28783 { "mulhw", OPTION_MASK_MULHW, false, true },
28784 { "multiple", OPTION_MASK_MULTIPLE, false, true },
28785 { "popcntb", OPTION_MASK_POPCNTB, false, true },
28786 { "popcntd", OPTION_MASK_POPCNTD, false, true },
28787 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
28788 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
28789 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
28790 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
28791 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
28792 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
28793 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
28794 { "string", OPTION_MASK_STRING, false, true },
28795 { "update", OPTION_MASK_NO_UPDATE, true , true },
28796 { "vsx", OPTION_MASK_VSX, false, true },
28797 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
28798 #ifdef OPTION_MASK_64BIT
28799 #if TARGET_AIX_OS
28800 { "aix64", OPTION_MASK_64BIT, false, false },
28801 { "aix32", OPTION_MASK_64BIT, true, false },
28802 #else
28803 { "64", OPTION_MASK_64BIT, false, false },
28804 { "32", OPTION_MASK_64BIT, true, false },
28805 #endif
28806 #endif
28807 #ifdef OPTION_MASK_EABI
28808 { "eabi", OPTION_MASK_EABI, false, false },
28809 #endif
28810 #ifdef OPTION_MASK_LITTLE_ENDIAN
28811 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
28812 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
28813 #endif
28814 #ifdef OPTION_MASK_RELOCATABLE
28815 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
28816 #endif
28817 #ifdef OPTION_MASK_STRICT_ALIGN
28818 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
28819 #endif
28820 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
28821 { "string", OPTION_MASK_STRING, false, false },
28824 /* Builtin mask mapping for printing the flags. */
28825 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
28827 { "altivec", RS6000_BTM_ALTIVEC, false, false },
28828 { "vsx", RS6000_BTM_VSX, false, false },
28829 { "spe", RS6000_BTM_SPE, false, false },
28830 { "paired", RS6000_BTM_PAIRED, false, false },
28831 { "fre", RS6000_BTM_FRE, false, false },
28832 { "fres", RS6000_BTM_FRES, false, false },
28833 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
28834 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
28835 { "popcntd", RS6000_BTM_POPCNTD, false, false },
28836 { "cell", RS6000_BTM_CELL, false, false },
28837 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
28838 { "crypto", RS6000_BTM_CRYPTO, false, false },
28841 /* Option variables that we want to support inside attribute((target)) and
28842 #pragma GCC target operations. */
28844 struct rs6000_opt_var {
28845 const char *name; /* option name */
28846 size_t global_offset; /* offset of the option in global_options. */
28847 size_t target_offset; /* offset of the option in target options. */
28850 static struct rs6000_opt_var const rs6000_opt_vars[] =
28852 { "friz",
28853 offsetof (struct gcc_options, x_TARGET_FRIZ),
28854 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
28855 { "avoid-indexed-addresses",
28856 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
28857 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
28858 { "paired",
28859 offsetof (struct gcc_options, x_rs6000_paired_float),
28860 offsetof (struct cl_target_option, x_rs6000_paired_float), },
28861 { "longcall",
28862 offsetof (struct gcc_options, x_rs6000_default_long_calls),
28863 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
28866 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
28867 parsing. Return true if there were no errors. */
28869 static bool
28870 rs6000_inner_target_options (tree args, bool attr_p)
28872 bool ret = true;
28874 if (args == NULL_TREE)
28877 else if (TREE_CODE (args) == STRING_CST)
28879 char *p = ASTRDUP (TREE_STRING_POINTER (args));
28880 char *q;
28882 while ((q = strtok (p, ",")) != NULL)
28884 bool error_p = false;
28885 bool not_valid_p = false;
28886 const char *cpu_opt = NULL;
28888 p = NULL;
28889 if (strncmp (q, "cpu=", 4) == 0)
28891 int cpu_index = rs6000_cpu_name_lookup (q+4);
28892 if (cpu_index >= 0)
28893 rs6000_cpu_index = cpu_index;
28894 else
28896 error_p = true;
28897 cpu_opt = q+4;
28900 else if (strncmp (q, "tune=", 5) == 0)
28902 int tune_index = rs6000_cpu_name_lookup (q+5);
28903 if (tune_index >= 0)
28904 rs6000_tune_index = tune_index;
28905 else
28907 error_p = true;
28908 cpu_opt = q+5;
28911 else
28913 size_t i;
28914 bool invert = false;
28915 char *r = q;
28917 error_p = true;
28918 if (strncmp (r, "no-", 3) == 0)
28920 invert = true;
28921 r += 3;
28924 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
28925 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
28927 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
28929 if (!rs6000_opt_masks[i].valid_target)
28930 not_valid_p = true;
28931 else
28933 error_p = false;
28934 rs6000_isa_flags_explicit |= mask;
28936 /* VSX needs altivec, so -mvsx automagically sets
28937 altivec. */
28938 if (mask == OPTION_MASK_VSX && !invert)
28939 mask |= OPTION_MASK_ALTIVEC;
28941 if (rs6000_opt_masks[i].invert)
28942 invert = !invert;
28944 if (invert)
28945 rs6000_isa_flags &= ~mask;
28946 else
28947 rs6000_isa_flags |= mask;
28949 break;
28952 if (error_p && !not_valid_p)
28954 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
28955 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
28957 size_t j = rs6000_opt_vars[i].global_offset;
28958 *((int *) ((char *)&global_options + j)) = !invert;
28959 error_p = false;
28960 break;
28965 if (error_p)
28967 const char *eprefix, *esuffix;
28969 ret = false;
28970 if (attr_p)
28972 eprefix = "__attribute__((__target__(";
28973 esuffix = ")))";
28975 else
28977 eprefix = "#pragma GCC target ";
28978 esuffix = "";
28981 if (cpu_opt)
28982 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
28983 q, esuffix);
28984 else if (not_valid_p)
28985 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
28986 else
28987 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
28992 else if (TREE_CODE (args) == TREE_LIST)
28996 tree value = TREE_VALUE (args);
28997 if (value)
28999 bool ret2 = rs6000_inner_target_options (value, attr_p);
29000 if (!ret2)
29001 ret = false;
29003 args = TREE_CHAIN (args);
29005 while (args != NULL_TREE);
29008 else
29009 gcc_unreachable ();
29011 return ret;
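/* Examples of the strings this parser accepts, for illustration:

       __attribute__((__target__("cpu=power7,no-vsx")))
       void f (void);

       #pragma GCC target ("altivec,popcntd")

   A leading "no-" inverts a mask entry, "cpu="/"tune=" select a
   processor, and "vsx" implicitly turns on "altivec" as noted above.  */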
29014 /* Print out the target options as a list for -mdebug=target. */
29016 static void
29017 rs6000_debug_target_options (tree args, const char *prefix)
29019 if (args == NULL_TREE)
29020 fprintf (stderr, "%s<NULL>", prefix);
29022 else if (TREE_CODE (args) == STRING_CST)
29024 char *p = ASTRDUP (TREE_STRING_POINTER (args));
29025 char *q;
29027 while ((q = strtok (p, ",")) != NULL)
29029 p = NULL;
29030 fprintf (stderr, "%s\"%s\"", prefix, q);
29031 prefix = ", ";
29035 else if (TREE_CODE (args) == TREE_LIST)
29039 tree value = TREE_VALUE (args);
29040 if (value)
29042 rs6000_debug_target_options (value, prefix);
29043 prefix = ", ";
29045 args = TREE_CHAIN (args);
29047 while (args != NULL_TREE);
29050 else
29051 gcc_unreachable ();
29053 return;
29057 /* Hook to validate attribute((target("..."))). */
29059 static bool
29060 rs6000_valid_attribute_p (tree fndecl,
29061 tree ARG_UNUSED (name),
29062 tree args,
29063 int flags)
29065 struct cl_target_option cur_target;
29066 bool ret;
29067 tree old_optimize = build_optimization_node ();
29068 tree new_target, new_optimize;
29069 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
29071 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
29073 if (TARGET_DEBUG_TARGET)
29075 tree tname = DECL_NAME (fndecl);
29076 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
29077 if (tname)
29078 fprintf (stderr, "function: %.*s\n",
29079 (int) IDENTIFIER_LENGTH (tname),
29080 IDENTIFIER_POINTER (tname));
29081 else
29082 fprintf (stderr, "function: unknown\n");
29084 fprintf (stderr, "args:");
29085 rs6000_debug_target_options (args, " ");
29086 fprintf (stderr, "\n");
29088 if (flags)
29089 fprintf (stderr, "flags: 0x%x\n", flags);
29091 fprintf (stderr, "--------------------\n");
29094 old_optimize = build_optimization_node ();
29095 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
29097 /* If the function changed the optimization levels as well as setting target
29098 options, start with the optimizations specified. */
29099 if (func_optimize && func_optimize != old_optimize)
29100 cl_optimization_restore (&global_options,
29101 TREE_OPTIMIZATION (func_optimize));
29103 /* The target attributes may also change some optimization flags, so update
29104 the optimization options if necessary. */
29105 cl_target_option_save (&cur_target, &global_options);
29106 rs6000_cpu_index = rs6000_tune_index = -1;
29107 ret = rs6000_inner_target_options (args, true);
29109 /* Set up any additional state. */
29110 if (ret)
29112 ret = rs6000_option_override_internal (false);
29113 new_target = build_target_option_node ();
29115 else
29116 new_target = NULL;
29118 new_optimize = build_optimization_node ();
29120 if (!new_target)
29121 ret = false;
29123 else if (fndecl)
29125 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
29127 if (old_optimize != new_optimize)
29128 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
29131 cl_target_option_restore (&global_options, &cur_target);
29133 if (old_optimize != new_optimize)
29134 cl_optimization_restore (&global_options,
29135 TREE_OPTIMIZATION (old_optimize));
29137 return ret;
29141 /* Hook to validate the current #pragma GCC target and set the state, and
29142 update the macros based on what was changed. If ARGS is NULL, then
29143 POP_TARGET is used to reset the options. */
29145 bool
29146 rs6000_pragma_target_parse (tree args, tree pop_target)
29148 tree prev_tree = build_target_option_node ();
29149 tree cur_tree;
29150 struct cl_target_option *prev_opt, *cur_opt;
29151 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
29152 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
29154 if (TARGET_DEBUG_TARGET)
29156 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
29157 fprintf (stderr, "args:");
29158 rs6000_debug_target_options (args, " ");
29159 fprintf (stderr, "\n");
29161 if (pop_target)
29163 fprintf (stderr, "pop_target:\n");
29164 debug_tree (pop_target);
29166 else
29167 fprintf (stderr, "pop_target: <NULL>\n");
29169 fprintf (stderr, "--------------------\n");
29172 if (! args)
29174 cur_tree = ((pop_target)
29175 ? pop_target
29176 : target_option_default_node);
29177 cl_target_option_restore (&global_options,
29178 TREE_TARGET_OPTION (cur_tree));
29180 else
29182 rs6000_cpu_index = rs6000_tune_index = -1;
29183 if (!rs6000_inner_target_options (args, false)
29184 || !rs6000_option_override_internal (false)
29185 || (cur_tree = build_target_option_node ()) == NULL_TREE)
29187 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
29188 fprintf (stderr, "invalid pragma\n");
29190 return false;
29194 target_option_current_node = cur_tree;
29196 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
29197 change the macros that are defined. */
29198 if (rs6000_target_modify_macros_ptr)
29200 prev_opt = TREE_TARGET_OPTION (prev_tree);
29201 prev_bumask = prev_opt->x_rs6000_builtin_mask;
29202 prev_flags = prev_opt->x_rs6000_isa_flags;
29204 cur_opt = TREE_TARGET_OPTION (cur_tree);
29205 cur_flags = cur_opt->x_rs6000_isa_flags;
29206 cur_bumask = cur_opt->x_rs6000_builtin_mask;
29208 diff_bumask = (prev_bumask ^ cur_bumask);
29209 diff_flags = (prev_flags ^ cur_flags);
29211 if ((diff_flags != 0) || (diff_bumask != 0))
29213 /* Delete old macros. */
29214 rs6000_target_modify_macros_ptr (false,
29215 prev_flags & diff_flags,
29216 prev_bumask & diff_bumask);
29218 /* Define new macros. */
29219 rs6000_target_modify_macros_ptr (true,
29220 cur_flags & diff_flags,
29221 cur_bumask & diff_bumask);
29225 return true;
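/* A usage sketch of the pop path above (ARGS == NULL), assuming the C
   or C++ front end (illustration only):

       #pragma GCC push_options
       #pragma GCC target ("vsx")
       // ... code compiled with VSX; __VSX__ is now defined ...
       #pragma GCC pop_options     // restores both options and macros

   The bookkeeping at the end of the function computes the flag deltas
   so only macros whose options actually changed are deleted and
   redefined.  */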
29229 /* Remember the last target of rs6000_set_current_function. */
29230 static GTY(()) tree rs6000_previous_fndecl;
29232 /* Establish appropriate back-end context for processing the function
29233 FNDECL. The argument might be NULL to indicate processing at top
29234 level, outside of any function scope. */
29235 static void
29236 rs6000_set_current_function (tree fndecl)
29238 tree old_tree = (rs6000_previous_fndecl
29239 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
29240 : NULL_TREE);
29242 tree new_tree = (fndecl
29243 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
29244 : NULL_TREE);
29246 if (TARGET_DEBUG_TARGET)
29248 bool print_final = false;
29249 fprintf (stderr, "\n==================== rs6000_set_current_function");
29251 if (fndecl)
29252 fprintf (stderr, ", fndecl %s (%p)",
29253 (DECL_NAME (fndecl)
29254 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
29255 : "<unknown>"), (void *)fndecl);
29257 if (rs6000_previous_fndecl)
29258 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
29260 fprintf (stderr, "\n");
29261 if (new_tree)
29263 fprintf (stderr, "\nnew fndecl target specific options:\n");
29264 debug_tree (new_tree);
29265 print_final = true;
29268 if (old_tree)
29270 fprintf (stderr, "\nold fndecl target specific options:\n");
29271 debug_tree (old_tree);
29272 print_final = true;
29275 if (print_final)
29276 fprintf (stderr, "--------------------\n");
29279 /* Only change the context if the function changes. This hook is called
29280 several times in the course of compiling a function, and we don't want to
29281 slow things down too much or call target_reinit when it isn't safe. */
29282 if (fndecl && fndecl != rs6000_previous_fndecl)
29284 rs6000_previous_fndecl = fndecl;
29285 if (old_tree == new_tree)
29288 else if (new_tree)
29290 cl_target_option_restore (&global_options,
29291 TREE_TARGET_OPTION (new_tree));
29292 target_reinit ();
29295 else if (old_tree)
29297 struct cl_target_option *def
29298 = TREE_TARGET_OPTION (target_option_current_node);
29300 cl_target_option_restore (&global_options, def);
29301 target_reinit ();
29307 /* Save the current options */
29309 static void
29310 rs6000_function_specific_save (struct cl_target_option *ptr)
29312 ptr->x_rs6000_isa_flags = rs6000_isa_flags;
29313 ptr->x_rs6000_isa_flags_explicit = rs6000_isa_flags_explicit;
29316 /* Restore the current options */
29318 static void
29319 rs6000_function_specific_restore (struct cl_target_option *ptr)
29321 rs6000_isa_flags = ptr->x_rs6000_isa_flags;
29322 rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
29323 (void) rs6000_option_override_internal (false);
29326 /* Print the current options */
29328 static void
29329 rs6000_function_specific_print (FILE *file, int indent,
29330 struct cl_target_option *ptr)
29332 rs6000_print_isa_options (file, indent, "Isa options set",
29333 ptr->x_rs6000_isa_flags);
29335 rs6000_print_isa_options (file, indent, "Isa options explicit",
29336 ptr->x_rs6000_isa_flags_explicit);
29339 /* Helper function to print the current isa or misc options on a line. */
29341 static void
29342 rs6000_print_options_internal (FILE *file,
29343 int indent,
29344 const char *string,
29345 HOST_WIDE_INT flags,
29346 const char *prefix,
29347 const struct rs6000_opt_mask *opts,
29348 size_t num_elements)
29350 size_t i;
29351 size_t start_column = 0;
29352 size_t cur_column;
29353 size_t max_column = 76;
29354 const char *comma = "";
29355 const char *nl = "\n";
29357 if (indent)
29358 start_column += fprintf (file, "%*s", indent, "");
29360 if (!flags)
29362 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
29363 return;
29366 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
29368 /* Print the various mask options. */
29369 cur_column = start_column;
29370 for (i = 0; i < num_elements; i++)
29372 if ((flags & opts[i].mask) != 0)
29374 const char *no_str = opts[i].invert ? "no-" : "";
29375 size_t len = (strlen (comma)
29376 + strlen (prefix)
29377 + strlen (no_str)
29378 + strlen (opts[i].name));
29380 cur_column += len;
29381 if (cur_column > max_column)
29383 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
29384 cur_column = start_column + len;
29385 comma = "";
29386 nl = "\n\n";
29389 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
29390 opts[i].name);
29391 flags &= ~ opts[i].mask;
29392 comma = ", ";
29396 fputs (nl, file);
29399 /* Helper function to print the current isa options on a line. */
29401 static void
29402 rs6000_print_isa_options (FILE *file, int indent, const char *string,
29403 HOST_WIDE_INT flags)
29405 rs6000_print_options_internal (file, indent, string, flags, "-m",
29406 &rs6000_opt_masks[0],
29407 ARRAY_SIZE (rs6000_opt_masks));
29410 static void
29411 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
29412 HOST_WIDE_INT flags)
29414 rs6000_print_options_internal (file, indent, string, flags, "",
29415 &rs6000_builtin_mask_names[0],
29416 ARRAY_SIZE (rs6000_builtin_mask_names));
29420 /* Hook to determine if one function can safely inline another. */
29422 static bool
29423 rs6000_can_inline_p (tree caller, tree callee)
29425 bool ret = false;
29426 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
29427 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
29429 /* If callee has no option attributes, then it is ok to inline. */
29430 if (!callee_tree)
29431 ret = true;
29433 /* If caller has no option attributes, but callee does then it is not ok to
29434 inline. */
29435 else if (!caller_tree)
29436 ret = false;
29438 else
29440 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
29441 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
29443 /* Callee's options should be a subset of the caller's, i.e. a vsx function
29444 can inline an altivec function but a non-vsx function can't inline a
29445 vsx function. */
29446 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
29447 == callee_opts->x_rs6000_isa_flags)
29448 ret = true;
29451 if (TARGET_DEBUG_TARGET)
29452 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
29453 (DECL_NAME (caller)
29454 ? IDENTIFIER_POINTER (DECL_NAME (caller))
29455 : "<unknown>"),
29456 (DECL_NAME (callee)
29457 ? IDENTIFIER_POINTER (DECL_NAME (callee))
29458 : "<unknown>"),
29459 (ret ? "can" : "cannot"));
29461 return ret;
29464 /* Allocate a stack temp and fix up the address so it meets the particular
29465 memory requirements (either offsettable or REG+REG addressing). */
29467 rtx
29468 rs6000_allocate_stack_temp (enum machine_mode mode,
29469 bool offsettable_p,
29470 bool reg_reg_p)
29472 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
29473 rtx addr = XEXP (stack, 0);
29474 int strict_p = (reload_in_progress || reload_completed);
29476 if (!legitimate_indirect_address_p (addr, strict_p))
29478 if (offsettable_p
29479 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
29480 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
29482 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
29483 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
29486 return stack;
29489 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
29490 to such a form to deal with memory reference instructions like STFIWX that
29491 only take reg+reg addressing. */
29493 rtx
29494 rs6000_address_for_fpconvert (rtx x)
29496 int strict_p = (reload_in_progress || reload_completed);
29497 rtx addr;
29499 gcc_assert (MEM_P (x));
29500 addr = XEXP (x, 0);
29501 if (! legitimate_indirect_address_p (addr, strict_p)
29502 && ! legitimate_indexed_address_p (addr, strict_p))
29504 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
29506 rtx reg = XEXP (addr, 0);
29507 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
29508 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
29509 gcc_assert (REG_P (reg));
29510 emit_insn (gen_add3_insn (reg, reg, size_rtx));
29511 addr = reg;
29513 else if (GET_CODE (addr) == PRE_MODIFY)
29515 rtx reg = XEXP (addr, 0);
29516 rtx expr = XEXP (addr, 1);
29517 gcc_assert (REG_P (reg));
29518 gcc_assert (GET_CODE (expr) == PLUS);
29519 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
29520 addr = reg;
29523 x = replace_equiv_address (x, copy_addr_to_reg (addr));
29526 return x;
29529 /* Given a memory reference, if it is not in the form for altivec memory
29530 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
29531 convert to the altivec format. */
29533 rtx
29534 rs6000_address_for_altivec (rtx x)
29536 gcc_assert (MEM_P (x));
29537 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
29539 rtx addr = XEXP (x, 0);
29540 int strict_p = (reload_in_progress || reload_completed);
29542 if (!legitimate_indexed_address_p (addr, strict_p)
29543 && !legitimate_indirect_address_p (addr, strict_p))
29544 addr = copy_to_mode_reg (Pmode, addr);
29546 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
29547 x = change_address (x, GET_MODE (x), addr);
29550 return x;
29553 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
29555 On the RS/6000, all integer constants are acceptable, most won't be valid
29556 for particular insns, though. Only easy FP constants are acceptable. */
29558 static bool
29559 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
29561 if (TARGET_ELF && rs6000_tls_referenced_p (x))
29562 return false;
29564 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
29565 || GET_MODE (x) == VOIDmode
29566 || (TARGET_POWERPC64 && mode == DImode)
29567 || easy_fp_constant (x, mode)
29568 || easy_vector_constant (x, mode));
29572 /* A function pointer under AIX is a pointer to a data area whose first word
29573 contains the actual address of the function, whose second word contains a
29574 pointer to its TOC, and whose third word contains a value to place in the
29575 static chain register (r11). Note that if we load the static chain, our
29576 "trampoline" need not have any executable code. */
29578 void
29579 rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
29581 rtx func_addr;
29582 rtx toc_reg;
29583 rtx sc_reg;
29584 rtx stack_ptr;
29585 rtx stack_toc_offset;
29586 rtx stack_toc_mem;
29587 rtx func_toc_offset;
29588 rtx func_toc_mem;
29589 rtx func_sc_offset;
29590 rtx func_sc_mem;
29591 rtx insn;
29592 rtx (*call_func) (rtx, rtx, rtx, rtx);
29593 rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);
29595 stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29596 toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
29598 /* Load up address of the actual function. */
29599 func_desc = force_reg (Pmode, func_desc);
29600 func_addr = gen_reg_rtx (Pmode);
29601 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
29603 if (TARGET_32BIT)
29606 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
29607 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
29608 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
29609 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
29611 call_func = gen_call_indirect_aix32bit;
29612 call_value_func = gen_call_value_indirect_aix32bit;
29614 else
29616 call_func = gen_call_indirect_aix32bit_nor11;
29617 call_value_func = gen_call_value_indirect_aix32bit_nor11;
29620 else
29622 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
29623 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
29624 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
29625 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
29627 call_func = gen_call_indirect_aix64bit;
29628 call_value_func = gen_call_value_indirect_aix64bit;
29630 else
29632 call_func = gen_call_indirect_aix64bit_nor11;
29633 call_value_func = gen_call_value_indirect_aix64bit_nor11;
29637 /* Reserved spot to store the TOC. */
29638 stack_toc_mem = gen_frame_mem (Pmode,
29639 gen_rtx_PLUS (Pmode,
29640 stack_ptr,
29641 stack_toc_offset));
29643 gcc_assert (cfun);
29644 gcc_assert (cfun->machine);
29646 /* Can we optimize saving the TOC in the prologue or do we need to do it at
29647 every call? */
29648 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
29649 cfun->machine->save_toc_in_prologue = true;
29651 else
29653 MEM_VOLATILE_P (stack_toc_mem) = 1;
29654 emit_move_insn (stack_toc_mem, toc_reg);
29657 /* Calculate the address to load the TOC of the called function. We don't
29658 actually load this until the split after reload. */
29659 func_toc_mem = gen_rtx_MEM (Pmode,
29660 gen_rtx_PLUS (Pmode,
29661 func_desc,
29662 func_toc_offset));
29664 /* If we have a static chain, load it up. */
29665 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
29667 func_sc_mem = gen_rtx_MEM (Pmode,
29668 gen_rtx_PLUS (Pmode,
29669 func_desc,
29670 func_sc_offset));
29672 sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
29673 emit_move_insn (sc_reg, func_sc_mem);
29676 /* Create the call. */
29677 if (value)
29678 insn = call_value_func (value, func_addr, flag, func_toc_mem,
29679 stack_toc_mem);
29680 else
29681 insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);
29683 emit_call_insn (insn);
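/* The AIX function descriptor described above, written as a
   hypothetical C layout for illustration (each field is pointer-sized,
   so 12 or 24 bytes in total):

       struct aix_func_desc
       {
         void *entry;          // actual code address of the function
         void *toc;            // callee's TOC pointer
         void *static_chain;   // loaded into r11 when used
       };

   The expansion above loads entry (and, when present, the static
   chain) eagerly, saves the caller's TOC at its fixed stack slot, and
   leaves the load of the callee's TOC to the post-reload split.  */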
29686 /* Return whether we need to always update the saved TOC pointer when we update
29687 the stack pointer. */
29689 static bool
29690 rs6000_save_toc_in_prologue_p (void)
29692 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
29695 #ifdef HAVE_GAS_HIDDEN
29696 # define USE_HIDDEN_LINKONCE 1
29697 #else
29698 # define USE_HIDDEN_LINKONCE 0
29699 #endif
29701 /* Fills in the label name that should be used for a 476 link stack thunk. */
29703 void
29704 get_ppc476_thunk_name (char name[32])
29706 gcc_assert (TARGET_LINK_STACK);
29708 if (USE_HIDDEN_LINKONCE)
29709 sprintf (name, "__ppc476.get_thunk");
29710 else
29711 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
29714 /* This function emits the simple thunk routine that is used to preserve
29715 the link stack on the 476 cpu. */
29717 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
29718 static void
29719 rs6000_code_end (void)
29721 char name[32];
29722 tree decl;
29724 if (!TARGET_LINK_STACK)
29725 return;
29727 get_ppc476_thunk_name (name);
29729 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
29730 build_function_type_list (void_type_node, NULL_TREE));
29731 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
29732 NULL_TREE, void_type_node);
29733 TREE_PUBLIC (decl) = 1;
29734 TREE_STATIC (decl) = 1;
29736 #if RS6000_WEAK
29737 if (USE_HIDDEN_LINKONCE)
29739 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
29740 targetm.asm_out.unique_section (decl, 0);
29741 switch_to_section (get_named_section (decl, NULL, 0));
29742 DECL_WEAK (decl) = 1;
29743 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
29744 targetm.asm_out.globalize_label (asm_out_file, name);
29745 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
29746 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
29748 else
29749 #endif
29751 switch_to_section (text_section);
29752 ASM_OUTPUT_LABEL (asm_out_file, name);
29755 DECL_INITIAL (decl) = make_node (BLOCK);
29756 current_function_decl = decl;
29757 init_function_start (decl);
29758 first_function_block_is_cold = false;
29759 /* Make sure unwind info is emitted for the thunk if needed. */
29760 final_start_function (emit_barrier (), asm_out_file, 1);
29762 fputs ("\tblr\n", asm_out_file);
29764 final_end_function ();
29765 init_insn_lengths ();
29766 free_after_compilation (cfun);
29767 set_cfun (NULL);
29768 current_function_decl = NULL;
29771 /* Add r30 to hard reg set if the prologue sets it up and it is not
29772 pic_offset_table_rtx. */
29774 static void
29775 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
29777 if (!TARGET_SINGLE_PIC_BASE
29778 && TARGET_TOC
29779 && TARGET_MINIMAL_TOC
29780 && get_pool_size () != 0)
29781 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
29784 struct gcc_target targetm = TARGET_INITIALIZER;
29786 #include "gt-rs6000.h"