/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2013 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
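/* Note: as plain macros, min and max evaluate their arguments more than
   once, so they must not be used with expressions that have side
   effects.  */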
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);


static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
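/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0),
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */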
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
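/* The minimum is written as -0x7fffffff - 1 rather than -0x80000000 because
   the literal 0x80000000 does not fit in a 32-bit signed int; this is the
   same idiom commonly used to define INT_MIN.  */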
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
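/* E.g. rs6000_cpu_name_lookup ("power7") returns the index of the matching
   processor_target_table entry, while any name not present in
   rs6000-cpus.def (or a NULL name) yields -1.  */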
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* TF/TD modes are special in that they always take 2 registers.  */
  if (FP_REGNO_P (regno))
    reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
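/* E.g. an 8-byte DImode value in a 32-bit GPR (reg_size == 4) needs
   (8 + 4 - 1) / 4 == 2 consecutive registers, while the same value in an
   FPR (reg_size == 8) needs (8 + 8 - 1) / 8 == 1 register.  */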
1562 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1563 MODE. */
1564 static int
1565 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1567 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1569 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1570 register combinations, and use PTImode where we need to deal with quad
1571 word memory operations. Don't allow quad words in the argument or frame
1572 pointer registers, just registers 0..31. */
1573 if (mode == PTImode)
1574 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1575 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1576 && ((regno & 1) == 0));
1578 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1579 implementations. Don't allow an item to be split between a FP register
1580 and an Altivec register. */
1581 if (VECTOR_MEM_VSX_P (mode))
1583 if (FP_REGNO_P (regno))
1584 return FP_REGNO_P (last_regno);
1586 if (ALTIVEC_REGNO_P (regno))
1587 return ALTIVEC_REGNO_P (last_regno);
1590 /* Allow TImode in all VSX registers if the user asked for it. Note, PTImode
1591 can only go in GPRs. */
1592 if (mode == TImode && TARGET_VSX_TIMODE && VSX_REGNO_P (regno))
1593 return 1;
1595 /* The GPRs can hold any mode, but values bigger than one register
1596 cannot go past R31. */
1597 if (INT_REGNO_P (regno))
1598 return INT_REGNO_P (last_regno);
1600 /* The float registers (except for VSX vector modes) can only hold floating
1601 modes and DImode. */
1602 if (FP_REGNO_P (regno))
1604 if (SCALAR_FLOAT_MODE_P (mode)
1605 && (mode != TDmode || (regno % 2) == 0)
1606 && FP_REGNO_P (last_regno))
1607 return 1;
1609 if (GET_MODE_CLASS (mode) == MODE_INT
1610 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1611 return 1;
1613 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1614 && PAIRED_VECTOR_MODE (mode))
1615 return 1;
1617 return 0;
1620 /* The CR register can only hold CC modes. */
1621 if (CR_REGNO_P (regno))
1622 return GET_MODE_CLASS (mode) == MODE_CC;
1624 if (CA_REGNO_P (regno))
1625 return mode == BImode;
1627 /* AltiVec only in AltiVec registers. */
1628 if (ALTIVEC_REGNO_P (regno))
1629 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1631 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1632 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1633 return 1;
1635 /* We cannot put non-VSX TImode or PTImode anywhere except the general
1636 registers, and it must be able to fit within the register set. */
1638 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1641 /* Print interesting facts about registers. */
1642 static void
1643 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1645 int r, m;
1647 for (r = first_regno; r <= last_regno; ++r)
1649 const char *comma = "";
1650 int len;
1652 if (first_regno == last_regno)
1653 fprintf (stderr, "%s:\t", reg_name);
1654 else
1655 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1657 len = 8;
1658 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1659 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1661 if (len > 70)
1663 fprintf (stderr, ",\n\t");
1664 len = 8;
1665 comma = "";
1668 if (rs6000_hard_regno_nregs[m][r] > 1)
1669 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1670 rs6000_hard_regno_nregs[m][r]);
1671 else
1672 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1674 comma = ", ";
1677 if (call_used_regs[r])
1679 if (len > 70)
1681 fprintf (stderr, ",\n\t");
1682 len = 8;
1683 comma = "";
1686 len += fprintf (stderr, "%s%s", comma, "call-used");
1687 comma = ", ";
1690 if (fixed_regs[r])
1692 if (len > 70)
1694 fprintf (stderr, ",\n\t");
1695 len = 8;
1696 comma = "";
1699 len += fprintf (stderr, "%s%s", comma, "fixed");
1700 comma = ", ";
1703 if (len > 70)
1705 fprintf (stderr, ",\n\t");
1706 comma = "";
1709 len += fprintf (stderr, "%sreg-class = %s", comma,
1710 reg_class_names[(int)rs6000_regno_regclass[r]]);
1711 comma = ", ";
1713 if (len > 70)
1715 fprintf (stderr, ",\n\t");
1716 comma = "";
1719 fprintf (stderr, "%sregno = %d\n", comma, r);
1723 #define DEBUG_FMT_ID "%-32s= "
1724 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1725 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1726 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
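/* For example, fprintf (stderr, DEBUG_FMT_S, "cmodel", "small") prints the
   name left-justified in a 32-column field followed by "= small".  */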
1728 /* Print various interesting information with -mdebug=reg. */
1729 static void
1730 rs6000_debug_reg_global (void)
1732 static const char *const tf[2] = { "false", "true" };
1733 const char *nl = (const char *)0;
1734 int m;
1735 size_t m1, m2, v;
1736 char costly_num[20];
1737 char nop_num[20];
1738 char flags_buffer[40];
1739 const char *costly_str;
1740 const char *nop_str;
1741 const char *trace_str;
1742 const char *abi_str;
1743 const char *cmodel_str;
1744 struct cl_target_option cl_opts;
1746 /* Map enum rs6000_vector to string. */
1747 static const char *rs6000_debug_vector_unit[] = {
1748 "none",
1749 "altivec",
1750 "vsx",
1751 "p8_vector",
1752 "paired",
1753 "spe",
1754 "other"
1757 /* Modes we want tieable information on. */
1758 static const enum machine_mode print_tieable_modes[] = {
1759 QImode,
1760 HImode,
1761 SImode,
1762 DImode,
1763 TImode,
1764 PTImode,
1765 SFmode,
1766 DFmode,
1767 TFmode,
1768 SDmode,
1769 DDmode,
1770 TDmode,
1771 V8QImode,
1772 V4HImode,
1773 V2SImode,
1774 V16QImode,
1775 V8HImode,
1776 V4SImode,
1777 V2DImode,
1778 V32QImode,
1779 V16HImode,
1780 V8SImode,
1781 V4DImode,
1782 V2SFmode,
1783 V4SFmode,
1784 V2DFmode,
1785 V8SFmode,
1786 V4DFmode,
1787 CCmode,
1788 CCUNSmode,
1789 CCEQmode,
1792 /* Virtual regs we are interested in. */
1793 static const struct {
1794 int regno; /* register number. */
1795 const char *name; /* register name. */
1796 } virtual_regs[] = {
1797 { STACK_POINTER_REGNUM, "stack pointer:" },
1798 { TOC_REGNUM, "toc: " },
1799 { STATIC_CHAIN_REGNUM, "static chain: " },
1800 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
1801 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
1802 { ARG_POINTER_REGNUM, "arg pointer: " },
1803 { FRAME_POINTER_REGNUM, "frame pointer:" },
1804 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
1805 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
1806 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
1807 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
1808 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
1809 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
1810 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
1811 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
1812 { LAST_VIRTUAL_REGISTER, "last virtual: " },
1815 fputs ("\nHard register information:\n", stderr);
1816 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
1817 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
1818 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1819 LAST_ALTIVEC_REGNO,
1820 "vs");
1821 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1822 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1823 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1824 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1825 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1826 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1827 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1828 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1830 fputs ("\nVirtual/stack/frame registers:\n", stderr);
1831 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
1832 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
1834 fprintf (stderr,
1835 "\n"
1836 "d reg_class = %s\n"
1837 "f reg_class = %s\n"
1838 "v reg_class = %s\n"
1839 "wa reg_class = %s\n"
1840 "wd reg_class = %s\n"
1841 "wf reg_class = %s\n"
1842 "wg reg_class = %s\n"
1843 "wl reg_class = %s\n"
1844 "wm reg_class = %s\n"
1845 "wr reg_class = %s\n"
1846 "ws reg_class = %s\n"
1847 "wt reg_class = %s\n"
1848 "wv reg_class = %s\n"
1849 "wx reg_class = %s\n"
1850 "wz reg_class = %s\n"
1851 "\n",
1852 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1853 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1854 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1855 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1856 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1857 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1858 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
1859 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
1860 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
1861 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
1862 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
1863 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
1864 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
1865 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
1866 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
1868 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1869 if (rs6000_vector_unit[m] || rs6000_vector_mem[m]
1870 || (rs6000_vector_reload[m][0] != CODE_FOR_nothing)
1871 || (rs6000_vector_reload[m][1] != CODE_FOR_nothing))
1873 nl = "\n";
1874 fprintf (stderr,
1875 "Vector mode: %-5s arithmetic: %-10s move: %-10s "
1876 "reload-out: %c reload-in: %c\n",
1877 GET_MODE_NAME (m),
1878 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1879 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ],
1880 (rs6000_vector_reload[m][0] != CODE_FOR_nothing) ? 'y' : 'n',
1881 (rs6000_vector_reload[m][1] != CODE_FOR_nothing) ? 'y' : 'n');
1884 if (nl)
1885 fputs (nl, stderr);
1887 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
1889 enum machine_mode mode1 = print_tieable_modes[m1];
1890 bool first_time = true;
1892 nl = (const char *)0;
1893 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
1895 enum machine_mode mode2 = print_tieable_modes[m2];
1896 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
1898 if (first_time)
1900 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
1901 nl = "\n";
1902 first_time = false;
1905 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
1909 if (!first_time)
1910 fputs ("\n", stderr);
1913 if (nl)
1914 fputs (nl, stderr);
1916 if (rs6000_recip_control)
1918 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1920 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1921 if (rs6000_recip_bits[m])
1923 fprintf (stderr,
1924 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1925 GET_MODE_NAME (m),
1926 (RS6000_RECIP_AUTO_RE_P (m)
1927 ? "auto"
1928 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1929 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1930 ? "auto"
1931 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1934 fputs ("\n", stderr);
1937 if (rs6000_cpu_index >= 0)
1939 const char *name = processor_target_table[rs6000_cpu_index].name;
1940 HOST_WIDE_INT flags
1941 = processor_target_table[rs6000_cpu_index].target_enable;
1943 sprintf (flags_buffer, "-mcpu=%s flags", name);
1944 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
1946 else
1947 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
1949 if (rs6000_tune_index >= 0)
1951 const char *name = processor_target_table[rs6000_tune_index].name;
1952 HOST_WIDE_INT flags
1953 = processor_target_table[rs6000_tune_index].target_enable;
1955 sprintf (flags_buffer, "-mtune=%s flags", name);
1956 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
1958 else
1959 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
1961 cl_target_option_save (&cl_opts, &global_options);
1962 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
1963 rs6000_isa_flags);
1965 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
1966 rs6000_isa_flags_explicit);
1968 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
1969 rs6000_builtin_mask);
1971 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
1973 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
1974 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
1976 switch (rs6000_sched_costly_dep)
1978 case max_dep_latency:
1979 costly_str = "max_dep_latency";
1980 break;
1982 case no_dep_costly:
1983 costly_str = "no_dep_costly";
1984 break;
1986 case all_deps_costly:
1987 costly_str = "all_deps_costly";
1988 break;
1990 case true_store_to_load_dep_costly:
1991 costly_str = "true_store_to_load_dep_costly";
1992 break;
1994 case store_to_load_dep_costly:
1995 costly_str = "store_to_load_dep_costly";
1996 break;
1998 default:
1999 costly_str = costly_num;
2000 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2001 break;
2004 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2006 switch (rs6000_sched_insert_nops)
2008 case sched_finish_regroup_exact:
2009 nop_str = "sched_finish_regroup_exact";
2010 break;
2012 case sched_finish_pad_groups:
2013 nop_str = "sched_finish_pad_groups";
2014 break;
2016 case sched_finish_none:
2017 nop_str = "sched_finish_none";
2018 break;
2020 default:
2021 nop_str = nop_num;
2022 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2023 break;
2026 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2028 switch (rs6000_sdata)
2030 default:
2031 case SDATA_NONE:
2032 break;
2034 case SDATA_DATA:
2035 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2036 break;
2038 case SDATA_SYSV:
2039 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2040 break;
2042 case SDATA_EABI:
2043 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2044 break;
2048 switch (rs6000_traceback)
2050 case traceback_default: trace_str = "default"; break;
2051 case traceback_none: trace_str = "none"; break;
2052 case traceback_part: trace_str = "part"; break;
2053 case traceback_full: trace_str = "full"; break;
2054 default: trace_str = "unknown"; break;
2057 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2059 switch (rs6000_current_cmodel)
2061 case CMODEL_SMALL: cmodel_str = "small"; break;
2062 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2063 case CMODEL_LARGE: cmodel_str = "large"; break;
2064 default: cmodel_str = "unknown"; break;
2067 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2069 switch (rs6000_current_abi)
2071 case ABI_NONE: abi_str = "none"; break;
2072 case ABI_AIX: abi_str = "aix"; break;
2073 case ABI_V4: abi_str = "V4"; break;
2074 case ABI_DARWIN: abi_str = "darwin"; break;
2075 default: abi_str = "unknown"; break;
2078 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2080 if (rs6000_altivec_abi)
2081 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2083 if (rs6000_spe_abi)
2084 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2086 if (rs6000_darwin64_abi)
2087 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2089 if (rs6000_float_gprs)
2090 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2092 if (TARGET_LINK_STACK)
2093 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2095 if (targetm.lra_p ())
2096 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2098 if (TARGET_P8_FUSION)
2099 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2100 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2102 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2103 TARGET_SECURE_PLT ? "secure" : "bss");
2104 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2105 aix_struct_return ? "aix" : "sysv");
2106 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2107 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2108 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2109 tf[!!rs6000_align_branch_targets]);
2110 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2111 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2112 rs6000_long_double_type_size);
2113 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2114 (int)rs6000_sched_restricted_insns_priority);
2115 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2116 (int)END_BUILTINS);
2117 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2118 (int)RS6000_BUILTIN_COUNT);
2121 /* Initialize the various global tables that are based on register size. */
2122 static void
2123 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2125 int r, m, c;
2126 int align64;
2127 int align32;
2129 /* Precalculate REGNO_REG_CLASS. */
2130 rs6000_regno_regclass[0] = GENERAL_REGS;
2131 for (r = 1; r < 32; ++r)
2132 rs6000_regno_regclass[r] = BASE_REGS;
2134 for (r = 32; r < 64; ++r)
2135 rs6000_regno_regclass[r] = FLOAT_REGS;
2137 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2138 rs6000_regno_regclass[r] = NO_REGS;
2140 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2141 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2143 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2144 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2145 rs6000_regno_regclass[r] = CR_REGS;
2147 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2148 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2149 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2150 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2151 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2152 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2153 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2154 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2155 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2157 /* Precalculate vector information; this must be set up before the
2158 rs6000_hard_regno_nregs_internal calls below. */
2159 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2161 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
2162 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
2163 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
2166 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2167 rs6000_constraints[c] = NO_REGS;
2169 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128 controls
2170 whether the compiler believes it can use native alignment or must still use 128-bit alignment. */
2171 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2173 align64 = 64;
2174 align32 = 32;
2176 else
2178 align64 = 128;
2179 align32 = 128;
2182 /* V2DF mode, VSX only. */
2183 if (TARGET_VSX)
2185 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2186 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2187 rs6000_vector_align[V2DFmode] = align64;
2190 /* V4SF mode, either VSX or Altivec. */
2191 if (TARGET_VSX)
2193 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2194 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2195 rs6000_vector_align[V4SFmode] = align32;
2197 else if (TARGET_ALTIVEC)
2199 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2200 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2201 rs6000_vector_align[V4SFmode] = align32;
2204 /* V16QImode, V8HImode, V4SImode are Altivec only, but the loads and stores
2205 may use VSX. */
2206 if (TARGET_ALTIVEC)
2208 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2209 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2210 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2211 rs6000_vector_align[V4SImode] = align32;
2212 rs6000_vector_align[V8HImode] = align32;
2213 rs6000_vector_align[V16QImode] = align32;
2215 if (TARGET_VSX)
2217 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2218 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2219 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2221 else
2223 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2224 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2225 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2229 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit, but
2230 allow it under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2231 if (TARGET_VSX)
2233 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2234 rs6000_vector_unit[V2DImode]
2235 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2236 rs6000_vector_align[V2DImode] = align64;
2239 /* DFmode, see if we want to use the VSX unit. */
2240 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2242 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2243 rs6000_vector_mem[DFmode]
2244 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2245 rs6000_vector_align[DFmode] = align64;
2248 /* Allow TImode in VSX registers and set the VSX memory macros. */
2249 if (TARGET_VSX && TARGET_VSX_TIMODE)
2251 rs6000_vector_mem[TImode] = VECTOR_VSX;
2252 rs6000_vector_align[TImode] = align64;
2255 /* TODO add SPE and paired floating point vector support. */
2257 /* Register class constraints for the constraints that depend on compile
2258 switches. */
2259 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2260 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2262 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2263 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2265 if (TARGET_VSX)
2267 /* At present, we just use VSX_REGS, but we have different constraints
2268 based on the use, in case we want to fine tune the default register
2269 class used. wa = any VSX register, wf = register class to use for
2270 V4SF, wd = register class to use for V2DF, and ws = register class to
2271 use for DF scalars. */
2272 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2273 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2274 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2275 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2276 ? VSX_REGS
2277 : FLOAT_REGS);
2278 if (TARGET_VSX_TIMODE)
2279 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;
2282 /* Add conditional constraints based on various options, to allow us to
2283 collapse multiple insn patterns. */
2284 if (TARGET_ALTIVEC)
2285 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2287 if (TARGET_MFPGPR)
2288 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2290 if (TARGET_LFIWAX)
2291 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;
2293 if (TARGET_DIRECT_MOVE)
2294 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2296 if (TARGET_POWERPC64)
2297 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2299 if (TARGET_P8_VECTOR)
2300 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2302 if (TARGET_STFIWX)
2303 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;
2305 if (TARGET_LFIWZX)
2306 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
2308 /* Set up the reload helper functions. */
2309 if (TARGET_VSX || TARGET_ALTIVEC)
2311 if (TARGET_64BIT)
2313 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2314 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2315 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2316 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2317 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2318 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2319 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2320 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2321 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2322 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2323 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2324 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2325 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2327 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2328 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2329 rs6000_vector_reload[DDmode][0] = CODE_FOR_reload_dd_di_store;
2330 rs6000_vector_reload[DDmode][1] = CODE_FOR_reload_dd_di_load;
2332 if (TARGET_VSX_TIMODE)
2334 rs6000_vector_reload[TImode][0] = CODE_FOR_reload_ti_di_store;
2335 rs6000_vector_reload[TImode][1] = CODE_FOR_reload_ti_di_load;
2338 else
2340 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2341 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2342 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2343 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2344 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2345 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2346 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2347 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2348 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2349 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2350 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2351 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2352 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2354 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2355 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2356 rs6000_vector_reload[DDmode][0] = CODE_FOR_reload_dd_si_store;
2357 rs6000_vector_reload[DDmode][1] = CODE_FOR_reload_dd_si_load;
2359 if (TARGET_VSX_TIMODE)
2361 rs6000_vector_reload[TImode][0] = CODE_FOR_reload_ti_si_store;
2362 rs6000_vector_reload[TImode][1] = CODE_FOR_reload_ti_si_load;
2367 /* Precalculate HARD_REGNO_NREGS. */
2368 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2369 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2370 rs6000_hard_regno_nregs[m][r]
2371 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2373 /* Precalculate HARD_REGNO_MODE_OK. */
2374 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2375 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2376 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2377 rs6000_hard_regno_mode_ok_p[m][r] = true;
2379 /* Precalculate CLASS_MAX_NREGS sizes. */
2380 for (c = 0; c < LIM_REG_CLASSES; ++c)
2382 int reg_size;
2384 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2385 reg_size = UNITS_PER_VSX_WORD;
2387 else if (c == ALTIVEC_REGS)
2388 reg_size = UNITS_PER_ALTIVEC_WORD;
2390 else if (c == FLOAT_REGS)
2391 reg_size = UNITS_PER_FP_WORD;
2393 else
2394 reg_size = UNITS_PER_WORD;
2396 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2398 int reg_size2 = reg_size;
2400 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2401 if (m == TDmode || m == TFmode)
2402 reg_size2 = UNITS_PER_FP_WORD;
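/* Same round-up division as in rs6000_hard_regno_nregs_internal above.  */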
2404 rs6000_class_max_nregs[m][c]
2405 = (GET_MODE_SIZE (m) + reg_size2 - 1) / reg_size2;
2409 if (TARGET_E500_DOUBLE)
2410 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2412 /* Calculate which modes to automatically generate code to use the
2413 reciprocal divide and square root instructions. In the future, possibly
2414 automatically generate the instructions even if the user did not specify
2415 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
2416 not accurate enough. */
2417 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2418 if (TARGET_FRES)
2419 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2420 if (TARGET_FRE)
2421 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2422 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2423 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2424 if (VECTOR_UNIT_VSX_P (V2DFmode))
2425 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2427 if (TARGET_FRSQRTES)
2428 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2429 if (TARGET_FRSQRTE)
2430 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2431 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2432 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2433 if (VECTOR_UNIT_VSX_P (V2DFmode))
2434 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2436 if (rs6000_recip_control)
2438 if (!flag_finite_math_only)
2439 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2440 if (flag_trapping_math)
2441 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2442 if (!flag_reciprocal_math)
2443 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2444 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2446 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2447 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2448 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2450 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2451 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2452 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2454 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2455 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2456 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2458 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2459 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2460 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2462 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2463 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2464 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2466 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2467 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2468 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2470 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2471 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2472 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2474 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2475 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2476 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2480 if (global_init_p || TARGET_DEBUG_TARGET)
2482 if (TARGET_DEBUG_REG)
2483 rs6000_debug_reg_global ();
2485 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2486 fprintf (stderr,
2487 "SImode variable mult cost = %d\n"
2488 "SImode constant mult cost = %d\n"
2489 "SImode short constant mult cost = %d\n"
2490 "DImode multipliciation cost = %d\n"
2491 "SImode division cost = %d\n"
2492 "DImode division cost = %d\n"
2493 "Simple fp operation cost = %d\n"
2494 "DFmode multiplication cost = %d\n"
2495 "SFmode division cost = %d\n"
2496 "DFmode division cost = %d\n"
2497 "cache line size = %d\n"
2498 "l1 cache size = %d\n"
2499 "l2 cache size = %d\n"
2500 "simultaneous prefetches = %d\n"
2501 "\n",
2502 rs6000_cost->mulsi,
2503 rs6000_cost->mulsi_const,
2504 rs6000_cost->mulsi_const9,
2505 rs6000_cost->muldi,
2506 rs6000_cost->divsi,
2507 rs6000_cost->divdi,
2508 rs6000_cost->fp,
2509 rs6000_cost->dmul,
2510 rs6000_cost->sdiv,
2511 rs6000_cost->ddiv,
2512 rs6000_cost->cache_line_size,
2513 rs6000_cost->l1_cache_size,
2514 rs6000_cost->l2_cache_size,
2515 rs6000_cost->simultaneous_prefetches);
2519 #if TARGET_MACHO
2520 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2522 static void
2523 darwin_rs6000_override_options (void)
2525 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2526 off. */
2527 rs6000_altivec_abi = 1;
2528 TARGET_ALTIVEC_VRSAVE = 1;
2529 rs6000_current_abi = ABI_DARWIN;
2531 if (DEFAULT_ABI == ABI_DARWIN
2532 && TARGET_64BIT)
2533 darwin_one_byte_bool = 1;
2535 if (TARGET_64BIT && ! TARGET_POWERPC64)
2537 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2538 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2540 if (flag_mkernel)
2542 rs6000_default_long_calls = 1;
2543 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
2546 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2547 Altivec. */
2548 if (!flag_mkernel && !flag_apple_kext
2549 && TARGET_64BIT
2550 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
2551 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2553 /* Unless the user (not the configurer) has explicitly overridden
2554 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2555 G4 unless targeting the kernel. */
2556 if (!flag_mkernel
2557 && !flag_apple_kext
2558 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2559 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
2560 && ! global_options_set.x_rs6000_cpu_index)
2562 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2565 #endif
2567 /* If not otherwise specified by a target, make 'long double' equivalent to
2568 'double'. */
2570 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2571 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2572 #endif
2574 /* Return the builtin mask of the various options used that could affect which
2575 builtins are used. In the past we used target_flags, but we've run out of
2576 bits, and some options like SPE and PAIRED are no longer in
2577 target_flags. */
2579 HOST_WIDE_INT
2580 rs6000_builtin_mask_calculate (void)
2582 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2583 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2584 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2585 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2586 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2587 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2588 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2589 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2590 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2591 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
2592 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
2593 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0));
2596 /* Override command line options. Mostly we process the processor type and
2597 sometimes adjust other TARGET_ options. */
2599 static bool
2600 rs6000_option_override_internal (bool global_init_p)
2602 bool ret = true;
2603 bool have_cpu = false;
2605 /* The default cpu requested at configure time, if any. */
2606 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2608 HOST_WIDE_INT set_masks;
2609 int cpu_index;
2610 int tune_index;
2611 struct cl_target_option *main_target_opt
2612 = ((global_init_p || target_option_default_node == NULL)
2613 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2615 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2616 library functions, so warn about it. The flag may be useful for
2617 performance studies from time to time though, so don't disable it
2618 entirely. */
2619 if (global_options_set.x_rs6000_alignment_flags
2620 && rs6000_alignment_flags == MASK_ALIGN_POWER
2621 && DEFAULT_ABI == ABI_DARWIN
2622 && TARGET_64BIT)
2623 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2624 " it is incompatible with the installed C and C++ libraries");
2626 /* Numerous experiments show that IRA-based loop pressure
2627 calculation works better for RTL loop invariant motion on targets
2628 with enough (>= 32) registers. It is an expensive optimization,
2629 so it is on only for peak performance. */
2630 if (optimize >= 3 && global_init_p)
2631 flag_ira_loop_pressure = 1;
2633 /* Set the pointer size. */
2634 if (TARGET_64BIT)
2636 rs6000_pmode = (int)DImode;
2637 rs6000_pointer_size = 64;
2639 else
2641 rs6000_pmode = (int)SImode;
2642 rs6000_pointer_size = 32;
2645 /* Some OSs don't support saving the high part of 64-bit registers on context
2646 switch. Other OSs don't support saving Altivec registers. On those OSs,
2647 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
2648 if the user wants either, the user must explicitly specify them and we
2649 won't interfere with the user's specification. */
2651 set_masks = POWERPC_MASKS;
2652 #ifdef OS_MISSING_POWERPC64
2653 if (OS_MISSING_POWERPC64)
2654 set_masks &= ~OPTION_MASK_POWERPC64;
2655 #endif
2656 #ifdef OS_MISSING_ALTIVEC
2657 if (OS_MISSING_ALTIVEC)
2658 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
2659 #endif
2661 /* Don't let the processor default override flags that were given explicitly. */
2662 set_masks &= ~rs6000_isa_flags_explicit;
2664 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
2665 the cpu in a target attribute or pragma, but did not specify a tuning
2666 option, use the cpu for the tuning option rather than the option specified
2667 with -mtune on the command line. Process a '--with-cpu' configuration
2668 request as an implicit -mcpu. */
2669 if (rs6000_cpu_index >= 0)
2671 cpu_index = rs6000_cpu_index;
2672 have_cpu = true;
2674 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2676 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2677 have_cpu = true;
2679 else if (implicit_cpu)
2681 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
2682 have_cpu = true;
2684 else
2686 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
2687 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2688 have_cpu = false;
2691 gcc_assert (cpu_index >= 0);
2693 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
2694 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
2695 with those from the cpu, except for options that were explicitly set. If
2696 we don't have a cpu, do not override the target bits set in
2697 TARGET_DEFAULT. */
2698 if (have_cpu)
2700 rs6000_isa_flags &= ~set_masks;
2701 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2702 & set_masks);
2704 else
2705 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2706 & ~rs6000_isa_flags_explicit);
2708 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
2709 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
2710 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
2711 to using rs6000_isa_flags, we need to do the initialization here. */
2712 if (!have_cpu)
2713 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
2715 if (rs6000_tune_index >= 0)
2716 tune_index = rs6000_tune_index;
2717 else if (have_cpu)
2718 rs6000_tune_index = tune_index = cpu_index;
2719 else
2721 size_t i;
2722 enum processor_type tune_proc
2723 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2725 tune_index = -1;
2726 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2727 if (processor_target_table[i].processor == tune_proc)
2729 rs6000_tune_index = tune_index = i;
2730 break;
2734 gcc_assert (tune_index >= 0);
2735 rs6000_cpu = processor_target_table[tune_index].processor;
2737 /* Pick defaults for SPE related control flags. Do this early to make sure
2738 that the TARGET_ macros are representative ASAP. */
2740 int spe_capable_cpu =
2741 (rs6000_cpu == PROCESSOR_PPC8540
2742 || rs6000_cpu == PROCESSOR_PPC8548);
2744 if (!global_options_set.x_rs6000_spe_abi)
2745 rs6000_spe_abi = spe_capable_cpu;
2747 if (!global_options_set.x_rs6000_spe)
2748 rs6000_spe = spe_capable_cpu;
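/* A float_gprs value of 1 requests single-precision floats in the GPRs (the
   e500v1 core used in the PPC8540); 2 adds double precision as well (the
   e500v2 core used in the PPC8548).  */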
2750 if (!global_options_set.x_rs6000_float_gprs)
2751 rs6000_float_gprs =
2752 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2753 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2754 : 0);
2757 if (global_options_set.x_rs6000_spe_abi
2758 && rs6000_spe_abi
2759 && !TARGET_SPE_ABI)
2760 error ("not configured for SPE ABI");
2762 if (global_options_set.x_rs6000_spe
2763 && rs6000_spe
2764 && !TARGET_SPE)
2765 error ("not configured for SPE instruction set");
2767 if (main_target_opt != NULL
2768 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2769 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2770 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2771 error ("target attribute or pragma changes SPE ABI");
2773 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2774 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2775 || rs6000_cpu == PROCESSOR_PPCE5500)
2777 if (TARGET_ALTIVEC)
2778 error ("AltiVec not supported in this target");
2779 if (TARGET_SPE)
2780 error ("SPE not supported in this target");
2782 if (rs6000_cpu == PROCESSOR_PPCE6500)
2784 if (TARGET_SPE)
2785 error ("SPE not supported in this target");
2788 /* Disable Cell microcode if we are optimizing for the Cell
2789 and not optimizing for size. */
2790 if (rs6000_gen_cell_microcode == -1)
2791 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2792 && !optimize_size);
2794 /* If we are optimizing big endian systems for space and it's OK to
2795 use instructions that would be microcoded on the Cell, use the
2796 load/store multiple and string instructions. */
2797 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2798 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
2799 | OPTION_MASK_STRING);
2801 /* Don't allow -mmultiple or -mstring on little endian systems
2802 unless the cpu is a 750, because the hardware doesn't support the
2803 instructions used in little endian mode, and using them causes an alignment
2804 trap. The 750 does not cause an alignment trap (except when the
2805 target is unaligned). */
2807 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2809 if (TARGET_MULTIPLE)
2811 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
2812 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
2813 warning (0, "-mmultiple is not supported on little endian systems");
2816 if (TARGET_STRING)
2818 rs6000_isa_flags &= ~OPTION_MASK_STRING;
2819 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
2820 warning (0, "-mstring is not supported on little endian systems");
2824 /* Add some warnings for VSX. */
2825 if (TARGET_VSX)
2827 const char *msg = NULL;
2828 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2829 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2831 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
2832 msg = N_("-mvsx requires hardware floating point");
2833 else
2834 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
2836 else if (TARGET_PAIRED_FLOAT)
2837 msg = N_("-mvsx and -mpaired are incompatible");
2838 /* The hardware will allow VSX and little endian, but until we make sure
2839 things like vector select, etc. work, don't allow VSX on little endian
2840 systems at this point. */
2841 else if (!BYTES_BIG_ENDIAN)
2842 msg = N_("-mvsx used with little endian code");
2843 else if (TARGET_AVOID_XFORM > 0)
2844 msg = N_("-mvsx needs indexed addressing");
2845 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
2846 & OPTION_MASK_ALTIVEC))
2848 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
2849 msg = N_("-mvsx and -mno-altivec are incompatible");
2850 else
2851 msg = N_("-mno-altivec disables vsx");
2854 if (msg)
2856 warning (0, msg);
2857 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
2858 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
2862 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
2863 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
2865 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2866 unless the user explicitly used the -mno-<option> to disable the code. */
2867 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
2868 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2869 else if (TARGET_VSX)
2870 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2871 else if (TARGET_POPCNTD)
2872 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
2873 else if (TARGET_DFP)
2874 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2875 else if (TARGET_CMPB)
2876 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
2877 else if (TARGET_FPRND)
2878 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
2879 else if (TARGET_POPCNTB)
2880 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
2881 else if (TARGET_ALTIVEC)
2882 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
2884 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
2886 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
2887 error ("-mcrypto requires -maltivec");
2888 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
2891 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
2893 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
2894 error ("-mdirect-move requires -mvsx");
2895 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
2898 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
2900 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
2901 error ("-mpower8-vector requires -maltivec");
2902 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
2905 if (TARGET_P8_VECTOR && !TARGET_VSX)
2907 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
2908 error ("-mpower8-vector requires -mvsx");
2909 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
2912 if (TARGET_VSX_TIMODE && !TARGET_VSX)
2914 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
2915 error ("-mvsx-timode requires -mvsx");
2916 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
2919 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
2920 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
2922 /* E500mc does "better" if we inline more aggressively. Respect the
2923 user's opinion, though. */
2924 if (rs6000_block_move_inline_limit == 0
2925 && (rs6000_cpu == PROCESSOR_PPCE500MC
2926 || rs6000_cpu == PROCESSOR_PPCE500MC64
2927 || rs6000_cpu == PROCESSOR_PPCE5500
2928 || rs6000_cpu == PROCESSOR_PPCE6500))
2929 rs6000_block_move_inline_limit = 128;
2931 /* store_one_arg depends on expand_block_move to handle at least the
2932 size of reg_parm_stack_space. */
2933 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2934 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
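/* This floor presumably mirrors reg_parm_stack_space: eight parameter
   registers times the register size in bytes.  */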
2936 if (global_init_p)
2938 /* If the appropriate debug option is enabled, replace the target hooks
2939 with debug versions that call the real version and then print
2940 debugging information. */
2941 if (TARGET_DEBUG_COST)
2943 targetm.rtx_costs = rs6000_debug_rtx_costs;
2944 targetm.address_cost = rs6000_debug_address_cost;
2945 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2948 if (TARGET_DEBUG_ADDR)
2950 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2951 targetm.legitimize_address = rs6000_debug_legitimize_address;
2952 rs6000_secondary_reload_class_ptr
2953 = rs6000_debug_secondary_reload_class;
2954 rs6000_secondary_memory_needed_ptr
2955 = rs6000_debug_secondary_memory_needed;
2956 rs6000_cannot_change_mode_class_ptr
2957 = rs6000_debug_cannot_change_mode_class;
2958 rs6000_preferred_reload_class_ptr
2959 = rs6000_debug_preferred_reload_class;
2960 rs6000_legitimize_reload_address_ptr
2961 = rs6000_debug_legitimize_reload_address;
2962 rs6000_mode_dependent_address_ptr
2963 = rs6000_debug_mode_dependent_address;
2966 if (rs6000_veclibabi_name)
2968 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2969 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2970 else
2972 error ("unknown vectorization library ABI type (%s) for "
2973 "-mveclibabi= switch", rs6000_veclibabi_name);
2974 ret = false;
2979 if (!global_options_set.x_rs6000_long_double_type_size)
2981 if (main_target_opt != NULL
2982 && (main_target_opt->x_rs6000_long_double_type_size
2983 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2984 error ("target attribute or pragma changes long double size");
2985 else
2986 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2989 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2990 if (!global_options_set.x_rs6000_ieeequad)
2991 rs6000_ieeequad = 1;
2992 #endif
2994 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2995 target attribute or pragma which automatically enables both options,
2996 unless the altivec ABI was set. This is set by default for 64-bit, but
2997 not for 32-bit. */
2998 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2999 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3000 & ~rs6000_isa_flags_explicit);
3002 /* Enable Altivec ABI for AIX -maltivec. */
3003 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3005 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3006 error ("target attribute or pragma changes AltiVec ABI");
3007 else
3008 rs6000_altivec_abi = 1;
3011 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3012 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3013 be explicitly overridden in either case. */
3014 if (TARGET_ELF)
3016 if (!global_options_set.x_rs6000_altivec_abi
3017 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3019 if (main_target_opt != NULL &&
3020 !main_target_opt->x_rs6000_altivec_abi)
3021 error ("target attribute or pragma changes AltiVec ABI");
3022 else
3023 rs6000_altivec_abi = 1;
3027 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3028 So far, the only darwin64 targets are also MACH-O. */
3029 if (TARGET_MACHO
3030 && DEFAULT_ABI == ABI_DARWIN
3031 && TARGET_64BIT)
3033 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3034 error ("target attribute or pragma changes darwin64 ABI");
3035 else
3037 rs6000_darwin64_abi = 1;
3038 /* Default to natural alignment, for better performance. */
3039 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3043 /* Place FP constants in the constant pool instead of TOC
3044 if section anchors are enabled. */
3045 if (flag_section_anchors)
3046 TARGET_NO_FP_IN_TOC = 1;
3048 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3049 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3051 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3052 SUBTARGET_OVERRIDE_OPTIONS;
3053 #endif
3054 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3055 SUBSUBTARGET_OVERRIDE_OPTIONS;
3056 #endif
3057 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3058 SUB3TARGET_OVERRIDE_OPTIONS;
3059 #endif
3061 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3062 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3064 /* For the E500 family of cores, reset the single/double FP flags to let us
3065 check that they remain constant across attributes or pragmas. Also,
3066 clear a possible request for string instructions, which are not supported
3067 and which we might have silently enabled above for -Os.
3069 For other families, clear ISEL in case it was set implicitly.
3072 switch (rs6000_cpu)
3074 case PROCESSOR_PPC8540:
3075 case PROCESSOR_PPC8548:
3076 case PROCESSOR_PPCE500MC:
3077 case PROCESSOR_PPCE500MC64:
3078 case PROCESSOR_PPCE5500:
3079 case PROCESSOR_PPCE6500:
3081 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3082 rs6000_double_float = TARGET_E500_DOUBLE;
3084 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3086 break;
3088 default:
3090 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3091 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3093 break;
3096 if (main_target_opt)
3098 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3099 error ("target attribute or pragma changes single precision floating "
3100 "point");
3101 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3102 error ("target attribute or pragma changes double precision floating "
3103 "point");
3106 /* Detect invalid option combinations with E500. */
3107 CHECK_E500_OPTIONS;
3109 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3110 && rs6000_cpu != PROCESSOR_POWER5
3111 && rs6000_cpu != PROCESSOR_POWER6
3112 && rs6000_cpu != PROCESSOR_POWER7
3113 && rs6000_cpu != PROCESSOR_POWER8
3114 && rs6000_cpu != PROCESSOR_PPCA2
3115 && rs6000_cpu != PROCESSOR_CELL
3116 && rs6000_cpu != PROCESSOR_PPC476);
3117 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3118 || rs6000_cpu == PROCESSOR_POWER5
3119 || rs6000_cpu == PROCESSOR_POWER7
3120 || rs6000_cpu == PROCESSOR_POWER8);
3121 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3122 || rs6000_cpu == PROCESSOR_POWER5
3123 || rs6000_cpu == PROCESSOR_POWER6
3124 || rs6000_cpu == PROCESSOR_POWER7
3125 || rs6000_cpu == PROCESSOR_POWER8
3126 || rs6000_cpu == PROCESSOR_PPCE500MC
3127 || rs6000_cpu == PROCESSOR_PPCE500MC64
3128 || rs6000_cpu == PROCESSOR_PPCE5500
3129 || rs6000_cpu == PROCESSOR_PPCE6500);
3131 /* Allow debug switches to override the above settings. These are set to -1
3132 in rs6000.opt to indicate the user hasn't directly set the switch. */
3133 if (TARGET_ALWAYS_HINT >= 0)
3134 rs6000_always_hint = TARGET_ALWAYS_HINT;
3136 if (TARGET_SCHED_GROUPS >= 0)
3137 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3139 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3140 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3142 rs6000_sched_restricted_insns_priority
3143 = (rs6000_sched_groups ? 1 : 0);
3145 /* Handle -msched-costly-dep option. */
3146 rs6000_sched_costly_dep
3147 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3149 if (rs6000_sched_costly_dep_str)
3151 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3152 rs6000_sched_costly_dep = no_dep_costly;
3153 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3154 rs6000_sched_costly_dep = all_deps_costly;
3155 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3156 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3157 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3158 rs6000_sched_costly_dep = store_to_load_dep_costly;
3159 else
3160 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3161 atoi (rs6000_sched_costly_dep_str));
3164 /* Handle -minsert-sched-nops option. */
3165 rs6000_sched_insert_nops
3166 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3168 if (rs6000_sched_insert_nops_str)
3170 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3171 rs6000_sched_insert_nops = sched_finish_none;
3172 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3173 rs6000_sched_insert_nops = sched_finish_pad_groups;
3174 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3175 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3176 else
3177 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3178 atoi (rs6000_sched_insert_nops_str));
3181 if (global_init_p)
3183 #ifdef TARGET_REGNAMES
3184 /* If the user desires alternate register names, copy in the
3185 alternate names now. */
3186 if (TARGET_REGNAMES)
3187 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3188 #endif
3190 /* Set aix_struct_return last, after the ABI is determined.
3191 If -maix-struct-return or -msvr4-struct-return was explicitly
3192 used, don't override with the ABI default. */
3193 if (!global_options_set.x_aix_struct_return)
3194 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3196 #if 0
3197 /* IBM XL compiler defaults to unsigned bitfields. */
3198 if (TARGET_XL_COMPAT)
3199 flag_signed_bitfields = 0;
3200 #endif
3202 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3203 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3205 if (TARGET_TOC)
3206 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3208 /* We can only guarantee the availability of DI pseudo-ops when
3209 assembling for 64-bit targets. */
3210 if (!TARGET_64BIT)
3212 targetm.asm_out.aligned_op.di = NULL;
3213 targetm.asm_out.unaligned_op.di = NULL;
3217 /* Set branch target alignment, if not optimizing for size. */
3218 if (!optimize_size)
3220 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
3221 8-byte aligned to avoid misprediction by the branch predictor. */
3222 if (rs6000_cpu == PROCESSOR_TITAN
3223 || rs6000_cpu == PROCESSOR_CELL)
3225 if (align_functions <= 0)
3226 align_functions = 8;
3227 if (align_jumps <= 0)
3228 align_jumps = 8;
3229 if (align_loops <= 0)
3230 align_loops = 8;
3232 if (rs6000_align_branch_targets)
3234 if (align_functions <= 0)
3235 align_functions = 16;
3236 if (align_jumps <= 0)
3237 align_jumps = 16;
3238 if (align_loops <= 0)
3240 can_override_loop_align = 1;
3241 align_loops = 16;
3244 if (align_jumps_max_skip <= 0)
3245 align_jumps_max_skip = 15;
3246 if (align_loops_max_skip <= 0)
3247 align_loops_max_skip = 15;
3250 /* Arrange to save and restore machine status around nested functions. */
3251 init_machine_status = rs6000_init_machine_status;
3253 /* We should always be splitting complex arguments, but we can't break
3254 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3255 if (DEFAULT_ABI != ABI_AIX)
3256 targetm.calls.split_complex_arg = NULL;
3259 /* Initialize rs6000_cost with the appropriate target costs. */
3260 if (optimize_size)
3261 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3262 else
3263 switch (rs6000_cpu)
3265 case PROCESSOR_RS64A:
3266 rs6000_cost = &rs64a_cost;
3267 break;
3269 case PROCESSOR_MPCCORE:
3270 rs6000_cost = &mpccore_cost;
3271 break;
3273 case PROCESSOR_PPC403:
3274 rs6000_cost = &ppc403_cost;
3275 break;
3277 case PROCESSOR_PPC405:
3278 rs6000_cost = &ppc405_cost;
3279 break;
3281 case PROCESSOR_PPC440:
3282 rs6000_cost = &ppc440_cost;
3283 break;
3285 case PROCESSOR_PPC476:
3286 rs6000_cost = &ppc476_cost;
3287 break;
3289 case PROCESSOR_PPC601:
3290 rs6000_cost = &ppc601_cost;
3291 break;
3293 case PROCESSOR_PPC603:
3294 rs6000_cost = &ppc603_cost;
3295 break;
3297 case PROCESSOR_PPC604:
3298 rs6000_cost = &ppc604_cost;
3299 break;
3301 case PROCESSOR_PPC604e:
3302 rs6000_cost = &ppc604e_cost;
3303 break;
3305 case PROCESSOR_PPC620:
3306 rs6000_cost = &ppc620_cost;
3307 break;
3309 case PROCESSOR_PPC630:
3310 rs6000_cost = &ppc630_cost;
3311 break;
3313 case PROCESSOR_CELL:
3314 rs6000_cost = &ppccell_cost;
3315 break;
3317 case PROCESSOR_PPC750:
3318 case PROCESSOR_PPC7400:
3319 rs6000_cost = &ppc750_cost;
3320 break;
3322 case PROCESSOR_PPC7450:
3323 rs6000_cost = &ppc7450_cost;
3324 break;
3326 case PROCESSOR_PPC8540:
3327 case PROCESSOR_PPC8548:
3328 rs6000_cost = &ppc8540_cost;
3329 break;
3331 case PROCESSOR_PPCE300C2:
3332 case PROCESSOR_PPCE300C3:
3333 rs6000_cost = &ppce300c2c3_cost;
3334 break;
3336 case PROCESSOR_PPCE500MC:
3337 rs6000_cost = &ppce500mc_cost;
3338 break;
3340 case PROCESSOR_PPCE500MC64:
3341 rs6000_cost = &ppce500mc64_cost;
3342 break;
3344 case PROCESSOR_PPCE5500:
3345 rs6000_cost = &ppce5500_cost;
3346 break;
3348 case PROCESSOR_PPCE6500:
3349 rs6000_cost = &ppce6500_cost;
3350 break;
3352 case PROCESSOR_TITAN:
3353 rs6000_cost = &titan_cost;
3354 break;
3356 case PROCESSOR_POWER4:
3357 case PROCESSOR_POWER5:
3358 rs6000_cost = &power4_cost;
3359 break;
3361 case PROCESSOR_POWER6:
3362 rs6000_cost = &power6_cost;
3363 break;
3365 case PROCESSOR_POWER7:
3366 rs6000_cost = &power7_cost;
3367 break;
3369 case PROCESSOR_POWER8:
3370 rs6000_cost = &power8_cost;
3371 break;
3373 case PROCESSOR_PPCA2:
3374 rs6000_cost = &ppca2_cost;
3375 break;
3377 default:
3378 gcc_unreachable ();
3381 if (global_init_p)
3383 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3384 rs6000_cost->simultaneous_prefetches,
3385 global_options.x_param_values,
3386 global_options_set.x_param_values);
3387 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3388 global_options.x_param_values,
3389 global_options_set.x_param_values);
3390 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3391 rs6000_cost->cache_line_size,
3392 global_options.x_param_values,
3393 global_options_set.x_param_values);
3394 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3395 global_options.x_param_values,
3396 global_options_set.x_param_values);
3398 /* Increase loop peeling limits based on performance analysis. */
3399 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3400 global_options.x_param_values,
3401 global_options_set.x_param_values);
3402 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3403 global_options.x_param_values,
3404 global_options_set.x_param_values);
3406 /* If using typedef char *va_list, signal that
3407 __builtin_va_start (&ap, 0) can be optimized to
3408 ap = __builtin_next_arg (0). */
3409 if (DEFAULT_ABI != ABI_V4)
3410 targetm.expand_builtin_va_start = NULL;
3413 /* Set up single/double float flags.
3414 If TARGET_HARD_FLOAT is set, but neither single or double is set,
3415 then set both flags. */
3416 if (TARGET_HARD_FLOAT && TARGET_FPRS
3417 && rs6000_single_float == 0 && rs6000_double_float == 0)
3418 rs6000_single_float = rs6000_double_float = 1;
3420 /* If not explicitly specified via option, decide whether to generate indexed
3421 load/store instructions. */
3422 if (TARGET_AVOID_XFORM == -1)
3423 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3424 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
3425 need indexed accesses, and the type used is the scalar type of the element
3426 being loaded or stored. */
3427 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3428 && !TARGET_ALTIVEC);
3430 /* Set the -mrecip options. */
3431 if (rs6000_recip_name)
3433 char *p = ASTRDUP (rs6000_recip_name);
3434 char *q;
3435 unsigned int mask, i;
3436 bool invert;
3438 while ((q = strtok (p, ",")) != NULL)
3440 p = NULL;
3441 if (*q == '!')
3443 invert = true;
3444 q++;
3446 else
3447 invert = false;
3449 if (!strcmp (q, "default"))
3450 mask = ((TARGET_RECIP_PRECISION)
3451 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3452 else
3454 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3455 if (!strcmp (q, recip_options[i].string))
3457 mask = recip_options[i].mask;
3458 break;
3461 if (i == ARRAY_SIZE (recip_options))
3463 error ("unknown option for -mrecip=%s", q);
3464 invert = false;
3465 mask = 0;
3466 ret = false;
3470 if (invert)
3471 rs6000_recip_control &= ~mask;
3472 else
3473 rs6000_recip_control |= mask;
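/* Illustrative sketch of the parsing above: given -mrecip=rsqrtd,!divf
   (assuming both names appear in recip_options[]), the first token ORs
   the rsqrtd mask into rs6000_recip_control, while the second, because
   of the leading '!', ANDs the complement of the divf mask back out.  */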
3477 /* Set the builtin mask of the various options used that could affect which
3478 builtins are enabled. In the past we used target_flags, but we've run out
3479 of bits, and some options like SPE and PAIRED are no longer in
3480 target_flags. */
3481 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3482 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3484 fprintf (stderr,
3485 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
3486 rs6000_builtin_mask);
3487 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
3490 /* Initialize all of the registers. */
3491 rs6000_init_hard_regno_mode_ok (global_init_p);
3493 /* Save the initial options in case the user uses function-specific options. */
3494 if (global_init_p)
3495 target_option_default_node = target_option_current_node
3496 = build_target_option_node ();
3498 /* If not explicitly specified via option, decide whether to generate the
3499 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3500 if (TARGET_LINK_STACK == -1)
3501 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3503 return ret;
3506 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3507 define the target cpu type. */
3509 static void
3510 rs6000_option_override (void)
3512 (void) rs6000_option_override_internal (true);
3516 /* Implement targetm.vectorize.builtin_mask_for_load. */
3517 static tree
3518 rs6000_builtin_mask_for_load (void)
3520 if (TARGET_ALTIVEC || TARGET_VSX)
3521 return altivec_builtin_mask_for_load;
3522 else
3523 return 0;
3526 /* Implement LOOP_ALIGN. */
3528 rs6000_loop_align (rtx label)
3530 basic_block bb;
3531 int ninsns;
3533 /* Don't override loop alignment if -falign-loops was specified. */
3534 if (!can_override_loop_align)
3535 return align_loops_log;
3537 bb = BLOCK_FOR_INSN (label);
3538 ninsns = num_loop_insns(bb->loop_father);
3540 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3541 if (ninsns > 4 && ninsns <= 8
3542 && (rs6000_cpu == PROCESSOR_POWER4
3543 || rs6000_cpu == PROCESSOR_POWER5
3544 || rs6000_cpu == PROCESSOR_POWER6
3545 || rs6000_cpu == PROCESSOR_POWER7
3546 || rs6000_cpu == PROCESSOR_POWER8))
3547 return 5;
3548 else
3549 return align_loops_log;
3552 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3553 static int
3554 rs6000_loop_align_max_skip (rtx label)
3556 return (1 << rs6000_loop_align (label)) - 1;
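/* Worked example: for a small loop on power7, rs6000_loop_align returns
   5 (a 32-byte boundary), so the maximum skip computed above is
   (1 << 5) - 1 = 31 bytes of padding.  */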
3559 /* Return true iff the data reference of TYPE can reach vector alignment (16)
3560 after applying N iterations. This routine does not determine how many
3561 iterations are required to reach the desired alignment. */
3563 static bool
3564 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3566 if (is_packed)
3567 return false;
3569 if (TARGET_32BIT)
3571 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3572 return true;
3574 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3575 return true;
3577 return false;
3579 else
3581 if (TARGET_MACHO)
3582 return false;
3584 /* Assume that all other types are naturally aligned. CHECKME! */
3585 return true;
3589 /* Return true if the vector misalignment factor is supported by the
3590 target. */
3591 static bool
3592 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3593 const_tree type,
3594 int misalignment,
3595 bool is_packed)
3597 if (TARGET_VSX)
3599 /* Return false if the movmisalign pattern is not supported for this mode. */
3600 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3601 return false;
3603 if (misalignment == -1)
3605 /* Misalignment factor is unknown at compile time but we know
3606 it's word aligned. */
3607 if (rs6000_vector_alignment_reachable (type, is_packed))
3609 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3611 if (element_size == 64 || element_size == 32)
3612 return true;
3615 return false;
3618 /* VSX supports word-aligned vectors. */
3619 if (misalignment % 4 == 0)
3620 return true;
3622 return false;
3625 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3626 static int
3627 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3628 tree vectype, int misalign)
3630 unsigned elements;
3631 tree elem_type;
3633 switch (type_of_cost)
3635 case scalar_stmt:
3636 case scalar_load:
3637 case scalar_store:
3638 case vector_stmt:
3639 case vector_load:
3640 case vector_store:
3641 case vec_to_scalar:
3642 case scalar_to_vec:
3643 case cond_branch_not_taken:
3644 return 1;
3646 case vec_perm:
3647 if (TARGET_VSX)
3648 return 3;
3649 else
3650 return 1;
3652 case vec_promote_demote:
3653 if (TARGET_VSX)
3654 return 4;
3655 else
3656 return 1;
3658 case cond_branch_taken:
3659 return 3;
3661 case unaligned_load:
3662 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3664 elements = TYPE_VECTOR_SUBPARTS (vectype);
3665 if (elements == 2)
3666 /* Double word aligned. */
3667 return 2;
3669 if (elements == 4)
3671 switch (misalign)
3673 case 8:
3674 /* Double word aligned. */
3675 return 2;
3677 case -1:
3678 /* Unknown misalignment. */
3679 case 4:
3680 case 12:
3681 /* Word aligned. */
3682 return 22;
3684 default:
3685 gcc_unreachable ();
3690 if (TARGET_ALTIVEC)
3691 /* Misaligned loads are not supported. */
3692 gcc_unreachable ();
3694 return 2;
3696 case unaligned_store:
3697 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3699 elements = TYPE_VECTOR_SUBPARTS (vectype);
3700 if (elements == 2)
3701 /* Double word aligned. */
3702 return 2;
3704 if (elements == 4)
3706 switch (misalign)
3708 case 8:
3709 /* Double word aligned. */
3710 return 2;
3712 case -1:
3713 /* Unknown misalignment. */
3714 case 4:
3715 case 12:
3716 /* Word aligned. */
3717 return 23;
3719 default:
3720 gcc_unreachable ();
3725 if (TARGET_ALTIVEC)
3726 /* Misaligned stores are not supported. */
3727 gcc_unreachable ();
3729 return 2;
3731 case vec_construct:
3732 elements = TYPE_VECTOR_SUBPARTS (vectype);
3733 elem_type = TREE_TYPE (vectype);
3734 /* 32-bit vectors loaded into registers are stored as double
3735 precision, so we need n/2 converts in addition to the usual
3736 n/2 merges to construct a vector of short floats from them. */
3737 if (SCALAR_FLOAT_TYPE_P (elem_type)
3738 && TYPE_PRECISION (elem_type) == 32)
3739 return elements + 1;
3740 else
3741 return elements / 2 + 1;
3743 default:
3744 gcc_unreachable ();
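/* Worked examples of the costs above: a vec_construct of V4SFmode
   (four 32-bit float elements) costs 4 + 1 = 5, reflecting the extra
   converts; on VSX with movmisalign allowed, an unaligned_load of
   V4SImode costs 2 when misalign == 8 (double-word aligned) but 22 when
   only word aligned (misalign == 4 or 12, or unknown).  */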
3748 /* Implement targetm.vectorize.preferred_simd_mode. */
3750 static enum machine_mode
3751 rs6000_preferred_simd_mode (enum machine_mode mode)
3753 if (TARGET_VSX)
3754 switch (mode)
3756 case DFmode:
3757 return V2DFmode;
3758 default:;
3760 if (TARGET_ALTIVEC || TARGET_VSX)
3761 switch (mode)
3763 case SFmode:
3764 return V4SFmode;
3765 case DImode:
3766 return V2DImode;
3767 case SImode:
3768 return V4SImode;
3769 case HImode:
3770 return V8HImode;
3771 case QImode:
3772 return V16QImode;
3773 default:;
3775 if (TARGET_SPE)
3776 switch (mode)
3778 case SFmode:
3779 return V2SFmode;
3780 case SImode:
3781 return V2SImode;
3782 default:;
3784 if (TARGET_PAIRED_FLOAT
3785 && mode == SFmode)
3786 return V2SFmode;
3787 return word_mode;
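/* Example mappings: with VSX enabled, DFmode vectorizes as V2DFmode and
   SFmode as V4SFmode; with only AltiVec, SFmode still maps to V4SFmode
   but DFmode falls through to word_mode and is not vectorized.  */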
3790 typedef struct _rs6000_cost_data
3792 struct loop *loop_info;
3793 unsigned cost[3];
3794 } rs6000_cost_data;
3796 /* Test for likely overcommitment of vector hardware resources. If a
3797 loop iteration is relatively large, and too large a percentage of
3798 instructions in the loop are vectorized, the cost model may not
3799 adequately reflect delays from unavailable vector resources.
3800 Penalize the loop body cost for this case. */
3802 static void
3803 rs6000_density_test (rs6000_cost_data *data)
3805 const int DENSITY_PCT_THRESHOLD = 85;
3806 const int DENSITY_SIZE_THRESHOLD = 70;
3807 const int DENSITY_PENALTY = 10;
3808 struct loop *loop = data->loop_info;
3809 basic_block *bbs = get_loop_body (loop);
3810 int nbbs = loop->num_nodes;
3811 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3812 int i, density_pct;
3814 for (i = 0; i < nbbs; i++)
3816 basic_block bb = bbs[i];
3817 gimple_stmt_iterator gsi;
3819 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3821 gimple stmt = gsi_stmt (gsi);
3822 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3824 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3825 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3826 not_vec_cost++;
3830 free (bbs);
3831 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3833 if (density_pct > DENSITY_PCT_THRESHOLD
3834 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3836 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3837 if (dump_enabled_p ())
3838 dump_printf_loc (MSG_NOTE, vect_location,
3839 "density %d%%, cost %d exceeds threshold, penalizing "
3840 "loop body cost by %d%%", density_pct,
3841 vec_cost + not_vec_cost, DENSITY_PENALTY);
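/* Worked example of the density test: with vec_cost == 90 and
   not_vec_cost == 10, density_pct is 90 and the loop size is 100; both
   thresholds (85 and 70) are exceeded, so the vectorized body cost is
   raised to 90 * 110 / 100 == 99.  */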
3845 /* Implement targetm.vectorize.init_cost. */
3847 static void *
3848 rs6000_init_cost (struct loop *loop_info)
3850 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3851 data->loop_info = loop_info;
3852 data->cost[vect_prologue] = 0;
3853 data->cost[vect_body] = 0;
3854 data->cost[vect_epilogue] = 0;
3855 return data;
3858 /* Implement targetm.vectorize.add_stmt_cost. */
3860 static unsigned
3861 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3862 struct _stmt_vec_info *stmt_info, int misalign,
3863 enum vect_cost_model_location where)
3865 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3866 unsigned retval = 0;
3868 if (flag_vect_cost_model)
3870 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
3871 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
3872 misalign);
3873 /* Statements in an inner loop relative to the loop being
3874 vectorized are weighted more heavily. The value here is
3875 arbitrary and could potentially be improved with analysis. */
3876 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
3877 count *= 50; /* FIXME. */
3879 retval = (unsigned) (count * stmt_cost);
3880 cost_data->cost[where] += retval;
3883 return retval;
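/* For instance, a single vector_load (cost 1 above) in the body of a
   loop nested inside the loop being vectorized is charged 50 * 1 = 50
   by the inner-loop weighting before being accumulated.  */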
3886 /* Implement targetm.vectorize.finish_cost. */
3888 static void
3889 rs6000_finish_cost (void *data, unsigned *prologue_cost,
3890 unsigned *body_cost, unsigned *epilogue_cost)
3892 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3894 if (cost_data->loop_info)
3895 rs6000_density_test (cost_data);
3897 *prologue_cost = cost_data->cost[vect_prologue];
3898 *body_cost = cost_data->cost[vect_body];
3899 *epilogue_cost = cost_data->cost[vect_epilogue];
3902 /* Implement targetm.vectorize.destroy_cost_data. */
3904 static void
3905 rs6000_destroy_cost_data (void *data)
3907 free (data);
3910 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
3911 library with vectorized intrinsics. */
3913 static tree
3914 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3916 char name[32];
3917 const char *suffix = NULL;
3918 tree fntype, new_fndecl, bdecl = NULL_TREE;
3919 int n_args = 1;
3920 const char *bname;
3921 enum machine_mode el_mode, in_mode;
3922 int n, in_n;
3924 /* Libmass is suitable for unsafe math only as it does not correctly support
3925 parts of IEEE with the required precision such as denormals. Only support
3926 it if we have VSX to use the simd d2 or f4 functions.
3927 XXX: Add variable length support. */
3928 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3929 return NULL_TREE;
3931 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3932 n = TYPE_VECTOR_SUBPARTS (type_out);
3933 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3934 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3935 if (el_mode != in_mode
3936 || n != in_n)
3937 return NULL_TREE;
3939 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3941 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3942 switch (fn)
3944 case BUILT_IN_ATAN2:
3945 case BUILT_IN_HYPOT:
3946 case BUILT_IN_POW:
3947 n_args = 2;
3948 /* fall through */
3950 case BUILT_IN_ACOS:
3951 case BUILT_IN_ACOSH:
3952 case BUILT_IN_ASIN:
3953 case BUILT_IN_ASINH:
3954 case BUILT_IN_ATAN:
3955 case BUILT_IN_ATANH:
3956 case BUILT_IN_CBRT:
3957 case BUILT_IN_COS:
3958 case BUILT_IN_COSH:
3959 case BUILT_IN_ERF:
3960 case BUILT_IN_ERFC:
3961 case BUILT_IN_EXP2:
3962 case BUILT_IN_EXP:
3963 case BUILT_IN_EXPM1:
3964 case BUILT_IN_LGAMMA:
3965 case BUILT_IN_LOG10:
3966 case BUILT_IN_LOG1P:
3967 case BUILT_IN_LOG2:
3968 case BUILT_IN_LOG:
3969 case BUILT_IN_SIN:
3970 case BUILT_IN_SINH:
3971 case BUILT_IN_SQRT:
3972 case BUILT_IN_TAN:
3973 case BUILT_IN_TANH:
3974 bdecl = builtin_decl_implicit (fn);
3975 suffix = "d2"; /* pow -> powd2 */
3976 if (el_mode != DFmode
3977 || n != 2
3978 || !bdecl)
3979 return NULL_TREE;
3980 break;
3982 case BUILT_IN_ATAN2F:
3983 case BUILT_IN_HYPOTF:
3984 case BUILT_IN_POWF:
3985 n_args = 2;
3986 /* fall through */
3988 case BUILT_IN_ACOSF:
3989 case BUILT_IN_ACOSHF:
3990 case BUILT_IN_ASINF:
3991 case BUILT_IN_ASINHF:
3992 case BUILT_IN_ATANF:
3993 case BUILT_IN_ATANHF:
3994 case BUILT_IN_CBRTF:
3995 case BUILT_IN_COSF:
3996 case BUILT_IN_COSHF:
3997 case BUILT_IN_ERFF:
3998 case BUILT_IN_ERFCF:
3999 case BUILT_IN_EXP2F:
4000 case BUILT_IN_EXPF:
4001 case BUILT_IN_EXPM1F:
4002 case BUILT_IN_LGAMMAF:
4003 case BUILT_IN_LOG10F:
4004 case BUILT_IN_LOG1PF:
4005 case BUILT_IN_LOG2F:
4006 case BUILT_IN_LOGF:
4007 case BUILT_IN_SINF:
4008 case BUILT_IN_SINHF:
4009 case BUILT_IN_SQRTF:
4010 case BUILT_IN_TANF:
4011 case BUILT_IN_TANHF:
4012 bdecl = builtin_decl_implicit (fn);
4013 suffix = "4"; /* powf -> powf4 */
4014 if (el_mode != SFmode
4015 || n != 4
4016 || !bdecl)
4017 return NULL_TREE;
4018 break;
4020 default:
4021 return NULL_TREE;
4024 else
4025 return NULL_TREE;
4027 gcc_assert (suffix != NULL);
4028 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4029 if (!bname)
4030 return NULL_TREE;
4032 strcpy (name, bname + sizeof ("__builtin_") - 1);
4033 strcat (name, suffix);
4035 if (n_args == 1)
4036 fntype = build_function_type_list (type_out, type_in, NULL);
4037 else if (n_args == 2)
4038 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4039 else
4040 gcc_unreachable ();
4042 /* Build a function declaration for the vectorized function. */
4043 new_fndecl = build_decl (BUILTINS_LOCATION,
4044 FUNCTION_DECL, get_identifier (name), fntype);
4045 TREE_PUBLIC (new_fndecl) = 1;
4046 DECL_EXTERNAL (new_fndecl) = 1;
4047 DECL_IS_NOVOPS (new_fndecl) = 1;
4048 TREE_READONLY (new_fndecl) = 1;
4050 return new_fndecl;
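/* Example of the name mangling above: vectorizing pow for V2DFmode
   yields a two-argument decl named "powd2", and sinf for V4SFmode
   yields "sinf4", matching the MASS simd d2/f4 naming mentioned
   earlier.  */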
4053 /* Returns a function decl for a vectorized version of the builtin function
4054 FNDECL with result vector type TYPE_OUT and argument vector type TYPE_IN,
4055 or NULL_TREE if it is not available. */
4057 static tree
4058 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4059 tree type_in)
4061 enum machine_mode in_mode, out_mode;
4062 int in_n, out_n;
4064 if (TARGET_DEBUG_BUILTIN)
4065 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4066 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4067 GET_MODE_NAME (TYPE_MODE (type_out)),
4068 GET_MODE_NAME (TYPE_MODE (type_in)));
4070 if (TREE_CODE (type_out) != VECTOR_TYPE
4071 || TREE_CODE (type_in) != VECTOR_TYPE
4072 || !TARGET_VECTORIZE_BUILTINS)
4073 return NULL_TREE;
4075 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4076 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4077 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4078 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4080 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4082 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4083 switch (fn)
4085 case BUILT_IN_COPYSIGN:
4086 if (VECTOR_UNIT_VSX_P (V2DFmode)
4087 && out_mode == DFmode && out_n == 2
4088 && in_mode == DFmode && in_n == 2)
4089 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4090 break;
4091 case BUILT_IN_COPYSIGNF:
4092 if (out_mode != SFmode || out_n != 4
4093 || in_mode != SFmode || in_n != 4)
4094 break;
4095 if (VECTOR_UNIT_VSX_P (V4SFmode))
4096 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4097 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4098 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4099 break;
4100 case BUILT_IN_SQRT:
4101 if (VECTOR_UNIT_VSX_P (V2DFmode)
4102 && out_mode == DFmode && out_n == 2
4103 && in_mode == DFmode && in_n == 2)
4104 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4105 break;
4106 case BUILT_IN_SQRTF:
4107 if (VECTOR_UNIT_VSX_P (V4SFmode)
4108 && out_mode == SFmode && out_n == 4
4109 && in_mode == SFmode && in_n == 4)
4110 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4111 break;
4112 case BUILT_IN_CEIL:
4113 if (VECTOR_UNIT_VSX_P (V2DFmode)
4114 && out_mode == DFmode && out_n == 2
4115 && in_mode == DFmode && in_n == 2)
4116 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4117 break;
4118 case BUILT_IN_CEILF:
4119 if (out_mode != SFmode || out_n != 4
4120 || in_mode != SFmode || in_n != 4)
4121 break;
4122 if (VECTOR_UNIT_VSX_P (V4SFmode))
4123 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4124 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4125 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4126 break;
4127 case BUILT_IN_FLOOR:
4128 if (VECTOR_UNIT_VSX_P (V2DFmode)
4129 && out_mode == DFmode && out_n == 2
4130 && in_mode == DFmode && in_n == 2)
4131 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4132 break;
4133 case BUILT_IN_FLOORF:
4134 if (out_mode != SFmode || out_n != 4
4135 || in_mode != SFmode || in_n != 4)
4136 break;
4137 if (VECTOR_UNIT_VSX_P (V4SFmode))
4138 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4139 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4140 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4141 break;
4142 case BUILT_IN_FMA:
4143 if (VECTOR_UNIT_VSX_P (V2DFmode)
4144 && out_mode == DFmode && out_n == 2
4145 && in_mode == DFmode && in_n == 2)
4146 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4147 break;
4148 case BUILT_IN_FMAF:
4149 if (VECTOR_UNIT_VSX_P (V4SFmode)
4150 && out_mode == SFmode && out_n == 4
4151 && in_mode == SFmode && in_n == 4)
4152 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4153 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4154 && out_mode == SFmode && out_n == 4
4155 && in_mode == SFmode && in_n == 4)
4156 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4157 break;
4158 case BUILT_IN_TRUNC:
4159 if (VECTOR_UNIT_VSX_P (V2DFmode)
4160 && out_mode == DFmode && out_n == 2
4161 && in_mode == DFmode && in_n == 2)
4162 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4163 break;
4164 case BUILT_IN_TRUNCF:
4165 if (out_mode != SFmode || out_n != 4
4166 || in_mode != SFmode || in_n != 4)
4167 break;
4168 if (VECTOR_UNIT_VSX_P (V4SFmode))
4169 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4170 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4171 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4172 break;
4173 case BUILT_IN_NEARBYINT:
4174 if (VECTOR_UNIT_VSX_P (V2DFmode)
4175 && flag_unsafe_math_optimizations
4176 && out_mode == DFmode && out_n == 2
4177 && in_mode == DFmode && in_n == 2)
4178 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4179 break;
4180 case BUILT_IN_NEARBYINTF:
4181 if (VECTOR_UNIT_VSX_P (V4SFmode)
4182 && flag_unsafe_math_optimizations
4183 && out_mode == SFmode && out_n == 4
4184 && in_mode == SFmode && in_n == 4)
4185 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4186 break;
4187 case BUILT_IN_RINT:
4188 if (VECTOR_UNIT_VSX_P (V2DFmode)
4189 && !flag_trapping_math
4190 && out_mode == DFmode && out_n == 2
4191 && in_mode == DFmode && in_n == 2)
4192 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4193 break;
4194 case BUILT_IN_RINTF:
4195 if (VECTOR_UNIT_VSX_P (V4SFmode)
4196 && !flag_trapping_math
4197 && out_mode == SFmode && out_n == 4
4198 && in_mode == SFmode && in_n == 4)
4199 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4200 break;
4201 default:
4202 break;
4206 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4208 enum rs6000_builtins fn
4209 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4210 switch (fn)
4212 case RS6000_BUILTIN_RSQRTF:
4213 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4214 && out_mode == SFmode && out_n == 4
4215 && in_mode == SFmode && in_n == 4)
4216 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4217 break;
4218 case RS6000_BUILTIN_RSQRT:
4219 if (VECTOR_UNIT_VSX_P (V2DFmode)
4220 && out_mode == DFmode && out_n == 2
4221 && in_mode == DFmode && in_n == 2)
4222 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4223 break;
4224 case RS6000_BUILTIN_RECIPF:
4225 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4226 && out_mode == SFmode && out_n == 4
4227 && in_mode == SFmode && in_n == 4)
4228 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4229 break;
4230 case RS6000_BUILTIN_RECIP:
4231 if (VECTOR_UNIT_VSX_P (V2DFmode)
4232 && out_mode == DFmode && out_n == 2
4233 && in_mode == DFmode && in_n == 2)
4234 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4235 break;
4236 default:
4237 break;
4241 /* Generate calls to libmass if appropriate. */
4242 if (rs6000_veclib_handler)
4243 return rs6000_veclib_handler (fndecl, type_out, type_in);
4245 return NULL_TREE;
4248 /* Default CPU string for rs6000*_file_start functions. */
4249 static const char *rs6000_default_cpu;
4251 /* Do anything needed at the start of the asm file. */
4253 static void
4254 rs6000_file_start (void)
4256 char buffer[80];
4257 const char *start = buffer;
4258 FILE *file = asm_out_file;
4260 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4262 default_file_start ();
4264 if (flag_verbose_asm)
4266 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4268 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4270 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4271 start = "";
4274 if (global_options_set.x_rs6000_cpu_index)
4276 fprintf (file, "%s -mcpu=%s", start,
4277 processor_target_table[rs6000_cpu_index].name);
4278 start = "";
4281 if (global_options_set.x_rs6000_tune_index)
4283 fprintf (file, "%s -mtune=%s", start,
4284 processor_target_table[rs6000_tune_index].name);
4285 start = "";
4288 if (PPC405_ERRATUM77)
4290 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4291 start = "";
4294 #ifdef USING_ELFOS_H
4295 switch (rs6000_sdata)
4297 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4298 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4299 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4300 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4303 if (rs6000_sdata && g_switch_value)
4305 fprintf (file, "%s -G %d", start,
4306 g_switch_value);
4307 start = "";
4309 #endif
4311 if (*start == '\0')
4312 putc ('\n', file);
4315 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
4317 switch_to_section (toc_section);
4318 switch_to_section (text_section);
4323 /* Return nonzero if this function is known to have a null epilogue. */
4326 direct_return (void)
4328 if (reload_completed)
4330 rs6000_stack_t *info = rs6000_stack_info ();
4332 if (info->first_gp_reg_save == 32
4333 && info->first_fp_reg_save == 64
4334 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4335 && ! info->lr_save_p
4336 && ! info->cr_save_p
4337 && info->vrsave_mask == 0
4338 && ! info->push_p)
4339 return 1;
4342 return 0;
4345 /* Return the number of instructions it takes to form a constant in an
4346 integer register. */
4349 num_insns_constant_wide (HOST_WIDE_INT value)
4351 /* signed constant loadable with addi */
4352 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4353 return 1;
4355 /* constant loadable with addis */
4356 else if ((value & 0xffff) == 0
4357 && (value >> 31 == -1 || value >> 31 == 0))
4358 return 1;
4360 else if (TARGET_POWERPC64)
4362 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4363 HOST_WIDE_INT high = value >> 31;
4365 if (high == 0 || high == -1)
4366 return 2;
4368 high >>= 1;
4370 if (low == 0)
4371 return num_insns_constant_wide (high) + 1;
4372 else if (high == 0)
4373 return num_insns_constant_wide (low) + 1;
4374 else
4375 return (num_insns_constant_wide (high)
4376 + num_insns_constant_wide (low) + 1);
4379 else
4380 return 2;
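/* Worked examples: 0x7fff loads in 1 insn (addi range); 0x12340000 in
   1 insn (addis, low 16 bits clear); a 32-bit value such as 0x12345678
   takes 2; and on powerpc64 a full 64-bit constant such as
   0x123456789abcdef0 costs num_insns (high) + num_insns (low) + 1 for
   the shift joining them, here 2 + 2 + 1 = 5.  */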
4384 num_insns_constant (rtx op, enum machine_mode mode)
4386 HOST_WIDE_INT low, high;
4388 switch (GET_CODE (op))
4390 case CONST_INT:
4391 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4392 && mask64_operand (op, mode))
4393 return 2;
4394 else
4395 return num_insns_constant_wide (INTVAL (op));
4397 case CONST_DOUBLE:
4398 if (mode == SFmode || mode == SDmode)
4400 long l;
4401 REAL_VALUE_TYPE rv;
4403 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4404 if (DECIMAL_FLOAT_MODE_P (mode))
4405 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4406 else
4407 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4408 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4411 long l[2];
4412 REAL_VALUE_TYPE rv;
4414 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4415 if (DECIMAL_FLOAT_MODE_P (mode))
4416 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4417 else
4418 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4419 high = l[WORDS_BIG_ENDIAN == 0];
4420 low = l[WORDS_BIG_ENDIAN != 0];
4422 if (TARGET_32BIT)
4423 return (num_insns_constant_wide (low)
4424 + num_insns_constant_wide (high));
4425 else
4427 if ((high == 0 && low >= 0)
4428 || (high == -1 && low < 0))
4429 return num_insns_constant_wide (low);
4431 else if (mask64_operand (op, mode))
4432 return 2;
4434 else if (low == 0)
4435 return num_insns_constant_wide (high) + 1;
4437 else
4438 return (num_insns_constant_wide (high)
4439 + num_insns_constant_wide (low) + 1);
4442 default:
4443 gcc_unreachable ();
4447 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4448 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4449 corresponding element of the vector, but for V4SFmode and V2SFmode,
4450 the corresponding "float" is interpreted as an SImode integer. */
4452 HOST_WIDE_INT
4453 const_vector_elt_as_int (rtx op, unsigned int elt)
4455 rtx tmp;
4457 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4458 gcc_assert (GET_MODE (op) != V2DImode
4459 && GET_MODE (op) != V2DFmode);
4461 tmp = CONST_VECTOR_ELT (op, elt);
4462 if (GET_MODE (op) == V4SFmode
4463 || GET_MODE (op) == V2SFmode)
4464 tmp = gen_lowpart (SImode, tmp);
4465 return INTVAL (tmp);
4468 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4469 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4470 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4471 all items are set to the same value and contain COPIES replicas of the
4472 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4473 operand and the others are set to the value of the operand's msb. */
4475 static bool
4476 vspltis_constant (rtx op, unsigned step, unsigned copies)
4478 enum machine_mode mode = GET_MODE (op);
4479 enum machine_mode inner = GET_MODE_INNER (mode);
4481 unsigned i;
4482 unsigned nunits;
4483 unsigned bitsize;
4484 unsigned mask;
4486 HOST_WIDE_INT val;
4487 HOST_WIDE_INT splat_val;
4488 HOST_WIDE_INT msb_val;
4490 if (mode == V2DImode || mode == V2DFmode)
4491 return false;
4493 nunits = GET_MODE_NUNITS (mode);
4494 bitsize = GET_MODE_BITSIZE (inner);
4495 mask = GET_MODE_MASK (inner);
4497 val = const_vector_elt_as_int (op, nunits - 1);
4498 splat_val = val;
4499 msb_val = val > 0 ? 0 : -1;
3501 /* Construct the value to be splatted, if possible. If not, return false. */
4502 for (i = 2; i <= copies; i *= 2)
4504 HOST_WIDE_INT small_val;
4505 bitsize /= 2;
4506 small_val = splat_val >> bitsize;
4507 mask >>= bitsize;
4508 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4509 return false;
4510 splat_val = small_val;
4513 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4514 if (EASY_VECTOR_15 (splat_val))
3517 /* Also check if we can splat, and then add the result to itself. Do so if
3518 the value is positive, or if the splat instruction is using OP's mode;
3519 for splat_val < 0, the splat and the add should use the same mode. */
4520 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4521 && (splat_val >= 0 || (step == 1 && copies == 1)))
3524 /* Also check if we are loading up the most significant bit, which can be
3525 done by loading up -1 and shifting the value left by -1. */
4526 else if (EASY_VECTOR_MSB (splat_val, inner))
4529 else
4530 return false;
4532 /* Check if VAL is present in every STEP-th element, and the
4533 other elements are filled with its most significant bit. */
4534 for (i = 0; i < nunits - 1; ++i)
4536 HOST_WIDE_INT desired_val;
4537 if (((i + 1) & (step - 1)) == 0)
4538 desired_val = val;
4539 else
4540 desired_val = msb_val;
4542 if (desired_val != const_vector_elt_as_int (op, i))
4543 return false;
4546 return true;
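/* Illustration: for the V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 }
   checked with step == 1 and copies == 1, splat_val is 5,
   EASY_VECTOR_15 holds, and every element equals val, so the constant
   can be generated with a single vspltish 5.  */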
4550 /* Return true if OP is of the given MODE and can be synthesized
4551 with a vspltisb, vspltish or vspltisw. */
4553 bool
4554 easy_altivec_constant (rtx op, enum machine_mode mode)
4556 unsigned step, copies;
4558 if (mode == VOIDmode)
4559 mode = GET_MODE (op);
4560 else if (mode != GET_MODE (op))
4561 return false;
4563 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4564 constants. */
4565 if (mode == V2DFmode)
4566 return zero_constant (op, mode);
4568 if (mode == V2DImode)
4570 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4571 easy. */
4572 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4573 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4574 return false;
4576 if (zero_constant (op, mode))
4577 return true;
4579 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4580 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4581 return true;
4583 return false;
4586 /* Start with a vspltisw. */
4587 step = GET_MODE_NUNITS (mode) / 4;
4588 copies = 1;
4590 if (vspltis_constant (op, step, copies))
4591 return true;
4593 /* Then try with a vspltish. */
4594 if (step == 1)
4595 copies <<= 1;
4596 else
4597 step >>= 1;
4599 if (vspltis_constant (op, step, copies))
4600 return true;
4602 /* And finally a vspltisb. */
4603 if (step == 1)
4604 copies <<= 1;
4605 else
4606 step >>= 1;
4608 if (vspltis_constant (op, step, copies))
4609 return true;
4611 return false;
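/* The step/copies progression above, for a V4SImode constant: the
   vspltisw attempt uses step == 1, copies == 1; the vspltish attempt
   doubles copies to 2 (each 32-bit element holds two copies of the
   16-bit splat); and the vspltisb attempt doubles it again to 4.  */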
4614 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4615 result is OP. Abort if it is not possible. */
4618 gen_easy_altivec_constant (rtx op)
4620 enum machine_mode mode = GET_MODE (op);
4621 int nunits = GET_MODE_NUNITS (mode);
4622 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4623 unsigned step = nunits / 4;
4624 unsigned copies = 1;
4626 /* Start with a vspltisw. */
4627 if (vspltis_constant (op, step, copies))
4628 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4630 /* Then try with a vspltish. */
4631 if (step == 1)
4632 copies <<= 1;
4633 else
4634 step >>= 1;
4636 if (vspltis_constant (op, step, copies))
4637 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4639 /* And finally a vspltisb. */
4640 if (step == 1)
4641 copies <<= 1;
4642 else
4643 step >>= 1;
4645 if (vspltis_constant (op, step, copies))
4646 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4648 gcc_unreachable ();
4651 const char *
4652 output_vec_const_move (rtx *operands)
4654 int cst, cst2;
4655 enum machine_mode mode;
4656 rtx dest, vec;
4658 dest = operands[0];
4659 vec = operands[1];
4660 mode = GET_MODE (dest);
4662 if (TARGET_VSX)
4664 if (zero_constant (vec, mode))
4665 return "xxlxor %x0,%x0,%x0";
4667 if (mode == V2DImode
4668 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4669 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4670 return "vspltisw %0,-1";
4673 if (TARGET_ALTIVEC)
4675 rtx splat_vec;
4676 if (zero_constant (vec, mode))
4677 return "vxor %0,%0,%0";
4679 splat_vec = gen_easy_altivec_constant (vec);
4680 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4681 operands[1] = XEXP (splat_vec, 0);
4682 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4683 return "#";
4685 switch (GET_MODE (splat_vec))
4687 case V4SImode:
4688 return "vspltisw %0,%1";
4690 case V8HImode:
4691 return "vspltish %0,%1";
4693 case V16QImode:
4694 return "vspltisb %0,%1";
4696 default:
4697 gcc_unreachable ();
4701 gcc_assert (TARGET_SPE);
4703 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4704 pattern of V1DI, V4HI, and V2SF.
4706 FIXME: We should probably return # and add post reload
4707 splitters for these, but this way is so easy ;-). */
4708 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4709 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4710 operands[1] = CONST_VECTOR_ELT (vec, 0);
4711 operands[2] = CONST_VECTOR_ELT (vec, 1);
4712 if (cst == cst2)
4713 return "li %0,%1\n\tevmergelo %0,%0,%0";
4714 else
4715 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4718 /* Initialize the paired-float vector TARGET to VALS. */
4720 void
4721 paired_expand_vector_init (rtx target, rtx vals)
4723 enum machine_mode mode = GET_MODE (target);
4724 int n_elts = GET_MODE_NUNITS (mode);
4725 int n_var = 0;
4726 rtx x, new_rtx, tmp, constant_op, op1, op2;
4727 int i;
4729 for (i = 0; i < n_elts; ++i)
4731 x = XVECEXP (vals, 0, i);
4732 if (!(CONST_INT_P (x)
4733 || GET_CODE (x) == CONST_DOUBLE
4734 || GET_CODE (x) == CONST_FIXED))
4735 ++n_var;
4737 if (n_var == 0)
4739 /* Load from constant pool. */
4740 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4741 return;
4744 if (n_var == 2)
4746 /* The vector is initialized only with non-constants. */
4747 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4748 XVECEXP (vals, 0, 1));
4750 emit_move_insn (target, new_rtx);
4751 return;
4754 /* One field is non-constant and the other one is a constant. Load the
4755 constant from the constant pool and use the ps_merge instruction to
4756 construct the whole vector. */
4757 op1 = XVECEXP (vals, 0, 0);
4758 op2 = XVECEXP (vals, 0, 1);
4760 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4762 tmp = gen_reg_rtx (GET_MODE (constant_op));
4763 emit_move_insn (tmp, constant_op);
4765 if (CONSTANT_P (op1))
4766 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4767 else
4768 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4770 emit_move_insn (target, new_rtx);
4773 void
4774 paired_expand_vector_move (rtx operands[])
4776 rtx op0 = operands[0], op1 = operands[1];
4778 emit_move_insn (op0, op1);
4781 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4782 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4783 operands for the relation operation RCODE. This is a recursive
4784 function. */
4786 static void
4787 paired_emit_vector_compare (enum rtx_code rcode,
4788 rtx dest, rtx op0, rtx op1,
4789 rtx cc_op0, rtx cc_op1)
4791 rtx tmp = gen_reg_rtx (V2SFmode);
4792 rtx tmp1, max, min;
4794 gcc_assert (TARGET_PAIRED_FLOAT);
4795 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4797 switch (rcode)
4799 case LT:
4800 case LTU:
4801 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4802 return;
4803 case GE:
4804 case GEU:
4805 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4806 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4807 return;
4808 case LE:
4809 case LEU:
4810 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4811 return;
4812 case GT:
4813 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4814 return;
4815 case EQ:
4816 tmp1 = gen_reg_rtx (V2SFmode);
4817 max = gen_reg_rtx (V2SFmode);
4818 min = gen_reg_rtx (V2SFmode);
4821 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4822 emit_insn (gen_selv2sf4
4823 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4824 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4825 emit_insn (gen_selv2sf4
4826 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4827 emit_insn (gen_subv2sf3 (tmp1, min, max));
4828 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4829 return;
4830 case NE:
4831 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4832 return;
4833 case UNLE:
4834 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4835 return;
4836 case UNLT:
4837 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4838 return;
4839 case UNGE:
4840 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4841 return;
4842 case UNGT:
4843 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4844 return;
4845 default:
4846 gcc_unreachable ();
4849 return;
4852 /* Emit vector conditional expression.
4853 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4854 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4857 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4858 rtx cond, rtx cc_op0, rtx cc_op1)
4860 enum rtx_code rcode = GET_CODE (cond);
4862 if (!TARGET_PAIRED_FLOAT)
4863 return 0;
4865 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4867 return 1;
4870 /* Initialize vector TARGET to VALS. */
4872 void
4873 rs6000_expand_vector_init (rtx target, rtx vals)
4875 enum machine_mode mode = GET_MODE (target);
4876 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4877 int n_elts = GET_MODE_NUNITS (mode);
4878 int n_var = 0, one_var = -1;
4879 bool all_same = true, all_const_zero = true;
4880 rtx x, mem;
4881 int i;
4883 for (i = 0; i < n_elts; ++i)
4885 x = XVECEXP (vals, 0, i);
4886 if (!(CONST_INT_P (x)
4887 || GET_CODE (x) == CONST_DOUBLE
4888 || GET_CODE (x) == CONST_FIXED))
4889 ++n_var, one_var = i;
4890 else if (x != CONST0_RTX (inner_mode))
4891 all_const_zero = false;
4893 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4894 all_same = false;
4897 if (n_var == 0)
4899 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4900 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4901 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4903 /* Zero register. */
4904 emit_insn (gen_rtx_SET (VOIDmode, target,
4905 gen_rtx_XOR (mode, target, target)));
4906 return;
4908 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4910 /* Splat immediate. */
4911 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4912 return;
4914 else
4916 /* Load from constant pool. */
4917 emit_move_insn (target, const_vec);
4918 return;
4922 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4923 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4925 rtx op0 = XVECEXP (vals, 0, 0);
4926 rtx op1 = XVECEXP (vals, 0, 1);
4927 if (all_same)
4929 if (!MEM_P (op0) && !REG_P (op0))
4930 op0 = force_reg (inner_mode, op0);
4931 if (mode == V2DFmode)
4932 emit_insn (gen_vsx_splat_v2df (target, op0));
4933 else
4934 emit_insn (gen_vsx_splat_v2di (target, op0));
4936 else
4938 op0 = force_reg (inner_mode, op0);
4939 op1 = force_reg (inner_mode, op1);
4940 if (mode == V2DFmode)
4941 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4942 else
4943 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4945 return;
4948 /* With single-precision floating point on VSX, we know that internally
4949 single precision is actually represented as a double, so either make two
4950 V2DF vectors and convert these vectors to single precision, or do one
4951 conversion and splat the result to the other elements. */
4952 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4954 if (all_same)
4956 rtx freg = gen_reg_rtx (V4SFmode);
4957 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4959 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4960 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4962 else
4964 rtx dbl_even = gen_reg_rtx (V2DFmode);
4965 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4966 rtx flt_even = gen_reg_rtx (V4SFmode);
4967 rtx flt_odd = gen_reg_rtx (V4SFmode);
4968 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4969 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4970 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4971 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4973 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4974 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4975 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4976 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4977 rs6000_expand_extract_even (target, flt_even, flt_odd);
4979 return;
4982 /* Store value to stack temp. Load vector element. Splat. However, splat
4983 of 64-bit items is not supported on Altivec. */
4984 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4986 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4987 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4988 XVECEXP (vals, 0, 0));
4989 x = gen_rtx_UNSPEC (VOIDmode,
4990 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4991 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4992 gen_rtvec (2,
4993 gen_rtx_SET (VOIDmode,
4994 target, mem),
4995 x)));
4996 x = gen_rtx_VEC_SELECT (inner_mode, target,
4997 gen_rtx_PARALLEL (VOIDmode,
4998 gen_rtvec (1, const0_rtx)));
4999 emit_insn (gen_rtx_SET (VOIDmode, target,
5000 gen_rtx_VEC_DUPLICATE (mode, x)));
5001 return;
5004 /* One field is non-constant. Load constant then overwrite
5005 varying field. */
5006 if (n_var == 1)
5008 rtx copy = copy_rtx (vals);
5010 /* Load constant part of vector, substitute neighboring value for
5011 varying element. */
5012 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5013 rs6000_expand_vector_init (target, copy);
5015 /* Insert variable. */
5016 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5017 return;
5020 /* Construct the vector in memory one field at a time
5021 and load the whole vector. */
5022 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5023 for (i = 0; i < n_elts; i++)
5024 emit_move_insn (adjust_address_nv (mem, inner_mode,
5025 i * GET_MODE_SIZE (inner_mode)),
5026 XVECEXP (vals, 0, i));
5027 emit_move_insn (target, mem);
5030 /* Set field ELT of TARGET to VAL. */
5032 void
5033 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5035 enum machine_mode mode = GET_MODE (target);
5036 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5037 rtx reg = gen_reg_rtx (mode);
5038 rtx mask, mem, x;
5039 int width = GET_MODE_SIZE (inner_mode);
5040 int i;
5042 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5044 rtx (*set_func) (rtx, rtx, rtx, rtx)
5045 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5046 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5047 return;
5050 /* Load single variable value. */
5051 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5052 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5053 x = gen_rtx_UNSPEC (VOIDmode,
5054 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5055 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5056 gen_rtvec (2,
5057 gen_rtx_SET (VOIDmode,
5058 reg, mem),
5059 x)));
5061 /* Linear sequence. */
5062 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5063 for (i = 0; i < 16; ++i)
5064 XVECEXP (mask, 0, i) = GEN_INT (i);
5066 /* Set permute mask to insert element into target. */
5067 for (i = 0; i < width; ++i)
5068 XVECEXP (mask, 0, elt*width + i)
5069 = GEN_INT (i + 0x10);
5070 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5071 x = gen_rtx_UNSPEC (mode,
5072 gen_rtvec (3, target, reg,
5073 force_reg (V16QImode, x)),
5074 UNSPEC_VPERM);
5075 emit_insn (gen_rtx_SET (VOIDmode, target, x));
5078 /* Extract field ELT from VEC into TARGET. */
5080 void
5081 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5083 enum machine_mode mode = GET_MODE (vec);
5084 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5085 rtx mem;
5087 if (VECTOR_MEM_VSX_P (mode))
5089 switch (mode)
5091 default:
5092 break;
5093 case V2DFmode:
5094 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5095 return;
5096 case V2DImode:
5097 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5098 return;
5099 case V4SFmode:
5100 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5101 return;
5105 /* Allocate mode-sized buffer. */
5106 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5108 emit_move_insn (mem, vec);
5110 /* Add offset to field within buffer matching vector element. */
5111 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5113 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5116 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5117 implement ANDing by the mask IN. */
5118 void
5119 build_mask64_2_operands (rtx in, rtx *out)
5121 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5122 int shift;
5124 gcc_assert (GET_CODE (in) == CONST_INT);
5126 c = INTVAL (in);
5127 if (c & 1)
5129 /* Assume c initially something like 0x00fff000000fffff. The idea
5130 is to rotate the word so that the middle ^^^^^^ group of zeros
5131 is at the MS end and can be cleared with an rldicl mask. We then
5132 rotate back and clear off the MS ^^ group of zeros with a
5133 second rldicl. */
5134 c = ~c; /* c == 0xff000ffffff00000 */
5135 lsb = c & -c; /* lsb == 0x0000000000100000 */
5136 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5137 c = ~c; /* c == 0x00fff000000fffff */
5138 c &= -lsb; /* c == 0x00fff00000000000 */
5139 lsb = c & -c; /* lsb == 0x0000100000000000 */
5140 c = ~c; /* c == 0xff000fffffffffff */
5141 c &= -lsb; /* c == 0xff00000000000000 */
5142 shift = 0;
5143 while ((lsb >>= 1) != 0)
5144 shift++; /* shift == 44 on exit from loop */
5145 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5146 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5147 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5149 else
5151 /* Assume c initially something like 0xff000f0000000000. The idea
5152 is to rotate the word so that the ^^^ middle group of zeros
5153 is at the LS end and can be cleared with an rldicr mask. We then
5154 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5155 a second rldicr. */
5156 lsb = c & -c; /* lsb == 0x0000010000000000 */
5157 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5158 c = ~c; /* c == 0x00fff0ffffffffff */
5159 c &= -lsb; /* c == 0x00fff00000000000 */
5160 lsb = c & -c; /* lsb == 0x0000100000000000 */
5161 c = ~c; /* c == 0xff000fffffffffff */
5162 c &= -lsb; /* c == 0xff00000000000000 */
5163 shift = 0;
5164 while ((lsb >>= 1) != 0)
5165 shift++; /* shift == 44 on exit from loop */
5166 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5167 m1 >>= shift; /* m1 == 0x0000000000000fff */
5168 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5171 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5172 masks will be all 1's. We are guaranteed more than one transition. */
5173 out[0] = GEN_INT (64 - shift);
5174 out[1] = GEN_INT (m1);
5175 out[2] = GEN_INT (shift);
5176 out[3] = GEN_INT (m2);
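/* For the first worked example above (IN == 0x00fff000000fffff) the
   outputs are out[0] == 20, out[1] == 0x000000ffffffffff, out[2] == 44
   and out[3] == 0x00ffffffffffffff: rotate left by 20 and mask with m1,
   then rotate back by 44 and mask with m2.  */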
5179 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5181 bool
5182 invalid_e500_subreg (rtx op, enum machine_mode mode)
5184 if (TARGET_E500_DOUBLE)
5186 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5187 subreg:TI and reg:TF. Decimal float modes are like integer
5188 modes (only low part of each register used) for this
5189 purpose. */
5190 if (GET_CODE (op) == SUBREG
5191 && (mode == SImode || mode == DImode || mode == TImode
5192 || mode == DDmode || mode == TDmode || mode == PTImode)
5193 && REG_P (SUBREG_REG (op))
5194 && (GET_MODE (SUBREG_REG (op)) == DFmode
5195 || GET_MODE (SUBREG_REG (op)) == TFmode))
5196 return true;
5198 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5199 reg:TI. */
5200 if (GET_CODE (op) == SUBREG
5201 && (mode == DFmode || mode == TFmode)
5202 && REG_P (SUBREG_REG (op))
5203 && (GET_MODE (SUBREG_REG (op)) == DImode
5204 || GET_MODE (SUBREG_REG (op)) == TImode
5205 || GET_MODE (SUBREG_REG (op)) == PTImode
5206 || GET_MODE (SUBREG_REG (op)) == DDmode
5207 || GET_MODE (SUBREG_REG (op)) == TDmode))
5208 return true;
5211 if (TARGET_SPE
5212 && GET_CODE (op) == SUBREG
5213 && mode == SImode
5214 && REG_P (SUBREG_REG (op))
5215 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5216 return true;
5218 return false;
5221 /* AIX increases natural record alignment to doubleword if the first
5222 field is an FP double while the FP fields remain word aligned. */
5224 unsigned int
5225 rs6000_special_round_type_align (tree type, unsigned int computed,
5226 unsigned int specified)
5228 unsigned int align = MAX (computed, specified);
5229 tree field = TYPE_FIELDS (type);
5231 /* Skip all non-field decls. */
5232 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5233 field = DECL_CHAIN (field);
5235 if (field != NULL && field != type)
5237 type = TREE_TYPE (field);
5238 while (TREE_CODE (type) == ARRAY_TYPE)
5239 type = TREE_TYPE (type);
5241 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5242 align = MAX (align, 64);
5245 return align;
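/* Example: for struct { double d; int i; } on AIX, the first field has
   DFmode, so the record alignment computed above is raised to at least
   64 bits even though the int member alone would only require 32.  */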
5248 /* Darwin increases record alignment to the natural alignment of
5249 the first field. */
5251 unsigned int
5252 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5253 unsigned int specified)
5255 unsigned int align = MAX (computed, specified);
5257 if (TYPE_PACKED (type))
5258 return align;
5260 /* Find the first field, looking down into aggregates. */
5261 do {
5262 tree field = TYPE_FIELDS (type);
5263 /* Skip all non-field decls. */
5264 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5265 field = DECL_CHAIN (field);
5266 if (! field)
5267 break;
5268 /* A packed field does not contribute any extra alignment. */
5269 if (DECL_PACKED (field))
5270 return align;
5271 type = TREE_TYPE (field);
5272 while (TREE_CODE (type) == ARRAY_TYPE)
5273 type = TREE_TYPE (type);
5274 } while (AGGREGATE_TYPE_P (type));
5276 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5277 align = MAX (align, TYPE_ALIGN (type));
5279 return align;
5282 /* Return 1 for an operand in small memory on V.4/eabi. */
5285 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5286 enum machine_mode mode ATTRIBUTE_UNUSED)
5288 #if TARGET_ELF
5289 rtx sym_ref;
5291 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5292 return 0;
5294 if (DEFAULT_ABI != ABI_V4)
5295 return 0;
5297 /* Vector and float memory instructions have a limited offset on the
5298 SPE, so using a vector or float variable directly as an operand is
5299 not useful. */
5300 if (TARGET_SPE
5301 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5302 return 0;
5304 if (GET_CODE (op) == SYMBOL_REF)
5305 sym_ref = op;
5307 else if (GET_CODE (op) != CONST
5308 || GET_CODE (XEXP (op, 0)) != PLUS
5309 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5310 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5311 return 0;
5313 else
5315 rtx sum = XEXP (op, 0);
5316 HOST_WIDE_INT summand;
5318 /* We have to be careful here, because it is the referenced address
5319 that must be 32k from _SDA_BASE_, not just the symbol. */
5320 summand = INTVAL (XEXP (sum, 1));
5321 if (summand < 0 || summand > g_switch_value)
5322 return 0;
5324 sym_ref = XEXP (sum, 0);
5327 return SYMBOL_REF_SMALL_P (sym_ref);
5328 #else
5329 return 0;
5330 #endif
5333 /* Return true if either operand is a general purpose register. */
5335 bool
5336 gpr_or_gpr_p (rtx op0, rtx op1)
5338 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5339 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5342 /* Given an address, return a constant offset term if one exists. */
5344 static rtx
5345 address_offset (rtx op)
5347 if (GET_CODE (op) == PRE_INC
5348 || GET_CODE (op) == PRE_DEC)
5349 op = XEXP (op, 0);
5350 else if (GET_CODE (op) == PRE_MODIFY
5351 || GET_CODE (op) == LO_SUM)
5352 op = XEXP (op, 1);
5354 if (GET_CODE (op) == CONST)
5355 op = XEXP (op, 0);
5357 if (GET_CODE (op) == PLUS)
5358 op = XEXP (op, 1);
5360 if (CONST_INT_P (op))
5361 return op;
5363 return NULL_RTX;
5366 /* Return true if the MEM operand is a memory operand suitable for use
5367 with a (full width, possibly multiple) gpr load/store. On
5368 powerpc64 this means the offset must be divisible by 4.
5369 Implements 'Y' constraint.
5371 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5372 a constraint function we know the operand has satisfied a suitable
5373 memory predicate. Also accept some odd rtl generated by reload
5374 (see rs6000_legitimize_reload_address for various forms). It is
5375 important that reload rtl be accepted by appropriate constraints
5376 but not by the operand predicate.
5378 Offsetting a lo_sum should not be allowed, except where we know by
5379 alignment that a 32k boundary is not crossed, but see the ???
5380 comment in rs6000_legitimize_reload_address. Note that by
5381 "offsetting" here we mean a further offset to access parts of the
5382 MEM. It's fine to have a lo_sum where the inner address is offset
5383 from a sym, since the same sym+offset will appear in the high part
5384 of the address calculation. */
5386 bool
5387 mem_operand_gpr (rtx op, enum machine_mode mode)
5389 unsigned HOST_WIDE_INT offset;
5390 int extra;
5391 rtx addr = XEXP (op, 0);
5393 op = address_offset (addr);
5394 if (op == NULL_RTX)
5395 return true;
5397 offset = INTVAL (op);
5398 if (TARGET_POWERPC64 && (offset & 3) != 0)
5399 return false;
5401 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5402 gcc_assert (extra >= 0);
5404 if (GET_CODE (addr) == LO_SUM)
5405 /* For lo_sum addresses, we must allow any offset except one that
5406 causes a wrap, so test only the low 16 bits. */
5407 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
5409 return offset + 0x8000 < 0x10000u - extra;
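/* Example: a TImode (16-byte) access on powerpc64 has extra == 8, so
   the offset must lie in [-0x8000, 0x8000 - 8) and, per the check
   above, be a multiple of 4.  */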
5412 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5414 static bool
5415 reg_offset_addressing_ok_p (enum machine_mode mode)
5417 switch (mode)
5419 case V16QImode:
5420 case V8HImode:
5421 case V4SFmode:
5422 case V4SImode:
5423 case V2DFmode:
5424 case V2DImode:
5425 case TImode:
5426 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
5427 TImode is not a vector mode, if we want to use the VSX registers to
5428 move it around, we need to restrict ourselves to reg+reg
5429 addressing. */
5430 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5431 return false;
5432 break;
5434 case V4HImode:
5435 case V2SImode:
5436 case V1DImode:
5437 case V2SFmode:
5438 /* Paired vector modes. Only reg+reg addressing is valid. */
5439 if (TARGET_PAIRED_FLOAT)
5440 return false;
5441 break;
5443 case SDmode:
5444 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
5445 addressing for the LFIWZX and STFIWX instructions. */
5446 if (TARGET_NO_SDMODE_STACK)
5447 return false;
5448 break;
5450 default:
5451 break;
5454 return true;
5457 static bool
5458 virtual_stack_registers_memory_p (rtx op)
5460 int regnum;
5462 if (GET_CODE (op) == REG)
5463 regnum = REGNO (op);
5465 else if (GET_CODE (op) == PLUS
5466 && GET_CODE (XEXP (op, 0)) == REG
5467 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5468 regnum = REGNO (XEXP (op, 0));
5470 else
5471 return false;
5473 return (regnum >= FIRST_VIRTUAL_REGISTER
5474 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5477 /* Return true if memory accesses to OP are known to never straddle
5478 a 32k boundary. */
5480 static bool
5481 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5482 enum machine_mode mode)
5484 tree decl, type;
5485 unsigned HOST_WIDE_INT dsize, dalign;
5487 if (GET_CODE (op) != SYMBOL_REF)
5488 return false;
5490 decl = SYMBOL_REF_DECL (op);
5491 if (!decl)
5493 if (GET_MODE_SIZE (mode) == 0)
5494 return false;
5496 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5497 replacing memory addresses with an anchor plus offset. We
5498 could find the decl by rummaging around in the block->objects
5499 VEC for the given offset but that seems like too much work. */
5500 dalign = 1;
5501 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5502 && SYMBOL_REF_ANCHOR_P (op)
5503 && SYMBOL_REF_BLOCK (op) != NULL)
5505 struct object_block *block = SYMBOL_REF_BLOCK (op);
5506 HOST_WIDE_INT lsb, mask;
5508 /* Given the alignment of the block.. */
5509 dalign = block->alignment;
5510 mask = dalign / BITS_PER_UNIT - 1;
5512 /* ..and the combined offset of the anchor and any offset
5513 to this block object.. */
5514 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5515 lsb = offset & -offset;
5517 /* ..find how many bits of the alignment we know for the
5518 object. */
5519 mask &= lsb - 1;
5520 dalign = mask + 1;
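/* E.g. a 16-byte-aligned block (mask 15) with combined offset 40
   gives lsb 8, so mask becomes 7 and dalign 8: we can only rely on
   8-byte alignment for this object.  */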
5522 return dalign >= GET_MODE_SIZE (mode);
5525 if (DECL_P (decl))
5527 if (TREE_CODE (decl) == FUNCTION_DECL)
5528 return true;
5530 if (!DECL_SIZE_UNIT (decl))
5531 return false;
5533 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5534 return false;
5536 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5537 if (dsize > 32768)
5538 return false;
5540 dalign = DECL_ALIGN_UNIT (decl);
5541 return dalign >= dsize;
5544 type = TREE_TYPE (decl);
5546 if (TREE_CODE (decl) == STRING_CST)
5547 dsize = TREE_STRING_LENGTH (decl);
5548 else if (TYPE_SIZE_UNIT (type)
5549 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5550 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5551 else
5552 return false;
5553 if (dsize > 32768)
5554 return false;
5556 dalign = TYPE_ALIGN (type);
5557 if (CONSTANT_CLASS_P (decl))
5558 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5559 else
5560 dalign = DATA_ALIGNMENT (decl, dalign);
5561 dalign /= BITS_PER_UNIT;
5562 return dalign >= dsize;
5565 static bool
5566 constant_pool_expr_p (rtx op)
5568 rtx base, offset;
5570 split_const (op, &base, &offset);
5571 return (GET_CODE (base) == SYMBOL_REF
5572 && CONSTANT_POOL_ADDRESS_P (base)
5573 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
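/* Set as a side effect of toc_relative_expr_p below and examined by
   its callers, e.g. legitimate_constant_pool_address_p.  */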
5576 static const_rtx tocrel_base, tocrel_offset;
5578 /* Return true if OP is a toc pointer relative address (the output
5579 of create_TOC_reference). If STRICT, do not match high part or
5580 non-split -mcmodel=large/medium toc pointer relative addresses. */
5582 bool
5583 toc_relative_expr_p (const_rtx op, bool strict)
5585 if (!TARGET_TOC)
5586 return false;
5588 if (TARGET_CMODEL != CMODEL_SMALL)
5590 /* Only match the low part. */
5591 if (GET_CODE (op) == LO_SUM
5592 && REG_P (XEXP (op, 0))
5593 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5594 op = XEXP (op, 1);
5595 else if (strict)
5596 return false;
5599 tocrel_base = op;
5600 tocrel_offset = const0_rtx;
5601 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5603 tocrel_base = XEXP (op, 0);
5604 tocrel_offset = XEXP (op, 1);
5607 return (GET_CODE (tocrel_base) == UNSPEC
5608 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5611 /* Return true if X is a constant pool address, and also for cmodel=medium
5612 if X is a toc-relative address known to be offsettable within MODE. */
5614 bool
5615 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5616 bool strict)
5618 return (toc_relative_expr_p (x, strict)
5619 && (TARGET_CMODEL != CMODEL_MEDIUM
5620 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5621 || mode == QImode
5622 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5623 INTVAL (tocrel_offset), mode)));
5626 static bool
5627 legitimate_small_data_p (enum machine_mode mode, rtx x)
5629 return (DEFAULT_ABI == ABI_V4
5630 && !flag_pic && !TARGET_TOC
5631 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5632 && small_data_operand (x, mode));
5635 /* SPE offset addressing is limited to 5 bits' worth of doublewords. */
5636 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
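/* I.e. the offset must be a multiple of 8 in [0, 248]: any bit set
   outside the mask 0xf8 rejects it.  */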
5638 bool
5639 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5640 bool strict, bool worst_case)
5642 unsigned HOST_WIDE_INT offset;
5643 unsigned int extra;
5645 if (GET_CODE (x) != PLUS)
5646 return false;
5647 if (!REG_P (XEXP (x, 0)))
5648 return false;
5649 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5650 return false;
5651 if (!reg_offset_addressing_ok_p (mode))
5652 return virtual_stack_registers_memory_p (x);
5653 if (legitimate_constant_pool_address_p (x, mode, strict))
5654 return true;
5655 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5656 return false;
5658 offset = INTVAL (XEXP (x, 1));
5659 extra = 0;
5660 switch (mode)
5662 case V4HImode:
5663 case V2SImode:
5664 case V1DImode:
5665 case V2SFmode:
5666 /* SPE vector modes. */
5667 return SPE_CONST_OFFSET_OK (offset);
5669 case DFmode:
5670 case DDmode:
5671 case DImode:
5672 /* On e500v2, we may have:
5674 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
5676 which gets addressed with evldd instructions. */
5677 if (TARGET_E500_DOUBLE)
5678 return SPE_CONST_OFFSET_OK (offset);
5680 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5681 addressing. */
5682 if (VECTOR_MEM_VSX_P (mode))
5683 return false;
5685 if (!worst_case)
5686 break;
5687 if (!TARGET_POWERPC64)
5688 extra = 4;
5689 else if (offset & 3)
5690 return false;
5691 break;
5693 case TFmode:
5694 case TDmode:
5695 case TImode:
5696 case PTImode:
5697 if (TARGET_E500_DOUBLE)
5698 return (SPE_CONST_OFFSET_OK (offset)
5699 && SPE_CONST_OFFSET_OK (offset + 8));
5701 extra = 8;
5702 if (!worst_case)
5703 break;
5704 if (!TARGET_POWERPC64)
5705 extra = 12;
5706 else if (offset & 3)
5707 return false;
5708 break;
5710 default:
5711 break;
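/* As in mem_operand_gpr, fold the signed 16-bit displacement check
   into one unsigned comparison, reserving EXTRA bytes at the top of
   the range for the trailing words of a multi-register access.  */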
5714 offset += 0x8000;
5715 return offset < 0x10000 - extra;
5718 bool
5719 legitimate_indexed_address_p (rtx x, int strict)
5721 rtx op0, op1;
5723 if (GET_CODE (x) != PLUS)
5724 return false;
5726 op0 = XEXP (x, 0);
5727 op1 = XEXP (x, 1);
5729 /* Recognize the rtl generated by reload which we know will later be
5730 replaced with proper base and index regs. */
5731 if (!strict
5732 && reload_in_progress
5733 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5734 && REG_P (op1))
5735 return true;
5737 return (REG_P (op0) && REG_P (op1)
5738 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5739 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5740 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5741 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5744 bool
5745 avoiding_indexed_address_p (enum machine_mode mode)
5747 /* Avoid indexed addressing for modes that have non-indexed
5748 load/store instruction forms. */
5749 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5752 bool
5753 legitimate_indirect_address_p (rtx x, int strict)
5755 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5758 bool
5759 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5761 if (!TARGET_MACHO || !flag_pic
5762 || mode != SImode || GET_CODE (x) != MEM)
5763 return false;
5764 x = XEXP (x, 0);
5766 if (GET_CODE (x) != LO_SUM)
5767 return false;
5768 if (GET_CODE (XEXP (x, 0)) != REG)
5769 return false;
5770 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5771 return false;
5772 x = XEXP (x, 1);
5774 return CONSTANT_P (x);
5777 static bool
5778 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5780 if (GET_CODE (x) != LO_SUM)
5781 return false;
5782 if (GET_CODE (XEXP (x, 0)) != REG)
5783 return false;
5784 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5785 return false;
5786 /* Restrict addressing for DI because of our SUBREG hackery. */
5787 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
5788 return false;
5789 x = XEXP (x, 1);
5791 if (TARGET_ELF || TARGET_MACHO)
5793 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5794 return false;
5795 if (TARGET_TOC)
5796 return false;
5797 if (GET_MODE_NUNITS (mode) != 1)
5798 return false;
5799 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5800 && !(/* ??? Assume floating point reg based on mode? */
5801 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5802 && (mode == DFmode || mode == DDmode)))
5803 return false;
5805 return CONSTANT_P (x);
5808 return false;
5812 /* Try machine-dependent ways of modifying an illegitimate address
5813 to be legitimate. If we find one, return the new, valid address.
5814 This is used from only one place: `memory_address' in explow.c.
5816 OLDX is the address as it was before break_out_memory_refs was
5817 called. In some cases it is useful to look at this to decide what
5818 needs to be done.
5820 It is always safe for this function to do nothing. It exists to
5821 recognize opportunities to optimize the output.
5823 On RS/6000, first check for the sum of a register with a constant
5824 integer that is out of range. If so, generate code to add the
5825 constant with the low-order 16 bits masked to the register and force
5826 this result into another register (this can be done with `cau').
5827 Then generate an address of REG+(CONST&0xffff), allowing for the
5828 possibility of bit 16 being a one.
5830 Then check for the sum of a register and something not constant; try to
5831 load the other operand into a register and return the sum. */
5833 static rtx
5834 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5835 enum machine_mode mode)
5837 unsigned int extra;
5839 if (!reg_offset_addressing_ok_p (mode))
5841 if (virtual_stack_registers_memory_p (x))
5842 return x;
5844 /* In theory we should not be seeing addresses of the form reg+0,
5845 but just in case it is generated, optimize it away. */
5846 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5847 return force_reg (Pmode, XEXP (x, 0));
5849 /* Make sure both operands are registers. */
5850 else if (GET_CODE (x) == PLUS)
5851 return gen_rtx_PLUS (Pmode,
5852 force_reg (Pmode, XEXP (x, 0)),
5853 force_reg (Pmode, XEXP (x, 1)));
5854 else
5855 return force_reg (Pmode, x);
5857 if (GET_CODE (x) == SYMBOL_REF)
5859 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5860 if (model != 0)
5861 return rs6000_legitimize_tls_address (x, model);
5864 extra = 0;
5865 switch (mode)
5867 case TFmode:
5868 case TDmode:
5869 case TImode:
5870 case PTImode:
5871 /* As in legitimate_offset_address_p we do not assume
5872 worst-case. The mode here is just a hint as to the registers
5873 used. A TImode is usually in gprs, but may actually be in
5874 fprs. Leave worst-case scenario for reload to handle via
5875 insn constraints. PTImode is only GPRs. */
5876 extra = 8;
5877 break;
5878 default:
5879 break;
5882 if (GET_CODE (x) == PLUS
5883 && GET_CODE (XEXP (x, 0)) == REG
5884 && GET_CODE (XEXP (x, 1)) == CONST_INT
5885 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5886 >= 0x10000 - extra)
5887 && !(SPE_VECTOR_MODE (mode)
5888 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
5890 HOST_WIDE_INT high_int, low_int;
5891 rtx sum;
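/* Split the displacement into a sign-extended low 16-bit part that
   the memory insn absorbs and a high part added separately, e.g.
   0x12348765 becomes high 0x12350000 plus low -0x789b.  Low parts
   too close to 0x8000 are zeroed so that OFFSET+EXTRA cannot wrap.  */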
5892 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5893 if (low_int >= 0x8000 - extra)
5894 low_int = 0;
5895 high_int = INTVAL (XEXP (x, 1)) - low_int;
5896 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5897 GEN_INT (high_int)), 0);
5898 return plus_constant (Pmode, sum, low_int);
5900 else if (GET_CODE (x) == PLUS
5901 && GET_CODE (XEXP (x, 0)) == REG
5902 && GET_CODE (XEXP (x, 1)) != CONST_INT
5903 && GET_MODE_NUNITS (mode) == 1
5904 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5905 || (/* ??? Assume floating point reg based on mode? */
5906 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5907 && (mode == DFmode || mode == DDmode)))
5908 && !avoiding_indexed_address_p (mode))
5910 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5911 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5913 else if (SPE_VECTOR_MODE (mode)
5914 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
5916 if (mode == DImode)
5917 return x;
5918 /* We accept [reg + reg] and [reg + OFFSET]. */
5920 if (GET_CODE (x) == PLUS)
5922 rtx op1 = XEXP (x, 0);
5923 rtx op2 = XEXP (x, 1);
5924 rtx y;
5926 op1 = force_reg (Pmode, op1);
5928 if (GET_CODE (op2) != REG
5929 && (GET_CODE (op2) != CONST_INT
5930 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5931 || (GET_MODE_SIZE (mode) > 8
5932 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5933 op2 = force_reg (Pmode, op2);
5935 /* We can't always do [reg + reg] for these, because [reg +
5936 reg + offset] is not a legitimate addressing mode. */
5937 y = gen_rtx_PLUS (Pmode, op1, op2);
5939 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5940 return force_reg (Pmode, y);
5941 else
5942 return y;
5945 return force_reg (Pmode, x);
5947 else if ((TARGET_ELF
5948 #if TARGET_MACHO
5949 || !MACHO_DYNAMIC_NO_PIC_P
5950 #endif
5952 && TARGET_32BIT
5953 && TARGET_NO_TOC
5954 && ! flag_pic
5955 && GET_CODE (x) != CONST_INT
5956 && GET_CODE (x) != CONST_DOUBLE
5957 && CONSTANT_P (x)
5958 && GET_MODE_NUNITS (mode) == 1
5959 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5960 || (/* ??? Assume floating point reg based on mode? */
5961 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5962 && (mode == DFmode || mode == DDmode))))
5964 rtx reg = gen_reg_rtx (Pmode);
5965 if (TARGET_ELF)
5966 emit_insn (gen_elf_high (reg, x));
5967 else
5968 emit_insn (gen_macho_high (reg, x));
5969 return gen_rtx_LO_SUM (Pmode, reg, x);
5971 else if (TARGET_TOC
5972 && GET_CODE (x) == SYMBOL_REF
5973 && constant_pool_expr_p (x)
5974 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5975 return create_TOC_reference (x, NULL_RTX);
5976 else
5977 return x;
5980 /* Debug version of rs6000_legitimize_address. */
5981 static rtx
5982 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5984 rtx ret;
5985 rtx insns;
5987 start_sequence ();
5988 ret = rs6000_legitimize_address (x, oldx, mode);
5989 insns = get_insns ();
5990 end_sequence ();
5992 if (ret != x)
5994 fprintf (stderr,
5995 "\nrs6000_legitimize_address: mode %s, old code %s, "
5996 "new code %s, modified\n",
5997 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5998 GET_RTX_NAME (GET_CODE (ret)));
6000 fprintf (stderr, "Original address:\n");
6001 debug_rtx (x);
6003 fprintf (stderr, "oldx:\n");
6004 debug_rtx (oldx);
6006 fprintf (stderr, "New address:\n");
6007 debug_rtx (ret);
6009 if (insns)
6011 fprintf (stderr, "Insns added:\n");
6012 debug_rtx_list (insns, 20);
6015 else
6017 fprintf (stderr,
6018 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6019 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6021 debug_rtx (x);
6024 if (insns)
6025 emit_insn (insns);
6027 return ret;
6030 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6031 We need to emit DTP-relative relocations. */
6033 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6034 static void
6035 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6037 switch (size)
6039 case 4:
6040 fputs ("\t.long\t", file);
6041 break;
6042 case 8:
6043 fputs (DOUBLE_INT_ASM_OP, file);
6044 break;
6045 default:
6046 gcc_unreachable ();
6048 output_addr_const (file, x);
6049 fputs ("@dtprel+0x8000", file);
6052 /* In the name of slightly smaller debug output, and to cater to
6053 general assembler lossage, recognize various UNSPEC sequences
6054 and turn them back into a direct symbol reference. */
6056 static rtx
6057 rs6000_delegitimize_address (rtx orig_x)
6059 rtx x, y, offset;
6061 orig_x = delegitimize_mem_from_attrs (orig_x);
6062 x = orig_x;
6063 if (MEM_P (x))
6064 x = XEXP (x, 0);
6066 y = x;
6067 if (TARGET_CMODEL != CMODEL_SMALL
6068 && GET_CODE (y) == LO_SUM)
6069 y = XEXP (y, 1);
6071 offset = NULL_RTX;
6072 if (GET_CODE (y) == PLUS
6073 && GET_MODE (y) == Pmode
6074 && CONST_INT_P (XEXP (y, 1)))
6076 offset = XEXP (y, 1);
6077 y = XEXP (y, 0);
6080 if (GET_CODE (y) == UNSPEC
6081 && XINT (y, 1) == UNSPEC_TOCREL)
6083 #ifdef ENABLE_CHECKING
6084 if (REG_P (XVECEXP (y, 0, 1))
6085 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6087 /* All good. */
6089 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6091 /* Weirdness alert. df_note_compute can replace r2 with a
6092 debug_expr when this unspec is in a debug_insn.
6093 Seen in gcc.dg/pr51957-1.c */
6095 else
6097 debug_rtx (orig_x);
6098 abort ();
6100 #endif
6101 y = XVECEXP (y, 0, 0);
6103 #ifdef HAVE_AS_TLS
6104 /* Do not associate thread-local symbols with the original
6105 constant pool symbol. */
6106 if (TARGET_XCOFF
6107 && GET_CODE (y) == SYMBOL_REF
6108 && CONSTANT_POOL_ADDRESS_P (y)
6109 && SYMBOL_REF_TLS_MODEL (get_pool_constant (y)) >= TLS_MODEL_REAL)
6110 return orig_x;
6111 #endif
6113 if (offset != NULL_RTX)
6114 y = gen_rtx_PLUS (Pmode, y, offset);
6115 if (!MEM_P (orig_x))
6116 return y;
6117 else
6118 return replace_equiv_address_nv (orig_x, y);
6121 if (TARGET_MACHO
6122 && GET_CODE (orig_x) == LO_SUM
6123 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6125 y = XEXP (XEXP (orig_x, 1), 0);
6126 if (GET_CODE (y) == UNSPEC
6127 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6128 return XVECEXP (y, 0, 0);
6131 return orig_x;
6134 /* Return true if X shouldn't be emitted into the debug info.
6135 The linker doesn't like .toc section references from
6136 .debug_* sections, so reject .toc section symbols. */
6138 static bool
6139 rs6000_const_not_ok_for_debug_p (rtx x)
6141 if (GET_CODE (x) == SYMBOL_REF
6142 && CONSTANT_POOL_ADDRESS_P (x))
6144 rtx c = get_pool_constant (x);
6145 enum machine_mode cmode = get_pool_mode (x);
6146 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6147 return true;
6150 return false;
6153 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6155 static GTY(()) rtx rs6000_tls_symbol;
6156 static rtx
6157 rs6000_tls_get_addr (void)
6159 if (!rs6000_tls_symbol)
6160 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
6162 return rs6000_tls_symbol;
6165 /* Construct the SYMBOL_REF for TLS GOT references. */
6167 static GTY(()) rtx rs6000_got_symbol;
6168 static rtx
6169 rs6000_got_sym (void)
6171 if (!rs6000_got_symbol)
6173 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
6174 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
6175 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
6178 return rs6000_got_symbol;
6181 /* AIX Thread-Local Address support. */
6183 static rtx
6184 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
6186 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
6187 const char *name;
6188 char *tlsname;
6190 name = XSTR (addr, 0);
6191 /* Append the TLS CSECT qualifier, unless the symbol is already
6192 qualified or will be placed in the TLS private data section. */
6193 if (name[strlen (name) - 1] != ']'
6194 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
6195 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
6197 tlsname = XALLOCAVEC (char, strlen (name) + 5); /* name + "[TL]" + NUL. */
6198 strcpy (tlsname, name);
6199 strcat (tlsname,
6200 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
6201 tlsaddr = copy_rtx (addr);
6202 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
6204 else
6205 tlsaddr = addr;
6207 /* Place addr into TOC constant pool. */
6208 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
6210 /* Output the TOC entry and create the MEM referencing the value. */
6211 if (constant_pool_expr_p (XEXP (sym, 0))
6212 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
6214 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
6215 mem = gen_const_mem (Pmode, tocref);
6216 set_mem_alias_set (mem, get_TOC_alias_set ());
6218 else
6219 return sym;
6221 /* Use global-dynamic for local-dynamic. */
6222 if (model == TLS_MODEL_GLOBAL_DYNAMIC
6223 || model == TLS_MODEL_LOCAL_DYNAMIC)
6225 /* Create new TOC reference for @m symbol. */
6226 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
6227 tlsname = XALLOCAVEC (char, strlen (name) + 2); /* "*LCM" + name+3 + NUL. */
6228 strcpy (tlsname, "*LCM");
6229 strcat (tlsname, name + 3);
6230 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
6231 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
6232 tocref = create_TOC_reference (modaddr, NULL_RTX);
6233 rtx modmem = gen_const_mem (Pmode, tocref);
6234 set_mem_alias_set (modmem, get_TOC_alias_set ());
6236 rtx modreg = gen_reg_rtx (Pmode);
6237 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
6239 tmpreg = gen_reg_rtx (Pmode);
6240 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6242 dest = gen_reg_rtx (Pmode);
6243 if (TARGET_32BIT)
6244 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
6245 else
6246 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
6247 return dest;
6249 /* Obtain the TLS pointer: a 32-bit call, or GPR 13 on 64-bit. */
6250 else if (TARGET_32BIT)
6252 tlsreg = gen_reg_rtx (SImode);
6253 emit_insn (gen_tls_get_tpointer (tlsreg));
6255 else
6256 tlsreg = gen_rtx_REG (DImode, 13);
6258 /* Load the TOC value into temporary register. */
6259 tmpreg = gen_reg_rtx (Pmode);
6260 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
6261 set_unique_reg_note (get_last_insn (), REG_EQUAL,
6262 gen_rtx_MINUS (Pmode, addr, tlsreg));
6264 /* Add TOC symbol value to TLS pointer. */
6265 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
6267 return dest;
6270 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
6271 this (thread-local) address. */
6273 static rtx
6274 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
6276 rtx dest, insn;
6278 if (TARGET_XCOFF)
6279 return rs6000_legitimize_tls_address_aix (addr, model);
6281 dest = gen_reg_rtx (Pmode);
6282 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
6284 rtx tlsreg;
6286 if (TARGET_64BIT)
6288 tlsreg = gen_rtx_REG (Pmode, 13);
6289 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
6291 else
6293 tlsreg = gen_rtx_REG (Pmode, 2);
6294 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
6296 emit_insn (insn);
6298 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
6300 rtx tlsreg, tmp;
6302 tmp = gen_reg_rtx (Pmode);
6303 if (TARGET_64BIT)
6305 tlsreg = gen_rtx_REG (Pmode, 13);
6306 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
6308 else
6310 tlsreg = gen_rtx_REG (Pmode, 2);
6311 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
6313 emit_insn (insn);
6314 if (TARGET_64BIT)
6315 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
6316 else
6317 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
6318 emit_insn (insn);
6320 else
6322 rtx r3, got, tga, tmp1, tmp2, call_insn;
6324 /* We currently use relocations like @got@tlsgd for tls, which
6325 means the linker will handle allocation of tls entries, placing
6326 them in the .got section. So use a pointer to the .got section,
6327 not one to secondary TOC sections used by 64-bit -mminimal-toc,
6328 or to secondary GOT sections used by 32-bit -fPIC. */
6329 if (TARGET_64BIT)
6330 got = gen_rtx_REG (Pmode, 2);
6331 else
6333 if (flag_pic == 1)
6334 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
6335 else
6337 rtx gsym = rs6000_got_sym ();
6338 got = gen_reg_rtx (Pmode);
6339 if (flag_pic == 0)
6340 rs6000_emit_move (got, gsym, Pmode);
6341 else
6343 rtx mem, lab, last;
6345 tmp1 = gen_reg_rtx (Pmode);
6346 tmp2 = gen_reg_rtx (Pmode);
6347 mem = gen_const_mem (Pmode, tmp1);
6348 lab = gen_label_rtx ();
6349 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
6350 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
6351 if (TARGET_LINK_STACK)
6352 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
6353 emit_move_insn (tmp2, mem);
6354 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
6355 set_unique_reg_note (last, REG_EQUAL, gsym);
6360 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
6362 tga = rs6000_tls_get_addr ();
6363 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
6364 1, const0_rtx, Pmode);
6366 r3 = gen_rtx_REG (Pmode, 3);
6367 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6368 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
6369 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6370 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
6371 else if (DEFAULT_ABI == ABI_V4)
6372 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
6373 else
6374 gcc_unreachable ();
6375 call_insn = last_call_insn ();
6376 PATTERN (call_insn) = insn;
6377 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6378 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6379 pic_offset_table_rtx);
6381 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
6383 tga = rs6000_tls_get_addr ();
6384 tmp1 = gen_reg_rtx (Pmode);
6385 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
6386 1, const0_rtx, Pmode);
6388 r3 = gen_rtx_REG (Pmode, 3);
6389 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6390 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
6391 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6392 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
6393 else if (DEFAULT_ABI == ABI_V4)
6394 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
6395 else
6396 gcc_unreachable ();
6397 call_insn = last_call_insn ();
6398 PATTERN (call_insn) = insn;
6399 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6400 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6401 pic_offset_table_rtx);
6403 if (rs6000_tls_size == 16)
6405 if (TARGET_64BIT)
6406 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
6407 else
6408 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
6410 else if (rs6000_tls_size == 32)
6412 tmp2 = gen_reg_rtx (Pmode);
6413 if (TARGET_64BIT)
6414 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
6415 else
6416 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6417 emit_insn (insn);
6418 if (TARGET_64BIT)
6419 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6420 else
6421 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6423 else
6425 tmp2 = gen_reg_rtx (Pmode);
6426 if (TARGET_64BIT)
6427 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6428 else
6429 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6430 emit_insn (insn);
6431 insn = gen_rtx_SET (Pmode, dest,
6432 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6434 emit_insn (insn);
6436 else
6438 /* IE, or 64-bit offset LE. */
6439 tmp2 = gen_reg_rtx (Pmode);
6440 if (TARGET_64BIT)
6441 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6442 else
6443 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6444 emit_insn (insn);
6445 if (TARGET_64BIT)
6446 insn = gen_tls_tls_64 (dest, tmp2, addr);
6447 else
6448 insn = gen_tls_tls_32 (dest, tmp2, addr);
6449 emit_insn (insn);
6453 return dest;
6456 /* Return 1 if X contains a thread-local symbol. */
6458 static bool
6459 rs6000_tls_referenced_p (rtx x)
6461 if (! TARGET_HAVE_TLS)
6462 return false;
6464 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6467 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6469 static bool
6470 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6472 if (GET_CODE (x) == HIGH
6473 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6474 return true;
6476 /* A TLS symbol in the TOC cannot contain a sum. */
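/* E.g. reject (const (plus (symbol_ref "tls_var") (const_int 4))).  */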
6477 if (GET_CODE (x) == CONST
6478 && GET_CODE (XEXP (x, 0)) == PLUS
6479 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6480 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
6481 return true;
6483 /* Do not place an ELF TLS symbol in the constant pool. */
6484 return TARGET_ELF && rs6000_tls_referenced_p (x);
6487 /* Return 1 if *X is a thread-local symbol. This is the same as
6488 rs6000_tls_symbol_ref except for the type of the unused argument. */
6490 static int
6491 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6493 return RS6000_SYMBOL_REF_TLS_P (*x);
6496 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6497 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6498 can be addressed relative to the toc pointer. */
6500 static bool
6501 use_toc_relative_ref (rtx sym)
6503 return ((constant_pool_expr_p (sym)
6504 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6505 get_pool_mode (sym)))
6506 || (TARGET_CMODEL == CMODEL_MEDIUM
6507 && !CONSTANT_POOL_ADDRESS_P (sym)
6508 && SYMBOL_REF_LOCAL_P (sym)));
6511 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6512 replace the input X, or the original X if no replacement is called for.
6513 The output parameter *WIN is 1 if the calling macro should goto WIN,
6514 0 if it should not.
6516 For RS/6000, we wish to handle large displacements off a base
6517 register by splitting the addend across an addi/addis and the mem insn.
6518 This cuts the number of extra insns needed from 3 to 1.
6520 On Darwin, we use this to generate code for floating point constants.
6521 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6522 The Darwin code is inside #if TARGET_MACHO because only then are the
6523 machopic_* functions defined. */
6524 static rtx
6525 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6526 int opnum, int type,
6527 int ind_levels ATTRIBUTE_UNUSED, int *win)
6529 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6531 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6532 DFmode/DImode MEM. */
6533 if (reg_offset_p
6534 && opnum == 1
6535 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6536 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6537 reg_offset_p = false;
6539 /* We must recognize output that we have already generated ourselves. */
6540 if (GET_CODE (x) == PLUS
6541 && GET_CODE (XEXP (x, 0)) == PLUS
6542 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6543 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6544 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6546 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6547 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6548 opnum, (enum reload_type) type);
6549 *win = 1;
6550 return x;
6553 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6554 if (GET_CODE (x) == LO_SUM
6555 && GET_CODE (XEXP (x, 0)) == HIGH)
6557 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6558 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6559 opnum, (enum reload_type) type);
6560 *win = 1;
6561 return x;
6564 #if TARGET_MACHO
6565 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6566 && GET_CODE (x) == LO_SUM
6567 && GET_CODE (XEXP (x, 0)) == PLUS
6568 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6569 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6570 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6571 && machopic_operand_p (XEXP (x, 1)))
6573 /* Result of previous invocation of this function on Darwin
6574 floating point constant. */
6575 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6576 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6577 opnum, (enum reload_type) type);
6578 *win = 1;
6579 return x;
6581 #endif
6583 if (TARGET_CMODEL != CMODEL_SMALL
6584 && reg_offset_p
6585 && small_toc_ref (x, VOIDmode))
6587 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6588 x = gen_rtx_LO_SUM (Pmode, hi, x);
6589 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6590 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6591 opnum, (enum reload_type) type);
6592 *win = 1;
6593 return x;
6596 if (GET_CODE (x) == PLUS
6597 && GET_CODE (XEXP (x, 0)) == REG
6598 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6599 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6600 && GET_CODE (XEXP (x, 1)) == CONST_INT
6601 && reg_offset_p
6602 && !SPE_VECTOR_MODE (mode)
6603 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6604 || mode == DDmode || mode == TDmode
6605 || mode == DImode))
6606 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
6608 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6609 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6610 HOST_WIDE_INT high
6611 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
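/* HIGH is (VAL - LOW) sign-extended from 32 bits, so a displacement
   that cannot be rebuilt from a 32-bit high part plus a signed
   16-bit low part, e.g. 0x123456789, fails the check below.  */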
6613 /* Check for 32-bit overflow. */
6614 if (high + low != val)
6616 *win = 0;
6617 return x;
6620 /* Reload the high part into a base reg; leave the low part
6621 in the mem directly. */
6623 x = gen_rtx_PLUS (GET_MODE (x),
6624 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6625 GEN_INT (high)),
6626 GEN_INT (low));
6628 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6629 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6630 opnum, (enum reload_type) type);
6631 *win = 1;
6632 return x;
6635 if (GET_CODE (x) == SYMBOL_REF
6636 && reg_offset_p
6637 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
6638 && !SPE_VECTOR_MODE (mode)
6639 #if TARGET_MACHO
6640 && DEFAULT_ABI == ABI_DARWIN
6641 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6642 && machopic_symbol_defined_p (x)
6643 #else
6644 && DEFAULT_ABI == ABI_V4
6645 && !flag_pic
6646 #endif
6647 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6648 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6649 without fprs.
6650 ??? Assume floating point reg based on mode? This assumption is
6651 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6652 where reload ends up doing a DFmode load of a constant from
6653 mem using two gprs. Unfortunately, at this point reload
6654 hasn't yet selected regs so poking around in reload data
6655 won't help and even if we could figure out the regs reliably,
6656 we'd still want to allow this transformation when the mem is
6657 naturally aligned. Since we say the address is good here, we
6658 can't disable offsets from LO_SUMs in mem_operand_gpr.
6659 FIXME: Allow offset from lo_sum for other modes too, when
6660 mem is sufficiently aligned. */
6661 && mode != TFmode
6662 && mode != TDmode
6663 && (mode != TImode || !TARGET_VSX_TIMODE)
6664 && mode != PTImode
6665 && (mode != DImode || TARGET_POWERPC64)
6666 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6667 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6669 #if TARGET_MACHO
6670 if (flag_pic)
6672 rtx offset = machopic_gen_offset (x);
6673 x = gen_rtx_LO_SUM (GET_MODE (x),
6674 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6675 gen_rtx_HIGH (Pmode, offset)), offset);
6677 else
6678 #endif
6679 x = gen_rtx_LO_SUM (GET_MODE (x),
6680 gen_rtx_HIGH (Pmode, x), x);
6682 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6683 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6684 opnum, (enum reload_type) type);
6685 *win = 1;
6686 return x;
6689 /* Reload an offset address wrapped by an AND that represents the
6690 masking of the lower bits. Strip the outer AND and let reload
6691 convert the offset address into an indirect address. For VSX,
6692 force reload to create the address with an AND in a separate
6693 register, because we can't guarantee an altivec register will
6694 be used. */
6695 if (VECTOR_MEM_ALTIVEC_P (mode)
6696 && GET_CODE (x) == AND
6697 && GET_CODE (XEXP (x, 0)) == PLUS
6698 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6699 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6700 && GET_CODE (XEXP (x, 1)) == CONST_INT
6701 && INTVAL (XEXP (x, 1)) == -16)
6703 x = XEXP (x, 0);
6704 *win = 1;
6705 return x;
6708 if (TARGET_TOC
6709 && reg_offset_p
6710 && GET_CODE (x) == SYMBOL_REF
6711 && use_toc_relative_ref (x))
6713 x = create_TOC_reference (x, NULL_RTX);
6714 if (TARGET_CMODEL != CMODEL_SMALL)
6715 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6716 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6717 opnum, (enum reload_type) type);
6718 *win = 1;
6719 return x;
6721 *win = 0;
6722 return x;
6725 /* Debug version of rs6000_legitimize_reload_address. */
6726 static rtx
6727 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6728 int opnum, int type,
6729 int ind_levels, int *win)
6731 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6732 ind_levels, win);
6733 fprintf (stderr,
6734 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6735 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6736 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6737 debug_rtx (x);
6739 if (x == ret)
6740 fprintf (stderr, "Same address returned\n");
6741 else if (!ret)
6742 fprintf (stderr, "NULL returned\n");
6743 else
6745 fprintf (stderr, "New address:\n");
6746 debug_rtx (ret);
6749 return ret;
6752 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6753 that is a valid memory address for an instruction.
6754 The MODE argument is the machine mode for the MEM expression
6755 that wants to use this address.
6757 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
6758 refers to a constant pool entry of an address (or the sum of it
6759 plus a constant), a short (16-bit signed) constant plus a register,
6760 the sum of two registers, or a register indirect, possibly with an
6761 auto-increment. For DFmode, DDmode and DImode with a constant plus
6762 register, we must ensure that both words are addressable, or on
6763 PowerPC64 that the offset is word-aligned.
6765 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6766 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6767 because adjacent memory cells are accessed by adding word-sized offsets
6768 during assembly output. */
6769 static bool
6770 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6772 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6774 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6775 if (VECTOR_MEM_ALTIVEC_P (mode)
6776 && GET_CODE (x) == AND
6777 && GET_CODE (XEXP (x, 1)) == CONST_INT
6778 && INTVAL (XEXP (x, 1)) == -16)
6779 x = XEXP (x, 0);
6781 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
6782 return 0;
6783 if (legitimate_indirect_address_p (x, reg_ok_strict))
6784 return 1;
6785 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6786 && !ALTIVEC_OR_VSX_VECTOR_MODE (mode)
6787 && !SPE_VECTOR_MODE (mode)
6788 && mode != TFmode
6789 && mode != TDmode
6790 && mode != TImode
6791 && mode != PTImode
6792 /* Restrict addressing for DI because of our SUBREG hackery. */
6793 && !(TARGET_E500_DOUBLE
6794 && (mode == DFmode || mode == DDmode || mode == DImode))
6795 && TARGET_UPDATE
6796 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6797 return 1;
6798 if (virtual_stack_registers_memory_p (x))
6799 return 1;
6800 if (reg_offset_p && legitimate_small_data_p (mode, x))
6801 return 1;
6802 if (reg_offset_p
6803 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6804 return 1;
6805 /* If not REG_OK_STRICT (i.e. before reload), let any stack offset pass. */
6806 if (! reg_ok_strict
6807 && reg_offset_p
6808 && GET_CODE (x) == PLUS
6809 && GET_CODE (XEXP (x, 0)) == REG
6810 && (XEXP (x, 0) == virtual_stack_vars_rtx
6811 || XEXP (x, 0) == arg_pointer_rtx)
6812 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6813 return 1;
6814 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
6815 return 1;
6816 if (mode != TFmode
6817 && mode != TDmode
6818 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6819 || TARGET_POWERPC64
6820 || (mode != DFmode && mode != DDmode)
6821 || (TARGET_E500_DOUBLE && mode != DDmode))
6822 && (TARGET_POWERPC64 || mode != DImode)
6823 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
6824 && mode != PTImode
6825 && !avoiding_indexed_address_p (mode)
6826 && legitimate_indexed_address_p (x, reg_ok_strict))
6827 return 1;
6828 if (GET_CODE (x) == PRE_MODIFY
6829 && mode != TImode
6830 && mode != PTImode
6831 && mode != TFmode
6832 && mode != TDmode
6833 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6834 || TARGET_POWERPC64
6835 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6836 && (TARGET_POWERPC64 || mode != DImode)
6837 && !ALTIVEC_OR_VSX_VECTOR_MODE (mode)
6838 && !SPE_VECTOR_MODE (mode)
6839 /* Restrict addressing for DI because of our SUBREG hackery. */
6840 && !(TARGET_E500_DOUBLE
6841 && (mode == DFmode || mode == DDmode || mode == DImode))
6842 && TARGET_UPDATE
6843 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6844 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
6845 reg_ok_strict, false)
6846 || (!avoiding_indexed_address_p (mode)
6847 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6848 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6849 return 1;
6850 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6851 return 1;
6852 return 0;
6855 /* Debug version of rs6000_legitimate_address_p. */
6856 static bool
6857 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6858 bool reg_ok_strict)
6860 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6861 fprintf (stderr,
6862 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6863 "strict = %d, code = %s\n",
6864 ret ? "true" : "false",
6865 GET_MODE_NAME (mode),
6866 reg_ok_strict,
6867 GET_RTX_NAME (GET_CODE (x)));
6868 debug_rtx (x);
6870 return ret;
6873 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6875 static bool
6876 rs6000_mode_dependent_address_p (const_rtx addr,
6877 addr_space_t as ATTRIBUTE_UNUSED)
6879 return rs6000_mode_dependent_address_ptr (addr);
6882 /* Return true if ADDR (a legitimate address expression)
6883 has an effect that depends on the machine mode it is used for.
6885 On the RS/6000 this is true of all integral offsets (since AltiVec
6886 and VSX modes don't allow them) and of pre-increment and decrement addresses.
6888 ??? Except that due to conceptual problems in offsettable_address_p
6889 we can't really report the problems of integral offsets. So leave
6890 this assuming that the adjustable offset must be valid for the
6891 sub-words of a TFmode operand, which is what we had before. */
6893 static bool
6894 rs6000_mode_dependent_address (const_rtx addr)
6896 switch (GET_CODE (addr))
6898 case PLUS:
6899 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6900 is considered a legitimate address before reload, so there
6901 are no offset restrictions in that case. Note that this
6902 condition is safe in strict mode because any address involving
6903 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6904 been rejected as illegitimate. */
6905 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6906 && XEXP (addr, 0) != arg_pointer_rtx
6907 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6909 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
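/* Mode-dependent when the offset sits within the worst-case EXTRA
   (8 bytes on 64-bit, 12 on 32-bit) of the top of the signed 16-bit
   range; cf. rs6000_legitimate_offset_address_p.  */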
6910 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
6912 break;
6914 case LO_SUM:
6915 /* Anything in the constant pool is sufficiently aligned that
6916 all bytes have the same high part address. */
6917 return !legitimate_constant_pool_address_p (addr, QImode, false);
6919 /* Auto-increment cases are now treated generically in recog.c. */
6920 case PRE_MODIFY:
6921 return TARGET_UPDATE;
6923 /* AND is only allowed in Altivec loads. */
6924 case AND:
6925 return true;
6927 default:
6928 break;
6931 return false;
6934 /* Debug version of rs6000_mode_dependent_address. */
6935 static bool
6936 rs6000_debug_mode_dependent_address (const_rtx addr)
6938 bool ret = rs6000_mode_dependent_address (addr);
6940 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6941 ret ? "true" : "false");
6942 debug_rtx (addr);
6944 return ret;
6947 /* Implement FIND_BASE_TERM. */
6949 rtx
6950 rs6000_find_base_term (rtx op)
6952 rtx base;
6954 base = op;
6955 if (GET_CODE (base) == CONST)
6956 base = XEXP (base, 0);
6957 if (GET_CODE (base) == PLUS)
6958 base = XEXP (base, 0);
6959 if (GET_CODE (base) == UNSPEC)
6960 switch (XINT (base, 1))
6962 case UNSPEC_TOCREL:
6963 case UNSPEC_MACHOPIC_OFFSET:
6964 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6965 for aliasing purposes. */
6966 return XVECEXP (base, 0, 0);
6969 return op;
6972 /* More elaborate version of recog's offsettable_memref_p predicate
6973 that works around the ??? note of rs6000_mode_dependent_address.
6974 In particular it accepts
6976 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6978 in 32-bit mode, which the recog predicate rejects. */
6980 static bool
6981 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
6983 bool worst_case;
6985 if (!MEM_P (op))
6986 return false;
6988 /* First mimic offsettable_memref_p. */
6989 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
6990 return true;
6992 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6993 the latter predicate knows nothing about the mode of the memory
6994 reference and, therefore, assumes that it is the largest supported
6995 mode (TFmode). As a consequence, legitimate offsettable memory
6996 references are rejected. rs6000_legitimate_offset_address_p contains
6997 the correct logic for the PLUS case of rs6000_mode_dependent_address,
6998 at least with a little bit of help here given that we know the
6999 actual registers used. */
7000 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7001 || GET_MODE_SIZE (reg_mode) == 4);
7002 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7003 true, worst_case);
7006 /* Change register usage conditional on target flags. */
7007 static void
7008 rs6000_conditional_register_usage (void)
7010 int i;
7012 if (TARGET_DEBUG_TARGET)
7013 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7015 /* Set MQ register fixed (already call_used) so that it will not be
7016 allocated. */
7017 fixed_regs[64] = 1;
7019 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7020 if (TARGET_64BIT)
7021 fixed_regs[13] = call_used_regs[13]
7022 = call_really_used_regs[13] = 1;
7024 /* Conditionally disable FPRs. */
7025 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7026 for (i = 32; i < 64; i++)
7027 fixed_regs[i] = call_used_regs[i]
7028 = call_really_used_regs[i] = 1;
7030 /* The TOC register is not killed across calls in a way that is
7031 visible to the compiler. */
7032 if (DEFAULT_ABI == ABI_AIX)
7033 call_really_used_regs[2] = 0;
7035 if (DEFAULT_ABI == ABI_V4
7036 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7037 && flag_pic == 2)
7038 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7040 if (DEFAULT_ABI == ABI_V4
7041 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7042 && flag_pic == 1)
7043 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7044 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7045 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7047 if (DEFAULT_ABI == ABI_DARWIN
7048 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7049 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7050 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7051 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7053 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7054 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7055 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7057 if (TARGET_SPE)
7059 global_regs[SPEFSCR_REGNO] = 1;
7060 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7061 registers in prologues and epilogues. We no longer use r14
7062 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7063 pool for link-compatibility with older versions of GCC. Once
7064 "old" code has died out, we can return r14 to the allocation
7065 pool. */
7066 fixed_regs[14]
7067 = call_used_regs[14]
7068 = call_really_used_regs[14] = 1;
7071 if (!TARGET_ALTIVEC && !TARGET_VSX)
7073 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7074 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7075 call_really_used_regs[VRSAVE_REGNO] = 1;
7078 if (TARGET_ALTIVEC || TARGET_VSX)
7079 global_regs[VSCR_REGNO] = 1;
7081 if (TARGET_ALTIVEC_ABI)
7083 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7084 call_used_regs[i] = call_really_used_regs[i] = 1;
7086 /* AIX reserves VR20:31 in non-extended ABI mode. */
7087 if (TARGET_XCOFF)
7088 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7089 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7093 /* Try to output insns to set DEST equal to the constant SOURCE if it
7094 can be done in fewer than N insns. Do all computations in MODE.
7095 Returns the place where the output has been placed if it can be
7096 done and the insns have been emitted. If it would take more than N
7097 insns, zero is returned and no insns are emitted. */
7099 rtx
7100 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
7101 rtx source, int n ATTRIBUTE_UNUSED)
7103 rtx result, insn, set;
7104 HOST_WIDE_INT c0, c1;
7106 switch (mode)
7108 case QImode:
7109 case HImode:
7110 if (dest == NULL)
7111 dest = gen_reg_rtx (mode);
7112 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7113 return dest;
7115 case SImode:
7116 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
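/* Build the value as the high 16 bits (lis) IORed with the low 16
   bits (ori), e.g. 0x12345678 becomes lis 0x1234 then ori 0x5678.  */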
7118 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
7119 GEN_INT (INTVAL (source)
7120 & (~ (HOST_WIDE_INT) 0xffff))));
7121 emit_insn (gen_rtx_SET (VOIDmode, dest,
7122 gen_rtx_IOR (SImode, copy_rtx (result),
7123 GEN_INT (INTVAL (source) & 0xffff))));
7124 result = dest;
7125 break;
7127 case DImode:
7128 switch (GET_CODE (source))
7130 case CONST_INT:
7131 c0 = INTVAL (source);
7132 c1 = -(c0 < 0);
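/* C1 becomes the high word: the sign extension of C0 (all ones if
   negative, else zero).  */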
7133 break;
7135 default:
7136 gcc_unreachable ();
7139 result = rs6000_emit_set_long_const (dest, c0, c1);
7140 break;
7142 default:
7143 gcc_unreachable ();
7146 insn = get_last_insn ();
7147 set = single_set (insn);
7148 if (! CONSTANT_P (SET_SRC (set)))
7149 set_unique_reg_note (insn, REG_EQUAL, source);
7151 return result;
7154 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
7155 fall back to a straightforward decomposition. We do this to avoid
7156 exponential run times encountered when looking for longer sequences
7157 with rs6000_emit_set_const. */
7158 static rtx
7159 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
7161 if (!TARGET_POWERPC64)
7163 rtx operand1, operand2;
7165 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
7166 DImode);
7167 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
7168 DImode);
7169 emit_move_insn (operand1, GEN_INT (c1));
7170 emit_move_insn (operand2, GEN_INT (c2));
7172 else
7174 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7176 ud1 = c1 & 0xffff;
7177 ud2 = (c1 & 0xffff0000) >> 16;
7178 c2 = c1 >> 32;
7179 ud3 = c2 & 0xffff;
7180 ud4 = (c2 & 0xffff0000) >> 16;
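/* E.g. c1 0x1234567890abcdef splits into ud4 0x1234, ud3 0x5678,
   ud2 0x90ab, ud1 0xcdef; the cases below then rebuild the constant
   with as few li/lis/ori/oris/sldi steps as its zero and
   sign-extension patterns allow.  */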
7182 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7183 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7184 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7186 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7187 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7189 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7190 - 0x80000000));
7191 if (ud1 != 0)
7192 emit_move_insn (copy_rtx (dest),
7193 gen_rtx_IOR (DImode, copy_rtx (dest),
7194 GEN_INT (ud1)));
7196 else if (ud3 == 0 && ud4 == 0)
7198 gcc_assert (ud2 & 0x8000);
7199 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7200 - 0x80000000));
7201 if (ud1 != 0)
7202 emit_move_insn (copy_rtx (dest),
7203 gen_rtx_IOR (DImode, copy_rtx (dest),
7204 GEN_INT (ud1)));
7205 emit_move_insn (copy_rtx (dest),
7206 gen_rtx_ZERO_EXTEND (DImode,
7207 gen_lowpart (SImode,
7208 copy_rtx (dest))));
7210 else if ((ud4 == 0xffff && (ud3 & 0x8000))
7211 || (ud4 == 0 && ! (ud3 & 0x8000)))
7213 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
7214 - 0x80000000));
7215 if (ud2 != 0)
7216 emit_move_insn (copy_rtx (dest),
7217 gen_rtx_IOR (DImode, copy_rtx (dest),
7218 GEN_INT (ud2)));
7219 emit_move_insn (copy_rtx (dest),
7220 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7221 GEN_INT (16)));
7222 if (ud1 != 0)
7223 emit_move_insn (copy_rtx (dest),
7224 gen_rtx_IOR (DImode, copy_rtx (dest),
7225 GEN_INT (ud1)));
7227 else
7229 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
7230 - 0x80000000));
7231 if (ud3 != 0)
7232 emit_move_insn (copy_rtx (dest),
7233 gen_rtx_IOR (DImode, copy_rtx (dest),
7234 GEN_INT (ud3)));
7236 emit_move_insn (copy_rtx (dest),
7237 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7238 GEN_INT (32)));
7239 if (ud2 != 0)
7240 emit_move_insn (copy_rtx (dest),
7241 gen_rtx_IOR (DImode, copy_rtx (dest),
7242 GEN_INT (ud2 << 16)));
7243 if (ud1 != 0)
7244 emit_move_insn (copy_rtx (dest),
7245 gen_rtx_IOR (DImode, copy_rtx (dest),
7246 GEN_INT (ud1)));
7249 return dest;
7252 /* Helper for rs6000_emit_move below. Get rid of [r+r] memory refs
7253 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
7255 static void
7256 rs6000_eliminate_indexed_memrefs (rtx operands[2])
7258 if (reload_in_progress)
7259 return;
7261 if (GET_CODE (operands[0]) == MEM
7262 && GET_CODE (XEXP (operands[0], 0)) != REG
7263 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
7264 GET_MODE (operands[0]), false))
7265 operands[0]
7266 = replace_equiv_address (operands[0],
7267 copy_addr_to_reg (XEXP (operands[0], 0)));
7269 if (GET_CODE (operands[1]) == MEM
7270 && GET_CODE (XEXP (operands[1], 0)) != REG
7271 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
7272 GET_MODE (operands[1]), false))
7273 operands[1]
7274 = replace_equiv_address (operands[1],
7275 copy_addr_to_reg (XEXP (operands[1], 0)));
7278 /* Emit a move from SOURCE to DEST in mode MODE. */
7279 void
7280 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
7282 rtx operands[2];
7283 operands[0] = dest;
7284 operands[1] = source;
7286 if (TARGET_DEBUG_ADDR)
7288 fprintf (stderr,
7289 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
7290 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
7291 GET_MODE_NAME (mode),
7292 reload_in_progress,
7293 reload_completed,
7294 can_create_pseudo_p ());
7295 debug_rtx (dest);
7296 fprintf (stderr, "source:\n");
7297 debug_rtx (source);
7300 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
7301 if (GET_CODE (operands[1]) == CONST_DOUBLE
7302 && ! FLOAT_MODE_P (mode)
7303 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
7305 /* FIXME. This should never happen. */
7306 /* Since it seems that it does, do the safe thing and convert
7307 to a CONST_INT. */
7308 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
7310 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
7311 || FLOAT_MODE_P (mode)
7312 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
7313 || CONST_DOUBLE_LOW (operands[1]) < 0)
7314 && (CONST_DOUBLE_HIGH (operands[1]) != -1
7315 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
7317 /* Check if GCC is setting up a block move that will end up using FP
7318 registers as temporaries. We must make sure this is acceptable. */
7319 if (GET_CODE (operands[0]) == MEM
7320 && GET_CODE (operands[1]) == MEM
7321 && mode == DImode
7322 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
7323 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
7324 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
7325 ? 32 : MEM_ALIGN (operands[0])))
7326 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
7327 ? 32
7328 : MEM_ALIGN (operands[1]))))
7329 && ! MEM_VOLATILE_P (operands [0])
7330 && ! MEM_VOLATILE_P (operands [1]))
7332 emit_move_insn (adjust_address (operands[0], SImode, 0),
7333 adjust_address (operands[1], SImode, 0));
7334 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
7335 adjust_address (copy_rtx (operands[1]), SImode, 4));
7336 return;
7339 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
7340 && !gpc_reg_operand (operands[1], mode))
7341 operands[1] = force_reg (mode, operands[1]);
7343 /* Recognize the case where operand[1] is a reference to thread-local
7344 data and load its address to a register. */
7345 if (rs6000_tls_referenced_p (operands[1]))
7347 enum tls_model model;
7348 rtx tmp = operands[1];
7349 rtx addend = NULL;
7351 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
7353 addend = XEXP (XEXP (tmp, 0), 1);
7354 tmp = XEXP (XEXP (tmp, 0), 0);
7357 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
7358 model = SYMBOL_REF_TLS_MODEL (tmp);
7359 gcc_assert (model != 0);
7361 tmp = rs6000_legitimize_tls_address (tmp, model);
7362 if (addend)
7364 tmp = gen_rtx_PLUS (mode, tmp, addend);
7365 tmp = force_operand (tmp, operands[0]);
7367 operands[1] = tmp;
7370 /* Handle the case where reload calls us with an invalid address. */
7371 if (reload_in_progress && mode == Pmode
7372 && (! general_operand (operands[1], mode)
7373 || ! nonimmediate_operand (operands[0], mode)))
7374 goto emit_set;
7376 /* 128-bit constant floating-point values on Darwin should really be
7377 loaded as two parts. */
7378 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
7379 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
7381 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
7382 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7383 DFmode);
7384 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7385 GET_MODE_SIZE (DFmode)),
7386 simplify_gen_subreg (DFmode, operands[1], mode,
7387 GET_MODE_SIZE (DFmode)),
7388 DFmode);
7389 return;
7392 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7393 cfun->machine->sdmode_stack_slot =
7394 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7396 if (reload_in_progress
7397 && mode == SDmode
7398 && cfun->machine->sdmode_stack_slot != NULL_RTX
7399 && MEM_P (operands[0])
7400 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7401 && REG_P (operands[1]))
7403 if (FP_REGNO_P (REGNO (operands[1])))
7405 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7406 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7407 emit_insn (gen_movsd_store (mem, operands[1]));
7409 else if (INT_REGNO_P (REGNO (operands[1])))
7411 rtx mem = adjust_address_nv (operands[0], mode, 4);
7412 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7413 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7415 else
7416 gcc_unreachable ();
7417 return;
7419 if (reload_in_progress
7420 && mode == SDmode
7421 && REG_P (operands[0])
7422 && MEM_P (operands[1])
7423 && cfun->machine->sdmode_stack_slot != NULL_RTX
7424 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7426 if (FP_REGNO_P (REGNO (operands[0])))
7428 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7429 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7430 emit_insn (gen_movsd_load (operands[0], mem));
7432 else if (INT_REGNO_P (REGNO (operands[0])))
7434 rtx mem = adjust_address_nv (operands[1], mode, 4);
7435 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7436 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7438 else
7439 gcc_unreachable ();
7440 return;
7443 /* FIXME: In the long term, this switch statement should go away
7444 and be replaced by a sequence of tests based on things like
7445 mode == Pmode. */
7446 switch (mode)
7448 case HImode:
7449 case QImode:
7450 if (CONSTANT_P (operands[1])
7451 && GET_CODE (operands[1]) != CONST_INT)
7452 operands[1] = force_const_mem (mode, operands[1]);
7453 break;
7455 case TFmode:
7456 case TDmode:
7457 rs6000_eliminate_indexed_memrefs (operands);
7458 /* fall through */
7460 case DFmode:
7461 case DDmode:
7462 case SFmode:
7463 case SDmode:
7464 if (CONSTANT_P (operands[1])
7465 && ! easy_fp_constant (operands[1], mode))
7466 operands[1] = force_const_mem (mode, operands[1]);
7467 break;
7469 case V16QImode:
7470 case V8HImode:
7471 case V4SFmode:
7472 case V4SImode:
7473 case V4HImode:
7474 case V2SFmode:
7475 case V2SImode:
7476 case V1DImode:
7477 case V2DFmode:
7478 case V2DImode:
7479 if (CONSTANT_P (operands[1])
7480 && !easy_vector_constant (operands[1], mode))
7481 operands[1] = force_const_mem (mode, operands[1]);
7482 break;
7484 case SImode:
7485 case DImode:
7486 /* Use the default pattern for the address of ELF small data. */
7487 if (TARGET_ELF
7488 && mode == Pmode
7489 && DEFAULT_ABI == ABI_V4
7490 && (GET_CODE (operands[1]) == SYMBOL_REF
7491 || GET_CODE (operands[1]) == CONST)
7492 && small_data_operand (operands[1], mode))
7494 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7495 return;
7498 if (DEFAULT_ABI == ABI_V4
7499 && mode == Pmode && mode == SImode /* i.e. Pmode is SImode, 32-bit. */
7500 && flag_pic == 1 && got_operand (operands[1], mode))
7502 emit_insn (gen_movsi_got (operands[0], operands[1]));
7503 return;
7506 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7507 && TARGET_NO_TOC
7508 && ! flag_pic
7509 && mode == Pmode
7510 && CONSTANT_P (operands[1])
7511 && GET_CODE (operands[1]) != HIGH
7512 && GET_CODE (operands[1]) != CONST_INT)
7514 rtx target = (!can_create_pseudo_p ()
7515 ? operands[0]
7516 : gen_reg_rtx (mode));
7518 /* If this is a function address on -mcall-aixdesc,
7519 convert it to the address of the descriptor. */
7520 if (DEFAULT_ABI == ABI_AIX
7521 && GET_CODE (operands[1]) == SYMBOL_REF
7522 && XSTR (operands[1], 0)[0] == '.')
7524 const char *name = XSTR (operands[1], 0);
7525 rtx new_ref;
7526 while (*name == '.')
7527 name++;
7528 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7529 CONSTANT_POOL_ADDRESS_P (new_ref)
7530 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7531 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7532 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7533 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7534 operands[1] = new_ref;
7537 if (DEFAULT_ABI == ABI_DARWIN)
7539 #if TARGET_MACHO
7540 if (MACHO_DYNAMIC_NO_PIC_P)
7542 /* Take care of any required data indirection. */
7543 operands[1] = rs6000_machopic_legitimize_pic_address (
7544 operands[1], mode, operands[0]);
7545 if (operands[0] != operands[1])
7546 emit_insn (gen_rtx_SET (VOIDmode,
7547 operands[0], operands[1]));
7548 return;
7550 #endif
7551 emit_insn (gen_macho_high (target, operands[1]));
7552 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7553 return;
7556 emit_insn (gen_elf_high (target, operands[1]));
7557 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7558 return;
7561 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7562 and we have put it in the TOC, we just need to make a TOC-relative
7563 reference to it. */
7564 if (TARGET_TOC
7565 && GET_CODE (operands[1]) == SYMBOL_REF
7566 && use_toc_relative_ref (operands[1]))
7567 operands[1] = create_TOC_reference (operands[1], operands[0]);
7568 else if (mode == Pmode
7569 && CONSTANT_P (operands[1])
7570 && GET_CODE (operands[1]) != HIGH
7571 && ((GET_CODE (operands[1]) != CONST_INT
7572 && ! easy_fp_constant (operands[1], mode))
7573 || (GET_CODE (operands[1]) == CONST_INT
7574 && (num_insns_constant (operands[1], mode)
7575 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7576 || (GET_CODE (operands[0]) == REG
7577 && FP_REGNO_P (REGNO (operands[0]))))
7578 && !toc_relative_expr_p (operands[1], false)
7579 && (TARGET_CMODEL == CMODEL_SMALL
7580 || can_create_pseudo_p ()
7581 || (REG_P (operands[0])
7582 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7585 #if TARGET_MACHO
7586 /* Darwin uses a special PIC legitimizer. */
7587 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7589 operands[1] =
7590 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7591 operands[0]);
7592 if (operands[0] != operands[1])
7593 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7594 return;
7596 #endif
7598 /* If we are to limit the number of things we put in the TOC and
7599 this is a symbol plus a constant we can add in one insn,
7600 just put the symbol in the TOC and add the constant. Don't do
7601 this if reload is in progress. */
7602 if (GET_CODE (operands[1]) == CONST
7603 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7604 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7605 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7606 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7607 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7608 && ! side_effects_p (operands[0]))
7610 rtx sym =
7611 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7612 rtx other = XEXP (XEXP (operands[1], 0), 1);
7614 sym = force_reg (mode, sym);
7615 emit_insn (gen_add3_insn (operands[0], sym, other));
7616 return;
7619 operands[1] = force_const_mem (mode, operands[1]);
7621 if (TARGET_TOC
7622 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7623 && constant_pool_expr_p (XEXP (operands[1], 0))
7624 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7625 get_pool_constant (XEXP (operands[1], 0)),
7626 get_pool_mode (XEXP (operands[1], 0))))
7628 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7629 operands[0]);
7630 operands[1] = gen_const_mem (mode, tocref);
7631 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7634 break;
7636 case TImode:
7637 if (!VECTOR_MEM_VSX_P (TImode))
7638 rs6000_eliminate_indexed_memrefs (operands);
7639 break;
7641 case PTImode:
7642 rs6000_eliminate_indexed_memrefs (operands);
7643 break;
7645 default:
7646 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7649 /* Above, we may have called force_const_mem which may have returned
7650 an invalid address. If we can, fix this up; otherwise, reload will
7651 have to deal with it. */
7652 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7653 operands[1] = validize_mem (operands[1]);
7655 emit_set:
7656 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7659 /* Return true if a structure, union or array containing FIELD should be
7660 accessed using `BLKmode'.
7662 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7663 entire thing in a DI and use subregs to access the internals.
7664 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7665 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7666 best thing to do is set structs to BLKmode and avoid Severe Tire
7667 Damage.
7669 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
7670 fit into 1, whereas DI still needs two. */
7672 static bool
7673 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7675 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7676 || (TARGET_E500_DOUBLE && mode == DFmode));
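/* Illustrative example (not from the original source): on an SPE
   target, a struct whose only member is a 2-element int vector,

       typedef int v2si __attribute__ ((vector_size (8)));
       struct s { v2si v; };

   would otherwise be given DImode; the hook above forces BLKmode so
   the V2SI value stays in a single GPR instead of becoming a
   (subreg:DI (reg:V2SI x)). */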
7679 /* Nonzero if we can use a floating-point register to pass this arg. */
7680 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7681 (SCALAR_FLOAT_MODE_P (MODE) \
7682 && (CUM)->fregno <= FP_ARG_MAX_REG \
7683 && TARGET_HARD_FLOAT && TARGET_FPRS)
7685 /* Nonzero if we can use an AltiVec register to pass this arg. */
7686 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7687 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7688 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7689 && TARGET_ALTIVEC_ABI \
7690 && (NAMED))
7692 /* Return a nonzero value to say to return the function value in
7693 memory, just as large structures are always returned. TYPE will be
7694 the data type of the value, and FNTYPE will be the type of the
7695 function doing the returning, or NULL for libcalls.
7697 The AIX ABI for the RS/6000 specifies that all structures are
7698 returned in memory. The Darwin ABI does the same.
7700 For the Darwin 64 Bit ABI, a function result can be returned in
7701 registers or in memory, depending on the size of the return data
7702 type. If it is returned in registers, the value occupies the same
7703 registers as it would if it were the first and only function
7704 argument. Otherwise, the function places its result in memory at
7705 the location pointed to by GPR3.
7707 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7708 but a draft put them in memory, and GCC used to implement the draft
7709 instead of the final standard. Therefore, aix_struct_return
7710 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7711 compatibility can change DRAFT_V4_STRUCT_RET to override the
7712 default, and -m switches get the final word. See
7713 rs6000_option_override_internal for more details.
7715 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
7716 long double support is enabled. These values are returned in memory.
7718 int_size_in_bytes returns -1 for variable size objects, which go in
7719 memory always. The cast to unsigned makes -1 > 8. */
7721 static bool
7722 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7724 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7725 if (TARGET_MACHO
7726 && rs6000_darwin64_abi
7727 && TREE_CODE (type) == RECORD_TYPE
7728 && int_size_in_bytes (type) > 0)
7730 CUMULATIVE_ARGS valcum;
7731 rtx valret;
7733 valcum.words = 0;
7734 valcum.fregno = FP_ARG_MIN_REG;
7735 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7736 /* Do a trial code generation as if this were going to be passed
7737 as an argument; if any part goes in memory, we return NULL. */
7738 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7739 if (valret)
7740 return false;
7741 /* Otherwise fall through to more conventional ABI rules. */
7744 if (AGGREGATE_TYPE_P (type)
7745 && (aix_struct_return
7746 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7747 return true;
7749 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7750 modes only exist for GCC vector types if -maltivec. */
7751 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7752 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7753 return false;
7755 /* Return synthetic vectors in memory. */
7756 if (TREE_CODE (type) == VECTOR_TYPE
7757 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7759 static bool warned_for_return_big_vectors = false;
7760 if (!warned_for_return_big_vectors)
7762 warning (0, "GCC vector returned by reference: "
7763 "non-standard ABI extension with no compatibility guarantee");
7764 warned_for_return_big_vectors = true;
7766 return true;
7769 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7770 return true;
7772 return false;
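/* Worked example (illustrative, assuming a 32-bit V.4 target with
   aix_struct_return clear): struct { int a, b; } is 8 bytes, which
   is not > 8 after the cast, so it is returned in r3/r4, while
   struct { int a[3]; } is 12 bytes and is returned in memory.  A
   variable-size object reports -1, which the unsigned cast turns
   into a huge value, so it always goes in memory. */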
7775 #ifdef HAVE_AS_GNU_ATTRIBUTE
7776 /* Return TRUE if a call to function FNDECL may be one that
7777 potentially affects the function calling ABI of the object file. */
7779 static bool
7780 call_ABI_of_interest (tree fndecl)
7782 if (cgraph_state == CGRAPH_STATE_EXPANSION)
7784 struct cgraph_node *c_node;
7786 /* Libcalls are always interesting. */
7787 if (fndecl == NULL_TREE)
7788 return true;
7790 /* Any call to an external function is interesting. */
7791 if (DECL_EXTERNAL (fndecl))
7792 return true;
7794 /* Interesting functions that we are emitting in this object file. */
7795 c_node = cgraph_get_node (fndecl);
7796 c_node = cgraph_function_or_thunk_node (c_node, NULL);
7797 return !cgraph_only_called_directly_p (c_node);
7799 return false;
7801 #endif
7803 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7804 for a call to a function whose data type is FNTYPE.
7805 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
7807 For incoming args we set the number of arguments in the prototype large
7808 so we never return a PARALLEL. */
7810 void
7811 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7812 rtx libname ATTRIBUTE_UNUSED, int incoming,
7813 int libcall, int n_named_args,
7814 tree fndecl ATTRIBUTE_UNUSED,
7815 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7817 static CUMULATIVE_ARGS zero_cumulative;
7819 *cum = zero_cumulative;
7820 cum->words = 0;
7821 cum->fregno = FP_ARG_MIN_REG;
7822 cum->vregno = ALTIVEC_ARG_MIN_REG;
7823 cum->prototype = (fntype && prototype_p (fntype));
7824 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7825 ? CALL_LIBCALL : CALL_NORMAL);
7826 cum->sysv_gregno = GP_ARG_MIN_REG;
7827 cum->stdarg = stdarg_p (fntype);
7829 cum->nargs_prototype = 0;
7830 if (incoming || cum->prototype)
7831 cum->nargs_prototype = n_named_args;
7833 /* Check for a longcall attribute. */
7834 if ((!fntype && rs6000_default_long_calls)
7835 || (fntype
7836 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7837 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7838 cum->call_cookie |= CALL_LONG;
7840 if (TARGET_DEBUG_ARG)
7842 fprintf (stderr, "\ninit_cumulative_args:");
7843 if (fntype)
7845 tree ret_type = TREE_TYPE (fntype);
7846 fprintf (stderr, " ret code = %s,",
7847 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7850 if (cum->call_cookie & CALL_LONG)
7851 fprintf (stderr, " longcall,");
7853 fprintf (stderr, " proto = %d, nargs = %d\n",
7854 cum->prototype, cum->nargs_prototype);
7857 #ifdef HAVE_AS_GNU_ATTRIBUTE
7858 if (DEFAULT_ABI == ABI_V4)
7860 cum->escapes = call_ABI_of_interest (fndecl);
7861 if (cum->escapes)
7863 tree return_type;
7865 if (fntype)
7867 return_type = TREE_TYPE (fntype);
7868 return_mode = TYPE_MODE (return_type);
7870 else
7871 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7873 if (return_type != NULL)
7875 if (TREE_CODE (return_type) == RECORD_TYPE
7876 && TYPE_TRANSPARENT_AGGR (return_type))
7878 return_type = TREE_TYPE (first_field (return_type));
7879 return_mode = TYPE_MODE (return_type);
7881 if (AGGREGATE_TYPE_P (return_type)
7882 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7883 <= 8))
7884 rs6000_returns_struct = true;
7886 if (SCALAR_FLOAT_MODE_P (return_mode))
7887 rs6000_passes_float = true;
7888 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7889 || SPE_VECTOR_MODE (return_mode))
7890 rs6000_passes_vector = true;
7893 #endif
7895 if (fntype
7896 && !TARGET_ALTIVEC
7897 && TARGET_ALTIVEC_ABI
7898 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7900 error ("cannot return value in vector register because"
7901 " altivec instructions are disabled, use -maltivec"
7902 " to enable them");
7906 /* Return true if TYPE must be passed on the stack and not in registers. */
7908 static bool
7909 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
7911 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
7912 return must_pass_in_stack_var_size (mode, type);
7913 else
7914 return must_pass_in_stack_var_size_or_pad (mode, type);
7917 /* If defined, a C expression which determines whether, and in which
7918 direction, to pad out an argument with extra space. The value
7919 should be of type `enum direction': either `upward' to pad above
7920 the argument, `downward' to pad below, or `none' to inhibit
7921 padding.
7923 For the AIX ABI, structs are always stored left-shifted in their
7924 argument slot. */
7926 enum direction
7927 function_arg_padding (enum machine_mode mode, const_tree type)
7929 #ifndef AGGREGATE_PADDING_FIXED
7930 #define AGGREGATE_PADDING_FIXED 0
7931 #endif
7932 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
7933 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
7934 #endif
7936 if (!AGGREGATE_PADDING_FIXED)
7938 /* GCC used to pass structures of the same size as integer types as
7939 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
7940 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
7941 passed padded downward, except that -mstrict-align further
7942 muddied the water in that multi-component structures of 2 and 4
7943 bytes in size were passed padded upward.
7945 The following arranges for best compatibility with previous
7946 versions of gcc, but removes the -mstrict-align dependency. */
7947 if (BYTES_BIG_ENDIAN)
7949 HOST_WIDE_INT size = 0;
7951 if (mode == BLKmode)
7953 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
7954 size = int_size_in_bytes (type);
7956 else
7957 size = GET_MODE_SIZE (mode);
7959 if (size == 1 || size == 2 || size == 4)
7960 return downward;
7962 return upward;
7965 if (AGGREGATES_PAD_UPWARD_ALWAYS)
7967 if (type != 0 && AGGREGATE_TYPE_P (type))
7968 return upward;
7971 /* Fall back to the default. */
7972 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
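/* Worked example (illustrative): with !AGGREGATE_PADDING_FIXED on a
   big-endian target, a BLKmode struct of size 1, 2 or 4 yields
   `downward' (padded below, like an integer of that size), while a
   3-byte struct yields `upward'. */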
7975 /* If defined, a C expression that gives the alignment boundary, in bits,
7976 of an argument with the specified mode and type. If it is not defined,
7977 PARM_BOUNDARY is used for all arguments.
7979 V.4 wants long longs and doubles to be double word aligned. Just
7980 testing the mode size is a boneheaded way to do this as it means
7981 that other types such as complex int are also double word aligned.
7982 However, we're stuck with this because changing the ABI might break
7983 existing library interfaces.
7985 Doubleword align SPE vectors.
7986 Quadword align Altivec/VSX vectors.
7987 Quadword align large synthetic vector types. */
7989 static unsigned int
7990 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7992 if (DEFAULT_ABI == ABI_V4
7993 && (GET_MODE_SIZE (mode) == 8
7994 || (TARGET_HARD_FLOAT
7995 && TARGET_FPRS
7996 && (mode == TFmode || mode == TDmode))))
7997 return 64;
7998 else if (SPE_VECTOR_MODE (mode)
7999 || (type && TREE_CODE (type) == VECTOR_TYPE
8000 && int_size_in_bytes (type) >= 8
8001 && int_size_in_bytes (type) < 16))
8002 return 64;
8003 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8004 || (type && TREE_CODE (type) == VECTOR_TYPE
8005 && int_size_in_bytes (type) >= 16))
8006 return 128;
8007 else if (TARGET_MACHO
8008 && rs6000_darwin64_abi
8009 && mode == BLKmode
8010 && type && TYPE_ALIGN (type) > 64)
8011 return 128;
8012 else
8013 return PARM_BOUNDARY;
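/* Illustrative summary of the rules above (a sketch, not exhaustive):
     V.4 8-byte args (and TFmode/TDmode with hard float)  -> 64
     SPE vector, or synthetic vector of 8-15 bytes        -> 64
     AltiVec/VSX vector, or vector of >= 16 bytes         -> 128
     Darwin64 BLKmode aggregate aligned beyond 64 bits    -> 128
     anything else                                        -> PARM_BOUNDARY */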
8016 /* For a function parm of MODE and TYPE, return the starting word in
8017 the parameter area. NWORDS of the parameter area are already used. */
8019 static unsigned int
8020 rs6000_parm_start (enum machine_mode mode, const_tree type,
8021 unsigned int nwords)
8023 unsigned int align;
8024 unsigned int parm_offset;
8026 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
8027 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
8028 return nwords + (-(parm_offset + nwords) & align);
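/* Worked example (illustrative; assumes PARM_BOUNDARY == 32 on a
   32-bit V.4 target): a 16-byte-aligned argument gives
   align = 128 / 32 - 1 = 3 and parm_offset = 2, so with nwords == 3
   the result is 3 + (-(2 + 3) & 3) == 6; three words are skipped so
   that the argument starts on a 16-byte boundary. */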
8031 /* Compute the size (in words) of a function argument. */
8033 static unsigned long
8034 rs6000_arg_size (enum machine_mode mode, const_tree type)
8036 unsigned long size;
8038 if (mode != BLKmode)
8039 size = GET_MODE_SIZE (mode);
8040 else
8041 size = int_size_in_bytes (type);
8043 if (TARGET_32BIT)
8044 return (size + 3) >> 2;
8045 else
8046 return (size + 7) >> 3;
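/* Worked example (illustrative): a 10-byte BLKmode struct gives
   size == 10, hence (10 + 3) >> 2 == 3 words on 32-bit targets and
   (10 + 7) >> 3 == 2 words on 64-bit targets. */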
8049 /* Flush pending int fields in CUM, counting the GPR words they occupy. */
8051 static void
8052 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
8053 HOST_WIDE_INT bitpos, int final)
8055 unsigned int startbit, endbit;
8056 int intregs, intoffset;
8057 enum machine_mode mode;
8059 /* Handle the situations where a float is taking up the first half
8060 of the GPR, and the other half is empty (typically due to
8061 alignment restrictions). We can detect this by an 8-byte-aligned
8062 int field, or by seeing that this is the final flush for this
8063 argument. Count the word and continue on. */
8064 if (cum->floats_in_gpr == 1
8065 && (cum->intoffset % 64 == 0
8066 || (cum->intoffset == -1 && final)))
8068 cum->words++;
8069 cum->floats_in_gpr = 0;
8072 if (cum->intoffset == -1)
8073 return;
8075 intoffset = cum->intoffset;
8076 cum->intoffset = -1;
8077 cum->floats_in_gpr = 0;
8079 if (intoffset % BITS_PER_WORD != 0)
8081 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8082 MODE_INT, 0);
8083 if (mode == BLKmode)
8085 /* We couldn't find an appropriate mode, which happens,
8086 e.g., in packed structs when there are 3 bytes to load.
8087 Move intoffset back to the beginning of the word in this
8088 case. */
8089 intoffset = intoffset & -BITS_PER_WORD;
8093 startbit = intoffset & -BITS_PER_WORD;
8094 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8095 intregs = (endbit - startbit) / BITS_PER_WORD;
8096 cum->words += intregs;
8097 /* words should be unsigned. */
8098 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
8100 int pad = (endbit/BITS_PER_WORD) - cum->words;
8101 cum->words += pad;
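/* Worked example (illustrative, BITS_PER_WORD == 64): flushing int
   fields that started at bit 72 with bitpos == 160 gives
   startbit = 72 & -64 == 64 and endbit = (160 + 63) & -64 == 192,
   so intregs = (192 - 64) / 64 == 2 GPR words are counted. */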
8105 /* The darwin64 ABI calls for us to recurse down through structs,
8106 looking for elements passed in registers. Unfortunately, we have
8107 to track int register count here also because of misalignments
8108 in powerpc alignment mode. */
8110 static void
8111 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
8112 const_tree type,
8113 HOST_WIDE_INT startbitpos)
8115 tree f;
8117 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8118 if (TREE_CODE (f) == FIELD_DECL)
8120 HOST_WIDE_INT bitpos = startbitpos;
8121 tree ftype = TREE_TYPE (f);
8122 enum machine_mode mode;
8123 if (ftype == error_mark_node)
8124 continue;
8125 mode = TYPE_MODE (ftype);
8127 if (DECL_SIZE (f) != 0
8128 && host_integerp (bit_position (f), 1))
8129 bitpos += int_bit_position (f);
8131 /* ??? FIXME: else assume zero offset. */
8133 if (TREE_CODE (ftype) == RECORD_TYPE)
8134 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
8135 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
8137 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
8138 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
8139 cum->fregno += n_fpregs;
8140 /* Single-precision floats present a special problem for
8141 us, because they are smaller than an 8-byte GPR, and so
8142 the structure-packing rules combined with the standard
8143 varargs behavior mean that we want to pack float/float
8144 and float/int combinations into a single register's
8145 space. This is complicated by the arg advance flushing,
8146 which works on arbitrarily large groups of int-type
8147 fields. */
8148 if (mode == SFmode)
8150 if (cum->floats_in_gpr == 1)
8152 /* Two floats in a word; count the word and reset
8153 the float count. */
8154 cum->words++;
8155 cum->floats_in_gpr = 0;
8157 else if (bitpos % 64 == 0)
8159 /* A float at the beginning of an 8-byte word;
8160 count it and put off adjusting cum->words until
8161 we see if an arg advance flush is going to do it
8162 for us. */
8163 cum->floats_in_gpr++;
8165 else
8167 /* The float is at the end of a word, preceded
8168 by integer fields, so the arg advance flush
8169 just above has already set cum->words and
8170 everything is taken care of. */
8173 else
8174 cum->words += n_fpregs;
8176 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
8178 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
8179 cum->vregno++;
8180 cum->words += 2;
8182 else if (cum->intoffset == -1)
8183 cum->intoffset = bitpos;
8187 /* Check for an item that needs to be considered specially under the darwin 64
8188 bit ABI. These are record types where the mode is BLKmode or the structure is
8189 8 bytes in size. */
8190 static int
8191 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
8193 return rs6000_darwin64_abi
8194 && ((mode == BLKmode
8195 && TREE_CODE (type) == RECORD_TYPE
8196 && int_size_in_bytes (type) > 0)
8197 || (type && TREE_CODE (type) == RECORD_TYPE
8198 && int_size_in_bytes (type) == 8)) ? 1 : 0;
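/* Example (illustrative): any record of exactly 8 bytes, e.g.
   struct { int a, b; }, checks true here regardless of its mode,
   and any BLKmode record of positive size checks true as well. */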
8201 /* Update the data in CUM to advance over an argument
8202 of mode MODE and data type TYPE.
8203 (TYPE is null for libcalls where that information may not be available.)
8205 Note that for args passed by reference, function_arg will be called
8206 with MODE and TYPE set to that of the pointer to the arg, not the arg
8207 itself. */
8209 static void
8210 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8211 const_tree type, bool named, int depth)
8213 /* Only tick off an argument if we're not recursing. */
8214 if (depth == 0)
8215 cum->nargs_prototype--;
8217 #ifdef HAVE_AS_GNU_ATTRIBUTE
8218 if (DEFAULT_ABI == ABI_V4
8219 && cum->escapes)
8221 if (SCALAR_FLOAT_MODE_P (mode))
8222 rs6000_passes_float = true;
8223 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
8224 rs6000_passes_vector = true;
8225 else if (SPE_VECTOR_MODE (mode)
8226 && !cum->stdarg
8227 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8228 rs6000_passes_vector = true;
8230 #endif
8232 if (TARGET_ALTIVEC_ABI
8233 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8234 || (type && TREE_CODE (type) == VECTOR_TYPE
8235 && int_size_in_bytes (type) == 16)))
8237 bool stack = false;
8239 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8241 cum->vregno++;
8242 if (!TARGET_ALTIVEC)
8243 error ("cannot pass argument in vector register because"
8244 " altivec instructions are disabled, use -maltivec"
8245 " to enable them");
8247 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
8248 even if it is going to be passed in a vector register.
8249 Darwin does the same for variable-argument functions. */
8250 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
8251 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
8252 stack = true;
8254 else
8255 stack = true;
8257 if (stack)
8259 int align;
8261 /* Vector parameters must be 16-byte aligned. This places
8262 them at 2 mod 4 in terms of words in 32-bit mode, since
8263 the parameter save area starts at offset 24 from the
8264 stack. In 64-bit mode, they just have to start on an
8265 even word, since the parameter save area is 16-byte
8266 aligned. Space for GPRs is reserved even if the argument
8267 will be passed in memory. */
8268 if (TARGET_32BIT)
8269 align = (2 - cum->words) & 3;
8270 else
8271 align = cum->words & 1;
8272 cum->words += align + rs6000_arg_size (mode, type);
8274 if (TARGET_DEBUG_ARG)
8276 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
8277 cum->words, align);
8278 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
8279 cum->nargs_prototype, cum->prototype,
8280 GET_MODE_NAME (mode));
8284 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
8285 && !cum->stdarg
8286 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8287 cum->sysv_gregno++;
8289 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8291 int size = int_size_in_bytes (type);
8292 /* Variable sized types have size == -1 and are
8293 treated as if consisting entirely of ints.
8294 Pad to 16 byte boundary if needed. */
8295 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8296 && (cum->words % 2) != 0)
8297 cum->words++;
8298 /* For varargs, we can just go up by the size of the struct. */
8299 if (!named)
8300 cum->words += (size + 7) / 8;
8301 else
8303 /* It is tempting to say int register count just goes up by
8304 sizeof(type)/8, but this is wrong in a case such as
8305 { int; double; int; } [powerpc alignment]. We have to
8306 grovel through the fields for these too. */
8307 cum->intoffset = 0;
8308 cum->floats_in_gpr = 0;
8309 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
8310 rs6000_darwin64_record_arg_advance_flush (cum,
8311 size * BITS_PER_UNIT, 1);
8313 if (TARGET_DEBUG_ARG)
8315 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
8316 cum->words, TYPE_ALIGN (type), size);
8317 fprintf (stderr,
8318 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
8319 cum->nargs_prototype, cum->prototype,
8320 GET_MODE_NAME (mode));
8323 else if (DEFAULT_ABI == ABI_V4)
8325 if (TARGET_HARD_FLOAT && TARGET_FPRS
8326 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8327 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8328 || (mode == TFmode && !TARGET_IEEEQUAD)
8329 || mode == SDmode || mode == DDmode || mode == TDmode))
8331 /* _Decimal128 must use an even/odd register pair. This assumes
8332 that the register number is odd when fregno is odd. */
8333 if (mode == TDmode && (cum->fregno % 2) == 1)
8334 cum->fregno++;
8336 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8337 <= FP_ARG_V4_MAX_REG)
8338 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8339 else
8341 cum->fregno = FP_ARG_V4_MAX_REG + 1;
8342 if (mode == DFmode || mode == TFmode
8343 || mode == DDmode || mode == TDmode)
8344 cum->words += cum->words & 1;
8345 cum->words += rs6000_arg_size (mode, type);
8348 else
8350 int n_words = rs6000_arg_size (mode, type);
8351 int gregno = cum->sysv_gregno;
8353 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8354 (r7,r8) or (r9,r10). So is any other 2-word item, such
8355 as complex int, due to a historical mistake.
8356 if (n_words == 2)
8357 gregno += (1 - gregno) & 1;
8359 /* Multi-reg args are not split between registers and stack. */
8360 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8362 /* Long long and SPE vectors are aligned on the stack.
8363 So are other 2-word items, such as complex int, due to
8364 a historical mistake. */
8365 if (n_words == 2)
8366 cum->words += cum->words & 1;
8367 cum->words += n_words;
8370 /* Note: we keep accumulating gregno even after we have started
8371 spilling to the stack; this lets expand_builtin_saveregs see
8372 that spilling has started. */
8373 cum->sysv_gregno = gregno + n_words;
8376 if (TARGET_DEBUG_ARG)
8378 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8379 cum->words, cum->fregno);
8380 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
8381 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
8382 fprintf (stderr, "mode = %4s, named = %d\n",
8383 GET_MODE_NAME (mode), named);
8386 else
8388 int n_words = rs6000_arg_size (mode, type);
8389 int start_words = cum->words;
8390 int align_words = rs6000_parm_start (mode, type, start_words);
8392 cum->words = align_words + n_words;
8394 if (SCALAR_FLOAT_MODE_P (mode)
8395 && TARGET_HARD_FLOAT && TARGET_FPRS)
8397 /* _Decimal128 must be passed in an even/odd float register pair.
8398 This assumes that the register number is odd when fregno is
8399 odd. */
8400 if (mode == TDmode && (cum->fregno % 2) == 1)
8401 cum->fregno++;
8402 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8405 if (TARGET_DEBUG_ARG)
8407 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8408 cum->words, cum->fregno);
8409 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8410 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8411 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8412 named, align_words - start_words, depth);
8417 static void
8418 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8419 const_tree type, bool named)
8421 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8425 static rtx
8426 spe_build_register_parallel (enum machine_mode mode, int gregno)
8428 rtx r1, r3, r5, r7;
8430 switch (mode)
8432 case DFmode:
8433 r1 = gen_rtx_REG (DImode, gregno);
8434 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8435 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8437 case DCmode:
8438 case TFmode:
8439 r1 = gen_rtx_REG (DImode, gregno);
8440 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8441 r3 = gen_rtx_REG (DImode, gregno + 2);
8442 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8443 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8445 case TCmode:
8446 r1 = gen_rtx_REG (DImode, gregno);
8447 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8448 r3 = gen_rtx_REG (DImode, gregno + 2);
8449 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8450 r5 = gen_rtx_REG (DImode, gregno + 4);
8451 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8452 r7 = gen_rtx_REG (DImode, gregno + 6);
8453 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8454 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8456 default:
8457 gcc_unreachable ();
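/* Illustrative result (a sketch): for DCmode starting at r5, the
   code above builds a PARALLEL describing two DImode pieces, r5 at
   byte offset 0 and r7 at byte offset 8. */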
8461 /* Determine where to put a SIMD argument on the SPE. */
8462 static rtx
8463 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8464 const_tree type)
8466 int gregno = cum->sysv_gregno;
8468 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8469 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8470 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8471 || mode == DCmode || mode == TCmode))
8473 int n_words = rs6000_arg_size (mode, type);
8475 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8476 if (mode == DFmode)
8477 gregno += (1 - gregno) & 1;
8479 /* Multi-reg args are not split between registers and stack. */
8480 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8481 return NULL_RTX;
8483 return spe_build_register_parallel (mode, gregno);
8485 if (cum->stdarg)
8487 int n_words = rs6000_arg_size (mode, type);
8489 /* SPE vectors are put in odd registers. */
8490 if (n_words == 2 && (gregno & 1) == 0)
8491 gregno += 1;
8493 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8495 rtx r1, r2;
8496 enum machine_mode m = SImode;
8498 r1 = gen_rtx_REG (m, gregno);
8499 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8500 r2 = gen_rtx_REG (m, gregno + 1);
8501 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8502 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8504 else
8505 return NULL_RTX;
8507 else
8509 if (gregno <= GP_ARG_MAX_REG)
8510 return gen_rtx_REG (mode, gregno);
8511 else
8512 return NULL_RTX;
8516 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8517 structure between cum->intoffset and bitpos to integer registers. */
8519 static void
8520 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8521 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8523 enum machine_mode mode;
8524 unsigned int regno;
8525 unsigned int startbit, endbit;
8526 int this_regno, intregs, intoffset;
8527 rtx reg;
8529 if (cum->intoffset == -1)
8530 return;
8532 intoffset = cum->intoffset;
8533 cum->intoffset = -1;
8535 /* If this is the trailing part of a word, try to only load that
8536 much into the register. Otherwise load the whole register. Note
8537 that in the latter case we may pick up unwanted bits. It's not a
8538 problem at the moment, but we may wish to revisit this. */
8540 if (intoffset % BITS_PER_WORD != 0)
8542 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8543 MODE_INT, 0);
8544 if (mode == BLKmode)
8546 /* We couldn't find an appropriate mode, which happens,
8547 e.g., in packed structs when there are 3 bytes to load.
8548 Move intoffset back to the beginning of the word in this
8549 case. */
8550 intoffset = intoffset & -BITS_PER_WORD;
8551 mode = word_mode;
8554 else
8555 mode = word_mode;
8557 startbit = intoffset & -BITS_PER_WORD;
8558 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8559 intregs = (endbit - startbit) / BITS_PER_WORD;
8560 this_regno = cum->words + intoffset / BITS_PER_WORD;
8562 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8563 cum->use_stack = 1;
8565 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8566 if (intregs <= 0)
8567 return;
8569 intoffset /= BITS_PER_UNIT;
8572 regno = GP_ARG_MIN_REG + this_regno;
8573 reg = gen_rtx_REG (mode, regno);
8574 rvec[(*k)++] =
8575 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8577 this_regno += 1;
8578 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8579 mode = word_mode;
8580 intregs -= 1;
8582 while (intregs > 0);
8585 /* Recursive workhorse for the following. */
8587 static void
8588 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8589 HOST_WIDE_INT startbitpos, rtx rvec[],
8590 int *k)
8592 tree f;
8594 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8595 if (TREE_CODE (f) == FIELD_DECL)
8597 HOST_WIDE_INT bitpos = startbitpos;
8598 tree ftype = TREE_TYPE (f);
8599 enum machine_mode mode;
8600 if (ftype == error_mark_node)
8601 continue;
8602 mode = TYPE_MODE (ftype);
8604 if (DECL_SIZE (f) != 0
8605 && host_integerp (bit_position (f), 1))
8606 bitpos += int_bit_position (f);
8608 /* ??? FIXME: else assume zero offset. */
8610 if (TREE_CODE (ftype) == RECORD_TYPE)
8611 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8612 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8614 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8615 #if 0
8616 switch (mode)
8618 case SCmode: mode = SFmode; break;
8619 case DCmode: mode = DFmode; break;
8620 case TCmode: mode = TFmode; break;
8621 default: break;
8623 #endif
8624 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8625 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8627 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8628 && (mode == TFmode || mode == TDmode));
8629 /* Long double or _Decimal128 split over regs and memory. */
8630 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8631 cum->use_stack = 1;
8633 rvec[(*k)++]
8634 = gen_rtx_EXPR_LIST (VOIDmode,
8635 gen_rtx_REG (mode, cum->fregno++),
8636 GEN_INT (bitpos / BITS_PER_UNIT));
8637 if (mode == TFmode || mode == TDmode)
8638 cum->fregno++;
8640 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8642 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8643 rvec[(*k)++]
8644 = gen_rtx_EXPR_LIST (VOIDmode,
8645 gen_rtx_REG (mode, cum->vregno++),
8646 GEN_INT (bitpos / BITS_PER_UNIT));
8648 else if (cum->intoffset == -1)
8649 cum->intoffset = bitpos;
8653 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8654 the register(s) to be used for each field and subfield of a struct
8655 being passed by value, along with the offset of where the
8656 register's value may be found in the block. FP fields go in FP
8657 registers, vector fields go in vector registers, and everything
8658 else goes in int registers, packed as in memory.
8660 This code is also used for function return values. RETVAL indicates
8661 whether this is the case.
8663 Much of this is taken from the SPARC V9 port, which has a similar
8664 calling convention. */
8666 static rtx
8667 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8668 bool named, bool retval)
8670 rtx rvec[FIRST_PSEUDO_REGISTER];
8671 int k = 1, kbase = 1;
8672 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8673 /* This is a copy; modifications are not visible to our caller. */
8674 CUMULATIVE_ARGS copy_cum = *orig_cum;
8675 CUMULATIVE_ARGS *cum = &copy_cum;
8677 /* Pad to 16 byte boundary if needed. */
8678 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8679 && (cum->words % 2) != 0)
8680 cum->words++;
8682 cum->intoffset = 0;
8683 cum->use_stack = 0;
8684 cum->named = named;
8686 /* Put entries into rvec[] for individual FP and vector fields, and
8687 for the chunks of memory that go in int regs. Note we start at
8688 element 1; 0 is reserved for an indication of using memory, and
8689 may or may not be filled in below. */
8690 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8691 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8693 /* If any part of the struct went on the stack put all of it there.
8694 This hack is because the generic code for
8695 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8696 parts of the struct are not at the beginning. */
8697 if (cum->use_stack)
8699 if (retval)
8700 return NULL_RTX; /* doesn't go in registers at all */
8701 kbase = 0;
8702 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8704 if (k > 1 || cum->use_stack)
8705 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8706 else
8707 return NULL_RTX;
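/* Sketch of a result (illustrative): for a Darwin64 argument of type
   struct { double d; long i; }, the recursion above emits the double
   in the next FP register at offset 0, the flush emits one word_mode
   GPR piece at offset 8, and the returned PARALLEL combines the two.
   If any piece had spilled, element 0 (the NULL_RTX memory marker)
   would be included as well. */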
8710 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8712 static rtx
8713 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8714 int align_words)
8716 int n_units;
8717 int i, k;
8718 rtx rvec[GP_ARG_NUM_REG + 1];
8720 if (align_words >= GP_ARG_NUM_REG)
8721 return NULL_RTX;
8723 n_units = rs6000_arg_size (mode, type);
8725 /* Optimize the simple case where the arg fits in one gpr, except in
8726 the case of BLKmode due to assign_parms assuming that registers are
8727 BITS_PER_WORD wide. */
8728 if (n_units == 0
8729 || (n_units == 1 && mode != BLKmode))
8730 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8732 k = 0;
8733 if (align_words + n_units > GP_ARG_NUM_REG)
8734 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8735 using a magic NULL_RTX component.
8736 This is not strictly correct. Only some of the arg belongs in
8737 memory, not all of it. However, the normal scheme using
8738 function_arg_partial_nregs can result in unusual subregs, e.g.
8739 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8740 store the whole arg to memory is often more efficient than code
8741 to store pieces, and we know that space is available in the right
8742 place for the whole arg. */
8743 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8745 i = 0;
8748 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
8749 rtx off = GEN_INT (i++ * 4);
8750 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8752 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8754 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
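/* Illustrative result: if a 2-unit argument starts in the last GPR
   word (align_words == GP_ARG_NUM_REG - 1), the PARALLEL holds the
   NULL_RTX memory marker plus one SImode piece in the final GPR,
   saying the argument lives in memory with its first 4 bytes also
   passed in that register. */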
8757 /* Determine where to put an argument to a function.
8758 Value is zero to push the argument on the stack,
8759 or a hard register in which to store the argument.
8761 MODE is the argument's machine mode.
8762 TYPE is the data type of the argument (as a tree).
8763 This is null for libcalls where that information may
8764 not be available.
8765 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8766 the preceding args and about the function being called. It is
8767 not modified in this routine.
8768 NAMED is nonzero if this argument is a named parameter
8769 (otherwise it is an extra parameter matching an ellipsis).
8771 On RS/6000 the first eight words of non-FP are normally in registers
8772 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8773 Under V.4, the first 8 FP args are in registers.
8775 If this is floating-point and no prototype is specified, we use
8776 both an FP and integer register (or possibly FP reg and stack). Library
8777 functions (when CALL_LIBCALL is set) always have the proper types for args,
8778 so we can pass the FP value just in one register. emit_library_function
8779 doesn't support PARALLEL anyway.
8781 Note that for args passed by reference, function_arg will be called
8782 with MODE and TYPE set to that of the pointer to the arg, not the arg
8783 itself. */
8785 static rtx
8786 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8787 const_tree type, bool named)
8789 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8790 enum rs6000_abi abi = DEFAULT_ABI;
8792 /* Return a marker to indicate whether the CR1 bit that V.4 uses to
8793 say fp args were passed in registers needs to be set or cleared.
8794 Assume that we don't need the marker for software floating point,
8795 or compiler generated library calls. */
8796 if (mode == VOIDmode)
8798 if (abi == ABI_V4
8799 && (cum->call_cookie & CALL_LIBCALL) == 0
8800 && (cum->stdarg
8801 || (cum->nargs_prototype < 0
8802 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8804 /* For the SPE, we need to crxor CR6 always. */
8805 if (TARGET_SPE_ABI)
8806 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8807 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8808 return GEN_INT (cum->call_cookie
8809 | ((cum->fregno == FP_ARG_MIN_REG)
8810 ? CALL_V4_SET_FP_ARGS
8811 : CALL_V4_CLEAR_FP_ARGS));
8814 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8817 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8819 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8820 if (rslt != NULL_RTX)
8821 return rslt;
8822 /* Else fall through to usual handling. */
8825 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8826 if (TARGET_64BIT && ! cum->prototype)
8828 /* Vector parameters get passed in vector register
8829 and also in GPRs or memory, in absence of prototype. */
8830 int align_words;
8831 rtx slot;
8832 align_words = (cum->words + 1) & ~1;
8834 if (align_words >= GP_ARG_NUM_REG)
8836 slot = NULL_RTX;
8838 else
8840 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8842 return gen_rtx_PARALLEL (mode,
8843 gen_rtvec (2,
8844 gen_rtx_EXPR_LIST (VOIDmode,
8845 slot, const0_rtx),
8846 gen_rtx_EXPR_LIST (VOIDmode,
8847 gen_rtx_REG (mode, cum->vregno),
8848 const0_rtx)));
8850 else
8851 return gen_rtx_REG (mode, cum->vregno);
8852 else if (TARGET_ALTIVEC_ABI
8853 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8854 || (type && TREE_CODE (type) == VECTOR_TYPE
8855 && int_size_in_bytes (type) == 16)))
8857 if (named || abi == ABI_V4)
8858 return NULL_RTX;
8859 else
8861 /* Vector parameters to varargs functions under AIX or Darwin
8862 get passed in memory and possibly also in GPRs. */
8863 int align, align_words, n_words;
8864 enum machine_mode part_mode;
8866 /* Vector parameters must be 16-byte aligned. This places them at
8867 2 mod 4 in terms of words in 32-bit mode, since the parameter
8868 save area starts at offset 24 from the stack. In 64-bit mode,
8869 they just have to start on an even word, since the parameter
8870 save area is 16-byte aligned. */
8871 if (TARGET_32BIT)
8872 align = (2 - cum->words) & 3;
8873 else
8874 align = cum->words & 1;
8875 align_words = cum->words + align;
8877 /* Out of registers? Memory, then. */
8878 if (align_words >= GP_ARG_NUM_REG)
8879 return NULL_RTX;
8881 if (TARGET_32BIT && TARGET_POWERPC64)
8882 return rs6000_mixed_function_arg (mode, type, align_words);
8884 /* The vector value goes in GPRs. Only the part of the
8885 value in GPRs is reported here. */
8886 part_mode = mode;
8887 n_words = rs6000_arg_size (mode, type);
8888 if (align_words + n_words > GP_ARG_NUM_REG)
8889 /* Fortunately, there are only two possibilities, the value
8890 is either wholly in GPRs or half in GPRs and half not. */
8891 part_mode = DImode;
8893 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8896 else if (TARGET_SPE_ABI && TARGET_SPE
8897 && (SPE_VECTOR_MODE (mode)
8898 || (TARGET_E500_DOUBLE && (mode == DFmode
8899 || mode == DCmode
8900 || mode == TFmode
8901 || mode == TCmode))))
8902 return rs6000_spe_function_arg (cum, mode, type);
8904 else if (abi == ABI_V4)
8906 if (TARGET_HARD_FLOAT && TARGET_FPRS
8907 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8908 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8909 || (mode == TFmode && !TARGET_IEEEQUAD)
8910 || mode == SDmode || mode == DDmode || mode == TDmode))
8912 /* _Decimal128 must use an even/odd register pair. This assumes
8913 that the register number is odd when fregno is odd. */
8914 if (mode == TDmode && (cum->fregno % 2) == 1)
8915 cum->fregno++;
8917 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8918 <= FP_ARG_V4_MAX_REG)
8919 return gen_rtx_REG (mode, cum->fregno);
8920 else
8921 return NULL_RTX;
8923 else
8925 int n_words = rs6000_arg_size (mode, type);
8926 int gregno = cum->sysv_gregno;
8928 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8929 (r7,r8) or (r9,r10). So is any other 2-word item, such
8930 as complex int, due to a historical mistake.
8931 if (n_words == 2)
8932 gregno += (1 - gregno) & 1;
8934 /* Multi-reg args are not split between registers and stack. */
8935 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8936 return NULL_RTX;
8938 if (TARGET_32BIT && TARGET_POWERPC64)
8939 return rs6000_mixed_function_arg (mode, type,
8940 gregno - GP_ARG_MIN_REG);
8941 return gen_rtx_REG (mode, gregno);
8944 else
8946 int align_words = rs6000_parm_start (mode, type, cum->words);
8948 /* _Decimal128 must be passed in an even/odd float register pair.
8949 This assumes that the register number is odd when fregno is odd. */
8950 if (mode == TDmode && (cum->fregno % 2) == 1)
8951 cum->fregno++;
8953 if (USE_FP_FOR_ARG_P (cum, mode, type))
8955 rtx rvec[GP_ARG_NUM_REG + 1];
8956 rtx r;
8957 int k;
8958 bool needs_psave;
8959 enum machine_mode fmode = mode;
8960 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8962 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8964 /* Currently, we only ever need one reg here because complex
8965 doubles are split. */
8966 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8967 && (fmode == TFmode || fmode == TDmode));
8969 /* Long double or _Decimal128 split over regs and memory. */
8970 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8973 /* Do we also need to pass this arg in the parameter save
8974 area? */
8975 needs_psave = (type
8976 && (cum->nargs_prototype <= 0
8977 || (DEFAULT_ABI == ABI_AIX
8978 && TARGET_XL_COMPAT
8979 && align_words >= GP_ARG_NUM_REG)));
8981 if (!needs_psave && mode == fmode)
8982 return gen_rtx_REG (fmode, cum->fregno);
8984 k = 0;
8985 if (needs_psave)
8987 /* Describe the part that goes in gprs or the stack.
8988 This piece must come first, before the fprs. */
8989 if (align_words < GP_ARG_NUM_REG)
8991 unsigned long n_words = rs6000_arg_size (mode, type);
8993 if (align_words + n_words > GP_ARG_NUM_REG
8994 || (TARGET_32BIT && TARGET_POWERPC64))
8996 /* If this is partially on the stack, then we only
8997 include the portion actually in registers here. */
8998 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8999 rtx off;
9000 int i = 0;
9001 if (align_words + n_words > GP_ARG_NUM_REG)
9002 /* Not all of the arg fits in gprs. Say that it
9003 goes in memory too, using a magic NULL_RTX
9004 component. Also see comment in
9005 rs6000_mixed_function_arg for why the normal
9006 function_arg_partial_nregs scheme doesn't work
9007 in this case. */
9008 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
9009 const0_rtx);
9012 r = gen_rtx_REG (rmode,
9013 GP_ARG_MIN_REG + align_words);
9014 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
9015 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9017 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
9019 else
9021 /* The whole arg fits in gprs. */
9022 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9023 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9026 else
9027 /* It's entirely in memory. */
9028 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9031 /* Describe where this piece goes in the fprs. */
9032 r = gen_rtx_REG (fmode, cum->fregno);
9033 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
9035 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
9037 else if (align_words < GP_ARG_NUM_REG)
9039 if (TARGET_32BIT && TARGET_POWERPC64)
9040 return rs6000_mixed_function_arg (mode, type, align_words);
9042 if (mode == BLKmode)
9043 mode = Pmode;
9045 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9047 else
9048 return NULL_RTX;
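/* Worked example (illustrative): a `double' passed to an
   unprototyped function (cum->nargs_prototype <= 0) makes
   needs_psave true, so the PARALLEL built above describes the value
   both in the GPR/memory slot and in an FPR, implementing the
   "both an FP and integer register" rule from the comment above. */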
9052 /* For an arg passed partly in registers and partly in memory, this is
9053 the number of bytes passed in registers. For args passed entirely in
9054 registers or entirely in memory, zero. When an arg is described by a
9055 PARALLEL, perhaps using more than one register type, this function
9056 returns the number of bytes used by the first element of the PARALLEL. */
9058 static int
9059 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9060 tree type, bool named)
9062 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9063 int ret = 0;
9064 int align_words;
9066 if (DEFAULT_ABI == ABI_V4)
9067 return 0;
9069 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
9070 && cum->nargs_prototype >= 0)
9071 return 0;
9073 /* In this complicated case we just disable the partial_nregs code. */
9074 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9075 return 0;
9077 align_words = rs6000_parm_start (mode, type, cum->words);
9079 if (USE_FP_FOR_ARG_P (cum, mode, type))
9081 /* If we are passing this arg in the fixed parameter save area
9082 (gprs or memory) as well as fprs, then this function should
9083 return the number of partial bytes passed in the parameter
9084 save area rather than partial bytes passed in fprs. */
9085 if (type
9086 && (cum->nargs_prototype <= 0
9087 || (DEFAULT_ABI == ABI_AIX
9088 && TARGET_XL_COMPAT
9089 && align_words >= GP_ARG_NUM_REG)))
9090 return 0;
9091 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
9092 > FP_ARG_MAX_REG + 1)
9093 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
9094 else if (cum->nargs_prototype >= 0)
9095 return 0;
9098 if (align_words < GP_ARG_NUM_REG
9099 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
9100 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
9102 if (ret != 0 && TARGET_DEBUG_ARG)
9103 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
9105 return ret;
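/* Worked example (illustrative, 64-bit, 8 GPR argument words): an
   aggregate argument of 3 words starting at align_words == 7 fits
   only one word in registers, so ret = (8 - 7) * 8 == 8 bytes are
   reported as passed in registers, with the remainder in memory. */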
9108 /* A C expression that indicates when an argument must be passed by
9109 reference. If nonzero for an argument, a copy of that argument is
9110 made in memory and a pointer to the argument is passed instead of
9111 the argument itself. The pointer is passed in whatever way is
9112 appropriate for passing a pointer to that type.
9114 Under V.4, aggregates and long double are passed by reference.
9116 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
9117 reference unless the AltiVec vector extension ABI is in force.
9119 As an extension to all ABIs, variable sized types are passed by
9120 reference. */
9122 static bool
9123 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
9124 enum machine_mode mode, const_tree type,
9125 bool named ATTRIBUTE_UNUSED)
9127 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
9129 if (TARGET_DEBUG_ARG)
9130 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
9131 return 1;
9134 if (!type)
9135 return 0;
9137 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
9139 if (TARGET_DEBUG_ARG)
9140 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
9141 return 1;
9144 if (int_size_in_bytes (type) < 0)
9146 if (TARGET_DEBUG_ARG)
9147 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
9148 return 1;
9151 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9152 modes only exist for GCC vector types if -maltivec. */
9153 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
9155 if (TARGET_DEBUG_ARG)
9156 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
9157 return 1;
9160 /* Pass synthetic vectors in memory. */
9161 if (TREE_CODE (type) == VECTOR_TYPE
9162 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
9164 static bool warned_for_pass_big_vectors = false;
9165 if (TARGET_DEBUG_ARG)
9166 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
9167 if (!warned_for_pass_big_vectors)
9169 warning (0, "GCC vector passed by reference: "
9170 "non-standard ABI extension with no compatibility guarantee");
9171 warned_for_pass_big_vectors = true;
9173 return 1;
9176 return 0;
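/* Caller-side illustration of the "synthetic vector" rule above (a
   hedged sketch; the typedef is only an example): a GCC vector wider
   than the 16-byte AltiVec limit (8 bytes without the AltiVec ABI) is
   passed by reference, with the one-time warning emitted above.  */
#if 0
typedef int v8si __attribute__ ((vector_size (32)));   /* 32 bytes */
void callee (v8si v);      /* GCC really passes &v and dereferences it */
#endif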
9179 static void
9180 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
9182 int i;
9183 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
9185 if (nregs == 0)
9186 return;
9188 for (i = 0; i < nregs; i++)
9190 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
9191 if (reload_completed)
9193 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
9194 tem = NULL_RTX;
9195 else
9196 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
9197 i * GET_MODE_SIZE (reg_mode));
9199 else
9200 tem = replace_equiv_address (tem, XEXP (tem, 0));
9202 gcc_assert (tem);
9204 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
9208 /* Perform any actions needed for a function that is receiving a
9209 variable number of arguments.
9211 CUM is as above.
9213 MODE and TYPE are the mode and type of the current parameter.
9215 PRETEND_SIZE is a variable that should be set to the amount of stack
9216 that must be pushed by the prolog to pretend that our caller pushed it.
9219 Normally, this macro will push all remaining incoming registers on the
9220 stack and set PRETEND_SIZE to the length of the registers pushed. */
9222 static void
9223 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
9224 tree type, int *pretend_size ATTRIBUTE_UNUSED,
9225 int no_rtl)
9227 CUMULATIVE_ARGS next_cum;
9228 int reg_size = TARGET_32BIT ? 4 : 8;
9229 rtx save_area = NULL_RTX, mem;
9230 int first_reg_offset;
9231 alias_set_type set;
9233 /* Skip the last named argument. */
9234 next_cum = *get_cumulative_args (cum);
9235 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
9237 if (DEFAULT_ABI == ABI_V4)
9239 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
9241 if (! no_rtl)
9243 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
9244 HOST_WIDE_INT offset = 0;
9246 /* Try to optimize the size of the varargs save area.
9247 The ABI requires that ap.reg_save_area is doubleword
9248 aligned, but we don't need to allocate space for all
9249 the bytes, only those to which we actually will save
9250 anything. */
9251 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
9252 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
9253 if (TARGET_HARD_FLOAT && TARGET_FPRS
9254 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9255 && cfun->va_list_fpr_size)
9257 if (gpr_reg_num)
9258 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
9259 * UNITS_PER_FP_WORD;
9260 if (cfun->va_list_fpr_size
9261 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9262 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
9263 else
9264 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9265 * UNITS_PER_FP_WORD;
9267 if (gpr_reg_num)
9269 offset = -((first_reg_offset * reg_size) & ~7);
9270 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
9272 gpr_reg_num = cfun->va_list_gpr_size;
9273 if (reg_size == 4 && (first_reg_offset & 1))
9274 gpr_reg_num++;
9276 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
9278 else if (fpr_size)
9279 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
9280 * UNITS_PER_FP_WORD
9281 - (int) (GP_ARG_NUM_REG * reg_size);
9283 if (gpr_size + fpr_size)
9285 rtx reg_save_area
9286 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
9287 gcc_assert (GET_CODE (reg_save_area) == MEM);
9288 reg_save_area = XEXP (reg_save_area, 0);
9289 if (GET_CODE (reg_save_area) == PLUS)
9291 gcc_assert (XEXP (reg_save_area, 0)
9292 == virtual_stack_vars_rtx);
9293 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
9294 offset += INTVAL (XEXP (reg_save_area, 1));
9296 else
9297 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
9300 cfun->machine->varargs_save_offset = offset;
9301 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
9304 else
9306 first_reg_offset = next_cum.words;
9307 save_area = virtual_incoming_args_rtx;
9309 if (targetm.calls.must_pass_in_stack (mode, type))
9310 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
9313 set = get_varargs_alias_set ();
9314 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
9315 && cfun->va_list_gpr_size)
9317 int nregs = GP_ARG_NUM_REG - first_reg_offset;
9319 if (va_list_gpr_counter_field)
9321 /* V4 va_list_gpr_size counts number of registers needed. */
9322 if (nregs > cfun->va_list_gpr_size)
9323 nregs = cfun->va_list_gpr_size;
9325 else
9327 /* char * va_list instead counts number of bytes needed. */
9328 if (nregs > cfun->va_list_gpr_size / reg_size)
9329 nregs = cfun->va_list_gpr_size / reg_size;
9332 mem = gen_rtx_MEM (BLKmode,
9333 plus_constant (Pmode, save_area,
9334 first_reg_offset * reg_size));
9335 MEM_NOTRAP_P (mem) = 1;
9336 set_mem_alias_set (mem, set);
9337 set_mem_align (mem, BITS_PER_WORD);
9339 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
9340 nregs);
9343 /* Save FP registers if needed. */
9344 if (DEFAULT_ABI == ABI_V4
9345 && TARGET_HARD_FLOAT && TARGET_FPRS
9346 && ! no_rtl
9347 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9348 && cfun->va_list_fpr_size)
9350 int fregno = next_cum.fregno, nregs;
9351 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
9352 rtx lab = gen_label_rtx ();
9353 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
9354 * UNITS_PER_FP_WORD);
9356 emit_jump_insn
9357 (gen_rtx_SET (VOIDmode,
9358 pc_rtx,
9359 gen_rtx_IF_THEN_ELSE (VOIDmode,
9360 gen_rtx_NE (VOIDmode, cr1,
9361 const0_rtx),
9362 gen_rtx_LABEL_REF (VOIDmode, lab),
9363 pc_rtx)));
9365 for (nregs = 0;
9366 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
9367 fregno++, off += UNITS_PER_FP_WORD, nregs++)
9369 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9370 ? DFmode : SFmode,
9371 plus_constant (Pmode, save_area, off));
9372 MEM_NOTRAP_P (mem) = 1;
9373 set_mem_alias_set (mem, set);
9374 set_mem_align (mem, GET_MODE_ALIGNMENT (
9375 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9376 ? DFmode : SFmode));
9377 emit_move_insn (mem, gen_rtx_REG (
9378 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9379 ? DFmode : SFmode, fregno));
9382 emit_label (lab);
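/* Layout produced above for the V.4 register save area (a sketch under
   32-bit SysV assumptions: reg_size == 4, eight parameter GPRs, FPRs
   f1..f8, UNITS_PER_FP_WORD == 8):

       reg_save_area + 0  : images of r3..r10  (up to 32 bytes)
       reg_save_area + 32 : images of f1..f8   (up to 64 bytes)

   The branch on CR1 skips the FPR stores when the caller signalled, via
   the SVR4 ABI's CR bit 6 convention, that no floating-point arguments
   were passed in registers.  */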
9386 /* Create the va_list data type. */
9388 static tree
9389 rs6000_build_builtin_va_list (void)
9391 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9393 /* For AIX, prefer 'char *' because that's what the system
9394 header files like. */
9395 if (DEFAULT_ABI != ABI_V4)
9396 return build_pointer_type (char_type_node);
9398 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9399 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9400 get_identifier ("__va_list_tag"), record);
9402 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9403 unsigned_char_type_node);
9404 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9405 unsigned_char_type_node);
9406 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9407 every user file. */
9408 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9409 get_identifier ("reserved"), short_unsigned_type_node);
9410 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9411 get_identifier ("overflow_arg_area"),
9412 ptr_type_node);
9413 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9414 get_identifier ("reg_save_area"),
9415 ptr_type_node);
9417 va_list_gpr_counter_field = f_gpr;
9418 va_list_fpr_counter_field = f_fpr;
9420 DECL_FIELD_CONTEXT (f_gpr) = record;
9421 DECL_FIELD_CONTEXT (f_fpr) = record;
9422 DECL_FIELD_CONTEXT (f_res) = record;
9423 DECL_FIELD_CONTEXT (f_ovf) = record;
9424 DECL_FIELD_CONTEXT (f_sav) = record;
9426 TYPE_STUB_DECL (record) = type_decl;
9427 TYPE_NAME (record) = type_decl;
9428 TYPE_FIELDS (record) = f_gpr;
9429 DECL_CHAIN (f_gpr) = f_fpr;
9430 DECL_CHAIN (f_fpr) = f_res;
9431 DECL_CHAIN (f_res) = f_ovf;
9432 DECL_CHAIN (f_ovf) = f_sav;
9434 layout_type (record);
9436 /* The correct type is an array type of one element. */
9437 return build_array_type (record, build_index_type (size_zero_node));
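/* For reference, the V.4 type built above corresponds roughly to this
   declaration (a sketch of the SysV PowerPC va_list, not a quote of any
   header):

     typedef struct __va_list_tag
     {
       unsigned char gpr;          -- next GPR to use, counts 0..8
       unsigned char fpr;          -- next FPR to use, counts 0..8
       unsigned short reserved;    -- the named padding above
       void *overflow_arg_area;    -- arguments that went to the stack
       void *reg_save_area;        -- block filled by setup_incoming_varargs
     } __va_list_tag;
     typedef __va_list_tag va_list[1];  */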
9440 /* Implement va_start. */
9442 static void
9443 rs6000_va_start (tree valist, rtx nextarg)
9445 HOST_WIDE_INT words, n_gpr, n_fpr;
9446 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9447 tree gpr, fpr, ovf, sav, t;
9449 /* Only SVR4 needs something special. */
9450 if (DEFAULT_ABI != ABI_V4)
9452 std_expand_builtin_va_start (valist, nextarg);
9453 return;
9456 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9457 f_fpr = DECL_CHAIN (f_gpr);
9458 f_res = DECL_CHAIN (f_fpr);
9459 f_ovf = DECL_CHAIN (f_res);
9460 f_sav = DECL_CHAIN (f_ovf);
9462 valist = build_simple_mem_ref (valist);
9463 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9464 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9465 f_fpr, NULL_TREE);
9466 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9467 f_ovf, NULL_TREE);
9468 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9469 f_sav, NULL_TREE);
9471 /* Count number of gp and fp argument registers used. */
9472 words = crtl->args.info.words;
9473 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9474 GP_ARG_NUM_REG);
9475 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9476 FP_ARG_NUM_REG);
9478 if (TARGET_DEBUG_ARG)
9479 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9480 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9481 words, n_gpr, n_fpr);
9483 if (cfun->va_list_gpr_size)
9485 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9486 build_int_cst (NULL_TREE, n_gpr));
9487 TREE_SIDE_EFFECTS (t) = 1;
9488 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9491 if (cfun->va_list_fpr_size)
9493 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9494 build_int_cst (NULL_TREE, n_fpr));
9495 TREE_SIDE_EFFECTS (t) = 1;
9496 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9498 #ifdef HAVE_AS_GNU_ATTRIBUTE
9499 if (call_ABI_of_interest (cfun->decl))
9500 rs6000_passes_float = true;
9501 #endif
9504 /* Find the overflow area. */
9505 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9506 if (words != 0)
9507 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9508 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9509 TREE_SIDE_EFFECTS (t) = 1;
9510 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9512 /* If there were no va_arg invocations, don't set up the register
9513 save area. */
9514 if (!cfun->va_list_gpr_size
9515 && !cfun->va_list_fpr_size
9516 && n_gpr < GP_ARG_NUM_REG
9517 && n_fpr < FP_ARG_V4_MAX_REG)
9518 return;
9520 /* Find the register save area. */
9521 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9522 if (cfun->machine->varargs_save_offset)
9523 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9524 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9525 TREE_SIDE_EFFECTS (t) = 1;
9526 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
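/* Example of the counters stored above (a sketch): for
   void f (int a, double b, ...) under V.4, the named arguments consume
   one GPR (r3) and one FPR (f1), so va_start records gpr = 1 and
   fpr = 1, points overflow_arg_area at the incoming stack arguments,
   and reg_save_area at the block set up by setup_incoming_varargs.  */
#if 0
void f (int a, double b, ...);     /* gpr = 1, fpr = 1 after va_start */
#endif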
9529 /* Implement va_arg. */
9531 static tree
9532 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9533 gimple_seq *post_p)
9535 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9536 tree gpr, fpr, ovf, sav, reg, t, u;
9537 int size, rsize, n_reg, sav_ofs, sav_scale;
9538 tree lab_false, lab_over, addr;
9539 int align;
9540 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9541 int regalign = 0;
9542 gimple stmt;
9544 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9546 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9547 return build_va_arg_indirect_ref (t);
9550 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9551 earlier version of gcc, with the property that it always applied alignment
9552 adjustments to the va-args (even for zero-sized types). The cheapest way
9553 to deal with this is to replicate the effect of the part of
9554 std_gimplify_va_arg_expr that carries out the align adjust, for the
9555 relevant case.
9556 We don't need to check for pass-by-reference because of the test above.
9557 We can return a simplified answer, since we know there's no offset to add. */
9559 if (TARGET_MACHO
9560 && rs6000_darwin64_abi
9561 && integer_zerop (TYPE_SIZE (type)))
9563 unsigned HOST_WIDE_INT align, boundary;
9564 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9565 align = PARM_BOUNDARY / BITS_PER_UNIT;
9566 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9567 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9568 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9569 boundary /= BITS_PER_UNIT;
9570 if (boundary > align)
9572 tree t;
9573 /* This updates arg ptr by the amount that would be necessary
9574 to align the zero-sized (but not zero-alignment) item. */
9575 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9576 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9577 gimplify_and_add (t, pre_p);
9579 t = fold_convert (sizetype, valist_tmp);
9580 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9581 fold_convert (TREE_TYPE (valist),
9582 fold_build2 (BIT_AND_EXPR, sizetype, t,
9583 size_int (-boundary))));
9584 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9585 gimplify_and_add (t, pre_p);
9587 /* Since it is zero-sized there's no increment for the item itself. */
9588 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9589 return build_va_arg_indirect_ref (valist_tmp);
9592 if (DEFAULT_ABI != ABI_V4)
9594 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9596 tree elem_type = TREE_TYPE (type);
9597 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9598 int elem_size = GET_MODE_SIZE (elem_mode);
9600 if (elem_size < UNITS_PER_WORD)
9602 tree real_part, imag_part;
9603 gimple_seq post = NULL;
9605 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9606 &post);
9607 /* Copy the value into a temporary, lest the formal temporary
9608 be reused out from under us. */
9609 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9610 gimple_seq_add_seq (pre_p, post);
9612 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9613 post_p);
9615 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9619 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9622 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9623 f_fpr = DECL_CHAIN (f_gpr);
9624 f_res = DECL_CHAIN (f_fpr);
9625 f_ovf = DECL_CHAIN (f_res);
9626 f_sav = DECL_CHAIN (f_ovf);
9628 valist = build_va_arg_indirect_ref (valist);
9629 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9630 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9631 f_fpr, NULL_TREE);
9632 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9633 f_ovf, NULL_TREE);
9634 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9635 f_sav, NULL_TREE);
9637 size = int_size_in_bytes (type);
9638 rsize = (size + 3) / 4;
9639 align = 1;
9641 if (TARGET_HARD_FLOAT && TARGET_FPRS
9642 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9643 || (TARGET_DOUBLE_FLOAT
9644 && (TYPE_MODE (type) == DFmode
9645 || TYPE_MODE (type) == TFmode
9646 || TYPE_MODE (type) == SDmode
9647 || TYPE_MODE (type) == DDmode
9648 || TYPE_MODE (type) == TDmode))))
9650 /* FP args go in FP registers, if present. */
9651 reg = fpr;
9652 n_reg = (size + 7) / 8;
9653 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9654 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9655 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9656 align = 8;
9658 else
9660 /* Otherwise into GP registers. */
9661 reg = gpr;
9662 n_reg = rsize;
9663 sav_ofs = 0;
9664 sav_scale = 4;
9665 if (n_reg == 2)
9666 align = 8;
9669 /* Pull the value out of the saved registers.... */
9671 lab_over = NULL;
9672 addr = create_tmp_var (ptr_type_node, "addr");
9674 /* With -mabi=altivec, AltiVec vectors never come from the va_list register save area. */
9675 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9676 align = 16;
9677 else
9679 lab_false = create_artificial_label (input_location);
9680 lab_over = create_artificial_label (input_location);
9682 /* Long long and SPE vectors are aligned in the registers.
9683 So is any other 2-gpr item, such as complex int, due to a
9684 historical mistake. */
9685 u = reg;
9686 if (n_reg == 2 && reg == gpr)
9688 regalign = 1;
9689 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9690 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9691 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9692 unshare_expr (reg), u);
9694 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9695 reg number is 0 for f1, so we want to make it odd. */
9696 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9698 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9699 build_int_cst (TREE_TYPE (reg), 1));
9700 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
9703 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9704 t = build2 (GE_EXPR, boolean_type_node, u, t);
9705 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9706 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9707 gimplify_and_add (t, pre_p);
9709 t = sav;
9710 if (sav_ofs)
9711 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9713 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9714 build_int_cst (TREE_TYPE (reg), n_reg));
9715 u = fold_convert (sizetype, u);
9716 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9717 t = fold_build_pointer_plus (t, u);
9719 /* _Decimal32 varargs are located in the second word of the 64-bit
9720 FP register for 32-bit binaries. */
9721 if (!TARGET_POWERPC64
9722 && TARGET_HARD_FLOAT && TARGET_FPRS
9723 && TYPE_MODE (type) == SDmode)
9724 t = fold_build_pointer_plus_hwi (t, size);
9726 gimplify_assign (addr, t, pre_p);
9728 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9730 stmt = gimple_build_label (lab_false);
9731 gimple_seq_add_stmt (pre_p, stmt);
9733 if ((n_reg == 2 && !regalign) || n_reg > 2)
9735 /* Ensure that we don't find any more args in regs.
9736 Alignment has already taken care of the special cases. */
9737 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9741 /* ... otherwise out of the overflow area. */
9743 /* Care for on-stack alignment if needed. */
9744 t = ovf;
9745 if (align != 1)
9747 t = fold_build_pointer_plus_hwi (t, align - 1);
9748 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9749 build_int_cst (TREE_TYPE (t), -align));
9751 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9753 gimplify_assign (unshare_expr (addr), t, pre_p);
9755 t = fold_build_pointer_plus_hwi (t, size);
9756 gimplify_assign (unshare_expr (ovf), t, pre_p);
9758 if (lab_over)
9760 stmt = gimple_build_label (lab_over);
9761 gimple_seq_add_stmt (pre_p, stmt);
9764 if (STRICT_ALIGNMENT
9765 && (TYPE_ALIGN (type)
9766 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9768 /* The value (of type complex double, for example) may not be
9769 aligned in memory in the saved registers, so copy via a
9770 temporary. (This is the same code as used for SPARC.) */
9771 tree tmp = create_tmp_var (type, "va_arg_tmp");
9772 tree dest_addr = build_fold_addr_expr (tmp);
9774 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9775 3, dest_addr, addr, size_int (rsize * 4));
9777 gimplify_and_add (copy, pre_p);
9778 addr = dest_addr;
9781 addr = fold_convert (ptrtype, addr);
9782 return build_va_arg_indirect_ref (addr);
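/* Shape of the V.4 code emitted above for va_arg (ap, double), written
   as pseudo-C (a sketch; assumes hard float, so n_reg == 1,
   sav_ofs == 32 and sav_scale == 8):

     if (ap->fpr >= 8) goto lab_false;            -- FPRs exhausted?
     addr = ap->reg_save_area + 32 + ap->fpr * 8;
     ap->fpr += 1;
     goto lab_over;
   lab_false:
     addr = (ap->overflow_arg_area + 7) & -8;     -- align == 8 for DFmode
     ap->overflow_arg_area = addr + 8;
   lab_over:
     result = *(double *) addr;  */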
9785 /* Builtins. */
9787 static void
9788 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9790 tree t;
9791 unsigned classify = rs6000_builtin_info[(int)code].attr;
9792 const char *attr_string = "";
9794 gcc_assert (name != NULL);
9795 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
9797 if (rs6000_builtin_decls[(int)code])
9798 fatal_error ("internal error: builtin function %s already processed", name);
9800 rs6000_builtin_decls[(int)code] = t =
9801 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9803 /* Set any special attributes. */
9804 if ((classify & RS6000_BTC_CONST) != 0)
9806 /* const function, function only depends on the inputs. */
9807 TREE_READONLY (t) = 1;
9808 TREE_NOTHROW (t) = 1;
9809 attr_string = ", pure";
9811 else if ((classify & RS6000_BTC_PURE) != 0)
9813 /* pure function, function can read global memory, but does not set any
9814 external state. */
9815 DECL_PURE_P (t) = 1;
9816 TREE_NOTHROW (t) = 1;
9817 attr_string = ", const";
9819 else if ((classify & RS6000_BTC_FP) != 0)
9821 /* Function is a math function. If rounding mode is on, then treat the
9822 function as not reading global memory, but it can have arbitrary side
9823 effects. If it is off, then assume the function is a const function.
9824 This mimics the ATTR_MATHFN_FPROUNDING attribute in
9825 builtin-attrs.def that is used for the math functions. */
9826 TREE_NOTHROW (t) = 1;
9827 if (flag_rounding_math)
9829 DECL_PURE_P (t) = 1;
9830 DECL_IS_NOVOPS (t) = 1;
9831 attr_string = ", fp, pure";
9833 else
9835 TREE_READONLY (t) = 1;
9836 attr_string = ", fp, const";
9839 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9840 gcc_unreachable ();
9842 if (TARGET_DEBUG_BUILTIN)
9843 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9844 (int)code, name, attr_string);
9847 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9849 #undef RS6000_BUILTIN_1
9850 #undef RS6000_BUILTIN_2
9851 #undef RS6000_BUILTIN_3
9852 #undef RS6000_BUILTIN_A
9853 #undef RS6000_BUILTIN_D
9854 #undef RS6000_BUILTIN_E
9855 #undef RS6000_BUILTIN_P
9856 #undef RS6000_BUILTIN_Q
9857 #undef RS6000_BUILTIN_S
9858 #undef RS6000_BUILTIN_X
9860 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9861 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9862 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9863 { MASK, ICODE, NAME, ENUM },
9865 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9866 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9867 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9868 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9869 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9870 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9871 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9873 static const struct builtin_description bdesc_3arg[] =
9875 #include "rs6000-builtin.def"
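/* How the table above gets its rows (an illustrative expansion; the
   entry shown is only an example): with RS6000_BUILTIN_3 alone defined
   to emit an initializer, a line in rs6000-builtin.def such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       MASK, ATTR, ICODE)

   expands here to

     { MASK, ICODE, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   while every other RS6000_BUILTIN_* line expands to nothing.  The same
   #undef/#define dance is repeated below with a different macro selected
   for each table.  */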
9878 /* DST operations: void foo (void *, const int, const char). */
9880 #undef RS6000_BUILTIN_1
9881 #undef RS6000_BUILTIN_2
9882 #undef RS6000_BUILTIN_3
9883 #undef RS6000_BUILTIN_A
9884 #undef RS6000_BUILTIN_D
9885 #undef RS6000_BUILTIN_E
9886 #undef RS6000_BUILTIN_P
9887 #undef RS6000_BUILTIN_Q
9888 #undef RS6000_BUILTIN_S
9889 #undef RS6000_BUILTIN_X
9891 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9892 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9893 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9894 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9895 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9896 { MASK, ICODE, NAME, ENUM },
9898 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9899 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9900 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9901 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9902 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9904 static const struct builtin_description bdesc_dst[] =
9906 #include "rs6000-builtin.def"
9909 /* Simple binary operations: VECc = foo (VECa, VECb). */
9911 #undef RS6000_BUILTIN_1
9912 #undef RS6000_BUILTIN_2
9913 #undef RS6000_BUILTIN_3
9914 #undef RS6000_BUILTIN_A
9915 #undef RS6000_BUILTIN_D
9916 #undef RS6000_BUILTIN_E
9917 #undef RS6000_BUILTIN_P
9918 #undef RS6000_BUILTIN_Q
9919 #undef RS6000_BUILTIN_S
9920 #undef RS6000_BUILTIN_X
9922 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9923 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9924 { MASK, ICODE, NAME, ENUM },
9926 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9927 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9928 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9929 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9930 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9931 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9932 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9933 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9935 static const struct builtin_description bdesc_2arg[] =
9937 #include "rs6000-builtin.def"
9940 #undef RS6000_BUILTIN_1
9941 #undef RS6000_BUILTIN_2
9942 #undef RS6000_BUILTIN_3
9943 #undef RS6000_BUILTIN_A
9944 #undef RS6000_BUILTIN_D
9945 #undef RS6000_BUILTIN_E
9946 #undef RS6000_BUILTIN_P
9947 #undef RS6000_BUILTIN_Q
9948 #undef RS6000_BUILTIN_S
9949 #undef RS6000_BUILTIN_X
9951 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9952 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9953 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9954 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9955 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9956 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9957 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9958 { MASK, ICODE, NAME, ENUM },
9960 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9961 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9962 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9964 /* AltiVec predicates. */
9966 static const struct builtin_description bdesc_altivec_preds[] =
9968 #include "rs6000-builtin.def"
9971 /* SPE predicates. */
9972 #undef RS6000_BUILTIN_1
9973 #undef RS6000_BUILTIN_2
9974 #undef RS6000_BUILTIN_3
9975 #undef RS6000_BUILTIN_A
9976 #undef RS6000_BUILTIN_D
9977 #undef RS6000_BUILTIN_E
9978 #undef RS6000_BUILTIN_P
9979 #undef RS6000_BUILTIN_Q
9980 #undef RS6000_BUILTIN_S
9981 #undef RS6000_BUILTIN_X
9983 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9984 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9985 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9986 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9987 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9988 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9989 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9990 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9991 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9992 { MASK, ICODE, NAME, ENUM },
9994 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9996 static const struct builtin_description bdesc_spe_predicates[] =
9998 #include "rs6000-builtin.def"
10001 /* SPE evsel predicates. */
10002 #undef RS6000_BUILTIN_1
10003 #undef RS6000_BUILTIN_2
10004 #undef RS6000_BUILTIN_3
10005 #undef RS6000_BUILTIN_A
10006 #undef RS6000_BUILTIN_D
10007 #undef RS6000_BUILTIN_E
10008 #undef RS6000_BUILTIN_P
10009 #undef RS6000_BUILTIN_Q
10010 #undef RS6000_BUILTIN_S
10011 #undef RS6000_BUILTIN_X
10013 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10014 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10015 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10016 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10017 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10018 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
10019 { MASK, ICODE, NAME, ENUM },
10021 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10022 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10023 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10024 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10026 static const struct builtin_description bdesc_spe_evsel[] =
10028 #include "rs6000-builtin.def"
10031 /* PAIRED predicates. */
10032 #undef RS6000_BUILTIN_1
10033 #undef RS6000_BUILTIN_2
10034 #undef RS6000_BUILTIN_3
10035 #undef RS6000_BUILTIN_A
10036 #undef RS6000_BUILTIN_D
10037 #undef RS6000_BUILTIN_E
10038 #undef RS6000_BUILTIN_P
10039 #undef RS6000_BUILTIN_Q
10040 #undef RS6000_BUILTIN_S
10041 #undef RS6000_BUILTIN_X
10043 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10044 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10045 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10046 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10047 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10048 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10049 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10050 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
10051 { MASK, ICODE, NAME, ENUM },
10053 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10054 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10056 static const struct builtin_description bdesc_paired_preds[] =
10058 #include "rs6000-builtin.def"
10061 /* ABS* operations. */
10063 #undef RS6000_BUILTIN_1
10064 #undef RS6000_BUILTIN_2
10065 #undef RS6000_BUILTIN_3
10066 #undef RS6000_BUILTIN_A
10067 #undef RS6000_BUILTIN_D
10068 #undef RS6000_BUILTIN_E
10069 #undef RS6000_BUILTIN_P
10070 #undef RS6000_BUILTIN_Q
10071 #undef RS6000_BUILTIN_S
10072 #undef RS6000_BUILTIN_X
10074 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
10075 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10076 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10077 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
10078 { MASK, ICODE, NAME, ENUM },
10080 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10081 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10082 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10083 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10084 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10085 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10087 static const struct builtin_description bdesc_abs[] =
10089 #include "rs6000-builtin.def"
10092 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
10093 foo (VECa). */
10095 #undef RS6000_BUILTIN_1
10096 #undef RS6000_BUILTIN_2
10097 #undef RS6000_BUILTIN_3
10098 #undef RS6000_BUILTIN_A
10099 #undef RS6000_BUILTIN_E
10100 #undef RS6000_BUILTIN_D
10101 #undef RS6000_BUILTIN_P
10102 #undef RS6000_BUILTIN_Q
10103 #undef RS6000_BUILTIN_S
10104 #undef RS6000_BUILTIN_X
10106 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
10107 { MASK, ICODE, NAME, ENUM },
10109 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
10110 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
10111 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
10112 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
10113 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
10114 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
10115 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
10116 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
10117 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
10119 static const struct builtin_description bdesc_1arg[] =
10121 #include "rs6000-builtin.def"
10124 #undef RS6000_BUILTIN_1
10125 #undef RS6000_BUILTIN_2
10126 #undef RS6000_BUILTIN_3
10127 #undef RS6000_BUILTIN_A
10128 #undef RS6000_BUILTIN_D
10129 #undef RS6000_BUILTIN_E
10130 #undef RS6000_BUILTIN_P
10131 #undef RS6000_BUILTIN_Q
10132 #undef RS6000_BUILTIN_S
10133 #undef RS6000_BUILTIN_X
10135 /* Return true if a builtin function is overloaded. */
10136 bool
10137 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
10139 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
10142 /* Expand an expression EXP that calls a builtin without arguments. */
10143 static rtx
10144 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
10146 rtx pat;
10147 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10149 if (icode == CODE_FOR_nothing)
10150 /* Builtin not supported on this processor. */
10151 return 0;
10153 if (target == 0
10154 || GET_MODE (target) != tmode
10155 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10156 target = gen_reg_rtx (tmode);
10158 pat = GEN_FCN (icode) (target);
10159 if (! pat)
10160 return 0;
10161 emit_insn (pat);
10163 return target;
10167 static rtx
10168 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
10170 rtx pat;
10171 tree arg0 = CALL_EXPR_ARG (exp, 0);
10172 rtx op0 = expand_normal (arg0);
10173 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10174 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10176 if (icode == CODE_FOR_nothing)
10177 /* Builtin not supported on this processor. */
10178 return 0;
10180 /* If we got invalid arguments bail out before generating bad rtl. */
10181 if (arg0 == error_mark_node)
10182 return const0_rtx;
10184 if (icode == CODE_FOR_altivec_vspltisb
10185 || icode == CODE_FOR_altivec_vspltish
10186 || icode == CODE_FOR_altivec_vspltisw
10187 || icode == CODE_FOR_spe_evsplatfi
10188 || icode == CODE_FOR_spe_evsplati)
10190 /* Only allow 5-bit *signed* literals. */
10191 if (GET_CODE (op0) != CONST_INT
10192 || INTVAL (op0) > 15
10193 || INTVAL (op0) < -16)
10195 error ("argument 1 must be a 5-bit signed literal");
10196 return const0_rtx;
10200 if (target == 0
10201 || GET_MODE (target) != tmode
10202 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10203 target = gen_reg_rtx (tmode);
10205 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10206 op0 = copy_to_mode_reg (mode0, op0);
10208 pat = GEN_FCN (icode) (target, op0);
10209 if (! pat)
10210 return 0;
10211 emit_insn (pat);
10213 return target;
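/* User-visible effect of the 5-bit-literal check above (a sketch; the
   vec_splat_s32 spelling comes from altivec.h and maps to vspltisw):  */
#if 0
#include <altivec.h>
vector signed int ok  = vec_splat_s32 (-16);   /* in range -16..15: OK */
vector signed int bad = vec_splat_s32 (99);    /* error: argument 1 must
                                                  be a 5-bit signed literal */
#endif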
10216 static rtx
10217 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
10219 rtx pat, scratch1, scratch2;
10220 tree arg0 = CALL_EXPR_ARG (exp, 0);
10221 rtx op0 = expand_normal (arg0);
10222 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10223 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10225 /* If we have invalid arguments, bail out before generating bad rtl. */
10226 if (arg0 == error_mark_node)
10227 return const0_rtx;
10229 if (target == 0
10230 || GET_MODE (target) != tmode
10231 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10232 target = gen_reg_rtx (tmode);
10234 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10235 op0 = copy_to_mode_reg (mode0, op0);
10237 scratch1 = gen_reg_rtx (mode0);
10238 scratch2 = gen_reg_rtx (mode0);
10240 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
10241 if (! pat)
10242 return 0;
10243 emit_insn (pat);
10245 return target;
10248 static rtx
10249 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
10251 rtx pat;
10252 tree arg0 = CALL_EXPR_ARG (exp, 0);
10253 tree arg1 = CALL_EXPR_ARG (exp, 1);
10254 rtx op0 = expand_normal (arg0);
10255 rtx op1 = expand_normal (arg1);
10256 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10257 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10258 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10260 if (icode == CODE_FOR_nothing)
10261 /* Builtin not supported on this processor. */
10262 return 0;
10264 /* If we got invalid arguments bail out before generating bad rtl. */
10265 if (arg0 == error_mark_node || arg1 == error_mark_node)
10266 return const0_rtx;
10268 if (icode == CODE_FOR_altivec_vcfux
10269 || icode == CODE_FOR_altivec_vcfsx
10270 || icode == CODE_FOR_altivec_vctsxs
10271 || icode == CODE_FOR_altivec_vctuxs
10272 || icode == CODE_FOR_altivec_vspltb
10273 || icode == CODE_FOR_altivec_vsplth
10274 || icode == CODE_FOR_altivec_vspltw
10275 || icode == CODE_FOR_spe_evaddiw
10276 || icode == CODE_FOR_spe_evldd
10277 || icode == CODE_FOR_spe_evldh
10278 || icode == CODE_FOR_spe_evldw
10279 || icode == CODE_FOR_spe_evlhhesplat
10280 || icode == CODE_FOR_spe_evlhhossplat
10281 || icode == CODE_FOR_spe_evlhhousplat
10282 || icode == CODE_FOR_spe_evlwhe
10283 || icode == CODE_FOR_spe_evlwhos
10284 || icode == CODE_FOR_spe_evlwhou
10285 || icode == CODE_FOR_spe_evlwhsplat
10286 || icode == CODE_FOR_spe_evlwwsplat
10287 || icode == CODE_FOR_spe_evrlwi
10288 || icode == CODE_FOR_spe_evslwi
10289 || icode == CODE_FOR_spe_evsrwis
10290 || icode == CODE_FOR_spe_evsubifw
10291 || icode == CODE_FOR_spe_evsrwiu)
10293 /* Only allow 5-bit unsigned literals. */
10294 STRIP_NOPS (arg1);
10295 if (TREE_CODE (arg1) != INTEGER_CST
10296 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10298 error ("argument 2 must be a 5-bit unsigned literal");
10299 return const0_rtx;
10303 if (target == 0
10304 || GET_MODE (target) != tmode
10305 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10306 target = gen_reg_rtx (tmode);
10308 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10309 op0 = copy_to_mode_reg (mode0, op0);
10310 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10311 op1 = copy_to_mode_reg (mode1, op1);
10313 pat = GEN_FCN (icode) (target, op0, op1);
10314 if (! pat)
10315 return 0;
10316 emit_insn (pat);
10318 return target;
10321 static rtx
10322 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10324 rtx pat, scratch;
10325 tree cr6_form = CALL_EXPR_ARG (exp, 0);
10326 tree arg0 = CALL_EXPR_ARG (exp, 1);
10327 tree arg1 = CALL_EXPR_ARG (exp, 2);
10328 rtx op0 = expand_normal (arg0);
10329 rtx op1 = expand_normal (arg1);
10330 enum machine_mode tmode = SImode;
10331 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10332 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10333 int cr6_form_int;
10335 if (TREE_CODE (cr6_form) != INTEGER_CST)
10337 error ("argument 1 of __builtin_altivec_predicate must be a constant");
10338 return const0_rtx;
10340 else
10341 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
10343 gcc_assert (mode0 == mode1);
10345 /* If we have invalid arguments, bail out before generating bad rtl. */
10346 if (arg0 == error_mark_node || arg1 == error_mark_node)
10347 return const0_rtx;
10349 if (target == 0
10350 || GET_MODE (target) != tmode
10351 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10352 target = gen_reg_rtx (tmode);
10354 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10355 op0 = copy_to_mode_reg (mode0, op0);
10356 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10357 op1 = copy_to_mode_reg (mode1, op1);
10359 scratch = gen_reg_rtx (mode0);
10361 pat = GEN_FCN (icode) (scratch, op0, op1);
10362 if (! pat)
10363 return 0;
10364 emit_insn (pat);
10366 /* The vec_any* and vec_all* predicates use the same opcodes for two
10367 different operations, but the bits in CR6 will be different
10368 depending on what information we want. So we have to play tricks
10369 with CR6 to get the right bits out.
10371 If you think this is disgusting, look at the specs for the
10372 AltiVec predicates. */
10374 switch (cr6_form_int)
10376 case 0:
10377 emit_insn (gen_cr6_test_for_zero (target));
10378 break;
10379 case 1:
10380 emit_insn (gen_cr6_test_for_zero_reverse (target));
10381 break;
10382 case 2:
10383 emit_insn (gen_cr6_test_for_lt (target));
10384 break;
10385 case 3:
10386 emit_insn (gen_cr6_test_for_lt_reverse (target));
10387 break;
10388 default:
10389 error ("argument 1 of __builtin_altivec_predicate is out of range");
10390 break;
10393 return target;
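/* Why the CR6 gymnastics above are needed (a sketch): vec_all_eq and
   vec_any_ne both expand to the same vcmpequw. instruction; only the
   CR6 bit tested afterwards differs (cr6_form 2 versus 3 above).  */
#if 0
#include <altivec.h>
int f (vector int a, vector int b)
{
  return vec_all_eq (a, b)      /* cr6_form 2: gen_cr6_test_for_lt */
         + vec_any_ne (a, b);   /* cr6_form 3: gen_cr6_test_for_lt_reverse */
}
#endif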
10396 static rtx
10397 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10399 rtx pat, addr;
10400 tree arg0 = CALL_EXPR_ARG (exp, 0);
10401 tree arg1 = CALL_EXPR_ARG (exp, 1);
10402 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10403 enum machine_mode mode0 = Pmode;
10404 enum machine_mode mode1 = Pmode;
10405 rtx op0 = expand_normal (arg0);
10406 rtx op1 = expand_normal (arg1);
10408 if (icode == CODE_FOR_nothing)
10409 /* Builtin not supported on this processor. */
10410 return 0;
10412 /* If we got invalid arguments bail out before generating bad rtl. */
10413 if (arg0 == error_mark_node || arg1 == error_mark_node)
10414 return const0_rtx;
10416 if (target == 0
10417 || GET_MODE (target) != tmode
10418 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10419 target = gen_reg_rtx (tmode);
10421 op1 = copy_to_mode_reg (mode1, op1);
10423 if (op0 == const0_rtx)
10425 addr = gen_rtx_MEM (tmode, op1);
10427 else
10429 op0 = copy_to_mode_reg (mode0, op0);
10430 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10433 pat = GEN_FCN (icode) (target, addr);
10435 if (! pat)
10436 return 0;
10437 emit_insn (pat);
10439 return target;
10442 static rtx
10443 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10445 rtx pat, addr;
10446 tree arg0 = CALL_EXPR_ARG (exp, 0);
10447 tree arg1 = CALL_EXPR_ARG (exp, 1);
10448 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10449 enum machine_mode mode0 = Pmode;
10450 enum machine_mode mode1 = Pmode;
10451 rtx op0 = expand_normal (arg0);
10452 rtx op1 = expand_normal (arg1);
10454 if (icode == CODE_FOR_nothing)
10455 /* Builtin not supported on this processor. */
10456 return 0;
10458 /* If we got invalid arguments bail out before generating bad rtl. */
10459 if (arg0 == error_mark_node || arg1 == error_mark_node)
10460 return const0_rtx;
10462 if (target == 0
10463 || GET_MODE (target) != tmode
10464 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10465 target = gen_reg_rtx (tmode);
10467 op1 = copy_to_mode_reg (mode1, op1);
10469 if (op0 == const0_rtx)
10471 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10473 else
10475 op0 = copy_to_mode_reg (mode0, op0);
10476 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10479 pat = GEN_FCN (icode) (target, addr);
10481 if (! pat)
10482 return 0;
10483 emit_insn (pat);
10485 return target;
10488 static rtx
10489 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10491 tree arg0 = CALL_EXPR_ARG (exp, 0);
10492 tree arg1 = CALL_EXPR_ARG (exp, 1);
10493 tree arg2 = CALL_EXPR_ARG (exp, 2);
10494 rtx op0 = expand_normal (arg0);
10495 rtx op1 = expand_normal (arg1);
10496 rtx op2 = expand_normal (arg2);
10497 rtx pat;
10498 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10499 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10500 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10502 /* Invalid arguments. Bail before doing anything stoopid! */
10503 if (arg0 == error_mark_node
10504 || arg1 == error_mark_node
10505 || arg2 == error_mark_node)
10506 return const0_rtx;
10508 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10509 op0 = copy_to_mode_reg (mode2, op0);
10510 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10511 op1 = copy_to_mode_reg (mode0, op1);
10512 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10513 op2 = copy_to_mode_reg (mode1, op2);
10515 pat = GEN_FCN (icode) (op1, op2, op0);
10516 if (pat)
10517 emit_insn (pat);
10518 return NULL_RTX;
10521 static rtx
10522 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10524 tree arg0 = CALL_EXPR_ARG (exp, 0);
10525 tree arg1 = CALL_EXPR_ARG (exp, 1);
10526 tree arg2 = CALL_EXPR_ARG (exp, 2);
10527 rtx op0 = expand_normal (arg0);
10528 rtx op1 = expand_normal (arg1);
10529 rtx op2 = expand_normal (arg2);
10530 rtx pat, addr;
10531 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10532 enum machine_mode mode1 = Pmode;
10533 enum machine_mode mode2 = Pmode;
10535 /* Invalid arguments. Bail before doing anything stoopid! */
10536 if (arg0 == error_mark_node
10537 || arg1 == error_mark_node
10538 || arg2 == error_mark_node)
10539 return const0_rtx;
10541 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10542 op0 = copy_to_mode_reg (tmode, op0);
10544 op2 = copy_to_mode_reg (mode2, op2);
10546 if (op1 == const0_rtx)
10548 addr = gen_rtx_MEM (tmode, op2);
10550 else
10552 op1 = copy_to_mode_reg (mode1, op1);
10553 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10556 pat = GEN_FCN (icode) (addr, op0);
10557 if (pat)
10558 emit_insn (pat);
10559 return NULL_RTX;
10562 static rtx
10563 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10565 tree arg0 = CALL_EXPR_ARG (exp, 0);
10566 tree arg1 = CALL_EXPR_ARG (exp, 1);
10567 tree arg2 = CALL_EXPR_ARG (exp, 2);
10568 rtx op0 = expand_normal (arg0);
10569 rtx op1 = expand_normal (arg1);
10570 rtx op2 = expand_normal (arg2);
10571 rtx pat, addr;
10572 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10573 enum machine_mode smode = insn_data[icode].operand[1].mode;
10574 enum machine_mode mode1 = Pmode;
10575 enum machine_mode mode2 = Pmode;
10577 /* Invalid arguments. Bail before doing anything stoopid! */
10578 if (arg0 == error_mark_node
10579 || arg1 == error_mark_node
10580 || arg2 == error_mark_node)
10581 return const0_rtx;
10583 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10584 op0 = copy_to_mode_reg (smode, op0);
10586 op2 = copy_to_mode_reg (mode2, op2);
10588 if (op1 == const0_rtx)
10590 addr = gen_rtx_MEM (tmode, op2);
10592 else
10594 op1 = copy_to_mode_reg (mode1, op1);
10595 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10598 pat = GEN_FCN (icode) (addr, op0);
10599 if (pat)
10600 emit_insn (pat);
10601 return NULL_RTX;
10604 static rtx
10605 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10607 rtx pat;
10608 tree arg0 = CALL_EXPR_ARG (exp, 0);
10609 tree arg1 = CALL_EXPR_ARG (exp, 1);
10610 tree arg2 = CALL_EXPR_ARG (exp, 2);
10611 rtx op0 = expand_normal (arg0);
10612 rtx op1 = expand_normal (arg1);
10613 rtx op2 = expand_normal (arg2);
10614 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10615 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10616 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10617 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10619 if (icode == CODE_FOR_nothing)
10620 /* Builtin not supported on this processor. */
10621 return 0;
10623 /* If we got invalid arguments bail out before generating bad rtl. */
10624 if (arg0 == error_mark_node
10625 || arg1 == error_mark_node
10626 || arg2 == error_mark_node)
10627 return const0_rtx;
10629 /* Check and prepare argument depending on the instruction code.
10631 Note that a switch statement instead of the sequence of tests
10632 would be incorrect: many of the CODE_FOR values could be
10633 CODE_FOR_nothing, and duplicate case values do not compile.
10634 At run time we would never reach here with such a code
10635 anyway. */
10636 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10637 || icode == CODE_FOR_altivec_vsldoi_v4si
10638 || icode == CODE_FOR_altivec_vsldoi_v8hi
10639 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10641 /* Only allow 4-bit unsigned literals. */
10642 STRIP_NOPS (arg2);
10643 if (TREE_CODE (arg2) != INTEGER_CST
10644 || TREE_INT_CST_LOW (arg2) & ~0xf)
10646 error ("argument 3 must be a 4-bit unsigned literal");
10647 return const0_rtx;
10650 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10651 || icode == CODE_FOR_vsx_xxpermdi_v2di
10652 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10653 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10654 || icode == CODE_FOR_vsx_xxsldwi_v4si
10655 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10656 || icode == CODE_FOR_vsx_xxsldwi_v2di
10657 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10659 /* Only allow 2-bit unsigned literals. */
10660 STRIP_NOPS (arg2);
10661 if (TREE_CODE (arg2) != INTEGER_CST
10662 || TREE_INT_CST_LOW (arg2) & ~0x3)
10664 error ("argument 3 must be a 2-bit unsigned literal");
10665 return const0_rtx;
10668 else if (icode == CODE_FOR_vsx_set_v2df
10669 || icode == CODE_FOR_vsx_set_v2di)
10671 /* Only allow 1-bit unsigned literals. */
10672 STRIP_NOPS (arg2);
10673 if (TREE_CODE (arg2) != INTEGER_CST
10674 || TREE_INT_CST_LOW (arg2) & ~0x1)
10676 error ("argument 3 must be a 1-bit unsigned literal");
10677 return const0_rtx;
10680 else if (icode == CODE_FOR_crypto_vshasigmaw
10681 || icode == CODE_FOR_crypto_vshasigmad)
10683 /* Check whether the 2nd and 3rd arguments are integer constants and in
10684 range and prepare arguments. */
10685 STRIP_NOPS (arg1);
10686 if (TREE_CODE (arg1) != INTEGER_CST
10687 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
10689 error ("argument 2 must be 0 or 1");
10690 return const0_rtx;
10693 STRIP_NOPS (arg2);
10694 if (TREE_CODE (arg2) != INTEGER_CST
10695 || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15))
10697 error ("argument 3 must be in the range 0..15");
10698 return const0_rtx;
10702 if (target == 0
10703 || GET_MODE (target) != tmode
10704 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10705 target = gen_reg_rtx (tmode);
10707 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10708 op0 = copy_to_mode_reg (mode0, op0);
10709 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10710 op1 = copy_to_mode_reg (mode1, op1);
10711 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10712 op2 = copy_to_mode_reg (mode2, op2);
10714 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10715 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10716 else
10717 pat = GEN_FCN (icode) (target, op0, op1, op2);
10718 if (! pat)
10719 return 0;
10720 emit_insn (pat);
10722 return target;
10725 /* Expand the lvx builtins. */
10726 static rtx
10727 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10729 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10730 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10731 tree arg0;
10732 enum machine_mode tmode, mode0;
10733 rtx pat, op0;
10734 enum insn_code icode;
10736 switch (fcode)
10738 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10739 icode = CODE_FOR_vector_altivec_load_v16qi;
10740 break;
10741 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10742 icode = CODE_FOR_vector_altivec_load_v8hi;
10743 break;
10744 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10745 icode = CODE_FOR_vector_altivec_load_v4si;
10746 break;
10747 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10748 icode = CODE_FOR_vector_altivec_load_v4sf;
10749 break;
10750 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10751 icode = CODE_FOR_vector_altivec_load_v2df;
10752 break;
10753 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10754 icode = CODE_FOR_vector_altivec_load_v2di;
10755 break;
10756 default:
10757 *expandedp = false;
10758 return NULL_RTX;
10761 *expandedp = true;
10763 arg0 = CALL_EXPR_ARG (exp, 0);
10764 op0 = expand_normal (arg0);
10765 tmode = insn_data[icode].operand[0].mode;
10766 mode0 = insn_data[icode].operand[1].mode;
10768 if (target == 0
10769 || GET_MODE (target) != tmode
10770 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10771 target = gen_reg_rtx (tmode);
10773 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10774 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10776 pat = GEN_FCN (icode) (target, op0);
10777 if (! pat)
10778 return 0;
10779 emit_insn (pat);
10780 return target;
10783 /* Expand the stvx builtins. */
10784 static rtx
10785 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10786 bool *expandedp)
10788 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10789 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10790 tree arg0, arg1;
10791 enum machine_mode mode0, mode1;
10792 rtx pat, op0, op1;
10793 enum insn_code icode;
10795 switch (fcode)
10797 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10798 icode = CODE_FOR_vector_altivec_store_v16qi;
10799 break;
10800 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10801 icode = CODE_FOR_vector_altivec_store_v8hi;
10802 break;
10803 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10804 icode = CODE_FOR_vector_altivec_store_v4si;
10805 break;
10806 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10807 icode = CODE_FOR_vector_altivec_store_v4sf;
10808 break;
10809 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10810 icode = CODE_FOR_vector_altivec_store_v2df;
10811 break;
10812 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10813 icode = CODE_FOR_vector_altivec_store_v2di;
10814 break;
10815 default:
10816 *expandedp = false;
10817 return NULL_RTX;
10820 arg0 = CALL_EXPR_ARG (exp, 0);
10821 arg1 = CALL_EXPR_ARG (exp, 1);
10822 op0 = expand_normal (arg0);
10823 op1 = expand_normal (arg1);
10824 mode0 = insn_data[icode].operand[0].mode;
10825 mode1 = insn_data[icode].operand[1].mode;
10827 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10828 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10829 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10830 op1 = copy_to_mode_reg (mode1, op1);
10832 pat = GEN_FCN (icode) (op0, op1);
10833 if (pat)
10834 emit_insn (pat);
10836 *expandedp = true;
10837 return NULL_RTX;
10840 /* Expand the dst builtins. */
10841 static rtx
10842 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10843 bool *expandedp)
10845 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10846 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10847 tree arg0, arg1, arg2;
10848 enum machine_mode mode0, mode1;
10849 rtx pat, op0, op1, op2;
10850 const struct builtin_description *d;
10851 size_t i;
10853 *expandedp = false;
10855 /* Handle DST variants. */
10856 d = bdesc_dst;
10857 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10858 if (d->code == fcode)
10860 arg0 = CALL_EXPR_ARG (exp, 0);
10861 arg1 = CALL_EXPR_ARG (exp, 1);
10862 arg2 = CALL_EXPR_ARG (exp, 2);
10863 op0 = expand_normal (arg0);
10864 op1 = expand_normal (arg1);
10865 op2 = expand_normal (arg2);
10866 mode0 = insn_data[d->icode].operand[0].mode;
10867 mode1 = insn_data[d->icode].operand[1].mode;
10869 /* Invalid arguments, bail out before generating bad rtl. */
10870 if (arg0 == error_mark_node
10871 || arg1 == error_mark_node
10872 || arg2 == error_mark_node)
10873 return const0_rtx;
10875 *expandedp = true;
10876 STRIP_NOPS (arg2);
10877 if (TREE_CODE (arg2) != INTEGER_CST
10878 || TREE_INT_CST_LOW (arg2) & ~0x3)
10880 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10881 return const0_rtx;
10884 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10885 op0 = copy_to_mode_reg (Pmode, op0);
10886 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10887 op1 = copy_to_mode_reg (mode1, op1);
10889 pat = GEN_FCN (d->icode) (op0, op1, op2);
10890 if (pat != 0)
10891 emit_insn (pat);
10893 return NULL_RTX;
10896 return NULL_RTX;
10899 /* Expand vec_init builtin. */
10900 static rtx
10901 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10903 enum machine_mode tmode = TYPE_MODE (type);
10904 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10905 int i, n_elt = GET_MODE_NUNITS (tmode);
10906 rtvec v = rtvec_alloc (n_elt);
10908 gcc_assert (VECTOR_MODE_P (tmode));
10909 gcc_assert (n_elt == call_expr_nargs (exp));
10911 for (i = 0; i < n_elt; ++i)
10913 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10914 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10917 if (!target || !register_operand (target, tmode))
10918 target = gen_reg_rtx (tmode);
10920 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10921 return target;
10924 /* Return the integer constant in ARG. Constrain it to be in the range
10925 of the subparts of VEC_TYPE; issue an error if not. */
10927 static int
10928 get_element_number (tree vec_type, tree arg)
10930 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10932 if (!host_integerp (arg, 1)
10933 || (elt = tree_low_cst (arg, 1), elt > max))
10935 error ("selector must be an integer constant in the range 0..%wi", max);
10936 return 0;
10939 return elt;
10942 /* Expand vec_set builtin. */
10943 static rtx
10944 altivec_expand_vec_set_builtin (tree exp)
10946 enum machine_mode tmode, mode1;
10947 tree arg0, arg1, arg2;
10948 int elt;
10949 rtx op0, op1;
10951 arg0 = CALL_EXPR_ARG (exp, 0);
10952 arg1 = CALL_EXPR_ARG (exp, 1);
10953 arg2 = CALL_EXPR_ARG (exp, 2);
10955 tmode = TYPE_MODE (TREE_TYPE (arg0));
10956 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10957 gcc_assert (VECTOR_MODE_P (tmode));
10959 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10960 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10961 elt = get_element_number (TREE_TYPE (arg0), arg2);
10963 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10964 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10966 op0 = force_reg (tmode, op0);
10967 op1 = force_reg (mode1, op1);
10969 rs6000_expand_vector_set (op0, op1, elt);
10971 return op0;
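/* Editorial note (not in the original file): vec_set replaces a single
   element; get_element_number above forces the selector to be an integer
   constant within the vector's subparts.  A sketch using altivec.h's
   vec_insert as the assumed user-level entry point (-maltivec):  */
#if 0
#include <altivec.h>

__vector signed int
set_lane_2 (__vector signed int v, int x)
{
  return vec_insert (x, v, 2);   /* selector: constant in 0..3 */
}
#endif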
10974 /* Expand vec_ext builtin. */
10975 static rtx
10976 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10978 enum machine_mode tmode, mode0;
10979 tree arg0, arg1;
10980 int elt;
10981 rtx op0;
10983 arg0 = CALL_EXPR_ARG (exp, 0);
10984 arg1 = CALL_EXPR_ARG (exp, 1);
10986 op0 = expand_normal (arg0);
10987 elt = get_element_number (TREE_TYPE (arg0), arg1);
10989 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10990 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10991 gcc_assert (VECTOR_MODE_P (mode0));
10993 op0 = force_reg (mode0, op0);
10995 if (optimize || !target || !register_operand (target, tmode))
10996 target = gen_reg_rtx (tmode);
10998 rs6000_expand_vector_extract (target, op0, elt);
11000 return target;
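/* Editorial note (not in the original file): the extract path mirrors
   vec_set — constant selector, result produced through
   rs6000_expand_vector_extract.  A sketch via altivec.h's vec_extract
   (assumed entry point, -maltivec):  */
#if 0
#include <altivec.h>

int
get_lane_2 (__vector signed int v)
{
  return vec_extract (v, 2);   /* selector checked against 0..3 */
}
#endif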
11003 /* Expand the builtin in EXP and store the result in TARGET. Store
11004 true in *EXPANDEDP if we found a builtin to expand. */
11005 static rtx
11006 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
11008 const struct builtin_description *d;
11009 size_t i;
11010 enum insn_code icode;
11011 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11012 tree arg0;
11013 rtx op0, pat;
11014 enum machine_mode tmode, mode0;
11015 enum rs6000_builtins fcode
11016 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11018 if (rs6000_overloaded_builtin_p (fcode))
11020 *expandedp = true;
11021 error ("unresolved overload for Altivec builtin %qF", fndecl);
11023 /* Since it is invalid, just generate a normal call. */
11024 return expand_call (exp, target, false);
11027 target = altivec_expand_ld_builtin (exp, target, expandedp);
11028 if (*expandedp)
11029 return target;
11031 target = altivec_expand_st_builtin (exp, target, expandedp);
11032 if (*expandedp)
11033 return target;
11035 target = altivec_expand_dst_builtin (exp, target, expandedp);
11036 if (*expandedp)
11037 return target;
11039 *expandedp = true;
11041 switch (fcode)
11043 case ALTIVEC_BUILTIN_STVX:
11044 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
11045 case ALTIVEC_BUILTIN_STVEBX:
11046 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
11047 case ALTIVEC_BUILTIN_STVEHX:
11048 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
11049 case ALTIVEC_BUILTIN_STVEWX:
11050 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
11051 case ALTIVEC_BUILTIN_STVXL:
11052 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
11054 case ALTIVEC_BUILTIN_STVLX:
11055 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
11056 case ALTIVEC_BUILTIN_STVLXL:
11057 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
11058 case ALTIVEC_BUILTIN_STVRX:
11059 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
11060 case ALTIVEC_BUILTIN_STVRXL:
11061 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
11063 case VSX_BUILTIN_STXVD2X_V2DF:
11064 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
11065 case VSX_BUILTIN_STXVD2X_V2DI:
11066 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
11067 case VSX_BUILTIN_STXVW4X_V4SF:
11068 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
11069 case VSX_BUILTIN_STXVW4X_V4SI:
11070 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
11071 case VSX_BUILTIN_STXVW4X_V8HI:
11072 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
11073 case VSX_BUILTIN_STXVW4X_V16QI:
11074 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
11076 case ALTIVEC_BUILTIN_MFVSCR:
11077 icode = CODE_FOR_altivec_mfvscr;
11078 tmode = insn_data[icode].operand[0].mode;
11080 if (target == 0
11081 || GET_MODE (target) != tmode
11082 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11083 target = gen_reg_rtx (tmode);
11085 pat = GEN_FCN (icode) (target);
11086 if (! pat)
11087 return 0;
11088 emit_insn (pat);
11089 return target;
11091 case ALTIVEC_BUILTIN_MTVSCR:
11092 icode = CODE_FOR_altivec_mtvscr;
11093 arg0 = CALL_EXPR_ARG (exp, 0);
11094 op0 = expand_normal (arg0);
11095 mode0 = insn_data[icode].operand[0].mode;
11097 /* If we got invalid arguments bail out before generating bad rtl. */
11098 if (arg0 == error_mark_node)
11099 return const0_rtx;
11101 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11102 op0 = copy_to_mode_reg (mode0, op0);
11104 pat = GEN_FCN (icode) (op0);
11105 if (pat)
11106 emit_insn (pat);
11107 return NULL_RTX;
11109 case ALTIVEC_BUILTIN_DSSALL:
11110 emit_insn (gen_altivec_dssall ());
11111 return NULL_RTX;
11113 case ALTIVEC_BUILTIN_DSS:
11114 icode = CODE_FOR_altivec_dss;
11115 arg0 = CALL_EXPR_ARG (exp, 0);
11116 STRIP_NOPS (arg0);
11117 op0 = expand_normal (arg0);
11118 mode0 = insn_data[icode].operand[0].mode;
11120 /* If we got invalid arguments bail out before generating bad rtl. */
11121 if (arg0 == error_mark_node)
11122 return const0_rtx;
11124 if (TREE_CODE (arg0) != INTEGER_CST
11125 || TREE_INT_CST_LOW (arg0) & ~0x3)
11127 error ("argument to dss must be a 2-bit unsigned literal");
11128 return const0_rtx;
11131 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11132 op0 = copy_to_mode_reg (mode0, op0);
11134 emit_insn (gen_altivec_dss (op0));
11135 return NULL_RTX;
11137 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
11138 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
11139 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
11140 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
11141 case VSX_BUILTIN_VEC_INIT_V2DF:
11142 case VSX_BUILTIN_VEC_INIT_V2DI:
11143 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
11145 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
11146 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
11147 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
11148 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
11149 case VSX_BUILTIN_VEC_SET_V2DF:
11150 case VSX_BUILTIN_VEC_SET_V2DI:
11151 return altivec_expand_vec_set_builtin (exp);
11153 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
11154 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
11155 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
11156 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
11157 case VSX_BUILTIN_VEC_EXT_V2DF:
11158 case VSX_BUILTIN_VEC_EXT_V2DI:
11159 return altivec_expand_vec_ext_builtin (exp, target);
11161 default:
11162 break;
11163 /* Fall through to the table-driven expanders below. */
11166 /* Expand abs* operations. */
11167 d = bdesc_abs;
11168 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
11169 if (d->code == fcode)
11170 return altivec_expand_abs_builtin (d->icode, exp, target);
11172 /* Expand the AltiVec predicates. */
11173 d = bdesc_altivec_preds;
11174 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
11175 if (d->code == fcode)
11176 return altivec_expand_predicate_builtin (d->icode, exp, target);
11178 /* LV* are funky. They were initialized differently, so we expand them by hand here. */
11179 switch (fcode)
11181 case ALTIVEC_BUILTIN_LVSL:
11182 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
11183 exp, target, false);
11184 case ALTIVEC_BUILTIN_LVSR:
11185 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
11186 exp, target, false);
11187 case ALTIVEC_BUILTIN_LVEBX:
11188 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
11189 exp, target, false);
11190 case ALTIVEC_BUILTIN_LVEHX:
11191 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
11192 exp, target, false);
11193 case ALTIVEC_BUILTIN_LVEWX:
11194 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
11195 exp, target, false);
11196 case ALTIVEC_BUILTIN_LVXL:
11197 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
11198 exp, target, false);
11199 case ALTIVEC_BUILTIN_LVX:
11200 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
11201 exp, target, false);
11202 case ALTIVEC_BUILTIN_LVLX:
11203 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
11204 exp, target, true);
11205 case ALTIVEC_BUILTIN_LVLXL:
11206 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
11207 exp, target, true);
11208 case ALTIVEC_BUILTIN_LVRX:
11209 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
11210 exp, target, true);
11211 case ALTIVEC_BUILTIN_LVRXL:
11212 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
11213 exp, target, true);
11214 case VSX_BUILTIN_LXVD2X_V2DF:
11215 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
11216 exp, target, false);
11217 case VSX_BUILTIN_LXVD2X_V2DI:
11218 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
11219 exp, target, false);
11220 case VSX_BUILTIN_LXVW4X_V4SF:
11221 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
11222 exp, target, false);
11223 case VSX_BUILTIN_LXVW4X_V4SI:
11224 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
11225 exp, target, false);
11226 case VSX_BUILTIN_LXVW4X_V8HI:
11227 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
11228 exp, target, false);
11229 case VSX_BUILTIN_LXVW4X_V16QI:
11230 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
11231 exp, target, false);
11232 break;
11233 default:
11234 break;
11235 /* Fall through: nothing matched, so report the builtin as not expanded. */
11238 *expandedp = false;
11239 return NULL_RTX;
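/* Editorial note (not in the original file): the LVSL/LVSR builtins
   handled above exist mainly for the classic AltiVec misaligned-load
   idiom — two aligned loads combined through a permute whose control
   vector comes from lvsl.  A sketch, assuming <altivec.h> and
   -maltivec:  */
#if 0
#include <altivec.h>

__vector unsigned char
load_misaligned (const unsigned char *p)
{
  __vector unsigned char lo   = vec_ld (0, p);     /* aligned load at p */
  __vector unsigned char hi   = vec_ld (15, p);    /* next aligned block */
  __vector unsigned char mask = vec_lvsl (0, p);   /* permute control */
  return vec_perm (lo, hi, mask);
}
#endif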
11242 /* Expand the builtin in EXP and store the result in TARGET. Store
11243 true in *EXPANDEDP if we found a builtin to expand. */
11244 static rtx
11245 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
11247 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11248 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11249 const struct builtin_description *d;
11250 size_t i;
11252 *expandedp = true;
11254 switch (fcode)
11256 case PAIRED_BUILTIN_STX:
11257 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
11258 case PAIRED_BUILTIN_LX:
11259 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
11260 default:
11261 break;
11262 /* Fall through to the predicate table below. */
11265 /* Expand the paired predicates. */
11266 d = bdesc_paired_preds;
11267 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
11268 if (d->code == fcode)
11269 return paired_expand_predicate_builtin (d->icode, exp, target);
11271 *expandedp = false;
11272 return NULL_RTX;
11275 /* Binops that need to be initialized manually, but can be expanded
11276 automagically by rs6000_expand_binop_builtin. */
11277 static const struct builtin_description bdesc_2arg_spe[] =
11279 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
11280 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
11281 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
11282 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
11283 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
11284 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
11285 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
11286 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
11287 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
11288 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
11289 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
11290 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
11291 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
11292 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
11293 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
11294 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
11295 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
11296 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
11297 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
11298 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
11299 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
11300 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
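/* Editorial note (not in the original file): although listed here so
   spe_init_builtins below can register them with pointer argument types,
   these loads expand through the generic binop path.  A sketch of the
   indexed form, assuming -mspe (the __ev64_opaque__ type is also
   registered below):  */
#if 0
__ev64_opaque__
load_indexed (__ev64_opaque__ *p, int byte_off)
{
  return __builtin_spe_evlddx (p, byte_off);   /* EA = p + byte_off */
}
#endif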
11303 /* Expand the builtin in EXP and store the result in TARGET. Store
11304 true in *EXPANDEDP if we found a builtin to expand.
11306 This expands the SPE builtins that are not simple unary and binary
11307 operations. */
11308 static rtx
11309 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
11311 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11312 tree arg1, arg0;
11313 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11314 enum insn_code icode;
11315 enum machine_mode tmode, mode0;
11316 rtx pat, op0;
11317 const struct builtin_description *d;
11318 size_t i;
11320 *expandedp = true;
11322 /* Syntax check for a 5-bit unsigned immediate. */
11323 switch (fcode)
11325 case SPE_BUILTIN_EVSTDD:
11326 case SPE_BUILTIN_EVSTDH:
11327 case SPE_BUILTIN_EVSTDW:
11328 case SPE_BUILTIN_EVSTWHE:
11329 case SPE_BUILTIN_EVSTWHO:
11330 case SPE_BUILTIN_EVSTWWE:
11331 case SPE_BUILTIN_EVSTWWO:
11332 arg1 = CALL_EXPR_ARG (exp, 2);
11333 if (TREE_CODE (arg1) != INTEGER_CST
11334 || TREE_INT_CST_LOW (arg1) & ~0x1f)
11336 error ("argument 3 must be a 5-bit unsigned literal");
11337 return const0_rtx;
11339 break;
11340 default:
11341 break;
11344 /* The evsplat*i instructions are not quite generic. */
11345 switch (fcode)
11347 case SPE_BUILTIN_EVSPLATFI:
11348 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
11349 exp, target);
11350 case SPE_BUILTIN_EVSPLATI:
11351 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
11352 exp, target);
11353 default:
11354 break;
11357 d = bdesc_2arg_spe;
11358 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
11359 if (d->code == fcode)
11360 return rs6000_expand_binop_builtin (d->icode, exp, target);
11362 d = bdesc_spe_predicates;
11363 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
11364 if (d->code == fcode)
11365 return spe_expand_predicate_builtin (d->icode, exp, target);
11367 d = bdesc_spe_evsel;
11368 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
11369 if (d->code == fcode)
11370 return spe_expand_evsel_builtin (d->icode, exp, target);
11372 switch (fcode)
11374 case SPE_BUILTIN_EVSTDDX:
11375 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
11376 case SPE_BUILTIN_EVSTDHX:
11377 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
11378 case SPE_BUILTIN_EVSTDWX:
11379 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
11380 case SPE_BUILTIN_EVSTWHEX:
11381 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
11382 case SPE_BUILTIN_EVSTWHOX:
11383 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
11384 case SPE_BUILTIN_EVSTWWEX:
11385 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
11386 case SPE_BUILTIN_EVSTWWOX:
11387 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
11388 case SPE_BUILTIN_EVSTDD:
11389 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
11390 case SPE_BUILTIN_EVSTDH:
11391 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
11392 case SPE_BUILTIN_EVSTDW:
11393 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
11394 case SPE_BUILTIN_EVSTWHE:
11395 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
11396 case SPE_BUILTIN_EVSTWHO:
11397 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
11398 case SPE_BUILTIN_EVSTWWE:
11399 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
11400 case SPE_BUILTIN_EVSTWWO:
11401 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
11402 case SPE_BUILTIN_MFSPEFSCR:
11403 icode = CODE_FOR_spe_mfspefscr;
11404 tmode = insn_data[icode].operand[0].mode;
11406 if (target == 0
11407 || GET_MODE (target) != tmode
11408 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11409 target = gen_reg_rtx (tmode);
11411 pat = GEN_FCN (icode) (target);
11412 if (! pat)
11413 return 0;
11414 emit_insn (pat);
11415 return target;
11416 case SPE_BUILTIN_MTSPEFSCR:
11417 icode = CODE_FOR_spe_mtspefscr;
11418 arg0 = CALL_EXPR_ARG (exp, 0);
11419 op0 = expand_normal (arg0);
11420 mode0 = insn_data[icode].operand[0].mode;
11422 if (arg0 == error_mark_node)
11423 return const0_rtx;
11425 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11426 op0 = copy_to_mode_reg (mode0, op0);
11428 pat = GEN_FCN (icode) (op0);
11429 if (pat)
11430 emit_insn (pat);
11431 return NULL_RTX;
11432 default:
11433 break;
11436 *expandedp = false;
11437 return NULL_RTX;
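/* Editorial note (not in the original file): a sketch of one of the
   store forms checked at the top of spe_expand_builtin — the trailing
   offset must be a 5-bit unsigned literal (argument types per
   spe_init_builtins below); assuming -mspe:  */
#if 0
void
store_pair (__ev64_opaque__ v, __ev64_opaque__ *p)
{
  __builtin_spe_evstdd (v, p, 0);   /* offset: literal in 0..31 */
}
#endif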
11440 static rtx
11441 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11443 rtx pat, scratch, tmp;
11444 tree form = CALL_EXPR_ARG (exp, 0);
11445 tree arg0 = CALL_EXPR_ARG (exp, 1);
11446 tree arg1 = CALL_EXPR_ARG (exp, 2);
11447 rtx op0 = expand_normal (arg0);
11448 rtx op1 = expand_normal (arg1);
11449 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11450 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11451 int form_int;
11452 enum rtx_code code;
11454 if (TREE_CODE (form) != INTEGER_CST)
11456 error ("argument 1 of __builtin_paired_predicate must be a constant");
11457 return const0_rtx;
11459 else
11460 form_int = TREE_INT_CST_LOW (form);
11462 gcc_assert (mode0 == mode1);
11464 if (arg0 == error_mark_node || arg1 == error_mark_node)
11465 return const0_rtx;
11467 if (target == 0
11468 || GET_MODE (target) != SImode
11469 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
11470 target = gen_reg_rtx (SImode);
11471 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
11472 op0 = copy_to_mode_reg (mode0, op0);
11473 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
11474 op1 = copy_to_mode_reg (mode1, op1);
11476 scratch = gen_reg_rtx (CCFPmode);
11478 pat = GEN_FCN (icode) (scratch, op0, op1);
11479 if (!pat)
11480 return const0_rtx;
11482 emit_insn (pat);
11484 switch (form_int)
11486 /* LT bit. */
11487 case 0:
11488 code = LT;
11489 break;
11490 /* GT bit. */
11491 case 1:
11492 code = GT;
11493 break;
11494 /* EQ bit. */
11495 case 2:
11496 code = EQ;
11497 break;
11498 /* UN bit. */
11499 case 3:
11500 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11501 return target;
11502 default:
11503 error ("argument 1 of __builtin_paired_predicate is out of range");
11504 return const0_rtx;
11507 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11508 emit_move_insn (target, tmp);
11509 return target;
11512 static rtx
11513 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11515 rtx pat, scratch, tmp;
11516 tree form = CALL_EXPR_ARG (exp, 0);
11517 tree arg0 = CALL_EXPR_ARG (exp, 1);
11518 tree arg1 = CALL_EXPR_ARG (exp, 2);
11519 rtx op0 = expand_normal (arg0);
11520 rtx op1 = expand_normal (arg1);
11521 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11522 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11523 int form_int;
11524 enum rtx_code code;
11526 if (TREE_CODE (form) != INTEGER_CST)
11528 error ("argument 1 of __builtin_spe_predicate must be a constant");
11529 return const0_rtx;
11531 else
11532 form_int = TREE_INT_CST_LOW (form);
11534 gcc_assert (mode0 == mode1);
11536 if (arg0 == error_mark_node || arg1 == error_mark_node)
11537 return const0_rtx;
11539 if (target == 0
11540 || GET_MODE (target) != SImode
11541 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
11542 target = gen_reg_rtx (SImode);
11544 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11545 op0 = copy_to_mode_reg (mode0, op0);
11546 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11547 op1 = copy_to_mode_reg (mode1, op1);
11549 scratch = gen_reg_rtx (CCmode);
11551 pat = GEN_FCN (icode) (scratch, op0, op1);
11552 if (! pat)
11553 return const0_rtx;
11554 emit_insn (pat);
11556 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
11557 _lower_. We use one compare, but look in different bits of the
11558 CR for each variant.
11560 There are 2 elements in each SPE simd type (upper/lower). The CR
11561 bits are set as follows:
11563 BIT0 | BIT 1 | BIT 2 | BIT 3
11564 U | L | (U | L) | (U & L)
11566 So, for an "all" relationship, BIT 3 would be set.
11567 For an "any" relationship, BIT 2 would be set. Etc.
11569 Following traditional nomenclature, these bits map to:
11571 BIT0 | BIT 1 | BIT 2 | BIT 3
11572 LT | GT | EQ | OV
11574 Later, we will generate rtl to look in the LT/GT/EQ/OV bits.
11577 switch (form_int)
11579 /* All variant. OV bit. */
11580 case 0:
11581 /* We need to get to the OV bit, which is the ORDERED bit. We
11582 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11583 that's ugly and will make validate_condition_mode die.
11584 So let's just use another pattern. */
11585 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11586 return target;
11587 /* Any variant. EQ bit. */
11588 case 1:
11589 code = EQ;
11590 break;
11591 /* Upper variant. LT bit. */
11592 case 2:
11593 code = LT;
11594 break;
11595 /* Lower variant. GT bit. */
11596 case 3:
11597 code = GT;
11598 break;
11599 default:
11600 error ("argument 1 of __builtin_spe_predicate is out of range");
11601 return const0_rtx;
11604 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11605 emit_move_insn (target, tmp);
11607 return target;
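/* Editorial note (not in the original file): each SPE predicate builtin
   takes the variant selector as its first argument — 0 = all (OV bit),
   1 = any (EQ bit), 2 = upper (LT bit), 3 = lower (GT bit) — matching
   the CR-bit table in the comment above.  The builtin name below is
   purely hypothetical; the real names come from bdesc_spe_predicates:  */
#if 0
int
any_equal (__ev64_opaque__ a, __ev64_opaque__ b)
{
  /* Hypothetical predicate builtin, shown only for the argument shape
     (int form, v2si, v2si — per int_ftype_int_v2si_v2si below).  */
  return __builtin_spe_evcmpeq (1 /* any */, a, b);
}
#endif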
11610 /* The evsel builtins look like this:
11612 e = __builtin_spe_evsel_OP (a, b, c, d);
11614 and work like this:
11616 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11617 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
11620 static rtx
11621 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11623 rtx pat, scratch;
11624 tree arg0 = CALL_EXPR_ARG (exp, 0);
11625 tree arg1 = CALL_EXPR_ARG (exp, 1);
11626 tree arg2 = CALL_EXPR_ARG (exp, 2);
11627 tree arg3 = CALL_EXPR_ARG (exp, 3);
11628 rtx op0 = expand_normal (arg0);
11629 rtx op1 = expand_normal (arg1);
11630 rtx op2 = expand_normal (arg2);
11631 rtx op3 = expand_normal (arg3);
11632 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11633 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11635 gcc_assert (mode0 == mode1);
11637 if (arg0 == error_mark_node || arg1 == error_mark_node
11638 || arg2 == error_mark_node || arg3 == error_mark_node)
11639 return const0_rtx;
11641 if (target == 0
11642 || GET_MODE (target) != mode0
11643 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11644 target = gen_reg_rtx (mode0);
11646 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11647 op0 = copy_to_mode_reg (mode0, op0);
11648 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11649 op1 = copy_to_mode_reg (mode0, op1);
11650 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11651 op2 = copy_to_mode_reg (mode0, op2);
11652 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11653 op3 = copy_to_mode_reg (mode0, op3);
11655 /* Generate the compare. */
11656 scratch = gen_reg_rtx (CCmode);
11657 pat = GEN_FCN (icode) (scratch, op0, op1);
11658 if (! pat)
11659 return const0_rtx;
11660 emit_insn (pat);
11662 if (mode0 == V2SImode)
11663 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11664 else
11665 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11667 return target;
11670 /* Raise an error message for a builtin function that is called without the
11671 appropriate target options being set. */
11673 static void
11674 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11676 size_t uns_fncode = (size_t)fncode;
11677 const char *name = rs6000_builtin_info[uns_fncode].name;
11678 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
11680 gcc_assert (name != NULL);
11681 if ((fnmask & RS6000_BTM_CELL) != 0)
11682 error ("builtin function %s is only valid for the cell processor", name);
11683 else if ((fnmask & RS6000_BTM_VSX) != 0)
11684 error ("builtin function %s requires the -mvsx option", name);
11685 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11686 error ("builtin function %s requires the -maltivec option", name);
11687 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11688 error ("builtin function %s requires the -mpaired option", name);
11689 else if ((fnmask & RS6000_BTM_SPE) != 0)
11690 error ("builtin function %s requires the -mspe option", name);
11691 else
11692 error ("builtin function %s is not supported with the current options",
11693 name);
11696 /* Expand an expression EXP that calls a built-in function,
11697 with result going to TARGET if that's convenient
11698 (and in mode MODE if that's convenient).
11699 SUBTARGET may be used as the target for computing one of EXP's operands.
11700 IGNORE is nonzero if the value is to be ignored. */
11702 static rtx
11703 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11704 enum machine_mode mode ATTRIBUTE_UNUSED,
11705 int ignore ATTRIBUTE_UNUSED)
11707 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11708 enum rs6000_builtins fcode
11709 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11710 size_t uns_fcode = (size_t)fcode;
11711 const struct builtin_description *d;
11712 size_t i;
11713 rtx ret;
11714 bool success;
11715 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
11716 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
11718 if (TARGET_DEBUG_BUILTIN)
11720 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11721 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11722 const char *name2 = ((icode != CODE_FOR_nothing)
11723 ? get_insn_name ((int)icode)
11724 : "nothing");
11725 const char *name3;
11727 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11729 default: name3 = "unknown"; break;
11730 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11731 case RS6000_BTC_UNARY: name3 = "unary"; break;
11732 case RS6000_BTC_BINARY: name3 = "binary"; break;
11733 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11734 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11735 case RS6000_BTC_ABS: name3 = "abs"; break;
11736 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11737 case RS6000_BTC_DST: name3 = "dst"; break;
11741 fprintf (stderr,
11742 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11743 (name1) ? name1 : "---", fcode,
11744 (name2) ? name2 : "---", (int)icode,
11745 name3,
11746 func_valid_p ? "" : ", not valid");
11749 if (!func_valid_p)
11751 rs6000_invalid_builtin (fcode);
11753 /* Since it is invalid, just generate a normal call. */
11754 return expand_call (exp, target, ignore);
11757 switch (fcode)
11759 case RS6000_BUILTIN_RECIP:
11760 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11762 case RS6000_BUILTIN_RECIPF:
11763 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11765 case RS6000_BUILTIN_RSQRTF:
11766 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11768 case RS6000_BUILTIN_RSQRT:
11769 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11771 case POWER7_BUILTIN_BPERMD:
11772 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11773 ? CODE_FOR_bpermd_di
11774 : CODE_FOR_bpermd_si), exp, target);
11776 case RS6000_BUILTIN_GET_TB:
11777 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
11778 target);
11780 case RS6000_BUILTIN_MFTB:
11781 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
11782 ? CODE_FOR_rs6000_mftb_di
11783 : CODE_FOR_rs6000_mftb_si),
11784 target);
11786 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11787 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11789 int icode = (int) CODE_FOR_altivec_lvsr;
11790 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11791 enum machine_mode mode = insn_data[icode].operand[1].mode;
11792 tree arg;
11793 rtx op, addr, pat;
11795 gcc_assert (TARGET_ALTIVEC);
11797 arg = CALL_EXPR_ARG (exp, 0);
11798 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11799 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11800 addr = memory_address (mode, op);
11801 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11802 op = addr;
11803 else
11805 /* For the load case we need to negate the address. */
11806 op = gen_reg_rtx (GET_MODE (addr));
11807 emit_insn (gen_rtx_SET (VOIDmode, op,
11808 gen_rtx_NEG (GET_MODE (addr), addr)));
11810 op = gen_rtx_MEM (mode, op);
11812 if (target == 0
11813 || GET_MODE (target) != tmode
11814 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11815 target = gen_reg_rtx (tmode);
11817 /*pat = gen_altivec_lvsr (target, op);*/
11818 pat = GEN_FCN (icode) (target, op);
11819 if (!pat)
11820 return 0;
11821 emit_insn (pat);
11823 return target;
11826 case ALTIVEC_BUILTIN_VCFUX:
11827 case ALTIVEC_BUILTIN_VCFSX:
11828 case ALTIVEC_BUILTIN_VCTUXS:
11829 case ALTIVEC_BUILTIN_VCTSXS:
11830 /* FIXME: There's got to be a nicer way to handle this case than
11831 constructing a new CALL_EXPR. */
11832 if (call_expr_nargs (exp) == 1)
11834 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11835 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11837 break;
11839 default:
11840 break;
11843 if (TARGET_ALTIVEC)
11845 ret = altivec_expand_builtin (exp, target, &success);
11847 if (success)
11848 return ret;
11850 if (TARGET_SPE)
11852 ret = spe_expand_builtin (exp, target, &success);
11854 if (success)
11855 return ret;
11857 if (TARGET_PAIRED_FLOAT)
11859 ret = paired_expand_builtin (exp, target, &success);
11861 if (success)
11862 return ret;
11865 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11867 /* Handle simple unary operations. */
11868 d = bdesc_1arg;
11869 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11870 if (d->code == fcode)
11871 return rs6000_expand_unop_builtin (d->icode, exp, target);
11873 /* Handle simple binary operations. */
11874 d = bdesc_2arg;
11875 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11876 if (d->code == fcode)
11877 return rs6000_expand_binop_builtin (d->icode, exp, target);
11879 /* Handle simple ternary operations. */
11880 d = bdesc_3arg;
11881 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11882 if (d->code == fcode)
11883 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11885 gcc_unreachable ();
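/* Editorial note (not in the original file): the RECIP/RSQRT cases at
   the top of rs6000_expand_builtin map to the __builtin_recipdiv and
   __builtin_rsqrt families registered by rs6000_init_builtins below;
   they expand to hardware-estimate-plus-refinement sequences rather
   than full-precision divides.  A sketch (availability is gated by the
   builtin mask checked through func_valid_p above):  */
#if 0
double
fast_div (double num, double den)
{
  return __builtin_recipdiv (num, den);   /* refined estimate of num/den */
}

float
fast_rsqrtf (float x)
{
  return __builtin_rsqrtf (x);            /* refined estimate of 1/sqrtf(x) */
}
#endif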
11888 static void
11889 rs6000_init_builtins (void)
11891 tree tdecl;
11892 tree ftype;
11893 enum machine_mode mode;
11895 if (TARGET_DEBUG_BUILTIN)
11896 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11897 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11898 (TARGET_SPE) ? ", spe" : "",
11899 (TARGET_ALTIVEC) ? ", altivec" : "",
11900 (TARGET_VSX) ? ", vsx" : "");
11902 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11903 V2SF_type_node = build_vector_type (float_type_node, 2);
11904 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11905 V2DF_type_node = build_vector_type (double_type_node, 2);
11906 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11907 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11908 V4SF_type_node = build_vector_type (float_type_node, 4);
11909 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11910 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11912 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11913 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11914 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11915 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11917 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11918 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11919 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11920 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11922 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11923 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11924 'vector unsigned short'. */
11926 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11927 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11928 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11929 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11930 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11932 long_integer_type_internal_node = long_integer_type_node;
11933 long_unsigned_type_internal_node = long_unsigned_type_node;
11934 long_long_integer_type_internal_node = long_long_integer_type_node;
11935 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11936 intQI_type_internal_node = intQI_type_node;
11937 uintQI_type_internal_node = unsigned_intQI_type_node;
11938 intHI_type_internal_node = intHI_type_node;
11939 uintHI_type_internal_node = unsigned_intHI_type_node;
11940 intSI_type_internal_node = intSI_type_node;
11941 uintSI_type_internal_node = unsigned_intSI_type_node;
11942 intDI_type_internal_node = intDI_type_node;
11943 uintDI_type_internal_node = unsigned_intDI_type_node;
11944 float_type_internal_node = float_type_node;
11945 double_type_internal_node = double_type_node;
11946 void_type_internal_node = void_type_node;
11948 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11949 tree type node. */
11950 builtin_mode_to_type[QImode][0] = integer_type_node;
11951 builtin_mode_to_type[HImode][0] = integer_type_node;
11952 builtin_mode_to_type[SImode][0] = intSI_type_node;
11953 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11954 builtin_mode_to_type[DImode][0] = intDI_type_node;
11955 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11956 builtin_mode_to_type[SFmode][0] = float_type_node;
11957 builtin_mode_to_type[DFmode][0] = double_type_node;
11958 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11959 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11960 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11961 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11962 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11963 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11964 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11965 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11966 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11967 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11968 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11969 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11970 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11972 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11973 TYPE_NAME (bool_char_type_node) = tdecl;
11975 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11976 TYPE_NAME (bool_short_type_node) = tdecl;
11978 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11979 TYPE_NAME (bool_int_type_node) = tdecl;
11981 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11982 TYPE_NAME (pixel_type_node) = tdecl;
11984 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11985 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11986 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11987 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11988 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11990 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11991 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11993 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11994 TYPE_NAME (V16QI_type_node) = tdecl;
11996 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11997 TYPE_NAME (bool_V16QI_type_node) = tdecl;
11999 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
12000 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
12002 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
12003 TYPE_NAME (V8HI_type_node) = tdecl;
12005 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
12006 TYPE_NAME (bool_V8HI_type_node) = tdecl;
12008 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
12009 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
12011 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
12012 TYPE_NAME (V4SI_type_node) = tdecl;
12014 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
12015 TYPE_NAME (bool_V4SI_type_node) = tdecl;
12017 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
12018 TYPE_NAME (V4SF_type_node) = tdecl;
12020 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
12021 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
12023 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
12024 TYPE_NAME (V2DF_type_node) = tdecl;
12026 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
12027 TYPE_NAME (V2DI_type_node) = tdecl;
12029 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
12030 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
12032 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
12033 TYPE_NAME (bool_V2DI_type_node) = tdecl;
12035 /* Paired and SPE builtins are only available if you build a compiler with
12036 the appropriate options, so only create those builtins with the
12037 appropriate compiler option. Create Altivec and VSX builtins on machines
12038 with at least the general purpose extensions (970 and newer) to allow the
12039 use of the target attribute. */
12040 if (TARGET_PAIRED_FLOAT)
12041 paired_init_builtins ();
12042 if (TARGET_SPE)
12043 spe_init_builtins ();
12044 if (TARGET_EXTRA_BUILTINS)
12045 altivec_init_builtins ();
12046 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
12047 rs6000_common_init_builtins ();
12049 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
12050 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
12051 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
12053 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
12054 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
12055 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
12057 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
12058 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
12059 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
12061 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
12062 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
12063 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
12065 mode = (TARGET_64BIT) ? DImode : SImode;
12066 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
12067 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
12068 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
12070 ftype = build_function_type_list (unsigned_intDI_type_node,
12071 NULL_TREE);
12072 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
12074 if (TARGET_64BIT)
12075 ftype = build_function_type_list (unsigned_intDI_type_node,
12076 NULL_TREE);
12077 else
12078 ftype = build_function_type_list (unsigned_intSI_type_node,
12079 NULL_TREE);
12080 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
12082 #if TARGET_XCOFF
12083 /* AIX libm provides clog as __clog. */
12084 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
12085 set_user_assembler_name (tdecl, "__clog");
12086 #endif
12088 #ifdef SUBTARGET_INIT_BUILTINS
12089 SUBTARGET_INIT_BUILTINS;
12090 #endif
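/* Editorial note (not in the original file): of the two timebase
   builtins registered above, __builtin_ppc_get_timebase always yields
   the full 64-bit timebase, while __builtin_ppc_mftb returns a DImode
   value on 64-bit targets and the low SImode word on 32-bit targets —
   hence the split ftype above.  A sketch:  */
#if 0
unsigned long long
elapsed_ticks (void)
{
  unsigned long long start = __builtin_ppc_get_timebase ();
  /* ... timed work ... */
  return __builtin_ppc_get_timebase () - start;
}
#endif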
12093 /* Returns the rs6000 builtin decl for CODE. */
12095 static tree
12096 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
12098 HOST_WIDE_INT fnmask;
12100 if (code >= RS6000_BUILTIN_COUNT)
12101 return error_mark_node;
12103 fnmask = rs6000_builtin_info[code].mask;
12104 if ((fnmask & rs6000_builtin_mask) != fnmask)
12106 rs6000_invalid_builtin ((enum rs6000_builtins)code);
12107 return error_mark_node;
12110 return rs6000_builtin_decls[code];
12113 static void
12114 spe_init_builtins (void)
12116 tree puint_type_node = build_pointer_type (unsigned_type_node);
12117 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
12118 const struct builtin_description *d;
12119 size_t i;
12121 tree v2si_ftype_4_v2si
12122 = build_function_type_list (opaque_V2SI_type_node,
12123 opaque_V2SI_type_node,
12124 opaque_V2SI_type_node,
12125 opaque_V2SI_type_node,
12126 opaque_V2SI_type_node,
12127 NULL_TREE);
12129 tree v2sf_ftype_4_v2sf
12130 = build_function_type_list (opaque_V2SF_type_node,
12131 opaque_V2SF_type_node,
12132 opaque_V2SF_type_node,
12133 opaque_V2SF_type_node,
12134 opaque_V2SF_type_node,
12135 NULL_TREE);
12137 tree int_ftype_int_v2si_v2si
12138 = build_function_type_list (integer_type_node,
12139 integer_type_node,
12140 opaque_V2SI_type_node,
12141 opaque_V2SI_type_node,
12142 NULL_TREE);
12144 tree int_ftype_int_v2sf_v2sf
12145 = build_function_type_list (integer_type_node,
12146 integer_type_node,
12147 opaque_V2SF_type_node,
12148 opaque_V2SF_type_node,
12149 NULL_TREE);
12151 tree void_ftype_v2si_puint_int
12152 = build_function_type_list (void_type_node,
12153 opaque_V2SI_type_node,
12154 puint_type_node,
12155 integer_type_node,
12156 NULL_TREE);
12158 tree void_ftype_v2si_puint_char
12159 = build_function_type_list (void_type_node,
12160 opaque_V2SI_type_node,
12161 puint_type_node,
12162 char_type_node,
12163 NULL_TREE);
12165 tree void_ftype_v2si_pv2si_int
12166 = build_function_type_list (void_type_node,
12167 opaque_V2SI_type_node,
12168 opaque_p_V2SI_type_node,
12169 integer_type_node,
12170 NULL_TREE);
12172 tree void_ftype_v2si_pv2si_char
12173 = build_function_type_list (void_type_node,
12174 opaque_V2SI_type_node,
12175 opaque_p_V2SI_type_node,
12176 char_type_node,
12177 NULL_TREE);
12179 tree void_ftype_int
12180 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
12182 tree int_ftype_void
12183 = build_function_type_list (integer_type_node, NULL_TREE);
12185 tree v2si_ftype_pv2si_int
12186 = build_function_type_list (opaque_V2SI_type_node,
12187 opaque_p_V2SI_type_node,
12188 integer_type_node,
12189 NULL_TREE);
12191 tree v2si_ftype_puint_int
12192 = build_function_type_list (opaque_V2SI_type_node,
12193 puint_type_node,
12194 integer_type_node,
12195 NULL_TREE);
12197 tree v2si_ftype_pushort_int
12198 = build_function_type_list (opaque_V2SI_type_node,
12199 pushort_type_node,
12200 integer_type_node,
12201 NULL_TREE);
12203 tree v2si_ftype_signed_char
12204 = build_function_type_list (opaque_V2SI_type_node,
12205 signed_char_type_node,
12206 NULL_TREE);
12208 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
12210 /* Initialize irregular SPE builtins. */
12212 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
12213 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
12214 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
12215 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
12216 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
12217 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
12218 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
12219 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
12220 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
12221 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
12222 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
12223 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
12224 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
12225 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
12226 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
12227 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
12228 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
12229 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
12231 /* Loads. */
12232 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
12233 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
12234 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
12235 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
12236 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
12237 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
12238 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
12239 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
12240 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
12241 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
12242 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
12243 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
12244 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
12245 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
12246 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
12247 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
12248 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
12249 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
12250 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
12251 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
12252 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
12253 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
12255 /* Predicates. */
12256 d = bdesc_spe_predicates;
12257 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
12259 tree type;
12261 switch (insn_data[d->icode].operand[1].mode)
12263 case V2SImode:
12264 type = int_ftype_int_v2si_v2si;
12265 break;
12266 case V2SFmode:
12267 type = int_ftype_int_v2sf_v2sf;
12268 break;
12269 default:
12270 gcc_unreachable ();
12273 def_builtin (d->name, type, d->code);
12276 /* Evsel predicates. */
12277 d = bdesc_spe_evsel;
12278 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
12280 tree type;
12282 switch (insn_data[d->icode].operand[1].mode)
12284 case V2SImode:
12285 type = v2si_ftype_4_v2si;
12286 break;
12287 case V2SFmode:
12288 type = v2sf_ftype_4_v2sf;
12289 break;
12290 default:
12291 gcc_unreachable ();
12294 def_builtin (d->name, type, d->code);
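/* Editorial note (not in the original file): a sketch of one of the
   irregular SPE builtins registered above — evsplati splats a 5-bit
   signed literal into both 32-bit halves of an __ev64_opaque__ value
   (v2si_ftype_signed_char); assuming -mspe:  */
#if 0
__ev64_opaque__
splat_three (void)
{
  return __builtin_spe_evsplati (3);   /* argument: literal in -16..15 */
}
#endif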
12298 static void
12299 paired_init_builtins (void)
12301 const struct builtin_description *d;
12302 size_t i;
12304 tree int_ftype_int_v2sf_v2sf
12305 = build_function_type_list (integer_type_node,
12306 integer_type_node,
12307 V2SF_type_node,
12308 V2SF_type_node,
12309 NULL_TREE);
12310 tree pcfloat_type_node =
12311 build_pointer_type (build_qualified_type
12312 (float_type_node, TYPE_QUAL_CONST));
12314 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
12315 long_integer_type_node,
12316 pcfloat_type_node,
12317 NULL_TREE);
12318 tree void_ftype_v2sf_long_pcfloat =
12319 build_function_type_list (void_type_node,
12320 V2SF_type_node,
12321 long_integer_type_node,
12322 pcfloat_type_node,
12323 NULL_TREE);
12326 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
12327 PAIRED_BUILTIN_LX);
12330 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
12331 PAIRED_BUILTIN_STX);
12333 /* Predicates. */
12334 d = bdesc_paired_preds;
12335 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
12337 tree type;
12339 if (TARGET_DEBUG_BUILTIN)
12340 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
12341 (int)i, get_insn_name (d->icode), (int)d->icode,
12342 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
12344 switch (insn_data[d->icode].operand[1].mode)
12346 case V2SFmode:
12347 type = int_ftype_int_v2sf_v2sf;
12348 break;
12349 default:
12350 gcc_unreachable ();
12353 def_builtin (d->name, type, d->code);
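/* Editorial note (not in the original file): a sketch of the two
   non-predicate paired builtins registered above; the (long, float *)
   pair is an indexed address, mirroring v2sf_ftype_long_pcfloat and
   void_ftype_v2sf_long_pcfloat.  Assuming -mpaired:  */
#if 0
typedef float v2sf __attribute__ ((vector_size (8)));

v2sf
load_pair (long off, const float *p)
{
  return __builtin_paired_lx (off, p);
}

void
store_pair (v2sf v, long off, float *p)
{
  __builtin_paired_stx (v, off, p);
}
#endif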
12357 static void
12358 altivec_init_builtins (void)
12360 const struct builtin_description *d;
12361 size_t i;
12362 tree ftype;
12363 tree decl;
12365 tree pvoid_type_node = build_pointer_type (void_type_node);
12367 tree pcvoid_type_node
12368 = build_pointer_type (build_qualified_type (void_type_node,
12369 TYPE_QUAL_CONST));
12371 tree int_ftype_opaque
12372 = build_function_type_list (integer_type_node,
12373 opaque_V4SI_type_node, NULL_TREE);
12374 tree opaque_ftype_opaque
12375 = build_function_type_list (integer_type_node, NULL_TREE);
12376 tree opaque_ftype_opaque_int
12377 = build_function_type_list (opaque_V4SI_type_node,
12378 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
12379 tree opaque_ftype_opaque_opaque_int
12380 = build_function_type_list (opaque_V4SI_type_node,
12381 opaque_V4SI_type_node, opaque_V4SI_type_node,
12382 integer_type_node, NULL_TREE);
12383 tree int_ftype_int_opaque_opaque
12384 = build_function_type_list (integer_type_node,
12385 integer_type_node, opaque_V4SI_type_node,
12386 opaque_V4SI_type_node, NULL_TREE);
12387 tree int_ftype_int_v4si_v4si
12388 = build_function_type_list (integer_type_node,
12389 integer_type_node, V4SI_type_node,
12390 V4SI_type_node, NULL_TREE);
12391 tree int_ftype_int_v2di_v2di
12392 = build_function_type_list (integer_type_node,
12393 integer_type_node, V2DI_type_node,
12394 V2DI_type_node, NULL_TREE);
12395 tree void_ftype_v4si
12396 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
12397 tree v8hi_ftype_void
12398 = build_function_type_list (V8HI_type_node, NULL_TREE);
12399 tree void_ftype_void
12400 = build_function_type_list (void_type_node, NULL_TREE);
12401 tree void_ftype_int
12402 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
12404 tree opaque_ftype_long_pcvoid
12405 = build_function_type_list (opaque_V4SI_type_node,
12406 long_integer_type_node, pcvoid_type_node,
12407 NULL_TREE);
12408 tree v16qi_ftype_long_pcvoid
12409 = build_function_type_list (V16QI_type_node,
12410 long_integer_type_node, pcvoid_type_node,
12411 NULL_TREE);
12412 tree v8hi_ftype_long_pcvoid
12413 = build_function_type_list (V8HI_type_node,
12414 long_integer_type_node, pcvoid_type_node,
12415 NULL_TREE);
12416 tree v4si_ftype_long_pcvoid
12417 = build_function_type_list (V4SI_type_node,
12418 long_integer_type_node, pcvoid_type_node,
12419 NULL_TREE);
12420 tree v4sf_ftype_long_pcvoid
12421 = build_function_type_list (V4SF_type_node,
12422 long_integer_type_node, pcvoid_type_node,
12423 NULL_TREE);
12424 tree v2df_ftype_long_pcvoid
12425 = build_function_type_list (V2DF_type_node,
12426 long_integer_type_node, pcvoid_type_node,
12427 NULL_TREE);
12428 tree v2di_ftype_long_pcvoid
12429 = build_function_type_list (V2DI_type_node,
12430 long_integer_type_node, pcvoid_type_node,
12431 NULL_TREE);
12433 tree void_ftype_opaque_long_pvoid
12434 = build_function_type_list (void_type_node,
12435 opaque_V4SI_type_node, long_integer_type_node,
12436 pvoid_type_node, NULL_TREE);
12437 tree void_ftype_v4si_long_pvoid
12438 = build_function_type_list (void_type_node,
12439 V4SI_type_node, long_integer_type_node,
12440 pvoid_type_node, NULL_TREE);
12441 tree void_ftype_v16qi_long_pvoid
12442 = build_function_type_list (void_type_node,
12443 V16QI_type_node, long_integer_type_node,
12444 pvoid_type_node, NULL_TREE);
12445 tree void_ftype_v8hi_long_pvoid
12446 = build_function_type_list (void_type_node,
12447 V8HI_type_node, long_integer_type_node,
12448 pvoid_type_node, NULL_TREE);
12449 tree void_ftype_v4sf_long_pvoid
12450 = build_function_type_list (void_type_node,
12451 V4SF_type_node, long_integer_type_node,
12452 pvoid_type_node, NULL_TREE);
12453 tree void_ftype_v2df_long_pvoid
12454 = build_function_type_list (void_type_node,
12455 V2DF_type_node, long_integer_type_node,
12456 pvoid_type_node, NULL_TREE);
12457 tree void_ftype_v2di_long_pvoid
12458 = build_function_type_list (void_type_node,
12459 V2DI_type_node, long_integer_type_node,
12460 pvoid_type_node, NULL_TREE);
12461 tree int_ftype_int_v8hi_v8hi
12462 = build_function_type_list (integer_type_node,
12463 integer_type_node, V8HI_type_node,
12464 V8HI_type_node, NULL_TREE);
12465 tree int_ftype_int_v16qi_v16qi
12466 = build_function_type_list (integer_type_node,
12467 integer_type_node, V16QI_type_node,
12468 V16QI_type_node, NULL_TREE);
12469 tree int_ftype_int_v4sf_v4sf
12470 = build_function_type_list (integer_type_node,
12471 integer_type_node, V4SF_type_node,
12472 V4SF_type_node, NULL_TREE);
12473 tree int_ftype_int_v2df_v2df
12474 = build_function_type_list (integer_type_node,
12475 integer_type_node, V2DF_type_node,
12476 V2DF_type_node, NULL_TREE);
12477 tree v2di_ftype_v2di
12478 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
12479 tree v4si_ftype_v4si
12480 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12481 tree v8hi_ftype_v8hi
12482 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12483 tree v16qi_ftype_v16qi
12484 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12485 tree v4sf_ftype_v4sf
12486 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12487 tree v2df_ftype_v2df
12488 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12489 tree void_ftype_pcvoid_int_int
12490 = build_function_type_list (void_type_node,
12491 pcvoid_type_node, integer_type_node,
12492 integer_type_node, NULL_TREE);
12494 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12495 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12496 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12497 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12498 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12499 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12500 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12501 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12502 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12503 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12504 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12505 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12506 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12507 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12508 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12509 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12510 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12511 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12512 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12513 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12514 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12515 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12516 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12517 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12518 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12519 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12520 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12521 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12522 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12523 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12525 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12526 VSX_BUILTIN_LXVD2X_V2DF);
12527 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12528 VSX_BUILTIN_LXVD2X_V2DI);
12529 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12530 VSX_BUILTIN_LXVW4X_V4SF);
12531 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12532 VSX_BUILTIN_LXVW4X_V4SI);
12533 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12534 VSX_BUILTIN_LXVW4X_V8HI);
12535 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12536 VSX_BUILTIN_LXVW4X_V16QI);
12537 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12538 VSX_BUILTIN_STXVD2X_V2DF);
12539 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12540 VSX_BUILTIN_STXVD2X_V2DI);
12541 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12542 VSX_BUILTIN_STXVW4X_V4SF);
12543 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12544 VSX_BUILTIN_STXVW4X_V4SI);
12545 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12546 VSX_BUILTIN_STXVW4X_V8HI);
12547 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12548 VSX_BUILTIN_STXVW4X_V16QI);
12549 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12550 VSX_BUILTIN_VEC_LD);
12551 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12552 VSX_BUILTIN_VEC_ST);
12554 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12555 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12556 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12558 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12559 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12560 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12561 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12562 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12563 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12564 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12565 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12566 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12567 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12568 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12569 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
12571 /* Cell builtins. */
12572 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12573 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12574 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12575 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12577 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12578 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12579 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12580 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12582 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12583 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12584 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12585 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12587 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12588 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12589 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12590 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12592 /* Add the DST variants. */
12593 d = bdesc_dst;
12594 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12595 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12597 /* Initialize the predicates. */
12598 d = bdesc_altivec_preds;
12599 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12601 enum machine_mode mode1;
12602 tree type;
12604 if (rs6000_overloaded_builtin_p (d->code))
12605 mode1 = VOIDmode;
12606 else
12607 mode1 = insn_data[d->icode].operand[1].mode;
12609 switch (mode1)
12611 case VOIDmode:
12612 type = int_ftype_int_opaque_opaque;
12613 break;
12614 case V2DImode:
12615 type = int_ftype_int_v2di_v2di;
12616 break;
12617 case V4SImode:
12618 type = int_ftype_int_v4si_v4si;
12619 break;
12620 case V8HImode:
12621 type = int_ftype_int_v8hi_v8hi;
12622 break;
12623 case V16QImode:
12624 type = int_ftype_int_v16qi_v16qi;
12625 break;
12626 case V4SFmode:
12627 type = int_ftype_int_v4sf_v4sf;
12628 break;
12629 case V2DFmode:
12630 type = int_ftype_int_v2df_v2df;
12631 break;
12632 default:
12633 gcc_unreachable ();
12636 def_builtin (d->name, type, d->code);
12639 /* Initialize the abs* operators. */
12640 d = bdesc_abs;
12641 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12643 enum machine_mode mode0;
12644 tree type;
12646 mode0 = insn_data[d->icode].operand[0].mode;
12648 switch (mode0)
12650 case V2DImode:
12651 type = v2di_ftype_v2di;
12652 break;
12653 case V4SImode:
12654 type = v4si_ftype_v4si;
12655 break;
12656 case V8HImode:
12657 type = v8hi_ftype_v8hi;
12658 break;
12659 case V16QImode:
12660 type = v16qi_ftype_v16qi;
12661 break;
12662 case V4SFmode:
12663 type = v4sf_ftype_v4sf;
12664 break;
12665 case V2DFmode:
12666 type = v2df_ftype_v2df;
12667 break;
12668 default:
12669 gcc_unreachable ();
12672 def_builtin (d->name, type, d->code);
12675 /* Initialize target builtin that implements
12676 targetm.vectorize.builtin_mask_for_load. */
12678 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12679 v16qi_ftype_long_pcvoid,
12680 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12681 BUILT_IN_MD, NULL, NULL_TREE);
12682 TREE_READONLY (decl) = 1;
12683 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12684 altivec_builtin_mask_for_load = decl;
12686 /* Access to the vec_init patterns. */
12687 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12688 integer_type_node, integer_type_node,
12689 integer_type_node, NULL_TREE);
12690 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12692 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12693 short_integer_type_node,
12694 short_integer_type_node,
12695 short_integer_type_node,
12696 short_integer_type_node,
12697 short_integer_type_node,
12698 short_integer_type_node,
12699 short_integer_type_node, NULL_TREE);
12700 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12702 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12703 char_type_node, char_type_node,
12704 char_type_node, char_type_node,
12705 char_type_node, char_type_node,
12706 char_type_node, char_type_node,
12707 char_type_node, char_type_node,
12708 char_type_node, char_type_node,
12709 char_type_node, char_type_node,
12710 char_type_node, NULL_TREE);
12711 def_builtin ("__builtin_vec_init_v16qi", ftype,
12712 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12714 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12715 float_type_node, float_type_node,
12716 float_type_node, NULL_TREE);
12717 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12719 /* VSX builtins. */
12720 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12721 double_type_node, NULL_TREE);
12722 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12724 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12725 intDI_type_node, NULL_TREE);
12726 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12728 /* Access to the vec_set patterns. */
12729 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12730 intSI_type_node,
12731 integer_type_node, NULL_TREE);
12732 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12734 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12735 intHI_type_node,
12736 integer_type_node, NULL_TREE);
12737 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12739 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12740 intQI_type_node,
12741 integer_type_node, NULL_TREE);
12742 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12744 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12745 float_type_node,
12746 integer_type_node, NULL_TREE);
12747 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12749 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12750 double_type_node,
12751 integer_type_node, NULL_TREE);
12752 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12754 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12755 intDI_type_node,
12756 integer_type_node, NULL_TREE);
12757 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12759 /* Access to the vec_extract patterns. */
12760 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12761 integer_type_node, NULL_TREE);
12762 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12764 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12765 integer_type_node, NULL_TREE);
12766 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12768 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12769 integer_type_node, NULL_TREE);
12770 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12772 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12773 integer_type_node, NULL_TREE);
12774 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12776 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12777 integer_type_node, NULL_TREE);
12778 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12780 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12781 integer_type_node, NULL_TREE);
12782 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12785 /* Hash function for builtin functions with up to 3 arguments and a return
12786 type. */
12787 static unsigned
12788 builtin_hash_function (const void *hash_entry)
12790 unsigned ret = 0;
12791 int i;
12792 const struct builtin_hash_struct *bh =
12793 (const struct builtin_hash_struct *) hash_entry;
12795 for (i = 0; i < 4; i++)
12797 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12798 ret = (ret * 2) + bh->uns_p[i];
12801 return ret;
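/* Viewed as arithmetic, the loop above is a mixed-radix encoding: each
   iteration appends one base-MAX_MACHINE_MODE digit (the mode) and one
   base-2 digit (the signedness flag), so with M == MAX_MACHINE_MODE the
   result for the four slots is

     ret = (((m0 * 2 + u0) * M + m1) * 2 + u1) * M + ...

   and two distinct signatures can only hash alike through unsigned
   overflow of RET.  */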
12804 /* Compare builtin hash entries H1 and H2 for equivalence. */
12805 static int
12806 builtin_hash_eq (const void *h1, const void *h2)
12808 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12809 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12811 return ((p1->mode[0] == p2->mode[0])
12812 && (p1->mode[1] == p2->mode[1])
12813 && (p1->mode[2] == p2->mode[2])
12814 && (p1->mode[3] == p2->mode[3])
12815 && (p1->uns_p[0] == p2->uns_p[0])
12816 && (p1->uns_p[1] == p2->uns_p[1])
12817 && (p1->uns_p[2] == p2->uns_p[2])
12818 && (p1->uns_p[3] == p2->uns_p[3]));
12821 /* Map types for builtin functions with an explicit return type and up to 3
12822 arguments. Functions with fewer than 3 arguments pass VOIDmode as the
12823 mode of each unused argument slot. */
12824 static tree
12825 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12826 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12827 enum rs6000_builtins builtin, const char *name)
12829 struct builtin_hash_struct h;
12830 struct builtin_hash_struct *h2;
12831 void **found;
12832 int num_args = 3;
12833 int i;
12834 tree ret_type = NULL_TREE;
12835 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12837 /* Create builtin_hash_table. */
12838 if (builtin_hash_table == NULL)
12839 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12840 builtin_hash_eq, NULL);
12842 h.type = NULL_TREE;
12843 h.mode[0] = mode_ret;
12844 h.mode[1] = mode_arg0;
12845 h.mode[2] = mode_arg1;
12846 h.mode[3] = mode_arg2;
12847 h.uns_p[0] = 0;
12848 h.uns_p[1] = 0;
12849 h.uns_p[2] = 0;
12850 h.uns_p[3] = 0;
12852 /* If the builtin produces unsigned results or takes unsigned
12853 arguments, and it may be returned as a decl to the vectorizer (such as
12854 widening multiplies, permute), make sure the arguments and return value
12855 are typed correctly. */
12856 switch (builtin)
12858 /* unsigned 1 argument functions. */
12859 case CRYPTO_BUILTIN_VSBOX:
12860 h.uns_p[0] = 1;
12861 h.uns_p[1] = 1;
12862 break;
12864 /* unsigned 2 argument functions. */
12865 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12866 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12867 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12868 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12869 case CRYPTO_BUILTIN_VCIPHER:
12870 case CRYPTO_BUILTIN_VCIPHERLAST:
12871 case CRYPTO_BUILTIN_VNCIPHER:
12872 case CRYPTO_BUILTIN_VNCIPHERLAST:
12873 case CRYPTO_BUILTIN_VPMSUMB:
12874 case CRYPTO_BUILTIN_VPMSUMH:
12875 case CRYPTO_BUILTIN_VPMSUMW:
12876 case CRYPTO_BUILTIN_VPMSUMD:
12877 case CRYPTO_BUILTIN_VPMSUM:
12878 h.uns_p[0] = 1;
12879 h.uns_p[1] = 1;
12880 h.uns_p[2] = 1;
12881 break;
12883 /* unsigned 3 argument functions. */
12884 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12885 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12886 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12887 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12888 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12889 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12890 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12891 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12892 case VSX_BUILTIN_VPERM_16QI_UNS:
12893 case VSX_BUILTIN_VPERM_8HI_UNS:
12894 case VSX_BUILTIN_VPERM_4SI_UNS:
12895 case VSX_BUILTIN_VPERM_2DI_UNS:
12896 case VSX_BUILTIN_XXSEL_16QI_UNS:
12897 case VSX_BUILTIN_XXSEL_8HI_UNS:
12898 case VSX_BUILTIN_XXSEL_4SI_UNS:
12899 case VSX_BUILTIN_XXSEL_2DI_UNS:
12900 case CRYPTO_BUILTIN_VPERMXOR:
12901 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
12902 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
12903 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
12904 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
12905 case CRYPTO_BUILTIN_VSHASIGMAW:
12906 case CRYPTO_BUILTIN_VSHASIGMAD:
12907 case CRYPTO_BUILTIN_VSHASIGMA:
12908 h.uns_p[0] = 1;
12909 h.uns_p[1] = 1;
12910 h.uns_p[2] = 1;
12911 h.uns_p[3] = 1;
12912 break;
12914 /* signed permute functions with unsigned char mask. */
12915 case ALTIVEC_BUILTIN_VPERM_16QI:
12916 case ALTIVEC_BUILTIN_VPERM_8HI:
12917 case ALTIVEC_BUILTIN_VPERM_4SI:
12918 case ALTIVEC_BUILTIN_VPERM_4SF:
12919 case ALTIVEC_BUILTIN_VPERM_2DI:
12920 case ALTIVEC_BUILTIN_VPERM_2DF:
12921 case VSX_BUILTIN_VPERM_16QI:
12922 case VSX_BUILTIN_VPERM_8HI:
12923 case VSX_BUILTIN_VPERM_4SI:
12924 case VSX_BUILTIN_VPERM_4SF:
12925 case VSX_BUILTIN_VPERM_2DI:
12926 case VSX_BUILTIN_VPERM_2DF:
12927 h.uns_p[3] = 1;
12928 break;
12930 /* unsigned args, signed return. */
12931 case VSX_BUILTIN_XVCVUXDDP_UNS:
12932 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12933 h.uns_p[1] = 1;
12934 break;
12936 /* signed args, unsigned return. */
12937 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12938 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12939 h.uns_p[0] = 1;
12940 break;
12942 default:
12943 break;
12946 /* Figure out how many args are present. */
12947 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12948 num_args--;
12950 if (num_args == 0)
12951 fatal_error ("internal error: builtin function %s had no type", name);
12953 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12954 if (!ret_type && h.uns_p[0])
12955 ret_type = builtin_mode_to_type[h.mode[0]][0];
12957 if (!ret_type)
12958 fatal_error ("internal error: builtin function %s had an unexpected "
12959 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12961 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12962 arg_type[i] = NULL_TREE;
12964 for (i = 0; i < num_args; i++)
12966 int m = (int) h.mode[i+1];
12967 int uns_p = h.uns_p[i+1];
12969 arg_type[i] = builtin_mode_to_type[m][uns_p];
12970 if (!arg_type[i] && uns_p)
12971 arg_type[i] = builtin_mode_to_type[m][0];
12973 if (!arg_type[i])
12974 fatal_error ("internal error: builtin function %s, argument %d "
12975 "had unexpected argument type %s", name, i,
12976 GET_MODE_NAME (m));
12979 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12980 if (*found == NULL)
12982 h2 = ggc_alloc_builtin_hash_struct ();
12983 *h2 = h;
12984 *found = (void *)h2;
12986 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12987 arg_type[2], NULL_TREE);
12990 return ((struct builtin_hash_struct *)(*found))->type;
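/* For example, CRYPTO_BUILTIN_VCIPHER marks the result and both inputs
   unsigned above, so with V2DImode throughout the cached type is roughly

     vector unsigned long long (*) (vector unsigned long long,
                                    vector unsigned long long)

   rather than the signed V2DI variant (modes shown for illustration).  */

/* Register the builtins shared by the vector/FP extensions from the
   bdesc_1arg, bdesc_2arg and bdesc_3arg tables, deriving each function
   type from the operand modes of the matching insn pattern.  */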
12993 static void
12994 rs6000_common_init_builtins (void)
12996 const struct builtin_description *d;
12997 size_t i;
12999 tree opaque_ftype_opaque = NULL_TREE;
13000 tree opaque_ftype_opaque_opaque = NULL_TREE;
13001 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
13002 tree v2si_ftype_qi = NULL_TREE;
13003 tree v2si_ftype_v2si_qi = NULL_TREE;
13004 tree v2si_ftype_int_qi = NULL_TREE;
13005 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
13007 if (!TARGET_PAIRED_FLOAT)
13009 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
13010 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
13013 /* Paired and SPE builtins are only available if the compiler was built with
13014 the appropriate options, so only create those builtins when the matching
13015 compiler option is given. Create Altivec and VSX builtins on machines
13016 with at least the general purpose extensions (970 and newer) to allow the
13017 use of the target attribute. */
13019 if (TARGET_EXTRA_BUILTINS)
13020 builtin_mask |= RS6000_BTM_COMMON;
13022 /* Add the ternary operators. */
13023 d = bdesc_3arg;
13024 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
13026 tree type;
13027 HOST_WIDE_INT mask = d->mask;
13029 if ((mask & builtin_mask) != mask)
13031 if (TARGET_DEBUG_BUILTIN)
13032 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
13033 continue;
13036 if (rs6000_overloaded_builtin_p (d->code))
13038 if (! (type = opaque_ftype_opaque_opaque_opaque))
13039 type = opaque_ftype_opaque_opaque_opaque
13040 = build_function_type_list (opaque_V4SI_type_node,
13041 opaque_V4SI_type_node,
13042 opaque_V4SI_type_node,
13043 opaque_V4SI_type_node,
13044 NULL_TREE);
13046 else
13048 enum insn_code icode = d->icode;
13049 if (d->name == 0)
13051 if (TARGET_DEBUG_BUILTIN)
13052 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
13053 (long unsigned)i);
13055 continue;
13058 if (icode == CODE_FOR_nothing)
13060 if (TARGET_DEBUG_BUILTIN)
13061 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
13062 d->name);
13064 continue;
13067 type = builtin_function_type (insn_data[icode].operand[0].mode,
13068 insn_data[icode].operand[1].mode,
13069 insn_data[icode].operand[2].mode,
13070 insn_data[icode].operand[3].mode,
13071 d->code, d->name);
13074 def_builtin (d->name, type, d->code);
13077 /* Add the binary operators. */
13078 d = bdesc_2arg;
13079 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13081 enum machine_mode mode0, mode1, mode2;
13082 tree type;
13083 HOST_WIDE_INT mask = d->mask;
13085 if ((mask & builtin_mask) != mask)
13087 if (TARGET_DEBUG_BUILTIN)
13088 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
13089 continue;
13092 if (rs6000_overloaded_builtin_p (d->code))
13094 if (! (type = opaque_ftype_opaque_opaque))
13095 type = opaque_ftype_opaque_opaque
13096 = build_function_type_list (opaque_V4SI_type_node,
13097 opaque_V4SI_type_node,
13098 opaque_V4SI_type_node,
13099 NULL_TREE);
13101 else
13103 enum insn_code icode = d->icode;
13104 if (d->name == 0)
13106 if (TARGET_DEBUG_BUILTIN)
13107 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
13108 (long unsigned)i);
13110 continue;
13113 if (icode == CODE_FOR_nothing)
13115 if (TARGET_DEBUG_BUILTIN)
13116 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
13117 d->name);
13119 continue;
13122 mode0 = insn_data[icode].operand[0].mode;
13123 mode1 = insn_data[icode].operand[1].mode;
13124 mode2 = insn_data[icode].operand[2].mode;
13126 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
13128 if (! (type = v2si_ftype_v2si_qi))
13129 type = v2si_ftype_v2si_qi
13130 = build_function_type_list (opaque_V2SI_type_node,
13131 opaque_V2SI_type_node,
13132 char_type_node,
13133 NULL_TREE);
13136 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
13137 && mode2 == QImode)
13139 if (! (type = v2si_ftype_int_qi))
13140 type = v2si_ftype_int_qi
13141 = build_function_type_list (opaque_V2SI_type_node,
13142 integer_type_node,
13143 char_type_node,
13144 NULL_TREE);
13147 else
13148 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
13149 d->code, d->name);
13152 def_builtin (d->name, type, d->code);
13155 /* Add the simple unary operators. */
13156 d = bdesc_1arg;
13157 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13159 enum machine_mode mode0, mode1;
13160 tree type;
13161 HOST_WIDE_INT mask = d->mask;
13163 if ((mask & builtin_mask) != mask)
13165 if (TARGET_DEBUG_BUILTIN)
13166 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
13167 continue;
13170 if (rs6000_overloaded_builtin_p (d->code))
13172 if (! (type = opaque_ftype_opaque))
13173 type = opaque_ftype_opaque
13174 = build_function_type_list (opaque_V4SI_type_node,
13175 opaque_V4SI_type_node,
13176 NULL_TREE);
13178 else
13180 enum insn_code icode = d->icode;
13181 if (d->name == 0)
13183 if (TARGET_DEBUG_BUILTIN)
13184 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
13185 (long unsigned)i);
13187 continue;
13190 if (icode == CODE_FOR_nothing)
13192 if (TARGET_DEBUG_BUILTIN)
13193 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
13194 d->name);
13196 continue;
13199 mode0 = insn_data[icode].operand[0].mode;
13200 mode1 = insn_data[icode].operand[1].mode;
13202 if (mode0 == V2SImode && mode1 == QImode)
13204 if (! (type = v2si_ftype_qi))
13205 type = v2si_ftype_qi
13206 = build_function_type_list (opaque_V2SI_type_node,
13207 char_type_node,
13208 NULL_TREE);
13211 else
13212 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
13213 d->code, d->name);
13216 def_builtin (d->name, type, d->code);
13220 static void
13221 rs6000_init_libfuncs (void)
13223 if (!TARGET_IEEEQUAD)
13224 /* AIX/Darwin/64-bit Linux quad floating point routines. */
13225 if (!TARGET_XL_COMPAT)
13227 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
13228 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
13229 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
13230 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
13232 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
13234 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
13235 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
13236 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
13237 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
13238 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
13239 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
13240 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
13242 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
13243 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
13244 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
13245 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
13246 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
13247 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
13248 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
13249 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
13252 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
13253 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
13255 else
13257 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
13258 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
13259 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
13260 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
13262 else
13264 /* 32-bit SVR4 quad floating point routines. */
13266 set_optab_libfunc (add_optab, TFmode, "_q_add");
13267 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
13268 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
13269 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
13270 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
13271 if (TARGET_PPC_GPOPT)
13272 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
13274 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
13275 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
13276 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
13277 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
13278 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
13279 set_optab_libfunc (le_optab, TFmode, "_q_fle");
13281 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
13282 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
13283 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
13284 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
13285 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
13286 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
13287 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
13288 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
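/* Net effect (sketch): a TFmode operation that cannot be open-coded is
   emitted as a libcall using the names registered above, so e.g. a
   128-bit long double addition becomes a call to __gcc_qadd on
   AIX/Darwin/64-bit Linux or to _q_add on 32-bit SVR4.  */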
13293 /* Expand a block clear operation, and return 1 if successful. Return 0
13294 if we should let the compiler generate normal code.
13296 operands[0] is the destination
13297 operands[1] is the length
13298 operands[3] is the alignment */
13300 int
13301 expand_block_clear (rtx operands[])
13303 rtx orig_dest = operands[0];
13304 rtx bytes_rtx = operands[1];
13305 rtx align_rtx = operands[3];
13306 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
13307 HOST_WIDE_INT align;
13308 HOST_WIDE_INT bytes;
13309 int offset;
13310 int clear_bytes;
13311 int clear_step;
13313 /* If this is not a fixed size clear, just call memset */
13314 if (! constp)
13315 return 0;
13317 /* The alignment must be a compile-time constant */
13318 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
13319 align = INTVAL (align_rtx) * BITS_PER_UNIT;
13321 /* Anything to clear? */
13322 bytes = INTVAL (bytes_rtx);
13323 if (bytes <= 0)
13324 return 1;
13326 /* Use the builtin memset after a point, to avoid huge code bloat.
13327 When optimize_size, avoid any significant code bloat; calling
13328 memset is about 4 instructions, so allow for one instruction to
13329 load zero and three to do clearing. */
13330 if (TARGET_ALTIVEC && align >= 128)
13331 clear_step = 16;
13332 else if (TARGET_POWERPC64 && align >= 32)
13333 clear_step = 8;
13334 else if (TARGET_SPE && align >= 64)
13335 clear_step = 8;
13336 else
13337 clear_step = 4;
13339 if (optimize_size && bytes > 3 * clear_step)
13340 return 0;
13341 if (! optimize_size && bytes > 8 * clear_step)
13342 return 0;
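/* Worked numbers (illustrative): with AltiVec and 128-bit alignment
   clear_step is 16, so clears of up to 3*16 = 48 bytes are inlined when
   optimizing for size and up to 8*16 = 128 bytes otherwise; anything
   larger falls back to memset.  */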
13344 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
13346 enum machine_mode mode = BLKmode;
13347 rtx dest;
13349 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
13351 clear_bytes = 16;
13352 mode = V4SImode;
13354 else if (bytes >= 8 && TARGET_SPE && align >= 64)
13356 clear_bytes = 8;
13357 mode = V2SImode;
13359 else if (bytes >= 8 && TARGET_POWERPC64
13360 /* 64-bit loads and stores require word-aligned
13361 displacements. */
13362 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13364 clear_bytes = 8;
13365 mode = DImode;
13367 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13368 { /* move 4 bytes */
13369 clear_bytes = 4;
13370 mode = SImode;
13372 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13373 { /* move 2 bytes */
13374 clear_bytes = 2;
13375 mode = HImode;
13377 else /* move 1 byte at a time */
13379 clear_bytes = 1;
13380 mode = QImode;
13383 dest = adjust_address (orig_dest, mode, offset);
13385 emit_move_insn (dest, CONST0_RTX (mode));
13388 return 1;
13392 /* Expand a block move operation, and return 1 if successful. Return 0
13393 if we should let the compiler generate normal code.
13395 operands[0] is the destination
13396 operands[1] is the source
13397 operands[2] is the length
13398 operands[3] is the alignment */
13400 #define MAX_MOVE_REG 4
13402 int
13403 expand_block_move (rtx operands[])
13405 rtx orig_dest = operands[0];
13406 rtx orig_src = operands[1];
13407 rtx bytes_rtx = operands[2];
13408 rtx align_rtx = operands[3];
13409 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
13410 int align;
13411 int bytes;
13412 int offset;
13413 int move_bytes;
13414 rtx stores[MAX_MOVE_REG];
13415 int num_reg = 0;
13417 /* If this is not a fixed size move, just call memcpy */
13418 if (! constp)
13419 return 0;
13421 /* The alignment must be a compile-time constant */
13422 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
13423 align = INTVAL (align_rtx) * BITS_PER_UNIT;
13425 /* Anything to move? */
13426 bytes = INTVAL (bytes_rtx);
13427 if (bytes <= 0)
13428 return 1;
13430 if (bytes > rs6000_block_move_inline_limit)
13431 return 0;
13433 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
13435 union {
13436 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
13437 rtx (*mov) (rtx, rtx);
13438 } gen_func;
13439 enum machine_mode mode = BLKmode;
13440 rtx src, dest;
13442 /* Altivec first, since it will be faster than a string move
13443 when it applies, and usually not significantly larger. */
13444 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
13446 move_bytes = 16;
13447 mode = V4SImode;
13448 gen_func.mov = gen_movv4si;
13450 else if (TARGET_SPE && bytes >= 8 && align >= 64)
13452 move_bytes = 8;
13453 mode = V2SImode;
13454 gen_func.mov = gen_movv2si;
13456 else if (TARGET_STRING
13457 && bytes > 24 /* move up to 32 bytes at a time */
13458 && ! fixed_regs[5]
13459 && ! fixed_regs[6]
13460 && ! fixed_regs[7]
13461 && ! fixed_regs[8]
13462 && ! fixed_regs[9]
13463 && ! fixed_regs[10]
13464 && ! fixed_regs[11]
13465 && ! fixed_regs[12])
13467 move_bytes = (bytes > 32) ? 32 : bytes;
13468 gen_func.movmemsi = gen_movmemsi_8reg;
13470 else if (TARGET_STRING
13471 && bytes > 16 /* move up to 24 bytes at a time */
13472 && ! fixed_regs[5]
13473 && ! fixed_regs[6]
13474 && ! fixed_regs[7]
13475 && ! fixed_regs[8]
13476 && ! fixed_regs[9]
13477 && ! fixed_regs[10])
13479 move_bytes = (bytes > 24) ? 24 : bytes;
13480 gen_func.movmemsi = gen_movmemsi_6reg;
13482 else if (TARGET_STRING
13483 && bytes > 8 /* move up to 16 bytes at a time */
13484 && ! fixed_regs[5]
13485 && ! fixed_regs[6]
13486 && ! fixed_regs[7]
13487 && ! fixed_regs[8])
13489 move_bytes = (bytes > 16) ? 16 : bytes;
13490 gen_func.movmemsi = gen_movmemsi_4reg;
13492 else if (bytes >= 8 && TARGET_POWERPC64
13493 /* 64-bit loads and stores require word-aligned
13494 displacements. */
13495 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13497 move_bytes = 8;
13498 mode = DImode;
13499 gen_func.mov = gen_movdi;
13501 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13502 { /* move up to 8 bytes at a time */
13503 move_bytes = (bytes > 8) ? 8 : bytes;
13504 gen_func.movmemsi = gen_movmemsi_2reg;
13506 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13507 { /* move 4 bytes */
13508 move_bytes = 4;
13509 mode = SImode;
13510 gen_func.mov = gen_movsi;
13512 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13513 { /* move 2 bytes */
13514 move_bytes = 2;
13515 mode = HImode;
13516 gen_func.mov = gen_movhi;
13518 else if (TARGET_STRING && bytes > 1)
13519 { /* move up to 4 bytes at a time */
13520 move_bytes = (bytes > 4) ? 4 : bytes;
13521 gen_func.movmemsi = gen_movmemsi_1reg;
13523 else /* move 1 byte at a time */
13525 move_bytes = 1;
13526 mode = QImode;
13527 gen_func.mov = gen_movqi;
13530 src = adjust_address (orig_src, mode, offset);
13531 dest = adjust_address (orig_dest, mode, offset);
13533 if (mode != BLKmode)
13535 rtx tmp_reg = gen_reg_rtx (mode);
13537 emit_insn ((*gen_func.mov) (tmp_reg, src));
13538 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13541 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13543 int i;
13544 for (i = 0; i < num_reg; i++)
13545 emit_insn (stores[i]);
13546 num_reg = 0;
13549 if (mode == BLKmode)
13551 /* Move the address into scratch registers. The movmemsi
13552 patterns require zero offset. */
13553 if (!REG_P (XEXP (src, 0)))
13555 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13556 src = replace_equiv_address (src, src_reg);
13558 set_mem_size (src, move_bytes);
13560 if (!REG_P (XEXP (dest, 0)))
13562 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13563 dest = replace_equiv_address (dest, dest_reg);
13565 set_mem_size (dest, move_bytes);
13567 emit_insn ((*gen_func.movmemsi) (dest, src,
13568 GEN_INT (move_bytes & 31),
13569 align_rtx));
13573 return 1;
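/* Walk-through (illustrative, assuming string instructions are not in
   use): a 12-byte copy on a 64-bit target with 64-bit alignment takes two
   iterations, a DImode move (move_bytes = 8) followed by an SImode move
   (move_bytes = 4).  The stores are buffered in STORES[] and flushed in
   groups of MAX_MOVE_REG, so all loads of a group are emitted before any
   of its stores.  */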
13577 /* Return a string to perform a load_multiple operation.
13578 operands[0] is the vector.
13579 operands[1] is the source address.
13580 operands[2] is the first destination register. */
13582 const char *
13583 rs6000_output_load_multiple (rtx operands[3])
13585 /* We have to handle the case where the pseudo used to contain the address
13586 is assigned to one of the output registers. */
13587 int i, j;
13588 int words = XVECLEN (operands[0], 0);
13589 rtx xop[10];
13591 if (XVECLEN (operands[0], 0) == 1)
13592 return "lwz %2,0(%1)";
13594 for (i = 0; i < words; i++)
13595 if (refers_to_regno_p (REGNO (operands[2]) + i,
13596 REGNO (operands[2]) + i + 1, operands[1], 0))
13598 if (i == words-1)
13600 xop[0] = GEN_INT (4 * (words-1));
13601 xop[1] = operands[1];
13602 xop[2] = operands[2];
13603 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13604 return "";
13606 else if (i == 0)
13608 xop[0] = GEN_INT (4 * (words-1));
13609 xop[1] = operands[1];
13610 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13611 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13612 return "";
13614 else
13616 for (j = 0; j < words; j++)
13617 if (j != i)
13619 xop[0] = GEN_INT (j * 4);
13620 xop[1] = operands[1];
13621 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13622 output_asm_insn ("lwz %2,%0(%1)", xop);
13624 xop[0] = GEN_INT (i * 4);
13625 xop[1] = operands[1];
13626 output_asm_insn ("lwz %1,%0(%1)", xop);
13627 return "";
13631 return "lswi %2,%1,%N0";
13635 /* A validation routine: say whether CODE, a condition code, and MODE
13636 match. The other alternatives either don't make sense or should
13637 never be generated. */
13639 void
13640 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13642 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13643 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13644 && GET_MODE_CLASS (mode) == MODE_CC);
13646 /* These don't make sense. */
13647 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13648 || mode != CCUNSmode);
13650 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13651 || mode == CCUNSmode);
13653 gcc_assert (mode == CCFPmode
13654 || (code != ORDERED && code != UNORDERED
13655 && code != UNEQ && code != LTGT
13656 && code != UNGT && code != UNLT
13657 && code != UNGE && code != UNLE));
13659 /* These should never be generated except for
13660 flag_finite_math_only. */
13661 gcc_assert (mode != CCFPmode
13662 || flag_finite_math_only
13663 || (code != LE && code != GE
13664 && code != UNEQ && code != LTGT
13665 && code != UNGT && code != UNLT));
13667 /* These are invalid; the information is not there. */
13668 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
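/* E.g. (gtu (reg:CCUNS n) (const_int 0)) is accepted, while using GTU
   against a plain CCmode register trips the CCUNSmode assertion above
   (register number n is illustrative).  */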
13672 /* Return 1 if ANDOP is a mask whose set bits all lie within the mask
13673 required to convert the result of a rotate insn into a shift
13674 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
13676 int
13677 includes_lshift_p (rtx shiftop, rtx andop)
13679 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13681 shift_mask <<= INTVAL (shiftop);
13683 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13686 /* Similar, but for right shift. */
13688 int
13689 includes_rshift_p (rtx shiftop, rtx andop)
13691 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13693 shift_mask >>= INTVAL (shiftop);
13695 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13698 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13699 to perform a left shift. It must have exactly SHIFTOP least
13700 significant 0's, then one or more 1's, then zero or more 0's. */
13702 int
13703 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13705 if (GET_CODE (andop) == CONST_INT)
13707 HOST_WIDE_INT c, lsb, shift_mask;
13709 c = INTVAL (andop);
13710 if (c == 0 || c == ~0)
13711 return 0;
13713 shift_mask = ~0;
13714 shift_mask <<= INTVAL (shiftop);
13716 /* Find the least significant one bit. */
13717 lsb = c & -c;
13719 /* It must coincide with the LSB of the shift mask. */
13720 if (-lsb != shift_mask)
13721 return 0;
13723 /* Invert to look for the next transition (if any). */
13724 c = ~c;
13726 /* Remove the low group of ones (originally low group of zeros). */
13727 c &= -lsb;
13729 /* Again find the lsb, and check we have all 1's above. */
13730 lsb = c & -c;
13731 return c == -lsb;
13733 else
13734 return 0;
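/* Worked example (illustrative): SHIFTOP = 4, ANDOP = 0x0ff0.  The mask
   has exactly four low 0s, then eight 1s, then 0s.  lsb = 0x10, and
   -lsb equals shift_mask (~0 << 4); after inverting and clearing the low
   run of ones, c = ...fffff000 and lsb = 0x1000 with c == -lsb, so the
   function returns 1.  */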
13737 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13738 to perform a left shift. It must have SHIFTOP or more least
13739 significant 0's, with the remainder of the word 1's. */
13741 int
13742 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13744 if (GET_CODE (andop) == CONST_INT)
13746 HOST_WIDE_INT c, lsb, shift_mask;
13748 shift_mask = ~0;
13749 shift_mask <<= INTVAL (shiftop);
13750 c = INTVAL (andop);
13752 /* Find the least significant one bit. */
13753 lsb = c & -c;
13755 /* It must be covered by the shift mask.
13756 This test also rejects c == 0. */
13757 if ((lsb & shift_mask) == 0)
13758 return 0;
13760 /* Check we have all 1's above the transition, and reject all 1's. */
13761 return c == -lsb && lsb != 1;
13763 else
13764 return 0;
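/* Worked example (illustrative): SHIFTOP = 4, ANDOP = ~0xff (eight low
   0s, all 1s above).  lsb = 0x100 falls inside shift_mask = ~0 << 4 and
   c == -lsb, so the function returns 1.  A mask with a 1 among its low
   four bits, or an all-1s mask, returns 0.  */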
13767 /* Return 1 if the operands generate valid arguments to the rlwimi
13768 instruction for an insert with right shift in 64-bit mode. The mask may
13769 not start on the first bit or stop on the last bit because the wrap-around
13770 effects of the instruction do not correspond to the semantics of the RTL insn. */
13772 int
13773 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13775 if (INTVAL (startop) > 32
13776 && INTVAL (startop) < 64
13777 && INTVAL (sizeop) > 1
13778 && INTVAL (sizeop) + INTVAL (startop) < 64
13779 && INTVAL (shiftop) > 0
13780 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13781 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13782 return 1;
13784 return 0;
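/* Worked example (illustrative): sizeop = 8, startop = 40, shiftop = 16
   satisfies every test (40 > 32, 8 + 40 = 48 < 64, 8 + 16 = 24 < 32,
   64 - 16 >= 8), so an 8-bit insert at bit 40 with a right shift of 16
   can use rlwimi.  */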
13787 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
13788 for lfq and stfq insns iff the registers are hard registers. */
13790 int
13791 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13793 /* We might have been passed a SUBREG. */
13794 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13795 return 0;
13797 /* We might have been passed non-floating-point registers. */
13798 if (!FP_REGNO_P (REGNO (reg1))
13799 || !FP_REGNO_P (REGNO (reg2)))
13800 return 0;
13802 return (REGNO (reg1) == REGNO (reg2) - 1);
13805 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13806 addr1 and addr2 must be in consecutive memory locations
13807 (addr2 == addr1 + 8). */
13809 int
13810 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13812 rtx addr1, addr2;
13813 unsigned int reg1, reg2;
13814 int offset1, offset2;
13816 /* The mems cannot be volatile. */
13817 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13818 return 0;
13820 addr1 = XEXP (mem1, 0);
13821 addr2 = XEXP (mem2, 0);
13823 /* Extract an offset (if used) from the first addr. */
13824 if (GET_CODE (addr1) == PLUS)
13826 /* If not a REG, return zero. */
13827 if (GET_CODE (XEXP (addr1, 0)) != REG)
13828 return 0;
13829 else
13831 reg1 = REGNO (XEXP (addr1, 0));
13832 /* The offset must be constant. */
13833 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13834 return 0;
13835 offset1 = INTVAL (XEXP (addr1, 1));
13838 else if (GET_CODE (addr1) != REG)
13839 return 0;
13840 else
13842 reg1 = REGNO (addr1);
13843 /* This was a simple (mem (reg)) expression. Offset is 0. */
13844 offset1 = 0;
13847 /* And now for the second addr. */
13848 if (GET_CODE (addr2) == PLUS)
13850 /* If not a REG, return zero. */
13851 if (GET_CODE (XEXP (addr2, 0)) != REG)
13852 return 0;
13853 else
13855 reg2 = REGNO (XEXP (addr2, 0));
13856 /* The offset must be constant. */
13857 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13858 return 0;
13859 offset2 = INTVAL (XEXP (addr2, 1));
13862 else if (GET_CODE (addr2) != REG)
13863 return 0;
13864 else
13866 reg2 = REGNO (addr2);
13867 /* This was a simple (mem (reg)) expression. Offset is 0. */
13868 offset2 = 0;
13871 /* Both of these must have the same base register. */
13872 if (reg1 != reg2)
13873 return 0;
13875 /* The offset for the second addr must be 8 more than the first addr. */
13876 if (offset2 != offset1 + 8)
13877 return 0;
13879 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13880 instructions. */
13881 return 1;
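/* Example (illustrative RTL): mem1 = (mem:DF (plus r3 8)) and
   mem2 = (mem:DF (plus r3 16)) share base r3 with offsets exactly 8
   apart, so the pair qualifies for lfq/stfq.  */

/* Return a stack location to use as secondary memory for moves between
   register classes that cannot be done directly; SDmode values use the
   dedicated stack slot set up for the function.  */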
13885 rtx
13886 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13888 static bool eliminated = false;
13889 rtx ret;
13891 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
13892 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13893 else
13895 rtx mem = cfun->machine->sdmode_stack_slot;
13896 gcc_assert (mem != NULL_RTX);
13898 if (!eliminated)
13900 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13901 cfun->machine->sdmode_stack_slot = mem;
13902 eliminated = true;
13904 ret = mem;
13907 if (TARGET_DEBUG_ADDR)
13909 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13910 GET_MODE_NAME (mode));
13911 if (!ret)
13912 fprintf (stderr, "\tNULL_RTX\n");
13913 else
13914 debug_rtx (ret);
13917 return ret;
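/* walk_tree callback: return the offending tree if *TP references an
   SDmode (decimal float) value, so callers can tell whether the current
   function touches SDmode and needs the dedicated stack slot.  */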
13920 static tree
13921 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13923 /* Don't walk into types. */
13924 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13926 *walk_subtrees = 0;
13927 return NULL_TREE;
13930 switch (TREE_CODE (*tp))
13932 case VAR_DECL:
13933 case PARM_DECL:
13934 case FIELD_DECL:
13935 case RESULT_DECL:
13936 case SSA_NAME:
13937 case REAL_CST:
13938 case MEM_REF:
13939 case VIEW_CONVERT_EXPR:
13940 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13941 return *tp;
13942 break;
13943 default:
13944 break;
13947 return NULL_TREE;
13950 enum reload_reg_type {
13951 GPR_REGISTER_TYPE,
13952 VECTOR_REGISTER_TYPE,
13953 OTHER_REGISTER_TYPE
13954 };
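/* Classify RCLASS by register bank (GPRs, vector/FP registers, or
   anything else) for the secondary-reload decisions below.  */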
13956 static enum reload_reg_type
13957 rs6000_reload_register_type (enum reg_class rclass)
13959 switch (rclass)
13961 case GENERAL_REGS:
13962 case BASE_REGS:
13963 return GPR_REGISTER_TYPE;
13965 case FLOAT_REGS:
13966 case ALTIVEC_REGS:
13967 case VSX_REGS:
13968 return VECTOR_REGISTER_TYPE;
13970 default:
13971 return OTHER_REGISTER_TYPE;
13975 /* Inform reload about cases where moving X with a mode MODE to a register in
13976 RCLASS requires an extra scratch or immediate register. Return the class
13977 needed for the immediate register.
13979 For VSX and Altivec, we may need a register to convert sp+offset into
13980 sp+reg.
13982 For misaligned 64-bit gpr loads and stores we need a register to
13983 convert an offset address to indirect. */
13985 static reg_class_t
13986 rs6000_secondary_reload (bool in_p,
13987 rtx x,
13988 reg_class_t rclass_i,
13989 enum machine_mode mode,
13990 secondary_reload_info *sri)
13992 enum reg_class rclass = (enum reg_class) rclass_i;
13993 reg_class_t ret = ALL_REGS;
13994 enum insn_code icode;
13995 bool default_p = false;
13997 sri->icode = CODE_FOR_nothing;
13999 /* Convert vector loads and stores into gprs to use an additional base
14000 register. */
14001 icode = rs6000_vector_reload[mode][in_p != false];
14002 if (icode != CODE_FOR_nothing)
14004 ret = NO_REGS;
14005 sri->icode = CODE_FOR_nothing;
14006 sri->extra_cost = 0;
14008 if (GET_CODE (x) == MEM)
14010 rtx addr = XEXP (x, 0);
14012 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
14013 an extra register in that case, but it would need an extra
14014 register if the addressing is reg+reg or (reg+reg)&(-16). */
14015 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
14017 if (!legitimate_indirect_address_p (addr, false)
14018 && !rs6000_legitimate_offset_address_p (PTImode, addr,
14019 false, true))
14021 sri->icode = icode;
14022 /* Account for splitting the loads, and converting the
14023 address from reg+reg to reg. */
14024 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
14025 + ((GET_CODE (addr) == AND) ? 1 : 0));
14028 /* Allow scalar loads to/from the traditional floating point
14029 registers, even if VSX memory is set. */
14030 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
14031 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
14032 && (legitimate_indirect_address_p (addr, false)
14033 || legitimate_indirect_address_p (XEXP (addr, 0), false)
14034 || rs6000_legitimate_offset_address_p (mode, addr,
14035 false, true)))
14038 /* Loads to and stores from vector registers can only do reg+reg
14039 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
14040 scalar modes loading up the traditional floating point registers
14041 to use offset addresses. */
14042 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
14043 || rclass == FLOAT_REGS || rclass == NO_REGS)
14045 if (!VECTOR_MEM_ALTIVEC_P (mode)
14046 && GET_CODE (addr) == AND
14047 && GET_CODE (XEXP (addr, 1)) == CONST_INT
14048 && INTVAL (XEXP (addr, 1)) == -16
14049 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
14050 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
14052 sri->icode = icode;
14053 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
14054 ? 2 : 1);
14056 else if (!legitimate_indirect_address_p (addr, false)
14057 && (rclass == NO_REGS
14058 || !legitimate_indexed_address_p (addr, false)))
14060 sri->icode = icode;
14061 sri->extra_cost = 1;
14063 else
14064 icode = CODE_FOR_nothing;
14066 /* Any other loads, including to pseudo registers which haven't been
14067 assigned to a register yet, default to require a scratch
14068 register. */
14069 else
14071 sri->icode = icode;
14072 sri->extra_cost = 2;
14075 else if (REG_P (x))
14077 int regno = true_regnum (x);
14079 icode = CODE_FOR_nothing;
14080 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
14081 default_p = true;
14082 else
14084 enum reg_class xclass = REGNO_REG_CLASS (regno);
14085 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
14086 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
14088 /* If memory is needed, use default_secondary_reload to create the
14089 stack slot. */
14090 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
14091 default_p = true;
14092 else
14093 ret = NO_REGS;
14096 else
14097 default_p = true;
14099 else if (TARGET_POWERPC64
14100 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
14101 && MEM_P (x)
14102 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
14104 rtx addr = XEXP (x, 0);
14105 rtx off = address_offset (addr);
14107 if (off != NULL_RTX)
14109 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
14110 unsigned HOST_WIDE_INT offset = INTVAL (off);
14112 /* We need a secondary reload when our legitimate_address_p
14113 says the address is good (as otherwise the entire address
14114 will be reloaded), and the offset is not a multiple of
14115 four or we have an address wrap. Address wrap will only
14116 occur for LO_SUMs since legitimate_offset_address_p
14117 rejects addresses for 16-byte mems that will wrap. */
14118 if (GET_CODE (addr) == LO_SUM
14119 ? (1 /* legitimate_address_p allows any offset for lo_sum */
14120 && ((offset & 3) != 0
14121 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
14122 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
14123 && (offset & 3) != 0))
14125 if (in_p)
14126 sri->icode = CODE_FOR_reload_di_load;
14127 else
14128 sri->icode = CODE_FOR_reload_di_store;
14129 sri->extra_cost = 2;
14130 ret = NO_REGS;
14132 else
14133 default_p = true;
14135 else
14136 default_p = true;
14138 else if (!TARGET_POWERPC64
14139 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
14140 && MEM_P (x)
14141 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
14143 rtx addr = XEXP (x, 0);
14144 rtx off = address_offset (addr);
14146 if (off != NULL_RTX)
14148 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
14149 unsigned HOST_WIDE_INT offset = INTVAL (off);
14151 /* We need a secondary reload when our legitimate_address_p
14152 says the address is good (as otherwise the entire address
14153 will be reloaded), and we have a wrap.
14155 legitimate_lo_sum_address_p allows LO_SUM addresses to
14156 have any offset so test for wrap in the low 16 bits.
14158 legitimate_offset_address_p checks for the range
14159 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
14160 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
14161 [0x7ff4,0x7fff] respectively, so test for the
14162 intersection of these ranges, [0x7ffc,0x7fff] and
14163 [0x7ff4,0x7ff7] respectively.
14165 Note that the address we see here may have been
14166 manipulated by legitimize_reload_address. */
14167 if (GET_CODE (addr) == LO_SUM
14168 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
14169 : offset - (0x8000 - extra) < UNITS_PER_WORD)
14171 if (in_p)
14172 sri->icode = CODE_FOR_reload_si_load;
14173 else
14174 sri->icode = CODE_FOR_reload_si_store;
14175 sri->extra_cost = 2;
14176 ret = NO_REGS;
14178 else
14179 default_p = true;
14181 else
14182 default_p = true;
14184 else
14185 default_p = true;
14187 if (default_p)
14188 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
14190 gcc_assert (ret != ALL_REGS);
14192 if (TARGET_DEBUG_ADDR)
14194 fprintf (stderr,
14195 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
14196 "mode = %s",
14197 reg_class_names[ret],
14198 in_p ? "true" : "false",
14199 reg_class_names[rclass],
14200 GET_MODE_NAME (mode));
14202 if (default_p)
14203 fprintf (stderr, ", default secondary reload");
14205 if (sri->icode != CODE_FOR_nothing)
14206 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
14207 insn_data[sri->icode].name, sri->extra_cost);
14208 else
14209 fprintf (stderr, "\n");
14211 debug_rtx (x);
14214 return ret;
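/* Worked example (illustrative): a DImode access at r30+0x7ffd on a
   32-bit target has extra = 8 - 4 = 4, and 0x7ffd - (0x8000 - 4) = 1 < 4,
   so the second word would wrap past the offset range and
   reload_si_load/reload_si_store is used with a scratch register.  */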
14217 /* Better tracing for rs6000_secondary_reload_inner. */
14219 static void
14220 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
14221 bool store_p)
14223 rtx set, clobber;
14225 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
14227 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
14228 store_p ? "store" : "load");
14230 if (store_p)
14231 set = gen_rtx_SET (VOIDmode, mem, reg);
14232 else
14233 set = gen_rtx_SET (VOIDmode, reg, mem);
14235 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
14236 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
14239 static void
14240 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
14241 bool store_p)
14243 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
14244 gcc_unreachable ();
14247 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
14248 to SP+reg addressing. */
14250 void
14251 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
14253 int regno = true_regnum (reg);
14254 enum machine_mode mode = GET_MODE (reg);
14255 enum reg_class rclass;
14256 rtx addr;
14257 rtx and_op2 = NULL_RTX;
14258 rtx addr_op1;
14259 rtx addr_op2;
14260 rtx scratch_or_premodify = scratch;
14261 rtx and_rtx;
14262 rtx cc_clobber;
14264 if (TARGET_DEBUG_ADDR)
14265 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
14267 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
14268 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14270 if (GET_CODE (mem) != MEM)
14271 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14273 rclass = REGNO_REG_CLASS (regno);
14274 addr = XEXP (mem, 0);
14276 switch (rclass)
14278 /* GPRs can handle reg + small constant, all other addresses need to use
14279 the scratch register. */
14280 case GENERAL_REGS:
14281 case BASE_REGS:
14282 if (GET_CODE (addr) == AND)
14284 and_op2 = XEXP (addr, 1);
14285 addr = XEXP (addr, 0);
14288 if (GET_CODE (addr) == PRE_MODIFY)
14290 scratch_or_premodify = XEXP (addr, 0);
14291 if (!REG_P (scratch_or_premodify))
14292 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14294 if (GET_CODE (XEXP (addr, 1)) != PLUS)
14295 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14297 addr = XEXP (addr, 1);
14300 if (GET_CODE (addr) == PLUS
14301 && (and_op2 != NULL_RTX
14302 || !rs6000_legitimate_offset_address_p (PTImode, addr,
14303 false, true)))
14305 addr_op1 = XEXP (addr, 0);
14306 addr_op2 = XEXP (addr, 1);
14307 if (!legitimate_indirect_address_p (addr_op1, false))
14308 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14310 if (!REG_P (addr_op2)
14311 && (GET_CODE (addr_op2) != CONST_INT
14312 || !satisfies_constraint_I (addr_op2)))
14314 if (TARGET_DEBUG_ADDR)
14316 fprintf (stderr,
14317 "\nMove plus addr to register %s, mode = %s: ",
14318 rs6000_reg_names[REGNO (scratch)],
14319 GET_MODE_NAME (mode));
14320 debug_rtx (addr_op2);
14322 rs6000_emit_move (scratch, addr_op2, Pmode);
14323 addr_op2 = scratch;
14326 emit_insn (gen_rtx_SET (VOIDmode,
14327 scratch_or_premodify,
14328 gen_rtx_PLUS (Pmode,
14329 addr_op1,
14330 addr_op2)));
14332 addr = scratch_or_premodify;
14333 scratch_or_premodify = scratch;
14335 else if (!legitimate_indirect_address_p (addr, false)
14336 && !rs6000_legitimate_offset_address_p (PTImode, addr,
14337 false, true))
14339 if (TARGET_DEBUG_ADDR)
14341 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
14342 rs6000_reg_names[REGNO (scratch_or_premodify)],
14343 GET_MODE_NAME (mode));
14344 debug_rtx (addr);
14346 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14347 addr = scratch_or_premodify;
14348 scratch_or_premodify = scratch;
14350 break;
14352 /* Float registers can do offset+reg addressing for scalar types. */
14353 case FLOAT_REGS:
14354 if (legitimate_indirect_address_p (addr, false) /* reg */
14355 || legitimate_indexed_address_p (addr, false) /* reg+reg */
14356 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
14357 && and_op2 == NULL_RTX
14358 && scratch_or_premodify == scratch
14359 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
14360 break;
14362 /* If this isn't a legacy floating point load/store, fall through to the
14363 VSX defaults. */
14365 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
14366 addresses into a scratch register. */
14367 case VSX_REGS:
14368 case ALTIVEC_REGS:
14370 /* With float regs, we need to handle the AND ourselves, since we can't
14371 use the Altivec instruction with an implicit AND -16. Allow scalar
14372 loads to float registers to use reg+offset even if VSX. */
14373 if (GET_CODE (addr) == AND
14374 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
14375 || GET_CODE (XEXP (addr, 1)) != CONST_INT
14376 || INTVAL (XEXP (addr, 1)) != -16
14377 || !VECTOR_MEM_ALTIVEC_P (mode)))
14379 and_op2 = XEXP (addr, 1);
14380 addr = XEXP (addr, 0);
14383 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
14384 as the address later. */
14385 if (GET_CODE (addr) == PRE_MODIFY
14386 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
14387 && (rclass != FLOAT_REGS
14388 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
14389 || and_op2 != NULL_RTX
14390 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
14392 scratch_or_premodify = XEXP (addr, 0);
14393 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
14394 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14396 if (GET_CODE (XEXP (addr, 1)) != PLUS)
14397 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14399 addr = XEXP (addr, 1);
14402 if (legitimate_indirect_address_p (addr, false) /* reg */
14403 || legitimate_indexed_address_p (addr, false) /* reg+reg */
14404 || (GET_CODE (addr) == AND /* Altivec memory */
14405 && rclass == ALTIVEC_REGS
14406 && GET_CODE (XEXP (addr, 1)) == CONST_INT
14407 && INTVAL (XEXP (addr, 1)) == -16
14408 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
14409 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
14412 else if (GET_CODE (addr) == PLUS)
14414 addr_op1 = XEXP (addr, 0);
14415 addr_op2 = XEXP (addr, 1);
14416 if (!REG_P (addr_op1))
14417 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14419 if (TARGET_DEBUG_ADDR)
14421 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
14422 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14423 debug_rtx (addr_op2);
14425 rs6000_emit_move (scratch, addr_op2, Pmode);
14426 emit_insn (gen_rtx_SET (VOIDmode,
14427 scratch_or_premodify,
14428 gen_rtx_PLUS (Pmode,
14429 addr_op1,
14430 scratch)));
14431 addr = scratch_or_premodify;
14432 scratch_or_premodify = scratch;
14435 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
14436 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
14437 || REG_P (addr))
14439 if (TARGET_DEBUG_ADDR)
14441 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
14442 rs6000_reg_names[REGNO (scratch_or_premodify)],
14443 GET_MODE_NAME (mode));
14444 debug_rtx (addr);
14447 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14448 addr = scratch_or_premodify;
14449 scratch_or_premodify = scratch;
14452 else
14453 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14455 break;
14457 default:
14458 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
14461 /* If the original address involved a pre-modify that we could not fold
14462 into a VSX memory instruction with update, and we have not already taken
14463 care of it, store the address in the pre-modify register and use that as
14464 the address. */
14465 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
14467 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
14468 addr = scratch_or_premodify;
14471 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
14472 memory instruction, recreate the AND now, including the clobber which is
14473 generated by the general ANDSI3/ANDDI3 patterns for the
14474 andi. instruction. */
14475 if (and_op2 != NULL_RTX)
14477 if (! legitimate_indirect_address_p (addr, false))
14479 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
14480 addr = scratch;
14483 if (TARGET_DEBUG_ADDR)
14485 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
14486 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14487 debug_rtx (and_op2);
14490 and_rtx = gen_rtx_SET (VOIDmode,
14491 scratch,
14492 gen_rtx_AND (Pmode,
14493 addr,
14494 and_op2));
14496 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
14497 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14498 gen_rtvec (2, and_rtx, cc_clobber)));
14499 addr = scratch;
14502 /* Adjust the address if it changed. */
14503 if (addr != XEXP (mem, 0))
14505 mem = replace_equiv_address_nv (mem, addr);
14506 if (TARGET_DEBUG_ADDR)
14507 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14510 /* Now create the move. */
14511 if (store_p)
14512 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14513 else
14514 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14516 return;
14519 /* Convert reloads involving 64-bit gprs and misaligned offset
14520 addressing, or multiple 32-bit gprs and offsets that are too large,
14521 to use indirect addressing. */
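/* For example, a 64-bit gpr load whose DS-form offset is not a multiple
   of 4, or a 32-bit gpr pair whose offset overflows the displacement
   field, has its reg+offset address computed into the scratch register
   first and is then accessed indirectly through it.  (Illustrative
   note derived from the cases named above.)  */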
14523 void
14524 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
14526 int regno = true_regnum (reg);
14527 enum reg_class rclass;
14528 rtx addr;
14529 rtx scratch_or_premodify = scratch;
14531 if (TARGET_DEBUG_ADDR)
14533 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
14534 store_p ? "store" : "load");
14535 fprintf (stderr, "reg:\n");
14536 debug_rtx (reg);
14537 fprintf (stderr, "mem:\n");
14538 debug_rtx (mem);
14539 fprintf (stderr, "scratch:\n");
14540 debug_rtx (scratch);
14543 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
14544 gcc_assert (GET_CODE (mem) == MEM);
14545 rclass = REGNO_REG_CLASS (regno);
14546 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
14547 addr = XEXP (mem, 0);
14549 if (GET_CODE (addr) == PRE_MODIFY)
14551 scratch_or_premodify = XEXP (addr, 0);
14552 gcc_assert (REG_P (scratch_or_premodify));
14553 addr = XEXP (addr, 1);
14555 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
14557 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14559 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
14561 /* Now create the move. */
14562 if (store_p)
14563 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14564 else
14565 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14567 return;
14570 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
14571 this function has any SDmode references. If we are on a power7 or later, we
14572 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
14573 can load/store the value. */
14575 static void
14576 rs6000_alloc_sdmode_stack_slot (void)
14578 tree t;
14579 basic_block bb;
14580 gimple_stmt_iterator gsi;
14582 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
14584 if (TARGET_NO_SDMODE_STACK)
14585 return;
14587 FOR_EACH_BB (bb)
14588 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
14590 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
14591 if (ret)
14593 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14594 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14595 SDmode, 0);
14596 return;
14600 /* Check for any SDmode parameters of the function. */
14601 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
14603 if (TREE_TYPE (t) == error_mark_node)
14604 continue;
14606 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
14607 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
14609 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14610 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14611 SDmode, 0);
14612 return;
14617 static void
14618 rs6000_instantiate_decls (void)
14620 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
14621 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
14624 /* Given an rtx X being reloaded into a reg required to be
14625 in class CLASS, return the class of reg to actually use.
14626 In general this is just CLASS; but on some machines
14627 in some cases it is preferable to use a more restrictive class.
14629 On the RS/6000, we have to return NO_REGS when we want to reload a
14630 floating-point CONST_DOUBLE to force it to be copied to memory.
14632 We also don't want to reload integer values into floating-point
14633 registers if we can at all help it. In fact, this can
14634 cause reload to die, if it tries to generate a reload of CTR
14635 into a FP register and discovers it doesn't have the memory location
14636 required.
14638 ??? Would it be a good idea to have reload do the converse, that is
14639 try to reload floating modes into FP registers if possible?
14642 static enum reg_class
14643 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
14645 enum machine_mode mode = GET_MODE (x);
14647 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
14648 return rclass;
14650 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
14651 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
14652 && easy_vector_constant (x, mode))
14653 return ALTIVEC_REGS;
14655 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
14656 return NO_REGS;
14658 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
14659 return GENERAL_REGS;
14661 /* For VSX, prefer the traditional registers for 64-bit values because we can
14662 use the non-VSX loads. Prefer the Altivec registers if Altivec is
14663 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
14664 prefer Altivec loads. */
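/* For example, a DFmode value asked to go in VSX_REGS is narrowed to
   FLOAT_REGS below so that the traditional lfd/stfd with reg+offset
   addressing can be used.  (Illustrative note.)  */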
14665 if (rclass == VSX_REGS)
14667 if (GET_MODE_SIZE (mode) <= 8)
14668 return FLOAT_REGS;
14670 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
14671 return ALTIVEC_REGS;
14673 return rclass;
14676 return rclass;
14679 /* Debug version of rs6000_preferred_reload_class. */
14680 static enum reg_class
14681 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
14683 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
14685 fprintf (stderr,
14686 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
14687 "mode = %s, x:\n",
14688 reg_class_names[ret], reg_class_names[rclass],
14689 GET_MODE_NAME (GET_MODE (x)));
14690 debug_rtx (x);
14692 return ret;
14695 /* If we are copying between FP or AltiVec registers and anything else, we need
14696 a memory location. The exception is when we are targeting ppc64 and the
14697 move to/from fpr to gpr instructions are available. Also, under VSX, you
14698 can copy vector registers from the FP register set to the Altivec register
14699 set and vice versa. */
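/* For example, under VSX a V2DFmode copy between FLOAT_REGS and
   ALTIVEC_REGS needs no memory (both classes live in the VSX register
   file), while an SFmode copy between FLOAT_REGS and GENERAL_REGS does.
   (Illustrative note.)  */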
14701 static bool
14702 rs6000_secondary_memory_needed (enum reg_class class1,
14703 enum reg_class class2,
14704 enum machine_mode mode)
14706 if (class1 == class2)
14707 return false;
14709 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14710 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14711 between these classes. But we need memory for other things that can go in
14712 FLOAT_REGS like SFmode. */
14713 if (TARGET_VSX
14714 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14715 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14716 || class1 == FLOAT_REGS))
14717 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14718 && class2 != FLOAT_REGS);
14720 if (class1 == VSX_REGS || class2 == VSX_REGS)
14721 return true;
14723 if (class1 == FLOAT_REGS
14724 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14725 || ((mode != DFmode)
14726 && (mode != DDmode)
14727 && (mode != DImode))))
14728 return true;
14730 if (class2 == FLOAT_REGS
14731 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14732 || ((mode != DFmode)
14733 && (mode != DDmode)
14734 && (mode != DImode))))
14735 return true;
14737 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14738 return true;
14740 return false;
14743 /* Debug version of rs6000_secondary_memory_needed. */
14744 static bool
14745 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14746 enum reg_class class2,
14747 enum machine_mode mode)
14749 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14751 fprintf (stderr,
14752 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14753 "class2 = %s, mode = %s\n",
14754 ret ? "true" : "false", reg_class_names[class1],
14755 reg_class_names[class2], GET_MODE_NAME (mode));
14757 return ret;
14760 /* Return the register class of a scratch register needed to copy IN into
14761 or out of a register in RCLASS in MODE. If it can be done directly,
14762 NO_REGS is returned. */
14764 static enum reg_class
14765 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14766 rtx in)
14768 int regno;
14770 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14771 #if TARGET_MACHO
14772 && MACHOPIC_INDIRECT
14773 #endif
14776 /* We cannot copy a symbolic operand directly into anything
14777 other than BASE_REGS for TARGET_ELF. So indicate that a
14778 register from BASE_REGS is needed as an intermediate
14779 register.
14781 On Darwin, pic addresses require a load from memory, which
14782 needs a base register. */
14783 if (rclass != BASE_REGS
14784 && (GET_CODE (in) == SYMBOL_REF
14785 || GET_CODE (in) == HIGH
14786 || GET_CODE (in) == LABEL_REF
14787 || GET_CODE (in) == CONST))
14788 return BASE_REGS;
14791 if (GET_CODE (in) == REG)
14793 regno = REGNO (in);
14794 if (regno >= FIRST_PSEUDO_REGISTER)
14796 regno = true_regnum (in);
14797 if (regno >= FIRST_PSEUDO_REGISTER)
14798 regno = -1;
14801 else if (GET_CODE (in) == SUBREG)
14803 regno = true_regnum (in);
14804 if (regno >= FIRST_PSEUDO_REGISTER)
14805 regno = -1;
14807 else
14808 regno = -1;
14810 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14811 into anything. */
14812 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14813 || (regno >= 0 && INT_REGNO_P (regno)))
14814 return NO_REGS;
14816 /* Constants, memory, and FP registers can go into FP registers. */
14817 if ((regno == -1 || FP_REGNO_P (regno))
14818 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14819 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14821 /* Memory, and FP/altivec registers can go into fp/altivec registers under
14822 VSX. However, for scalar variables, use the traditional floating point
14823 registers so that we can use offset+register addressing. */
14824 if (TARGET_VSX
14825 && (regno == -1 || VSX_REGNO_P (regno))
14826 && VSX_REG_CLASS_P (rclass))
14828 if (GET_MODE_SIZE (mode) < 16)
14829 return FLOAT_REGS;
14831 return NO_REGS;
14834 /* Memory, and AltiVec registers can go into AltiVec registers. */
14835 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14836 && rclass == ALTIVEC_REGS)
14837 return NO_REGS;
14839 /* We can copy among the CR registers. */
14840 if ((rclass == CR_REGS || rclass == CR0_REGS)
14841 && regno >= 0 && CR_REGNO_P (regno))
14842 return NO_REGS;
14844 /* Otherwise, we need GENERAL_REGS. */
14845 return GENERAL_REGS;
14848 /* Debug version of rs6000_secondary_reload_class. */
14849 static enum reg_class
14850 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14851 enum machine_mode mode, rtx in)
14853 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14854 fprintf (stderr,
14855 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14856 "mode = %s, input rtx:\n",
14857 reg_class_names[ret], reg_class_names[rclass],
14858 GET_MODE_NAME (mode));
14859 debug_rtx (in);
14861 return ret;
14864 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14866 static bool
14867 rs6000_cannot_change_mode_class (enum machine_mode from,
14868 enum machine_mode to,
14869 enum reg_class rclass)
14871 unsigned from_size = GET_MODE_SIZE (from);
14872 unsigned to_size = GET_MODE_SIZE (to);
14874 if (from_size != to_size)
14876 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14878 if (reg_classes_intersect_p (xclass, rclass))
14880 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
14881 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
14883 /* Don't allow 64-bit types to overlap with 128-bit types that take a
14884 single register under VSX because the scalar part of the register
14885 is in the upper 64-bits, and not the lower 64-bits. Types like
14886 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
14887 IEEE floating point can't overlap, and neither can small
14888 values. */
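/* For example, accessing the DFmode scalar piece of a 16-byte V2DFmode
   VSX register via a subreg is disallowed, because the scalar part lives
   in the upper 64 bits; DFmode and TFmode (two 8-byte registers when not
   IEEE 128-bit) may still overlap.  (Illustrative note.)  */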
14890 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
14891 return true;
14893 if (from_size < 8 || to_size < 8)
14894 return true;
14896 if (from_size == 8 && (8 * to_nregs) != to_size)
14897 return true;
14899 if (to_size == 8 && (8 * from_nregs) != from_size)
14900 return true;
14902 return false;
14904 else
14905 return false;
14908 if (TARGET_E500_DOUBLE
14909 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14910 || (((to) == TFmode) + ((from) == TFmode)) == 1
14911 || (((to) == DDmode) + ((from) == DDmode)) == 1
14912 || (((to) == TDmode) + ((from) == TDmode)) == 1
14913 || (((to) == DImode) + ((from) == DImode)) == 1))
14914 return true;
14916 /* Since the VSX register set includes traditional floating point registers
14917 and altivec registers, just check for the size being different instead of
14918 trying to check whether the modes are vector modes. Otherwise it won't
14919 allow, say, DF and DI to change classes. For types like TFmode and TDmode
14920 that take 2 64-bit registers, rather than a single 128-bit register, don't
14921 allow subregs of those types to other 128-bit types. */
14922 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14924 unsigned num_regs = (from_size + 15) / 16;
14925 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
14926 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
14927 return true;
14929 return (from_size != 8 && from_size != 16);
14932 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14933 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14934 return true;
14936 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14937 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14938 return true;
14940 return false;
14943 /* Debug version of rs6000_cannot_change_mode_class. */
14944 static bool
14945 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14946 enum machine_mode to,
14947 enum reg_class rclass)
14949 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14951 fprintf (stderr,
14952 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14953 "to = %s, rclass = %s\n",
14954 ret ? "true" : "false",
14955 GET_MODE_NAME (from), GET_MODE_NAME (to),
14956 reg_class_names[rclass]);
14958 return ret;
14961 /* Given a comparison operation, return the bit number in CCR to test. We
14962 know this is a valid comparison.
14964 SCC_P is 1 if this is for an scc. That means that %D will have been
14965 used instead of %C, so the bits will be in different places.
14967 Return -1 if OP isn't a valid comparison for some reason. */
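/* Each 4-bit CR field holds, from bit 0 to bit 3, the LT, GT, EQ and
   SO/UNORDERED bits, so e.g. an EQ test on field cr3 yields bit
   4*3 + 2 = 14.  */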
14969 int
14970 ccr_bit (rtx op, int scc_p)
14972 enum rtx_code code = GET_CODE (op);
14973 enum machine_mode cc_mode;
14974 int cc_regnum;
14975 int base_bit;
14976 rtx reg;
14978 if (!COMPARISON_P (op))
14979 return -1;
14981 reg = XEXP (op, 0);
14983 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14985 cc_mode = GET_MODE (reg);
14986 cc_regnum = REGNO (reg);
14987 base_bit = 4 * (cc_regnum - CR0_REGNO);
14989 validate_condition_mode (code, cc_mode);
14991 /* When generating a sCOND operation, only positive conditions are
14992 allowed. */
14993 gcc_assert (!scc_p
14994 || code == EQ || code == GT || code == LT || code == UNORDERED
14995 || code == GTU || code == LTU);
14997 switch (code)
14999 case NE:
15000 return scc_p ? base_bit + 3 : base_bit + 2;
15001 case EQ:
15002 return base_bit + 2;
15003 case GT: case GTU: case UNLE:
15004 return base_bit + 1;
15005 case LT: case LTU: case UNGE:
15006 return base_bit;
15007 case ORDERED: case UNORDERED:
15008 return base_bit + 3;
15010 case GE: case GEU:
15011 /* If scc, we will have done a cror to put the bit in the
15012 unordered position. So test that bit. For integer, this is ! LT
15013 unless this is an scc insn. */
15014 return scc_p ? base_bit + 3 : base_bit;
15016 case LE: case LEU:
15017 return scc_p ? base_bit + 3 : base_bit + 1;
15019 default:
15020 gcc_unreachable ();
15024 /* Return the GOT register. */
15026 rtx
15027 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
15029 /* The second flow pass currently (June 1999) can't update
15030 regs_ever_live without disturbing other parts of the compiler, so
15031 update it here to make the prolog/epilogue code happy. */
15032 if (!can_create_pseudo_p ()
15033 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
15034 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
15036 crtl->uses_pic_offset_table = 1;
15038 return pic_offset_table_rtx;
15041 static rs6000_stack_t stack_info;
15043 /* Function to init struct machine_function.
15044 This will be called, via a pointer variable,
15045 from push_function_context. */
15047 static struct machine_function *
15048 rs6000_init_machine_status (void)
15050 stack_info.reload_completed = 0;
15051 return ggc_alloc_cleared_machine_function ();
15054 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
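/* Worked example for the two mask extractors below: the 32-bit mask
   0x0ff00000 has bits 4..11 set (bit 0 being the most significant), so
   extract_MB returns 4 and extract_ME returns 11, the rlwinm mask begin
   and end positions.  */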
15056 int
15057 extract_MB (rtx op)
15059 int i;
15060 unsigned long val = INTVAL (op);
15062 /* If the high bit is zero, the value is the first 1 bit we find
15063 from the left. */
15064 if ((val & 0x80000000) == 0)
15066 gcc_assert (val & 0xffffffff);
15068 i = 1;
15069 while (((val <<= 1) & 0x80000000) == 0)
15070 ++i;
15071 return i;
15074 /* If the high bit is set and the low bit is not, or the mask is all
15075 1's, the value is zero. */
15076 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
15077 return 0;
15079 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
15080 from the right. */
15081 i = 31;
15082 while (((val >>= 1) & 1) != 0)
15083 --i;
15085 return i;
15088 int
15089 extract_ME (rtx op)
15091 int i;
15092 unsigned long val = INTVAL (op);
15094 /* If the low bit is zero, the value is the first 1 bit we find from
15095 the right. */
15096 if ((val & 1) == 0)
15098 gcc_assert (val & 0xffffffff);
15100 i = 30;
15101 while (((val >>= 1) & 1) == 0)
15102 --i;
15104 return i;
15107 /* If the low bit is set and the high bit is not, or the mask is all
15108 1's, the value is 31. */
15109 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
15110 return 31;
15112 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
15113 from the left. */
15114 i = 0;
15115 while (((val <<= 1) & 0x80000000) != 0)
15116 ++i;
15118 return i;
15121 /* Locate some local-dynamic symbol still in use by this function
15122 so that we can print its name in some tls_ld pattern. */
15124 static const char *
15125 rs6000_get_some_local_dynamic_name (void)
15127 rtx insn;
15129 if (cfun->machine->some_ld_name)
15130 return cfun->machine->some_ld_name;
15132 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
15133 if (INSN_P (insn)
15134 && for_each_rtx (&PATTERN (insn),
15135 rs6000_get_some_local_dynamic_name_1, 0))
15136 return cfun->machine->some_ld_name;
15138 gcc_unreachable ();
15141 /* Helper function for rs6000_get_some_local_dynamic_name. */
15143 static int
15144 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
15146 rtx x = *px;
15148 if (GET_CODE (x) == SYMBOL_REF)
15150 const char *str = XSTR (x, 0);
15151 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
15153 cfun->machine->some_ld_name = str;
15154 return 1;
15158 return 0;
15161 /* Write out a function code label. */
15163 void
15164 rs6000_output_function_entry (FILE *file, const char *fname)
15166 if (fname[0] != '.')
15168 switch (DEFAULT_ABI)
15170 default:
15171 gcc_unreachable ();
15173 case ABI_AIX:
15174 if (DOT_SYMBOLS)
15175 putc ('.', file);
15176 else
15177 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
15178 break;
15180 case ABI_V4:
15181 case ABI_DARWIN:
15182 break;
15186 RS6000_OUTPUT_BASENAME (file, fname);
15189 /* Print an operand. Recognize special options, documented below. */
15191 #if TARGET_ELF
15192 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
15193 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
15194 #else
15195 #define SMALL_DATA_RELOC "sda21"
15196 #define SMALL_DATA_REG 0
15197 #endif
15199 void
15200 print_operand (FILE *file, rtx x, int code)
15202 int i;
15203 unsigned HOST_WIDE_INT uval;
15205 switch (code)
15207 /* %a is output_address. */
15209 case 'b':
15210 /* If constant, low-order 16 bits of constant, unsigned.
15211 Otherwise, write normally. */
15212 if (INT_P (x))
15213 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
15214 else
15215 print_operand (file, x, 0);
15216 return;
15218 case 'B':
15219 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
15220 for 64-bit mask direction. */
15221 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
15222 return;
15224 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
15225 output_operand. */
15227 case 'D':
15228 /* Like 'J' but get to the GT bit only. */
15229 gcc_assert (REG_P (x));
15231 /* Bit 1 is GT bit. */
15232 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
15234 /* Add one for shift count in rlinm for scc. */
15235 fprintf (file, "%d", i + 1);
15236 return;
15238 case 'E':
15239 /* X is a CR register. Print the number of the EQ bit of the CR. */
15240 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15241 output_operand_lossage ("invalid %%E value");
15242 else
15243 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
15244 return;
15246 case 'f':
15247 /* X is a CR register. Print the shift count needed to move it
15248 to the high-order four bits. */
15249 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15250 output_operand_lossage ("invalid %%f value");
15251 else
15252 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
15253 return;
15255 case 'F':
15256 /* Similar, but print the count for the rotate in the opposite
15257 direction. */
15258 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15259 output_operand_lossage ("invalid %%F value");
15260 else
15261 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
15262 return;
15264 case 'G':
15265 /* X is a constant integer. If it is negative, print "m",
15266 otherwise print "z". This is to make an aze or ame insn. */
15267 if (GET_CODE (x) != CONST_INT)
15268 output_operand_lossage ("invalid %%G value");
15269 else if (INTVAL (x) >= 0)
15270 putc ('z', file);
15271 else
15272 putc ('m', file);
15273 return;
15275 case 'h':
15276 /* If constant, output low-order five bits. Otherwise, write
15277 normally. */
15278 if (INT_P (x))
15279 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
15280 else
15281 print_operand (file, x, 0);
15282 return;
15284 case 'H':
15285 /* If constant, output low-order six bits. Otherwise, write
15286 normally. */
15287 if (INT_P (x))
15288 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
15289 else
15290 print_operand (file, x, 0);
15291 return;
15293 case 'I':
15294 /* Print `i' if this is a constant, else nothing. */
15295 if (INT_P (x))
15296 putc ('i', file);
15297 return;
15299 case 'j':
15300 /* Write the bit number in CCR for jump. */
15301 i = ccr_bit (x, 0);
15302 if (i == -1)
15303 output_operand_lossage ("invalid %%j code");
15304 else
15305 fprintf (file, "%d", i);
15306 return;
15308 case 'J':
15309 /* Similar, but add one for shift count in rlinm for scc and pass
15310 scc flag to `ccr_bit'. */
15311 i = ccr_bit (x, 1);
15312 if (i == -1)
15313 output_operand_lossage ("invalid %%J code");
15314 else
15315 /* If we want bit 31, write a shift count of zero, not 32. */
15316 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15317 return;
15319 case 'k':
15320 /* X must be a constant. Write the 1's complement of the
15321 constant. */
15322 if (! INT_P (x))
15323 output_operand_lossage ("invalid %%k value");
15324 else
15325 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
15326 return;
15328 case 'K':
15329 /* X must be a symbolic constant on ELF. Write an
15330 expression suitable for an 'addi' that adds in the low 16
15331 bits of the MEM. */
15332 if (GET_CODE (x) == CONST)
15334 if (GET_CODE (XEXP (x, 0)) != PLUS
15335 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
15336 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
15337 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
15338 output_operand_lossage ("invalid %%K value");
15340 print_operand_address (file, x);
15341 fputs ("@l", file);
15342 return;
15344 /* %l is output_asm_label. */
15346 case 'L':
15347 /* Write second word of DImode or DFmode reference. Works on register
15348 or non-indexed memory only. */
15349 if (REG_P (x))
15350 fputs (reg_names[REGNO (x) + 1], file);
15351 else if (MEM_P (x))
15353 /* Handle possible auto-increment. Since it is pre-increment and
15354 we have already done it, we can just use an offset of word. */
15355 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15356 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15357 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
15358 UNITS_PER_WORD));
15359 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15360 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
15361 UNITS_PER_WORD));
15362 else
15363 output_address (XEXP (adjust_address_nv (x, SImode,
15364 UNITS_PER_WORD),
15365 0));
15367 if (small_data_operand (x, GET_MODE (x)))
15368 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15369 reg_names[SMALL_DATA_REG]);
15371 return;
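/* E.g. if operand 1 is a DImode value held in registers 10 and 11,
   "%L1" prints the name of register 11, the second word.  */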
15373 case 'm':
15374 /* MB value for a mask operand. */
15375 if (! mask_operand (x, SImode))
15376 output_operand_lossage ("invalid %%m value");
15378 fprintf (file, "%d", extract_MB (x));
15379 return;
15381 case 'M':
15382 /* ME value for a mask operand. */
15383 if (! mask_operand (x, SImode))
15384 output_operand_lossage ("invalid %%M value");
15386 fprintf (file, "%d", extract_ME (x));
15387 return;
15389 /* %n outputs the negative of its operand. */
15391 case 'N':
15392 /* Write the number of elements in the vector times 4. */
15393 if (GET_CODE (x) != PARALLEL)
15394 output_operand_lossage ("invalid %%N value");
15395 else
15396 fprintf (file, "%d", XVECLEN (x, 0) * 4);
15397 return;
15399 case 'O':
15400 /* Similar, but subtract 1 first. */
15401 if (GET_CODE (x) != PARALLEL)
15402 output_operand_lossage ("invalid %%O value");
15403 else
15404 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
15405 return;
15407 case 'p':
15408 /* X is a CONST_INT that is a power of two. Output the logarithm. */
15409 if (! INT_P (x)
15410 || INTVAL (x) < 0
15411 || (i = exact_log2 (INTVAL (x))) < 0)
15412 output_operand_lossage ("invalid %%p value");
15413 else
15414 fprintf (file, "%d", i);
15415 return;
15417 case 'P':
15418 /* The operand must be an indirect memory reference. The result
15419 is the register name. */
15420 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
15421 || REGNO (XEXP (x, 0)) >= 32)
15422 output_operand_lossage ("invalid %%P value");
15423 else
15424 fputs (reg_names[REGNO (XEXP (x, 0))], file);
15425 return;
15427 case 'q':
15428 /* This outputs the logical code corresponding to a boolean
15429 expression. The expression may have one or both operands
15430 negated (if one, only the first one). For condition register
15431 logical operations, it will also treat the negated
15432 CR codes as NOTs, but not handle NOTs of them. */
15434 const char *const *t = 0;
15435 const char *s;
15436 enum rtx_code code = GET_CODE (x);
15437 static const char * const tbl[3][3] = {
15438 { "and", "andc", "nor" },
15439 { "or", "orc", "nand" },
15440 { "xor", "eqv", "xor" } };
15442 if (code == AND)
15443 t = tbl[0];
15444 else if (code == IOR)
15445 t = tbl[1];
15446 else if (code == XOR)
15447 t = tbl[2];
15448 else
15449 output_operand_lossage ("invalid %%q value");
15451 if (GET_CODE (XEXP (x, 0)) != NOT)
15452 s = t[0];
15453 else
15455 if (GET_CODE (XEXP (x, 1)) == NOT)
15456 s = t[2];
15457 else
15458 s = t[1];
15461 fputs (s, file);
15463 return;
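/* E.g. (and (not a) (not b)) prints "nor", while (ior (not a) b)
   prints "orc".  */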
15465 case 'Q':
15466 if (! TARGET_MFCRF)
15467 return;
15468 fputc (',', file);
15469 /* FALLTHRU */
15471 case 'R':
15472 /* X is a CR register. Print the mask for `mtcrf'. */
15473 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15474 output_operand_lossage ("invalid %%R value");
15475 else
15476 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
15477 return;
15479 case 's':
15480 /* Low 5 bits of 32 - value. */
15481 if (! INT_P (x))
15482 output_operand_lossage ("invalid %%s value");
15483 else
15484 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
15485 return;
15487 case 'S':
15488 /* PowerPC64 mask position. All 0's is excluded.
15489 CONST_INT 32-bit mask is considered sign-extended so any
15490 transition must occur within the CONST_INT, not on the boundary. */
15491 if (! mask64_operand (x, DImode))
15492 output_operand_lossage ("invalid %%S value");
15494 uval = INTVAL (x);
15496 if (uval & 1) /* Clear Left */
15498 #if HOST_BITS_PER_WIDE_INT > 64
15499 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
15500 #endif
15501 i = 64;
15503 else /* Clear Right */
15505 uval = ~uval;
15506 #if HOST_BITS_PER_WIDE_INT > 64
15507 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
15508 #endif
15509 i = 63;
15511 while (uval != 0)
15512 --i, uval >>= 1;
15513 gcc_assert (i >= 0);
15514 fprintf (file, "%d", i);
15515 return;
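/* E.g. the mask 0xffff (bits 48..63 set) is a clear-left mask and
   prints 48, while 0xffff000000000000 (bits 0..15 set) is a clear-right
   mask and prints 15.  */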
15517 case 't':
15518 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
15519 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
15521 /* Bit 3 is OV bit. */
15522 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
15524 /* If we want bit 31, write a shift count of zero, not 32. */
15525 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15526 return;
15528 case 'T':
15529 /* Print the symbolic name of a branch target register. */
15530 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
15531 && REGNO (x) != CTR_REGNO))
15532 output_operand_lossage ("invalid %%T value");
15533 else if (REGNO (x) == LR_REGNO)
15534 fputs ("lr", file);
15535 else
15536 fputs ("ctr", file);
15537 return;
15539 case 'u':
15540 /* High-order 16 bits of constant for use in unsigned operand. */
15541 if (! INT_P (x))
15542 output_operand_lossage ("invalid %%u value");
15543 else
15544 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15545 (INTVAL (x) >> 16) & 0xffff);
15546 return;
15548 case 'v':
15549 /* High-order 16 bits of constant for use in signed operand. */
15550 if (! INT_P (x))
15551 output_operand_lossage ("invalid %%v value");
15552 else
15553 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15554 (INTVAL (x) >> 16) & 0xffff);
15555 return;
15557 case 'U':
15558 /* Print `u' if this has an auto-increment or auto-decrement. */
15559 if (MEM_P (x)
15560 && (GET_CODE (XEXP (x, 0)) == PRE_INC
15561 || GET_CODE (XEXP (x, 0)) == PRE_DEC
15562 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
15563 putc ('u', file);
15564 return;
15566 case 'V':
15567 /* Print the trap code for this operand. */
15568 switch (GET_CODE (x))
15570 case EQ:
15571 fputs ("eq", file); /* 4 */
15572 break;
15573 case NE:
15574 fputs ("ne", file); /* 24 */
15575 break;
15576 case LT:
15577 fputs ("lt", file); /* 16 */
15578 break;
15579 case LE:
15580 fputs ("le", file); /* 20 */
15581 break;
15582 case GT:
15583 fputs ("gt", file); /* 8 */
15584 break;
15585 case GE:
15586 fputs ("ge", file); /* 12 */
15587 break;
15588 case LTU:
15589 fputs ("llt", file); /* 2 */
15590 break;
15591 case LEU:
15592 fputs ("lle", file); /* 6 */
15593 break;
15594 case GTU:
15595 fputs ("lgt", file); /* 1 */
15596 break;
15597 case GEU:
15598 fputs ("lge", file); /* 5 */
15599 break;
15600 default:
15601 gcc_unreachable ();
15603 break;
15605 case 'w':
15606 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
15607 normally. */
15608 if (INT_P (x))
15609 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
15610 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
15611 else
15612 print_operand (file, x, 0);
15613 return;
15615 case 'W':
15616 /* MB value for a PowerPC64 rldic operand. */
15617 i = clz_hwi (INTVAL (x));
15619 fprintf (file, "%d", i);
15620 return;
15622 case 'x':
15623 /* X is a FPR or Altivec register used in a VSX context. */
15624 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
15625 output_operand_lossage ("invalid %%x value");
15626 else
15628 int reg = REGNO (x);
15629 int vsx_reg = (FP_REGNO_P (reg)
15630 ? reg - 32
15631 : reg - FIRST_ALTIVEC_REGNO + 32);
15633 #ifdef TARGET_REGNAMES
15634 if (TARGET_REGNAMES)
15635 fprintf (file, "%%vs%d", vsx_reg);
15636 else
15637 #endif
15638 fprintf (file, "%d", vsx_reg);
15640 return;
15642 case 'X':
15643 if (MEM_P (x)
15644 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
15645 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
15646 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
15647 putc ('x', file);
15648 return;
15650 case 'Y':
15651 /* Like 'L', for third word of TImode/PTImode. */
15652 if (REG_P (x))
15653 fputs (reg_names[REGNO (x) + 2], file);
15654 else if (MEM_P (x))
15656 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15657 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15658 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15659 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15660 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15661 else
15662 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
15663 if (small_data_operand (x, GET_MODE (x)))
15664 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15665 reg_names[SMALL_DATA_REG]);
15667 return;
15669 case 'z':
15670 /* X is a SYMBOL_REF. Write out the name preceded by a
15671 period and without any trailing data in brackets. Used for function
15672 names. If we are configured for System V (or the embedded ABI) on
15673 the PowerPC, do not emit the period, since those systems do not use
15674 TOCs and the like. */
15675 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15677 /* Mark the decl as referenced so that cgraph will output the
15678 function. */
15679 if (SYMBOL_REF_DECL (x))
15680 mark_decl_referenced (SYMBOL_REF_DECL (x));
15682 /* For macho, check to see if we need a stub. */
15683 if (TARGET_MACHO)
15685 const char *name = XSTR (x, 0);
15686 #if TARGET_MACHO
15687 if (darwin_emit_branch_islands
15688 && MACHOPIC_INDIRECT
15689 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
15690 name = machopic_indirection_name (x, /*stub_p=*/true);
15691 #endif
15692 assemble_name (file, name);
15694 else if (!DOT_SYMBOLS)
15695 assemble_name (file, XSTR (x, 0));
15696 else
15697 rs6000_output_function_entry (file, XSTR (x, 0));
15698 return;
15700 case 'Z':
15701 /* Like 'L', for last word of TImode/PTImode. */
15702 if (REG_P (x))
15703 fputs (reg_names[REGNO (x) + 3], file);
15704 else if (MEM_P (x))
15706 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15707 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15708 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15709 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15710 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15711 else
15712 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15713 if (small_data_operand (x, GET_MODE (x)))
15714 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15715 reg_names[SMALL_DATA_REG]);
15717 return;
15719 /* Print AltiVec or SPE memory operand. */
15720 case 'y':
15722 rtx tmp;
15724 gcc_assert (MEM_P (x));
15726 tmp = XEXP (x, 0);
15728 /* Ugly hack because %y is overloaded. */
15729 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15730 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15731 || GET_MODE (x) == TFmode
15732 || GET_MODE (x) == TImode
15733 || GET_MODE (x) == PTImode))
15735 /* Handle [reg]. */
15736 if (REG_P (tmp))
15738 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15739 break;
15741 /* Handle [reg+UIMM]. */
15742 else if (GET_CODE (tmp) == PLUS
15743 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15745 int off; /* Renamed from 'x' to avoid shadowing the rtx parameter. */
15747 gcc_assert (REG_P (XEXP (tmp, 0)));
15749 off = INTVAL (XEXP (tmp, 1));
15750 fprintf (file, "%d(%s)", off, reg_names[REGNO (XEXP (tmp, 0))]);
15751 break;
15754 /* Fall through. Must be [reg+reg]. */
15756 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15757 && GET_CODE (tmp) == AND
15758 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15759 && INTVAL (XEXP (tmp, 1)) == -16)
15760 tmp = XEXP (tmp, 0);
15761 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15762 && GET_CODE (tmp) == PRE_MODIFY)
15763 tmp = XEXP (tmp, 1);
15764 if (REG_P (tmp))
15765 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15766 else
15768 if (GET_CODE (tmp) != PLUS
15769 || !REG_P (XEXP (tmp, 0))
15770 || !REG_P (XEXP (tmp, 1)))
15772 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15773 break;
15776 if (REGNO (XEXP (tmp, 0)) == 0)
15777 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15778 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15779 else
15780 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15781 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15783 break;
15786 case 0:
15787 if (REG_P (x))
15788 fprintf (file, "%s", reg_names[REGNO (x)]);
15789 else if (MEM_P (x))
15791 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15792 know the width from the mode. */
15793 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15794 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15795 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15796 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15797 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15798 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15799 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15800 output_address (XEXP (XEXP (x, 0), 1));
15801 else
15802 output_address (XEXP (x, 0));
15804 else
15806 if (toc_relative_expr_p (x, false))
15807 /* This hack along with a corresponding hack in
15808 rs6000_output_addr_const_extra arranges to output addends
15809 where the assembler expects to find them. eg.
15810 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15811 without this hack would be output as "x@toc+4". We
15812 want "x+4@toc". */
15813 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15814 else
15815 output_addr_const (file, x);
15817 return;
15819 case '&':
15820 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15821 return;
15823 default:
15824 output_operand_lossage ("invalid %%xn code");
15828 /* Print the address of an operand. */
15830 void
15831 print_operand_address (FILE *file, rtx x)
15833 if (REG_P (x))
15834 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15835 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15836 || GET_CODE (x) == LABEL_REF)
15838 output_addr_const (file, x);
15839 if (small_data_operand (x, GET_MODE (x)))
15840 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15841 reg_names[SMALL_DATA_REG]);
15842 else
15843 gcc_assert (!TARGET_TOC);
15845 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15846 && REG_P (XEXP (x, 1)))
15848 if (REGNO (XEXP (x, 0)) == 0)
15849 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15850 reg_names[ REGNO (XEXP (x, 0)) ]);
15851 else
15852 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15853 reg_names[ REGNO (XEXP (x, 1)) ]);
15855 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15856 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15857 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15858 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15859 #if TARGET_MACHO
15860 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15861 && CONSTANT_P (XEXP (x, 1)))
15863 fprintf (file, "lo16(");
15864 output_addr_const (file, XEXP (x, 1));
15865 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15867 #endif
15868 #if TARGET_ELF
15869 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15870 && CONSTANT_P (XEXP (x, 1)))
15872 output_addr_const (file, XEXP (x, 1));
15873 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15875 #endif
15876 else if (toc_relative_expr_p (x, false))
15878 /* This hack along with a corresponding hack in
15879 rs6000_output_addr_const_extra arranges to output addends
15880 where the assembler expects to find them. eg.
15881 (lo_sum (reg 9)
15882 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15883 without this hack would be output as "x@toc+8@l(9)". We
15884 want "x+8@toc@l(9)". */
15885 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15886 if (GET_CODE (x) == LO_SUM)
15887 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15888 else
15889 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15891 else
15892 gcc_unreachable ();
15895 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15897 static bool
15898 rs6000_output_addr_const_extra (FILE *file, rtx x)
15900 if (GET_CODE (x) == UNSPEC)
15901 switch (XINT (x, 1))
15903 case UNSPEC_TOCREL:
15904 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15905 && REG_P (XVECEXP (x, 0, 1))
15906 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15907 output_addr_const (file, XVECEXP (x, 0, 0));
15908 if (x == tocrel_base && tocrel_offset != const0_rtx)
15910 if (INTVAL (tocrel_offset) >= 0)
15911 fprintf (file, "+");
15912 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15914 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15916 putc ('-', file);
15917 assemble_name (file, toc_label_name);
15919 else if (TARGET_ELF)
15920 fputs ("@toc", file);
15921 return true;
15923 #if TARGET_MACHO
15924 case UNSPEC_MACHOPIC_OFFSET:
15925 output_addr_const (file, XVECEXP (x, 0, 0));
15926 putc ('-', file);
15927 machopic_output_function_base_name (file);
15928 return true;
15929 #endif
15931 return false;
15934 /* Target hook for assembling integer objects. The PowerPC version has
15935 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15936 is defined. It also needs to handle DI-mode objects on 64-bit
15937 targets. */
15939 static bool
15940 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15942 #ifdef RELOCATABLE_NEEDS_FIXUP
15943 /* Special handling for SI values. */
15944 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15946 static int recurse = 0;
15948 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15949 the .fixup section. Since the TOC section is already relocated, we
15950 don't need to mark it here. We used to skip the text section, but it
15951 should never be valid for relocated addresses to be placed in the text
15952 section. */
15953 if (TARGET_RELOCATABLE
15954 && in_section != toc_section
15955 && !recurse
15956 && GET_CODE (x) != CONST_INT
15957 && GET_CODE (x) != CONST_DOUBLE
15958 && CONSTANT_P (x))
15960 char buf[256];
15962 recurse = 1;
15963 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15964 fixuplabelno++;
15965 ASM_OUTPUT_LABEL (asm_out_file, buf);
15966 fprintf (asm_out_file, "\t.long\t(");
15967 output_addr_const (asm_out_file, x);
15968 fprintf (asm_out_file, ")@fixup\n");
15969 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15970 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15971 fprintf (asm_out_file, "\t.long\t");
15972 assemble_name (asm_out_file, buf);
15973 fprintf (asm_out_file, "\n\t.previous\n");
15974 recurse = 0;
15975 return true;
15977 /* Remove initial .'s to turn a -mcall-aixdesc function
15978 address into the address of the descriptor, not the function
15979 itself. */
15980 else if (GET_CODE (x) == SYMBOL_REF
15981 && XSTR (x, 0)[0] == '.'
15982 && DEFAULT_ABI == ABI_AIX)
15984 const char *name = XSTR (x, 0);
15985 while (*name == '.')
15986 name++;
15988 fprintf (asm_out_file, "\t.long\t%s\n", name);
15989 return true;
15992 #endif /* RELOCATABLE_NEEDS_FIXUP */
15993 return default_assemble_integer (x, size, aligned_p);
15996 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15997 /* Emit an assembler directive to set symbol visibility for DECL to
15998 VISIBILITY_TYPE. */
16000 static void
16001 rs6000_assemble_visibility (tree decl, int vis)
16003 if (TARGET_XCOFF)
16004 return;
16006 /* Functions need to have their entry point symbol visibility set as
16007 well as their descriptor symbol visibility. */
16008 if (DEFAULT_ABI == ABI_AIX
16009 && DOT_SYMBOLS
16010 && TREE_CODE (decl) == FUNCTION_DECL)
16012 static const char * const visibility_types[] = {
16013 NULL, "internal", "hidden", "protected"
16016 const char *name, *type;
16018 name = ((* targetm.strip_name_encoding)
16019 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
16020 type = visibility_types[vis];
16022 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
16023 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
16025 else
16026 default_assemble_visibility (decl, vis);
16028 #endif
16030 enum rtx_code
16031 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
16033 /* Reversal of FP compares needs special care -- an ordered compare
16034 becomes an unordered compare and vice versa. */
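/* E.g. reversing GE on a floating-point compare yields UNLT rather
   than LT, so that a NaN operand still takes the reversed branch.  */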
16035 if (mode == CCFPmode
16036 && (!flag_finite_math_only
16037 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
16038 || code == UNEQ || code == LTGT))
16039 return reverse_condition_maybe_unordered (code);
16040 else
16041 return reverse_condition (code);
16044 /* Generate a compare for CODE. Return a brand-new rtx that
16045 represents the result of the compare. */
16047 static rtx
16048 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
16050 enum machine_mode comp_mode;
16051 rtx compare_result;
16052 enum rtx_code code = GET_CODE (cmp);
16053 rtx op0 = XEXP (cmp, 0);
16054 rtx op1 = XEXP (cmp, 1);
16056 if (FLOAT_MODE_P (mode))
16057 comp_mode = CCFPmode;
16058 else if (code == GTU || code == LTU
16059 || code == GEU || code == LEU)
16060 comp_mode = CCUNSmode;
16061 else if ((code == EQ || code == NE)
16062 && unsigned_reg_p (op0)
16063 && (unsigned_reg_p (op1)
16064 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
16065 /* These are unsigned values, perhaps there will be a later
16066 ordering compare that can be shared with this one. */
16067 comp_mode = CCUNSmode;
16068 else
16069 comp_mode = CCmode;
16071 /* If we have an unsigned compare, make sure we don't have a signed value as
16072 an immediate. */
16073 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
16074 && INTVAL (op1) < 0)
16076 op0 = copy_rtx_if_shared (op0);
16077 op1 = force_reg (GET_MODE (op0), op1);
16078 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
16081 /* First, the compare. */
16082 compare_result = gen_reg_rtx (comp_mode);
16084 /* E500 FP compare instructions on the GPRs. Yuck! */
16085 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
16086 && FLOAT_MODE_P (mode))
16088 rtx cmp, or_result, compare_result2;
16089 enum machine_mode op_mode = GET_MODE (op0);
16091 if (op_mode == VOIDmode)
16092 op_mode = GET_MODE (op1);
16094 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
16095 This explains the following mess. */
16097 switch (code)
16099 case EQ: case UNEQ: case NE: case LTGT:
16100 switch (op_mode)
16102 case SFmode:
16103 cmp = (flag_finite_math_only && !flag_trapping_math)
16104 ? gen_tstsfeq_gpr (compare_result, op0, op1)
16105 : gen_cmpsfeq_gpr (compare_result, op0, op1);
16106 break;
16108 case DFmode:
16109 cmp = (flag_finite_math_only && !flag_trapping_math)
16110 ? gen_tstdfeq_gpr (compare_result, op0, op1)
16111 : gen_cmpdfeq_gpr (compare_result, op0, op1);
16112 break;
16114 case TFmode:
16115 cmp = (flag_finite_math_only && !flag_trapping_math)
16116 ? gen_tsttfeq_gpr (compare_result, op0, op1)
16117 : gen_cmptfeq_gpr (compare_result, op0, op1);
16118 break;
16120 default:
16121 gcc_unreachable ();
16123 break;
16125 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
16126 switch (op_mode)
16128 case SFmode:
16129 cmp = (flag_finite_math_only && !flag_trapping_math)
16130 ? gen_tstsfgt_gpr (compare_result, op0, op1)
16131 : gen_cmpsfgt_gpr (compare_result, op0, op1);
16132 break;
16134 case DFmode:
16135 cmp = (flag_finite_math_only && !flag_trapping_math)
16136 ? gen_tstdfgt_gpr (compare_result, op0, op1)
16137 : gen_cmpdfgt_gpr (compare_result, op0, op1);
16138 break;
16140 case TFmode:
16141 cmp = (flag_finite_math_only && !flag_trapping_math)
16142 ? gen_tsttfgt_gpr (compare_result, op0, op1)
16143 : gen_cmptfgt_gpr (compare_result, op0, op1);
16144 break;
16146 default:
16147 gcc_unreachable ();
16149 break;
16151 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
16152 switch (op_mode)
16154 case SFmode:
16155 cmp = (flag_finite_math_only && !flag_trapping_math)
16156 ? gen_tstsflt_gpr (compare_result, op0, op1)
16157 : gen_cmpsflt_gpr (compare_result, op0, op1);
16158 break;
16160 case DFmode:
16161 cmp = (flag_finite_math_only && !flag_trapping_math)
16162 ? gen_tstdflt_gpr (compare_result, op0, op1)
16163 : gen_cmpdflt_gpr (compare_result, op0, op1);
16164 break;
16166 case TFmode:
16167 cmp = (flag_finite_math_only && !flag_trapping_math)
16168 ? gen_tsttflt_gpr (compare_result, op0, op1)
16169 : gen_cmptflt_gpr (compare_result, op0, op1);
16170 break;
16172 default:
16173 gcc_unreachable ();
16175 break;
16176 default:
16177 gcc_unreachable ();
16180 /* Synthesize LE and GE from LT/GT || EQ. */
16181 if (code == LE || code == GE || code == LEU || code == GEU)
16183 emit_insn (cmp);
16185 switch (code)
16187 case LE: code = LT; break;
16188 case GE: code = GT; break;
16189 case LEU: code = LT; break;
16190 case GEU: code = GT; break;
16191 default: gcc_unreachable ();
16194 compare_result2 = gen_reg_rtx (CCFPmode);
16196 /* Do the EQ. */
16197 switch (op_mode)
16199 case SFmode:
16200 cmp = (flag_finite_math_only && !flag_trapping_math)
16201 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
16202 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
16203 break;
16205 case DFmode:
16206 cmp = (flag_finite_math_only && !flag_trapping_math)
16207 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
16208 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
16209 break;
16211 case TFmode:
16212 cmp = (flag_finite_math_only && !flag_trapping_math)
16213 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
16214 : gen_cmptfeq_gpr (compare_result2, op0, op1);
16215 break;
16217 default:
16218 gcc_unreachable ();
16220 emit_insn (cmp);
16222 /* OR them together. */
16223 or_result = gen_reg_rtx (CCFPmode);
16224 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
16225 compare_result2);
16226 compare_result = or_result;
16227 code = EQ;
16229 else
16231 if (code == NE || code == LTGT)
16232 code = NE;
16233 else
16234 code = EQ;
16237 emit_insn (cmp);
16239 else
16241 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
16242 CLOBBERs to match cmptf_internal2 pattern. */
16243 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
16244 && GET_MODE (op0) == TFmode
16245 && !TARGET_IEEEQUAD
16246 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
16247 emit_insn (gen_rtx_PARALLEL (VOIDmode,
16248 gen_rtvec (10,
16249 gen_rtx_SET (VOIDmode,
16250 compare_result,
16251 gen_rtx_COMPARE (comp_mode, op0, op1)),
16252 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16253 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16254 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16255 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16256 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16257 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16258 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16259 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
16260 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
16261 else if (GET_CODE (op1) == UNSPEC
16262 && XINT (op1, 1) == UNSPEC_SP_TEST)
16264 rtx op1b = XVECEXP (op1, 0, 0);
16265 comp_mode = CCEQmode;
16266 compare_result = gen_reg_rtx (CCEQmode);
16267 if (TARGET_64BIT)
16268 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
16269 else
16270 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
16272 else
16273 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
16274 gen_rtx_COMPARE (comp_mode, op0, op1)));
16277 /* Some kinds of FP comparisons need an OR operation;
16278 under flag_finite_math_only we don't bother. */
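/* E.g. a floating-point LE is really LT || EQ of the CR bits: testing
   it as the reverse of GT would also succeed on unordered operands,
   which is only safe under flag_finite_math_only.  */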
16279 if (FLOAT_MODE_P (mode)
16280 && !flag_finite_math_only
16281 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
16282 && (code == LE || code == GE
16283 || code == UNEQ || code == LTGT
16284 || code == UNGT || code == UNLT))
16286 enum rtx_code or1, or2;
16287 rtx or1_rtx, or2_rtx, compare2_rtx;
16288 rtx or_result = gen_reg_rtx (CCEQmode);
16290 switch (code)
16292 case LE: or1 = LT; or2 = EQ; break;
16293 case GE: or1 = GT; or2 = EQ; break;
16294 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
16295 case LTGT: or1 = LT; or2 = GT; break;
16296 case UNGT: or1 = UNORDERED; or2 = GT; break;
16297 case UNLT: or1 = UNORDERED; or2 = LT; break;
16298 default: gcc_unreachable ();
16300 validate_condition_mode (or1, comp_mode);
16301 validate_condition_mode (or2, comp_mode);
16302 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
16303 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
16304 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
16305 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
16306 const_true_rtx);
16307 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
16309 compare_result = or_result;
16310 code = EQ;
16313 validate_condition_mode (code, GET_MODE (compare_result));
16315 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
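/* Editorial sketch (not in the original source; registers and CR bits
   depend on allocation): for a DFmode "a <= b" under default flags,
   the OR composition above typically amounts to

       fcmpu  cr0,fA,fB              ; CCFPmode compare
       cror   eq-bit,lt-bit,eq-bit   ; OR the LT and EQ bits into a CCEQ result

   after which the caller tests the result with EQ against 0, since
   CODE was rewritten to EQ above.  */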
16319 /* Emit the RTL for an sISEL pattern. */
16321 void
16322 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
16324 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
16327 void
16328 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
16330 rtx condition_rtx;
16331 enum machine_mode op_mode;
16332 enum rtx_code cond_code;
16333 rtx result = operands[0];
16335 if (TARGET_ISEL && (mode == SImode || mode == DImode))
16337 rs6000_emit_sISEL (mode, operands);
16338 return;
16341 condition_rtx = rs6000_generate_compare (operands[1], mode);
16342 cond_code = GET_CODE (condition_rtx);
16344 if (FLOAT_MODE_P (mode)
16345 && !TARGET_FPRS && TARGET_HARD_FLOAT)
16347 rtx t;
16349 PUT_MODE (condition_rtx, SImode);
16350 t = XEXP (condition_rtx, 0);
16352 gcc_assert (cond_code == NE || cond_code == EQ);
16354 if (cond_code == NE)
16355 emit_insn (gen_e500_flip_gt_bit (t, t));
16357 emit_insn (gen_move_from_CR_gt_bit (result, t));
16358 return;
16361 if (cond_code == NE
16362 || cond_code == GE || cond_code == LE
16363 || cond_code == GEU || cond_code == LEU
16364 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
16366 rtx not_result = gen_reg_rtx (CCEQmode);
16367 rtx not_op, rev_cond_rtx;
16368 enum machine_mode cc_mode;
16370 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
16372 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
16373 SImode, XEXP (condition_rtx, 0), const0_rtx);
16374 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
16375 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
16376 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
16379 op_mode = GET_MODE (XEXP (operands[1], 0));
16380 if (op_mode == VOIDmode)
16381 op_mode = GET_MODE (XEXP (operands[1], 1));
16383 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
16385 PUT_MODE (condition_rtx, DImode);
16386 convert_move (result, condition_rtx, 0);
16388 else
16390 PUT_MODE (condition_rtx, SImode);
16391 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
16395 /* Emit a branch of kind CODE to location LOC. */
16397 void
16398 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
16400 rtx condition_rtx, loc_ref;
16402 condition_rtx = rs6000_generate_compare (operands[0], mode);
16403 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
16404 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
16405 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
16406 loc_ref, pc_rtx)));
16409 /* Return the string to output a conditional branch to LABEL, which is
16410 the operand template of the label, or NULL if the branch is really a
16411 conditional return.
16413 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
16414 condition code register and its mode specifies what kind of
16415 comparison we made.
16417 REVERSED is nonzero if we should reverse the sense of the comparison.
16419 INSN is the insn. */
16421 char *
16422 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
16424 static char string[64];
16425 enum rtx_code code = GET_CODE (op);
16426 rtx cc_reg = XEXP (op, 0);
16427 enum machine_mode mode = GET_MODE (cc_reg);
16428 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
16429 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
16430 int really_reversed = reversed ^ need_longbranch;
16431 char *s = string;
16432 const char *ccode;
16433 const char *pred;
16434 rtx note;
16436 validate_condition_mode (code, mode);
16438 /* Work out which way this really branches. We could use
16439 reverse_condition_maybe_unordered here always, but this way
16440 makes the resulting assembler clearer. */
16441 if (really_reversed)
16443 /* Reversal of FP compares needs care -- an ordered compare
16444 becomes an unordered compare and vice versa. */
16445 if (mode == CCFPmode)
16446 code = reverse_condition_maybe_unordered (code);
16447 else
16448 code = reverse_condition (code);
16451 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
16453 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
16454 to the GT bit. */
16455 switch (code)
16457 case EQ:
16458 /* Opposite of GT. */
16459 code = GT;
16460 break;
16462 case NE:
16463 code = UNLE;
16464 break;
16466 default:
16467 gcc_unreachable ();
16471 switch (code)
16473 /* Not all of these are actually distinct opcodes, but
16474 we distinguish them for clarity of the resulting assembler. */
16475 case NE: case LTGT:
16476 ccode = "ne"; break;
16477 case EQ: case UNEQ:
16478 ccode = "eq"; break;
16479 case GE: case GEU:
16480 ccode = "ge"; break;
16481 case GT: case GTU: case UNGT:
16482 ccode = "gt"; break;
16483 case LE: case LEU:
16484 ccode = "le"; break;
16485 case LT: case LTU: case UNLT:
16486 ccode = "lt"; break;
16487 case UNORDERED: ccode = "un"; break;
16488 case ORDERED: ccode = "nu"; break;
16489 case UNGE: ccode = "nl"; break;
16490 case UNLE: ccode = "ng"; break;
16491 default:
16492 gcc_unreachable ();
16495 /* Maybe we have a guess as to how likely the branch is. */
16496 pred = "";
16497 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
16498 if (note != NULL_RTX)
16500 /* PROB is the difference from 50%. */
16501 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
16503 /* Only hint for highly probable/improbable branches on newer
16504 cpus as static prediction overrides processor dynamic
16505 prediction. For older cpus we may as well always hint, but
16506 assume not taken for branches that are very close to 50% as a
16507 mispredicted taken branch is more expensive than a
16508 mispredicted not-taken branch. */
16509 if (rs6000_always_hint
16510 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
16511 && br_prob_note_reliable_p (note)))
16513 if (abs (prob) > REG_BR_PROB_BASE / 20
16514 && ((prob > 0) ^ need_longbranch))
16515 pred = "+";
16516 else
16517 pred = "-";
16521 if (label == NULL)
16522 s += sprintf (s, "b%slr%s ", ccode, pred);
16523 else
16524 s += sprintf (s, "b%s%s ", ccode, pred);
16526 /* We need to escape any '%' characters in the reg_names string.
16527 Assume they'd only be the first character.... */
16528 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
16529 *s++ = '%';
16530 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
16532 if (label != NULL)
16534 /* If the branch distance was too far, we may have to use an
16535 unconditional branch to go the distance. */
16536 if (need_longbranch)
16537 s += sprintf (s, ",$+8\n\tb %s", label);
16538 else
16539 s += sprintf (s, ",%s", label);
16542 return string;
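/* Editorial examples, assuming a configuration whose CR register
   names are bare field numbers: an EQ test on cr6 with a likely-taken
   hint returns something like "beq+ 6,%l2", while a branch whose
   target is out of range returns the inverted long-branch pair
   "bne- 6,$+8\n\tb %l2".  */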
16545 /* Return the string to flip the GT bit on a CR. */
16546 char *
16547 output_e500_flip_gt_bit (rtx dst, rtx src)
16549 static char string[64];
16550 int a, b;
16552 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
16553 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
16555 /* GT bit. */
16556 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
16557 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
16559 sprintf (string, "crnot %d,%d", a, b);
16560 return string;
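/* Worked example (editorial): flipping the GT bit of cr6 into cr6
   gives a = b = 4 * 6 + 1 = 25, so the returned string is
   "crnot 25,25".  */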
16563 /* Return insn for VSX or Altivec comparisons. */
16565 static rtx
16566 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
16568 rtx mask;
16569 enum machine_mode mode = GET_MODE (op0);
16571 switch (code)
16573 default:
16574 break;
16576 case GE:
16577 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16578 return NULL_RTX;
16580 case EQ:
16581 case GT:
16582 case GTU:
16583 case ORDERED:
16584 case UNORDERED:
16585 case UNEQ:
16586 case LTGT:
16587 mask = gen_reg_rtx (mode);
16588 emit_insn (gen_rtx_SET (VOIDmode,
16589 mask,
16590 gen_rtx_fmt_ee (code, mode, op0, op1)));
16591 return mask;
16594 return NULL_RTX;
16597 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
16598 DMODE is expected destination mode. This is a recursive function. */
16600 static rtx
16601 rs6000_emit_vector_compare (enum rtx_code rcode,
16602 rtx op0, rtx op1,
16603 enum machine_mode dmode)
16605 rtx mask;
16606 bool swap_operands = false;
16607 bool try_again = false;
16609 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
16610 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
16612 /* See if the comparison works as is. */
16613 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16614 if (mask)
16615 return mask;
16617 switch (rcode)
16619 case LT:
16620 rcode = GT;
16621 swap_operands = true;
16622 try_again = true;
16623 break;
16624 case LTU:
16625 rcode = GTU;
16626 swap_operands = true;
16627 try_again = true;
16628 break;
16629 case NE:
16630 case UNLE:
16631 case UNLT:
16632 case UNGE:
16633 case UNGT:
16634 /* Invert condition and try again.
16635 e.g., A != B becomes ~(A==B). */
16637 enum rtx_code rev_code;
16638 enum insn_code nor_code;
16639 rtx mask2;
16641 rev_code = reverse_condition_maybe_unordered (rcode);
16642 if (rev_code == UNKNOWN)
16643 return NULL_RTX;
16645 nor_code = optab_handler (one_cmpl_optab, dmode);
16646 if (nor_code == CODE_FOR_nothing)
16647 return NULL_RTX;
16649 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
16650 if (!mask2)
16651 return NULL_RTX;
16653 mask = gen_reg_rtx (dmode);
16654 emit_insn (GEN_FCN (nor_code) (mask, mask2));
16655 return mask;
16657 break;
16658 case GE:
16659 case GEU:
16660 case LE:
16661 case LEU:
16662 /* Try GT/GTU/LT/LTU OR EQ */
16664 rtx c_rtx, eq_rtx;
16665 enum insn_code ior_code;
16666 enum rtx_code new_code;
16668 switch (rcode)
16670 case GE:
16671 new_code = GT;
16672 break;
16674 case GEU:
16675 new_code = GTU;
16676 break;
16678 case LE:
16679 new_code = LT;
16680 break;
16682 case LEU:
16683 new_code = LTU;
16684 break;
16686 default:
16687 gcc_unreachable ();
16690 ior_code = optab_handler (ior_optab, dmode);
16691 if (ior_code == CODE_FOR_nothing)
16692 return NULL_RTX;
16694 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
16695 if (!c_rtx)
16696 return NULL_RTX;
16698 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
16699 if (!eq_rtx)
16700 return NULL_RTX;
16702 mask = gen_reg_rtx (dmode);
16703 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
16704 return mask;
16706 break;
16707 default:
16708 return NULL_RTX;
16711 if (try_again)
16713 if (swap_operands)
16715 rtx tmp;
16716 tmp = op0;
16717 op0 = op1;
16718 op1 = tmp;
16721 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16722 if (mask)
16723 return mask;
16726 /* You only get two chances. */
16727 return NULL_RTX;
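/* Worked example (editorial; instruction names are illustrative): a
   V4SI "a <= b" takes the GE/GEU/LE/LEU arm above and becomes
   (a < b) OR (a == b); the LT leg is retried as GT with swapped
   operands, so the emitted insns are roughly

       vcmpgtsw  t1,b,a      ; b > a, i.e. a < b
       vcmpequw  t2,a,b      ; a == b
       vor       mask,t1,t2  */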
16730 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16731 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16732 operands for the relation operation COND. */
16734 int
16735 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16736 rtx cond, rtx cc_op0, rtx cc_op1)
16738 enum machine_mode dest_mode = GET_MODE (dest);
16739 enum machine_mode mask_mode = GET_MODE (cc_op0);
16740 enum rtx_code rcode = GET_CODE (cond);
16741 enum machine_mode cc_mode = CCmode;
16742 rtx mask;
16743 rtx cond2;
16744 rtx tmp;
16745 bool invert_move = false;
16747 if (VECTOR_UNIT_NONE_P (dest_mode))
16748 return 0;
16750 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16751 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16753 switch (rcode)
16755 /* Swap operands if we can; otherwise fall back to doing the operation
16756 as specified and using a NOR to invert the test. */
16757 case NE:
16758 case UNLE:
16759 case UNLT:
16760 case UNGE:
16761 case UNGT:
16762 /* Invert condition and try again.
16763 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16764 invert_move = true;
16765 rcode = reverse_condition_maybe_unordered (rcode);
16766 if (rcode == UNKNOWN)
16767 return 0;
16768 break;
16770 /* Mark unsigned tests with CCUNSmode. */
16771 case GTU:
16772 case GEU:
16773 case LTU:
16774 case LEU:
16775 cc_mode = CCUNSmode;
16776 break;
16778 default:
16779 break;
16782 /* Get the vector mask for the given relational operations. */
16783 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16785 if (!mask)
16786 return 0;
16788 if (invert_move)
16790 tmp = op_true;
16791 op_true = op_false;
16792 op_false = tmp;
16795 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16796 CONST0_RTX (dest_mode));
16797 emit_insn (gen_rtx_SET (VOIDmode,
16798 dest,
16799 gen_rtx_IF_THEN_ELSE (dest_mode,
16800 cond2,
16801 op_true,
16802 op_false)));
16803 return 1;
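/* Editorial example: A = (B != C) ? D : E is first inverted to
   A = (B == C) ? E : D, so MASK is a vcmpequ*-style result; the
   IF_THEN_ELSE on (mask != 0) is then matched by a vector-select
   pattern (vsel/xxsel style).  Names are illustrative.  */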
16806 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
16807 operands of the last comparison is nonzero/true, FALSE_COND if it
16808 is zero/false. Return 0 if the hardware has no such operation. */
16810 int
16811 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16813 enum rtx_code code = GET_CODE (op);
16814 rtx op0 = XEXP (op, 0);
16815 rtx op1 = XEXP (op, 1);
16816 REAL_VALUE_TYPE c1;
16817 enum machine_mode compare_mode = GET_MODE (op0);
16818 enum machine_mode result_mode = GET_MODE (dest);
16819 rtx temp;
16820 bool is_against_zero;
16822 /* These modes should always match. */
16823 if (GET_MODE (op1) != compare_mode
16824 /* In the isel case, however, we can use a compare immediate, so
16825 op1 may be a small constant. */
16826 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16827 return 0;
16828 if (GET_MODE (true_cond) != result_mode)
16829 return 0;
16830 if (GET_MODE (false_cond) != result_mode)
16831 return 0;
16833 /* Don't allow using floating point comparisons for integer results for
16834 now. */
16835 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
16836 return 0;
16838 /* First, work out if the hardware can do this at all, or
16839 if it's too slow.... */
16840 if (!FLOAT_MODE_P (compare_mode))
16842 if (TARGET_ISEL)
16843 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16844 return 0;
16846 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16847 && SCALAR_FLOAT_MODE_P (compare_mode))
16848 return 0;
16850 is_against_zero = op1 == CONST0_RTX (compare_mode);
16852 /* A floating-point subtract might overflow, underflow, or produce
16853 an inexact result, thus changing the floating-point flags, so it
16854 can't be generated if we care about that. It's safe if one side
16855 of the construct is zero, since then no subtract will be
16856 generated. */
16857 if (SCALAR_FLOAT_MODE_P (compare_mode)
16858 && flag_trapping_math && ! is_against_zero)
16859 return 0;
16861 /* Eliminate half of the comparisons by switching operands, this
16862 makes the remaining code simpler. */
16863 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16864 || code == LTGT || code == LT || code == UNLE)
16866 code = reverse_condition_maybe_unordered (code);
16867 temp = true_cond;
16868 true_cond = false_cond;
16869 false_cond = temp;
16872 /* UNEQ and LTGT take four instructions for a comparison with zero,
16873 so it'll probably be faster to use a branch here too. */
16874 if (code == UNEQ && HONOR_NANS (compare_mode))
16875 return 0;
16877 if (GET_CODE (op1) == CONST_DOUBLE)
16878 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16880 /* We're going to try to implement comparisons by performing
16881 a subtract, then comparing against zero. Unfortunately,
16882 Inf - Inf is NaN, which is not zero, and so if we don't
16883 know that the operand is finite and the comparison
16884 would treat EQ differently from UNORDERED, we can't do it. */
16885 if (HONOR_INFINITIES (compare_mode)
16886 && code != GT && code != UNGE
16887 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16888 /* Constructs of the form (a OP b ? a : b) are safe. */
16889 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16890 || (! rtx_equal_p (op0, true_cond)
16891 && ! rtx_equal_p (op1, true_cond))))
16892 return 0;
16894 /* At this point we know we can use fsel. */
16896 /* Reduce the comparison to a comparison against zero. */
16897 if (! is_against_zero)
16899 temp = gen_reg_rtx (compare_mode);
16900 emit_insn (gen_rtx_SET (VOIDmode, temp,
16901 gen_rtx_MINUS (compare_mode, op0, op1)));
16902 op0 = temp;
16903 op1 = CONST0_RTX (compare_mode);
16906 /* If we don't care about NaNs we can reduce some of the comparisons
16907 down to faster ones. */
16908 if (! HONOR_NANS (compare_mode))
16909 switch (code)
16911 case GT:
16912 code = LE;
16913 temp = true_cond;
16914 true_cond = false_cond;
16915 false_cond = temp;
16916 break;
16917 case UNGE:
16918 code = GE;
16919 break;
16920 case UNEQ:
16921 code = EQ;
16922 break;
16923 default:
16924 break;
16927 /* Now, reduce everything down to a GE. */
16928 switch (code)
16930 case GE:
16931 break;
16933 case LE:
16934 temp = gen_reg_rtx (compare_mode);
16935 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16936 op0 = temp;
16937 break;
16939 case ORDERED:
16940 temp = gen_reg_rtx (compare_mode);
16941 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16942 op0 = temp;
16943 break;
16945 case EQ:
16946 temp = gen_reg_rtx (compare_mode);
16947 emit_insn (gen_rtx_SET (VOIDmode, temp,
16948 gen_rtx_NEG (compare_mode,
16949 gen_rtx_ABS (compare_mode, op0))));
16950 op0 = temp;
16951 break;
16953 case UNGE:
16954 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16955 temp = gen_reg_rtx (result_mode);
16956 emit_insn (gen_rtx_SET (VOIDmode, temp,
16957 gen_rtx_IF_THEN_ELSE (result_mode,
16958 gen_rtx_GE (VOIDmode,
16959 op0, op1),
16960 true_cond, false_cond)));
16961 false_cond = true_cond;
16962 true_cond = temp;
16964 temp = gen_reg_rtx (compare_mode);
16965 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16966 op0 = temp;
16967 break;
16969 case GT:
16970 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16971 temp = gen_reg_rtx (result_mode);
16972 emit_insn (gen_rtx_SET (VOIDmode, temp,
16973 gen_rtx_IF_THEN_ELSE (result_mode,
16974 gen_rtx_GE (VOIDmode,
16975 op0, op1),
16976 true_cond, false_cond)));
16977 true_cond = false_cond;
16978 false_cond = temp;
16980 temp = gen_reg_rtx (compare_mode);
16981 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16982 op0 = temp;
16983 break;
16985 default:
16986 gcc_unreachable ();
16989 emit_insn (gen_rtx_SET (VOIDmode, dest,
16990 gen_rtx_IF_THEN_ELSE (result_mode,
16991 gen_rtx_GE (VOIDmode,
16992 op0, op1),
16993 true_cond, false_cond)));
16994 return 1;
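/* Editorial sketch of the fsel path above: with flag_trapping_math
   off, a DFmode "d = (a >= b) ? x : y" is reduced to a compare
   against zero via a subtract and then a single fsel, roughly

       fsub  f0,fA,fB
       fsel  fD,f0,fX,fY     ; fD = (f0 >= 0.0) ? fX : fY  */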
16997 /* Same as above, but for ints (isel). */
16999 static int
17000 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
17002 rtx condition_rtx, cr;
17003 enum machine_mode mode = GET_MODE (dest);
17004 enum rtx_code cond_code;
17005 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
17006 bool signedp;
17008 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
17009 return 0;
17011 /* We still have to do the compare, because isel doesn't do a
17012 compare, it just looks at the CRx bits set by a previous compare
17013 instruction. */
17014 condition_rtx = rs6000_generate_compare (op, mode);
17015 cond_code = GET_CODE (condition_rtx);
17016 cr = XEXP (condition_rtx, 0);
17017 signedp = GET_MODE (cr) == CCmode;
17019 isel_func = (mode == SImode
17020 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
17021 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
17023 switch (cond_code)
17025 case LT: case GT: case LTU: case GTU: case EQ:
17026 /* isel handles these directly. */
17027 break;
17029 default:
17030 /* We need to swap the sense of the comparison. */
17032 rtx t = true_cond;
17033 true_cond = false_cond;
17034 false_cond = t;
17035 PUT_CODE (condition_rtx, reverse_condition (cond_code));
17037 break;
17040 false_cond = force_reg (mode, false_cond);
17041 if (true_cond != const0_rtx)
17042 true_cond = force_reg (mode, true_cond);
17044 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
17046 return 1;
17049 const char *
17050 output_isel (rtx *operands)
17052 enum rtx_code code;
17054 code = GET_CODE (operands[1]);
17056 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
17058 gcc_assert (GET_CODE (operands[2]) == REG
17059 && GET_CODE (operands[3]) == REG);
17060 PUT_CODE (operands[1], reverse_condition (code));
17061 return "isel %0,%3,%2,%j1";
17064 return "isel %0,%2,%3,%j1";
17067 void
17068 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
17070 enum machine_mode mode = GET_MODE (op0);
17071 enum rtx_code c;
17072 rtx target;
17074 /* VSX/altivec have direct min/max insns. */
17075 if ((code == SMAX || code == SMIN)
17076 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
17077 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
17079 emit_insn (gen_rtx_SET (VOIDmode,
17080 dest,
17081 gen_rtx_fmt_ee (code, mode, op0, op1)));
17082 return;
17085 if (code == SMAX || code == SMIN)
17086 c = GE;
17087 else
17088 c = GEU;
17090 if (code == SMAX || code == UMAX)
17091 target = emit_conditional_move (dest, c, op0, op1, mode,
17092 op0, op1, mode, 0);
17093 else
17094 target = emit_conditional_move (dest, c, op0, op1, mode,
17095 op1, op0, mode, 0);
17096 gcc_assert (target);
17097 if (target != dest)
17098 emit_move_insn (dest, target);
17101 /* A subroutine of the atomic operation splitters. Jump to LABEL if
17102 COND is true. Mark the jump as unlikely to be taken. */
17104 static void
17105 emit_unlikely_jump (rtx cond, rtx label)
17107 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
17108 rtx x;
17110 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
17111 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
17112 add_reg_note (x, REG_BR_PROB, very_unlikely);
17115 /* A subroutine of the atomic operation splitters. Emit a load-locked
17116 instruction in MODE. */
17118 static void
17119 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
17121 rtx (*fn) (rtx, rtx) = NULL;
17123 switch (mode)
17125 case SImode:
17126 fn = gen_load_lockedsi;
17127 break;
17128 case DImode:
17129 fn = gen_load_lockeddi;
17130 break;
17131 default:
17132 gcc_unreachable ();
17134 emit_insn (fn (reg, mem));
17137 /* A subroutine of the atomic operation splitters. Emit a store-conditional
17138 instruction in MODE. */
17140 static void
17141 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
17143 rtx (*fn) (rtx, rtx, rtx) = NULL;
17145 switch (mode)
17147 case SImode:
17148 fn = gen_store_conditionalsi;
17149 break;
17150 case DImode:
17151 fn = gen_store_conditionaldi;
17152 break;
17153 default:
17154 gcc_unreachable ();
17157 /* Emit sync before stwcx. to address PPC405 Erratum. */
17158 if (PPC405_ERRATUM77)
17159 emit_insn (gen_hwsync ());
17161 emit_insn (fn (res, mem, val));
17164 /* Expand barriers before and after a load_locked/store_cond sequence. */
17166 static rtx
17167 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
17169 rtx addr = XEXP (mem, 0);
17170 int strict_p = (reload_in_progress || reload_completed);
17172 if (!legitimate_indirect_address_p (addr, strict_p)
17173 && !legitimate_indexed_address_p (addr, strict_p))
17175 addr = force_reg (Pmode, addr);
17176 mem = replace_equiv_address_nv (mem, addr);
17179 switch (model)
17181 case MEMMODEL_RELAXED:
17182 case MEMMODEL_CONSUME:
17183 case MEMMODEL_ACQUIRE:
17184 break;
17185 case MEMMODEL_RELEASE:
17186 case MEMMODEL_ACQ_REL:
17187 emit_insn (gen_lwsync ());
17188 break;
17189 case MEMMODEL_SEQ_CST:
17190 emit_insn (gen_hwsync ());
17191 break;
17192 default:
17193 gcc_unreachable ();
17195 return mem;
17198 static void
17199 rs6000_post_atomic_barrier (enum memmodel model)
17201 switch (model)
17203 case MEMMODEL_RELAXED:
17204 case MEMMODEL_CONSUME:
17205 case MEMMODEL_RELEASE:
17206 break;
17207 case MEMMODEL_ACQUIRE:
17208 case MEMMODEL_ACQ_REL:
17209 case MEMMODEL_SEQ_CST:
17210 emit_insn (gen_isync ());
17211 break;
17212 default:
17213 gcc_unreachable ();
17217 /* A subroutine of the various atomic expanders. For sub-word operations,
17218 we must adjust things to operate on SImode. Given the original MEM,
17219 return a new aligned memory. Also build and return the quantities by
17220 which to shift and mask. */
17222 static rtx
17223 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
17225 rtx addr, align, shift, mask, mem;
17226 HOST_WIDE_INT shift_mask;
17227 enum machine_mode mode = GET_MODE (orig_mem);
17229 /* For smaller modes, we have to implement this via SImode. */
17230 shift_mask = (mode == QImode ? 0x18 : 0x10);
17232 addr = XEXP (orig_mem, 0);
17233 addr = force_reg (GET_MODE (addr), addr);
17235 /* Aligned memory containing subword. Generate a new memory. We
17236 do not want any of the existing MEM_ATTR data, as we're now
17237 accessing memory outside the original object. */
17238 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
17239 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17240 mem = gen_rtx_MEM (SImode, align);
17241 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
17242 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
17243 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
17245 /* Shift amount for subword relative to aligned word. */
17246 shift = gen_reg_rtx (SImode);
17247 addr = gen_lowpart (SImode, addr);
17248 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
17249 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
17250 shift, 1, OPTAB_LIB_WIDEN);
17251 *pshift = shift;
17253 /* Mask for insertion. */
17254 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
17255 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
17256 *pmask = mask;
17258 return mem;
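/* Worked example (editorial; as written, the XOR assumes big-endian
   subword placement): for an HImode access at byte offset 2 within
   its aligned word, shift_mask is 0x10, so (addr << 3) & 0x10 = 0x10
   and the XOR yields shift = 0 -- the halfword already sits in the
   low 16 bits.  At byte offset 0 the XOR yields shift = 16, and
   mask = 0xffff << 16.  */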
17261 /* A subroutine of the various atomic expanders. For sub-word operands,
17262 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
17264 static rtx
17265 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
17267 rtx x;
17269 x = gen_reg_rtx (SImode);
17270 emit_insn (gen_rtx_SET (VOIDmode, x,
17271 gen_rtx_AND (SImode,
17272 gen_rtx_NOT (SImode, mask),
17273 oldval)));
17275 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
17277 return x;
17280 /* A subroutine of the various atomic expanders. For sub-word operands,
17281 extract WIDE to NARROW via SHIFT. */
17283 static void
17284 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
17286 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
17287 wide, 1, OPTAB_LIB_WIDEN);
17288 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
17291 /* Expand an atomic compare and swap operation. */
17293 void
17294 rs6000_expand_atomic_compare_and_swap (rtx operands[])
17296 rtx boolval, retval, mem, oldval, newval, cond;
17297 rtx label1, label2, x, mask, shift;
17298 enum machine_mode mode;
17299 enum memmodel mod_s, mod_f;
17300 bool is_weak;
17302 boolval = operands[0];
17303 retval = operands[1];
17304 mem = operands[2];
17305 oldval = operands[3];
17306 newval = operands[4];
17307 is_weak = (INTVAL (operands[5]) != 0);
17308 mod_s = (enum memmodel) INTVAL (operands[6]);
17309 mod_f = (enum memmodel) INTVAL (operands[7]);
17310 mode = GET_MODE (mem);
17312 mask = shift = NULL_RTX;
17313 if (mode == QImode || mode == HImode)
17315 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
17317 /* Shift and mask OLDVAL into position within the word. */
17318 oldval = convert_modes (SImode, mode, oldval, 1);
17319 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
17320 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17322 /* Shift and mask NEWVAL into position within the word. */
17323 newval = convert_modes (SImode, mode, newval, 1);
17324 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
17325 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17327 /* Prepare to adjust the return value. */
17328 retval = gen_reg_rtx (SImode);
17329 mode = SImode;
17331 else if (reg_overlap_mentioned_p (retval, oldval))
17332 oldval = copy_to_reg (oldval);
17334 mem = rs6000_pre_atomic_barrier (mem, mod_s);
17336 label1 = NULL_RTX;
17337 if (!is_weak)
17339 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
17340 emit_label (XEXP (label1, 0));
17342 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
17344 emit_load_locked (mode, retval, mem);
17346 x = retval;
17347 if (mask)
17349 x = expand_simple_binop (SImode, AND, retval, mask,
17350 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17353 cond = gen_reg_rtx (CCmode);
17354 x = gen_rtx_COMPARE (CCmode, x, oldval);
17355 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
17357 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17358 emit_unlikely_jump (x, label2);
17360 x = newval;
17361 if (mask)
17362 x = rs6000_mask_atomic_subword (retval, newval, mask);
17364 emit_store_conditional (mode, cond, mem, x);
17366 if (!is_weak)
17368 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17369 emit_unlikely_jump (x, label1);
17372 if (mod_f != MEMMODEL_RELAXED)
17373 emit_label (XEXP (label2, 0));
17375 rs6000_post_atomic_barrier (mod_s);
17377 if (mod_f == MEMMODEL_RELAXED)
17378 emit_label (XEXP (label2, 0));
17380 if (shift)
17381 rs6000_finish_atomic_subword (operands[1], retval, shift);
17383 /* In all cases, CR0 contains EQ on success, and NE on failure. */
17384 x = gen_rtx_EQ (SImode, cond, const0_rtx);
17385 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
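/* Editorial sketch of the code emitted for a strong word-sized
   compare-and-swap (register names illustrative):

       1: lwarx   r9,0,rMEM       ; load-locked
          cmpw    cr0,r9,rOLD
          bne-    cr0,2f          ; mismatch: fail
          stwcx.  rNEW,0,rMEM     ; store-conditional
          bne-    cr0,1b          ; lost reservation: retry
       2: ...                     ; CR0.EQ set iff the swap succeeded

   with the memory-model barriers from the pre/post helpers around
   the loop.  */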
17388 /* Expand an atomic exchange operation. */
17390 void
17391 rs6000_expand_atomic_exchange (rtx operands[])
17393 rtx retval, mem, val, cond;
17394 enum machine_mode mode;
17395 enum memmodel model;
17396 rtx label, x, mask, shift;
17398 retval = operands[0];
17399 mem = operands[1];
17400 val = operands[2];
17401 model = (enum memmodel) INTVAL (operands[3]);
17402 mode = GET_MODE (mem);
17404 mask = shift = NULL_RTX;
17405 if (mode == QImode || mode == HImode)
17407 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
17409 /* Shift and mask VAL into position within the word. */
17410 val = convert_modes (SImode, mode, val, 1);
17411 val = expand_simple_binop (SImode, ASHIFT, val, shift,
17412 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17414 /* Prepare to adjust the return value. */
17415 retval = gen_reg_rtx (SImode);
17416 mode = SImode;
17419 mem = rs6000_pre_atomic_barrier (mem, model);
17421 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
17422 emit_label (XEXP (label, 0));
17424 emit_load_locked (mode, retval, mem);
17426 x = val;
17427 if (mask)
17428 x = rs6000_mask_atomic_subword (retval, val, mask);
17430 cond = gen_reg_rtx (CCmode);
17431 emit_store_conditional (mode, cond, mem, x);
17433 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17434 emit_unlikely_jump (x, label);
17436 rs6000_post_atomic_barrier (model);
17438 if (shift)
17439 rs6000_finish_atomic_subword (operands[0], retval, shift);
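/* Editorial sketch: the unconditional exchange loop emitted above is
   simply

       1: lwarx   r9,0,rMEM
          stwcx.  rVAL,0,rMEM
          bne-    1b

   leaving the previous value in r9 (register names illustrative).  */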
17442 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
17443 to perform. MEM is the memory on which to operate. VAL is the second
17444 operand of the binary operator. BEFORE and AFTER are optional locations to
17445 return the value of MEM either before or after the operation. MODEL_RTX
17446 is a CONST_INT containing the memory model to use. */
17448 void
17449 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
17450 rtx orig_before, rtx orig_after, rtx model_rtx)
17452 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
17453 enum machine_mode mode = GET_MODE (mem);
17454 rtx label, x, cond, mask, shift;
17455 rtx before = orig_before, after = orig_after;
17457 mask = shift = NULL_RTX;
17458 if (mode == QImode || mode == HImode)
17460 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
17462 /* Shift and mask VAL into position within the word. */
17463 val = convert_modes (SImode, mode, val, 1);
17464 val = expand_simple_binop (SImode, ASHIFT, val, shift,
17465 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17467 switch (code)
17469 case IOR:
17470 case XOR:
17471 /* We've already zero-extended VAL. That is sufficient to
17472 make certain that it does not affect other bits. */
17473 mask = NULL;
17474 break;
17476 case AND:
17477 /* If we make certain that all of the other bits in VAL are
17478 set, that will be sufficient to not affect other bits. */
17479 x = gen_rtx_NOT (SImode, mask);
17480 x = gen_rtx_IOR (SImode, x, val);
17481 emit_insn (gen_rtx_SET (VOIDmode, val, x));
17482 mask = NULL;
17483 break;
17485 case NOT:
17486 case PLUS:
17487 case MINUS:
17488 /* These will all affect bits outside the field and need
17489 adjustment via MASK within the loop. */
17490 break;
17492 default:
17493 gcc_unreachable ();
17496 /* Prepare to adjust the return value. */
17497 before = gen_reg_rtx (SImode);
17498 if (after)
17499 after = gen_reg_rtx (SImode);
17500 mode = SImode;
17503 mem = rs6000_pre_atomic_barrier (mem, model);
17505 label = gen_label_rtx ();
17506 emit_label (label);
17507 label = gen_rtx_LABEL_REF (VOIDmode, label);
17509 if (before == NULL_RTX)
17510 before = gen_reg_rtx (mode);
17512 emit_load_locked (mode, before, mem);
17514 if (code == NOT)
17516 x = expand_simple_binop (mode, AND, before, val,
17517 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17518 after = expand_simple_unop (mode, NOT, x, after, 1);
17520 else
17522 after = expand_simple_binop (mode, code, before, val,
17523 after, 1, OPTAB_LIB_WIDEN);
17526 x = after;
17527 if (mask)
17529 x = expand_simple_binop (SImode, AND, after, mask,
17530 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17531 x = rs6000_mask_atomic_subword (before, x, mask);
17534 cond = gen_reg_rtx (CCmode);
17535 emit_store_conditional (mode, cond, mem, x);
17537 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17538 emit_unlikely_jump (x, label);
17540 rs6000_post_atomic_barrier (model);
17542 if (shift)
17544 if (orig_before)
17545 rs6000_finish_atomic_subword (orig_before, before, shift);
17546 if (orig_after)
17547 rs6000_finish_atomic_subword (orig_after, after, shift);
17549 else if (orig_after && after != orig_after)
17550 emit_move_insn (orig_after, after);
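/* Editorial sketch for CODE == PLUS on a full word (register names
   illustrative):

       1: lwarx   r9,0,rMEM       ; BEFORE
          add     r10,r9,rVAL     ; AFTER
          stwcx.  r10,0,rMEM
          bne-    1b  */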
17553 /* Emit instructions to move SRC to DST. Called by splitters for
17554 multi-register moves. It will emit at most one instruction for
17555 each register that is accessed; that is, it won't emit li/lis pairs
17556 (or equivalent for 64-bit code). One of SRC or DST must be a hard
17557 register. */
17559 void
17560 rs6000_split_multireg_move (rtx dst, rtx src)
17562 /* The register number of the first register being moved. */
17563 int reg;
17564 /* The mode that is to be moved. */
17565 enum machine_mode mode;
17566 /* The mode that the move is being done in, and its size. */
17567 enum machine_mode reg_mode;
17568 int reg_mode_size;
17569 /* The number of registers that will be moved. */
17570 int nregs;
17572 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
17573 mode = GET_MODE (dst);
17574 nregs = hard_regno_nregs[reg][mode];
17575 if (FP_REGNO_P (reg))
17576 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
17577 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
17578 else if (ALTIVEC_REGNO_P (reg))
17579 reg_mode = V16QImode;
17580 else if (TARGET_E500_DOUBLE && mode == TFmode)
17581 reg_mode = DFmode;
17582 else
17583 reg_mode = word_mode;
17584 reg_mode_size = GET_MODE_SIZE (reg_mode);
17586 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
17588 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
17590 /* Move register range backwards, if we might have destructive
17591 overlap. */
17592 int i;
17593 for (i = nregs - 1; i >= 0; i--)
17594 emit_insn (gen_rtx_SET (VOIDmode,
17595 simplify_gen_subreg (reg_mode, dst, mode,
17596 i * reg_mode_size),
17597 simplify_gen_subreg (reg_mode, src, mode,
17598 i * reg_mode_size)));
17600 else
17602 int i;
17603 int j = -1;
17604 bool used_update = false;
17605 rtx restore_basereg = NULL_RTX;
17607 if (MEM_P (src) && INT_REGNO_P (reg))
17609 rtx breg;
17611 if (GET_CODE (XEXP (src, 0)) == PRE_INC
17612 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
17614 rtx delta_rtx;
17615 breg = XEXP (XEXP (src, 0), 0);
17616 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
17617 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
17618 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
17619 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17620 src = replace_equiv_address (src, breg);
17622 else if (! rs6000_offsettable_memref_p (src, reg_mode))
17624 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
17626 rtx basereg = XEXP (XEXP (src, 0), 0);
17627 if (TARGET_UPDATE)
17629 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
17630 emit_insn (gen_rtx_SET (VOIDmode, ndst,
17631 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
17632 used_update = true;
17634 else
17635 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17636 XEXP (XEXP (src, 0), 1)));
17637 src = replace_equiv_address (src, basereg);
17639 else
17641 rtx basereg = gen_rtx_REG (Pmode, reg);
17642 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
17643 src = replace_equiv_address (src, basereg);
17647 breg = XEXP (src, 0);
17648 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
17649 breg = XEXP (breg, 0);
17651 /* If the base register we are using to address memory is
17652 also a destination reg, then change that register last. */
17653 if (REG_P (breg)
17654 && REGNO (breg) >= REGNO (dst)
17655 && REGNO (breg) < REGNO (dst) + nregs)
17656 j = REGNO (breg) - REGNO (dst);
17658 else if (MEM_P (dst) && INT_REGNO_P (reg))
17660 rtx breg;
17662 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
17663 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
17665 rtx delta_rtx;
17666 breg = XEXP (XEXP (dst, 0), 0);
17667 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
17668 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
17669 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
17671 /* We have to update the breg before doing the store.
17672 Use store with update, if available. */
17674 if (TARGET_UPDATE)
17676 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17677 emit_insn (TARGET_32BIT
17678 ? (TARGET_POWERPC64
17679 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
17680 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
17681 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
17682 used_update = true;
17684 else
17685 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17686 dst = replace_equiv_address (dst, breg);
17688 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
17689 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
17691 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
17693 rtx basereg = XEXP (XEXP (dst, 0), 0);
17694 if (TARGET_UPDATE)
17696 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17697 emit_insn (gen_rtx_SET (VOIDmode,
17698 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
17699 used_update = true;
17701 else
17702 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17703 XEXP (XEXP (dst, 0), 1)));
17704 dst = replace_equiv_address (dst, basereg);
17706 else
17708 rtx basereg = XEXP (XEXP (dst, 0), 0);
17709 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
17710 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
17711 && REG_P (basereg)
17712 && REG_P (offsetreg)
17713 && REGNO (basereg) != REGNO (offsetreg));
17714 if (REGNO (basereg) == 0)
17716 rtx tmp = offsetreg;
17717 offsetreg = basereg;
17718 basereg = tmp;
17720 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
17721 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
17722 dst = replace_equiv_address (dst, basereg);
17725 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
17726 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
17729 for (i = 0; i < nregs; i++)
17731 /* Calculate index to next subword. */
17732 ++j;
17733 if (j == nregs)
17734 j = 0;
17736 /* If the compiler already emitted a move of the first word by
17737 store with update, there is no need to do anything. */
17738 if (j == 0 && used_update)
17739 continue;
17741 emit_insn (gen_rtx_SET (VOIDmode,
17742 simplify_gen_subreg (reg_mode, dst, mode,
17743 j * reg_mode_size),
17744 simplify_gen_subreg (reg_mode, src, mode,
17745 j * reg_mode_size)));
17747 if (restore_basereg != NULL_RTX)
17748 emit_insn (restore_basereg);
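/* Editorial example: on 32-bit, loading a 16-byte value into r4..r7
   from memory addressed through r5 sets j = 1 above, so the words are
   moved in subword order 2, 3, 0, 1 and the base register r5 is
   overwritten last.  */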
17753 /* This page contains routines that are used to determine what the
17754 function prologue and epilogue code will do and write them out. */
17756 static inline bool
17757 save_reg_p (int r)
17759 return !call_used_regs[r] && df_regs_ever_live_p (r);
17762 /* Return the first fixed-point register that is required to be
17763 saved. 32 if none. */
17765 int
17766 first_reg_to_save (void)
17768 int first_reg;
17770 /* Find lowest numbered live register. */
17771 for (first_reg = 13; first_reg <= 31; first_reg++)
17772 if (save_reg_p (first_reg))
17773 break;
17775 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17776 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17777 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17778 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17779 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17780 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17782 #if TARGET_MACHO
17783 if (flag_pic
17784 && crtl->uses_pic_offset_table
17785 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17786 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17787 #endif
17789 return first_reg;
17792 /* Similar, for FP regs. */
17794 int
17795 first_fp_reg_to_save (void)
17797 int first_reg;
17799 /* Find lowest numbered live register. */
17800 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17801 if (save_reg_p (first_reg))
17802 break;
17804 return first_reg;
17807 /* Similar, for AltiVec regs. */
17809 static int
17810 first_altivec_reg_to_save (void)
17812 int i;
17814 /* Stack frame remains as is unless we are in AltiVec ABI. */
17815 if (! TARGET_ALTIVEC_ABI)
17816 return LAST_ALTIVEC_REGNO + 1;
17818 /* On Darwin, the unwind routines are compiled without
17819 TARGET_ALTIVEC, and use save_world to save/restore the
17820 altivec registers when necessary. */
17821 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17822 && ! TARGET_ALTIVEC)
17823 return FIRST_ALTIVEC_REGNO + 20;
17825 /* Find lowest numbered live register. */
17826 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17827 if (save_reg_p (i))
17828 break;
17830 return i;
17833 /* Return a 32-bit mask of the AltiVec registers we need to set in
17834 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
17835 numbered IBM-style, so bit 0 is the MSB of the 32-bit word. */
17837 static unsigned int
17838 compute_vrsave_mask (void)
17840 unsigned int i, mask = 0;
17842 /* On Darwin, the unwind routines are compiled without
17843 TARGET_ALTIVEC, and use save_world to save/restore the
17844 call-saved altivec registers when necessary. */
17845 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17846 && ! TARGET_ALTIVEC)
17847 mask |= 0xFFF;
17849 /* First, find out if we use _any_ altivec registers. */
17850 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17851 if (df_regs_ever_live_p (i))
17852 mask |= ALTIVEC_REG_BIT (i);
17854 if (mask == 0)
17855 return mask;
17857 /* Next, remove the argument registers from the set. These must
17858 be in the VRSAVE mask set by the caller, so we don't need to add
17859 them in again. More importantly, the mask we compute here is
17860 used to generate CLOBBERs in the set_vrsave insn, and we do not
17861 wish the argument registers to die. */
17862 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17863 mask &= ~ALTIVEC_REG_BIT (i);
17865 /* Similarly, remove the return value from the set. */
17867 bool yes = false;
17868 diddle_return_value (is_altivec_return_reg, &yes);
17869 if (yes)
17870 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17873 return mask;
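/* Worked example (editorial, assuming ALTIVEC_REG_BIT puts V0 at the
   MSB): if only V20 and V31 survive the filtering above, the mask is
   (0x80000000 >> 20) | (0x80000000 >> 31) = 0x00000801.  */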
17876 /* For a very restricted set of circumstances, we can cut down the
17877 size of prologues/epilogues by calling our own save/restore-the-world
17878 routines. */
17880 static void
17881 compute_save_world_info (rs6000_stack_t *info_ptr)
17883 info_ptr->world_save_p = 1;
17884 info_ptr->world_save_p
17885 = (WORLD_SAVE_P (info_ptr)
17886 && DEFAULT_ABI == ABI_DARWIN
17887 && !cfun->has_nonlocal_label
17888 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17889 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17890 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17891 && info_ptr->cr_save_p);
17893 /* This will not work in conjunction with sibcalls. Make sure there
17894 are none. (This check is expensive, but seldom executed.) */
17895 if (WORLD_SAVE_P (info_ptr))
17897 rtx insn;
17898 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17899 if (CALL_P (insn) && SIBLING_CALL_P (insn))
17901 info_ptr->world_save_p = 0;
17902 break;
17906 if (WORLD_SAVE_P (info_ptr))
17908 /* Even if we're not touching VRsave, make sure there's room on the
17909 stack for it, if it looks like we're calling SAVE_WORLD, which
17910 will attempt to save it. */
17911 info_ptr->vrsave_size = 4;
17913 /* If we are going to save the world, we need to save the link register too. */
17914 info_ptr->lr_save_p = 1;
17916 /* "Save" the VRsave register too if we're saving the world. */
17917 if (info_ptr->vrsave_mask == 0)
17918 info_ptr->vrsave_mask = compute_vrsave_mask ();
17920 /* Because the Darwin register save/restore routines only handle
17921 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17922 check. */
17923 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17924 && (info_ptr->first_altivec_reg_save
17925 >= FIRST_SAVED_ALTIVEC_REGNO));
17927 return;
17931 static void
17932 is_altivec_return_reg (rtx reg, void *xyes)
17934 bool *yes = (bool *) xyes;
17935 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17936 *yes = true;
17940 /* Look for user-defined global regs in the range FIRST to LAST-1.
17941 We should not restore these, and so cannot use lmw or out-of-line
17942 restore functions if there are any. We also can't save them
17943 (well, emit frame notes for them), because frame unwinding during
17944 exception handling will restore saved registers. */
17946 static bool
17947 global_regs_p (unsigned first, unsigned last)
17949 while (first < last)
17950 if (global_regs[first++])
17951 return true;
17952 return false;
17955 /* Determine the strategy for saving/restoring registers. */
17957 enum {
17958 SAVRES_MULTIPLE = 0x1,
17959 SAVE_INLINE_FPRS = 0x2,
17960 SAVE_INLINE_GPRS = 0x4,
17961 REST_INLINE_FPRS = 0x8,
17962 REST_INLINE_GPRS = 0x10,
17963 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17964 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17965 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17966 SAVE_INLINE_VRS = 0x100,
17967 REST_INLINE_VRS = 0x200
17970 static int
17971 rs6000_savres_strategy (rs6000_stack_t *info,
17972 bool using_static_chain_p)
17974 int strategy = 0;
17975 bool lr_save_p;
17977 if (TARGET_MULTIPLE
17978 && !TARGET_POWERPC64
17979 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17980 && info->first_gp_reg_save < 31
17981 && !global_regs_p (info->first_gp_reg_save, 32))
17982 strategy |= SAVRES_MULTIPLE;
17984 if (crtl->calls_eh_return
17985 || cfun->machine->ra_need_lr)
17986 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17987 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17988 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17990 if (info->first_fp_reg_save == 64
17991 /* The out-of-line FP routines use double-precision stores;
17992 we can't use those routines if we don't have such stores. */
17993 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17994 || global_regs_p (info->first_fp_reg_save, 64))
17995 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17997 if (info->first_gp_reg_save == 32
17998 || (!(strategy & SAVRES_MULTIPLE)
17999 && global_regs_p (info->first_gp_reg_save, 32)))
18000 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18002 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
18003 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
18004 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18006 /* Define cutoff for using out-of-line functions to save registers. */
18007 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
18009 if (!optimize_size)
18011 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18012 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18013 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18015 else
18017 /* Prefer out-of-line restore if it will exit. */
18018 if (info->first_fp_reg_save > 61)
18019 strategy |= SAVE_INLINE_FPRS;
18020 if (info->first_gp_reg_save > 29)
18022 if (info->first_fp_reg_save == 64)
18023 strategy |= SAVE_INLINE_GPRS;
18024 else
18025 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18027 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
18028 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18031 else if (DEFAULT_ABI == ABI_DARWIN)
18033 if (info->first_fp_reg_save > 60)
18034 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18035 if (info->first_gp_reg_save > 29)
18036 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18037 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18039 else
18041 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
18042 if (info->first_fp_reg_save > 61)
18043 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
18044 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
18045 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
18048 /* Don't bother to try to save things out-of-line if r11 is occupied
18049 by the static chain. It would require too much fiddling and the
18050 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
18051 pointer on Darwin, and AIX uses r1 or r12. */
18052 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
18053 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
18054 | SAVE_INLINE_GPRS
18055 | SAVE_INLINE_VRS | REST_INLINE_VRS);
18057 /* We can only use the out-of-line routines to restore if we've
18058 saved all the registers from first_fp_reg_save in the prologue.
18059 Otherwise, we risk loading garbage. */
18060 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
18062 int i;
18064 for (i = info->first_fp_reg_save; i < 64; i++)
18065 if (!save_reg_p (i))
18067 strategy |= REST_INLINE_FPRS;
18068 break;
18072 /* If we are going to use store multiple, then don't even bother
18073 with the out-of-line routines, since the store-multiple
18074 instruction will always be smaller. */
18075 if ((strategy & SAVRES_MULTIPLE))
18076 strategy |= SAVE_INLINE_GPRS;
18078 /* info->lr_save_p isn't yet set if the only reason lr needs to be
18079 saved is an out-of-line save or restore. Set up the value for
18080 the next test (excluding out-of-line gpr restore). */
18081 lr_save_p = (info->lr_save_p
18082 || !(strategy & SAVE_INLINE_GPRS)
18083 || !(strategy & SAVE_INLINE_FPRS)
18084 || !(strategy & SAVE_INLINE_VRS)
18085 || !(strategy & REST_INLINE_FPRS)
18086 || !(strategy & REST_INLINE_VRS));
18088 /* The situation is more complicated with load multiple. We'd
18089 prefer to use the out-of-line routines for restores, since the
18090 "exit" out-of-line routines can handle the restore of LR and the
18091 frame teardown. However, it doesn't make sense to use the
18092 out-of-line routine if that is the only reason we'd need to save
18093 LR, and we can't use the "exit" out-of-line gpr restore if we
18094 have saved some fprs; in those cases it is advantageous to use
18095 load multiple when available. */
18096 if ((strategy & SAVRES_MULTIPLE)
18097 && (!lr_save_p
18098 || info->first_fp_reg_save != 64))
18099 strategy |= REST_INLINE_GPRS;
18101 /* Saving CR interferes with the exit routines used on the SPE, so
18102 just punt here. */
18103 if (TARGET_SPE_ABI
18104 && info->spe_64bit_regs_used
18105 && info->cr_save_p)
18106 strategy |= REST_INLINE_GPRS;
18108 /* We can only use load multiple or the out-of-line routines to
18109 restore if we've used store multiple or out-of-line routines
18110 in the prologue, i.e. if we've saved all the registers from
18111 first_gp_reg_save. Otherwise, we risk loading garbage. */
18112 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
18113 == SAVE_INLINE_GPRS)
18115 int i;
18117 for (i = info->first_gp_reg_save; i < 32; i++)
18118 if (!save_reg_p (i))
18120 strategy |= REST_INLINE_GPRS;
18121 break;
18125 if (TARGET_ELF && TARGET_64BIT)
18127 if (!(strategy & SAVE_INLINE_FPRS))
18128 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
18129 else if (!(strategy & SAVE_INLINE_GPRS)
18130 && info->first_fp_reg_save == 64)
18131 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
18133 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
18134 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
18136 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
18137 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
18139 return strategy;
18142 /* Calculate the stack information for the current function. This is
18143 complicated by having two separate calling sequences, the AIX calling
18144 sequence and the V.4 calling sequence.
18146 AIX (and Darwin/Mac OS X) stack frames look like:
18147 32-bit 64-bit
18148 SP----> +---------------------------------------+
18149 | back chain to caller | 0 0
18150 +---------------------------------------+
18151 | saved CR | 4 8 (8-11)
18152 +---------------------------------------+
18153 | saved LR | 8 16
18154 +---------------------------------------+
18155 | reserved for compilers | 12 24
18156 +---------------------------------------+
18157 | reserved for binders | 16 32
18158 +---------------------------------------+
18159 | saved TOC pointer | 20 40
18160 +---------------------------------------+
18161 | Parameter save area (P) | 24 48
18162 +---------------------------------------+
18163 | Alloca space (A) | 24+P etc.
18164 +---------------------------------------+
18165 | Local variable space (L) | 24+P+A
18166 +---------------------------------------+
18167 | Float/int conversion temporary (X) | 24+P+A+L
18168 +---------------------------------------+
18169 | Save area for AltiVec registers (W) | 24+P+A+L+X
18170 +---------------------------------------+
18171 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
18172 +---------------------------------------+
18173 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
18174 +---------------------------------------+
18175 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
18176 +---------------------------------------+
18177 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
18178 +---------------------------------------+
18179 old SP->| back chain to caller's caller |
18180 +---------------------------------------+
18182 The required alignment for AIX configurations is two words (i.e., 8
18183 or 16 bytes).
18186 V.4 stack frames look like:
18188 SP----> +---------------------------------------+
18189 | back chain to caller | 0
18190 +---------------------------------------+
18191 | caller's saved LR | 4
18192 +---------------------------------------+
18193 | Parameter save area (P) | 8
18194 +---------------------------------------+
18195 | Alloca space (A) | 8+P
18196 +---------------------------------------+
18197 | Varargs save area (V) | 8+P+A
18198 +---------------------------------------+
18199 | Local variable space (L) | 8+P+A+V
18200 +---------------------------------------+
18201 | Float/int conversion temporary (X) | 8+P+A+V+L
18202 +---------------------------------------+
18203 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
18204 +---------------------------------------+
18205 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
18206 +---------------------------------------+
18207 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
18208 +---------------------------------------+
18209 | SPE: area for 64-bit GP registers |
18210 +---------------------------------------+
18211 | SPE alignment padding |
18212 +---------------------------------------+
18213 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
18214 +---------------------------------------+
18215 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
18216 +---------------------------------------+
18217 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
18218 +---------------------------------------+
18219 old SP->| back chain to caller's caller |
18220 +---------------------------------------+
18222 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
18223 given. (But note below and in sysv4.h that we require only 8 and
18224 may round up the size of our stack frame anyway. The historical
18225 reason is early versions of powerpc-linux, which didn't properly
18226 align the stack at program startup. A happy side-effect is that
18227 -mno-eabi libraries can be used with -meabi programs.)
18229 The EABI configuration defaults to the V.4 layout. However,
18230 the stack alignment requirements may differ. If -mno-eabi is not
18231 given, the required stack alignment is 8 bytes; if -mno-eabi is
18232 given, the required alignment is 16 bytes. (But see V.4 comment
18233 above.) */
18235 #ifndef ABI_STACK_BOUNDARY
18236 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
18237 #endif
18239 static rs6000_stack_t *
18240 rs6000_stack_info (void)
18242 rs6000_stack_t *info_ptr = &stack_info;
18243 int reg_size = TARGET_32BIT ? 4 : 8;
18244 int ehrd_size;
18245 int save_align;
18246 int first_gp;
18247 HOST_WIDE_INT non_fixed_size;
18248 bool using_static_chain_p;
18250 if (reload_completed && info_ptr->reload_completed)
18251 return info_ptr;
18253 memset (info_ptr, 0, sizeof (*info_ptr));
18254 info_ptr->reload_completed = reload_completed;
18256 if (TARGET_SPE)
18258 /* Cache value so we don't rescan instruction chain over and over. */
18259 if (cfun->machine->insn_chain_scanned_p == 0)
18260 cfun->machine->insn_chain_scanned_p
18261 = spe_func_has_64bit_regs_p () + 1;
18262 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
18265 /* Select which calling sequence. */
18266 info_ptr->abi = DEFAULT_ABI;
18268 /* Calculate which registers need to be saved & save area size. */
18269 info_ptr->first_gp_reg_save = first_reg_to_save ();
18270 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
18271 even if it currently looks like we won't. Reload may need it to
18272 get at a constant; if so, it will have already created a constant
18273 pool entry for it. */
18274 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
18275 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
18276 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
18277 && crtl->uses_const_pool
18278 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
18279 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
18280 else
18281 first_gp = info_ptr->first_gp_reg_save;
18283 info_ptr->gp_size = reg_size * (32 - first_gp);
18285 /* For the SPE, we have an additional upper 32-bits on each GPR.
18286 Ideally we should save the entire 64-bits only when the upper
18287 half is used in SIMD instructions. Since we only record
18288 registers live (not the size they are used in), this proves
18289 difficult because we'd have to traverse the instruction chain at
18290 the right time, taking reload into account. This is a real pain,
18291 so we opt to save all the GPRs in 64 bits whenever even one register
18292 is used in 64-bit mode. Otherwise, all the registers in the frame
18293 get saved in 32 bits.
18295 So, when we save all GPRs (except the SP) in 64 bits, the
18296 traditional GP save area will be empty. */
18297 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
18298 info_ptr->gp_size = 0;
18300 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
18301 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
18303 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
18304 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
18305 - info_ptr->first_altivec_reg_save);
18307 /* Does this function call anything? */
18308 info_ptr->calls_p = (! crtl->is_leaf
18309 || cfun->machine->ra_needs_full_frame);
18311 /* Determine if we need to save the condition code registers. */
18312 if (df_regs_ever_live_p (CR2_REGNO)
18313 || df_regs_ever_live_p (CR3_REGNO)
18314 || df_regs_ever_live_p (CR4_REGNO))
18316 info_ptr->cr_save_p = 1;
18317 if (DEFAULT_ABI == ABI_V4)
18318 info_ptr->cr_size = reg_size;
18321 /* If the current function calls __builtin_eh_return, then we need
18322 to allocate stack space for registers that will hold data for
18323 the exception handler. */
18324 if (crtl->calls_eh_return)
18326 unsigned int i;
18327 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
18328 continue;
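/* The loop above has an empty body: its only job is to leave in I
   the number of EH return data registers the target provides.  */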
18330 /* SPE saves EH registers in 64-bits. */
18331 ehrd_size = i * (TARGET_SPE_ABI
18332 && info_ptr->spe_64bit_regs_used != 0
18333 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
18335 else
18336 ehrd_size = 0;
18338 /* Determine various sizes. */
18339 info_ptr->reg_size = reg_size;
18340 info_ptr->fixed_size = RS6000_SAVE_AREA;
18341 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
18342 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
18343 TARGET_ALTIVEC ? 16 : 8);
18344 if (FRAME_GROWS_DOWNWARD)
18345 info_ptr->vars_size
18346 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
18347 + info_ptr->parm_size,
18348 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
18349 - (info_ptr->fixed_size + info_ptr->vars_size
18350 + info_ptr->parm_size);
18352 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
18353 info_ptr->spe_gp_size = 8 * (32 - first_gp);
18354 else
18355 info_ptr->spe_gp_size = 0;
18357 if (TARGET_ALTIVEC_ABI)
18358 info_ptr->vrsave_mask = compute_vrsave_mask ();
18359 else
18360 info_ptr->vrsave_mask = 0;
18362 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
18363 info_ptr->vrsave_size = 4;
18364 else
18365 info_ptr->vrsave_size = 0;
18367 compute_save_world_info (info_ptr);
18369 /* Calculate the offsets. */
18370 switch (DEFAULT_ABI)
18372 case ABI_NONE:
18373 default:
18374 gcc_unreachable ();
18376 case ABI_AIX:
18377 case ABI_DARWIN:
18378 info_ptr->fp_save_offset = - info_ptr->fp_size;
18379 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
18381 if (TARGET_ALTIVEC_ABI)
18383 info_ptr->vrsave_save_offset
18384 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
18386 /* Align stack so vector save area is on a quadword boundary.
18387 The padding goes above the vectors. */
18388 if (info_ptr->altivec_size != 0)
18389 info_ptr->altivec_padding_size
18390 = info_ptr->vrsave_save_offset & 0xF;
18391 else
18392 info_ptr->altivec_padding_size = 0;
18394 info_ptr->altivec_save_offset
18395 = info_ptr->vrsave_save_offset
18396 - info_ptr->altivec_padding_size
18397 - info_ptr->altivec_size;
18398 gcc_assert (info_ptr->altivec_size == 0
18399 || info_ptr->altivec_save_offset % 16 == 0);
18401 /* Adjust for AltiVec case. */
18402 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
18404 else
18405 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
18406 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
18407 info_ptr->lr_save_offset = 2*reg_size;
18408 break;
18410 case ABI_V4:
18411 info_ptr->fp_save_offset = - info_ptr->fp_size;
18412 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
18413 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
18415 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
18417 /* Align stack so SPE GPR save area is aligned on a
18418 double-word boundary. */
18419 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
18420 info_ptr->spe_padding_size
18421 = 8 - (-info_ptr->cr_save_offset % 8);
18422 else
18423 info_ptr->spe_padding_size = 0;
18425 info_ptr->spe_gp_save_offset
18426 = info_ptr->cr_save_offset
18427 - info_ptr->spe_padding_size
18428 - info_ptr->spe_gp_size;
18430 /* Adjust for SPE case. */
18431 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
18433 else if (TARGET_ALTIVEC_ABI)
18435 info_ptr->vrsave_save_offset
18436 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
18438 /* Align stack so vector save area is on a quadword boundary. */
18439 if (info_ptr->altivec_size != 0)
18440 info_ptr->altivec_padding_size
18441 = 16 - (-info_ptr->vrsave_save_offset % 16);
18442 else
18443 info_ptr->altivec_padding_size = 0;
18445 info_ptr->altivec_save_offset
18446 = info_ptr->vrsave_save_offset
18447 - info_ptr->altivec_padding_size
18448 - info_ptr->altivec_size;
18450 /* Adjust for AltiVec case. */
18451 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
18453 else
18454 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
18455 info_ptr->ehrd_offset -= ehrd_size;
18456 info_ptr->lr_save_offset = reg_size;
18457 break;
18460 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
18461 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
18462 + info_ptr->gp_size
18463 + info_ptr->altivec_size
18464 + info_ptr->altivec_padding_size
18465 + info_ptr->spe_gp_size
18466 + info_ptr->spe_padding_size
18467 + ehrd_size
18468 + info_ptr->cr_size
18469 + info_ptr->vrsave_size,
18470 save_align);
18472 non_fixed_size = (info_ptr->vars_size
18473 + info_ptr->parm_size
18474 + info_ptr->save_size);
18476 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
18477 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
18479 /* Determine if we need to save the link register. */
18480 if (info_ptr->calls_p
18481 || (DEFAULT_ABI == ABI_AIX
18482 && crtl->profile
18483 && !TARGET_PROFILE_KERNEL)
18484 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
18485 #ifdef TARGET_RELOCATABLE
18486 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
18487 #endif
18488 || rs6000_ra_ever_killed ())
18489 info_ptr->lr_save_p = 1;
18491 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
18492 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
18493 && call_used_regs[STATIC_CHAIN_REGNUM]);
18494 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
18495 using_static_chain_p);
18497 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
18498 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
18499 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
18500 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
18501 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
18502 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
18503 info_ptr->lr_save_p = 1;
18505 if (info_ptr->lr_save_p)
18506 df_set_regs_ever_live (LR_REGNO, true);
18508 /* Determine if we need to allocate any stack frame:
18510 For AIX we need to push the stack if a frame pointer is needed
18511 (because the stack might be dynamically adjusted), if we are
18512 debugging, if we make calls, or if the sum of fp_save, gp_save,
18513 and local variables is more than the space needed to save all
18514 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
18515 + 18*8 = 288 (GPR13 reserved).
18517 For V.4 we don't have the stack cushion that AIX uses, but assume
18518 that the debugger can handle stackless frames. */
18520 if (info_ptr->calls_p)
18521 info_ptr->push_p = 1;
18523 else if (DEFAULT_ABI == ABI_V4)
18524 info_ptr->push_p = non_fixed_size != 0;
18526 else if (frame_pointer_needed)
18527 info_ptr->push_p = 1;
18529 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
18530 info_ptr->push_p = 1;
18532 else
18533 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
18535 /* Zero offsets if we're not saving those registers. */
18536 if (info_ptr->fp_size == 0)
18537 info_ptr->fp_save_offset = 0;
18539 if (info_ptr->gp_size == 0)
18540 info_ptr->gp_save_offset = 0;
18542 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
18543 info_ptr->altivec_save_offset = 0;
18545 /* Zero VRSAVE offset if not saved and restored. */
18546 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
18547 info_ptr->vrsave_save_offset = 0;
18549 if (! TARGET_SPE_ABI
18550 || info_ptr->spe_64bit_regs_used == 0
18551 || info_ptr->spe_gp_size == 0)
18552 info_ptr->spe_gp_save_offset = 0;
18554 if (! info_ptr->lr_save_p)
18555 info_ptr->lr_save_offset = 0;
18557 if (! info_ptr->cr_save_p)
18558 info_ptr->cr_save_offset = 0;
18560 return info_ptr;
18563 /* Return true if the current function uses any GPRs in 64-bit SIMD
18564 mode. */
18566 static bool
18567 spe_func_has_64bit_regs_p (void)
18569 rtx insns, insn;
18571 /* Functions that save and restore all the call-saved registers will
18572 need to save/restore the registers in 64-bits. */
18573 if (crtl->calls_eh_return
18574 || cfun->calls_setjmp
18575 || crtl->has_nonlocal_goto)
18576 return true;
18578 insns = get_insns ();
18580 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
18582 if (INSN_P (insn))
18584 rtx i;
18586 /* FIXME: This should be implemented with attributes...
18588 (set_attr "spe64" "true")....then,
18589 if (get_spe64(insn)) return true;
18591 It's the only reliable way to do the stuff below. */
18593 i = PATTERN (insn);
18594 if (GET_CODE (i) == SET)
18596 enum machine_mode mode = GET_MODE (SET_SRC (i));
18598 if (SPE_VECTOR_MODE (mode))
18599 return true;
18600 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
18601 return true;
18606 return false;
18609 static void
18610 debug_stack_info (rs6000_stack_t *info)
18612 const char *abi_string;
18614 if (! info)
18615 info = rs6000_stack_info ();
18617 fprintf (stderr, "\nStack information for function %s:\n",
18618 ((current_function_decl && DECL_NAME (current_function_decl))
18619 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
18620 : "<unknown>"));
18622 switch (info->abi)
18624 default: abi_string = "Unknown"; break;
18625 case ABI_NONE: abi_string = "NONE"; break;
18626 case ABI_AIX: abi_string = "AIX"; break;
18627 case ABI_DARWIN: abi_string = "Darwin"; break;
18628 case ABI_V4: abi_string = "V.4"; break;
18631 fprintf (stderr, "\tABI = %5s\n", abi_string);
18633 if (TARGET_ALTIVEC_ABI)
18634 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
18636 if (TARGET_SPE_ABI)
18637 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
18639 if (info->first_gp_reg_save != 32)
18640 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
18642 if (info->first_fp_reg_save != 64)
18643 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
18645 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
18646 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
18647 info->first_altivec_reg_save);
18649 if (info->lr_save_p)
18650 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
18652 if (info->cr_save_p)
18653 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
18655 if (info->vrsave_mask)
18656 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
18658 if (info->push_p)
18659 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
18661 if (info->calls_p)
18662 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
18664 if (info->gp_save_offset)
18665 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
18667 if (info->fp_save_offset)
18668 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
18670 if (info->altivec_save_offset)
18671 fprintf (stderr, "\taltivec_save_offset = %5d\n",
18672 info->altivec_save_offset);
18674 if (info->spe_gp_save_offset)
18675 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
18676 info->spe_gp_save_offset);
18678 if (info->vrsave_save_offset)
18679 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
18680 info->vrsave_save_offset);
18682 if (info->lr_save_offset)
18683 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
18685 if (info->cr_save_offset)
18686 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
18688 if (info->varargs_save_offset)
18689 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
18691 if (info->total_size)
18692 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18693 info->total_size);
18695 if (info->vars_size)
18696 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18697 info->vars_size);
18699 if (info->parm_size)
18700 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
18702 if (info->fixed_size)
18703 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
18705 if (info->gp_size)
18706 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
18708 if (info->spe_gp_size)
18709 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
18711 if (info->fp_size)
18712 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
18714 if (info->altivec_size)
18715 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
18717 if (info->vrsave_size)
18718 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
18720 if (info->altivec_padding_size)
18721 fprintf (stderr, "\taltivec_padding_size= %5d\n",
18722 info->altivec_padding_size);
18724 if (info->spe_padding_size)
18725 fprintf (stderr, "\tspe_padding_size = %5d\n",
18726 info->spe_padding_size);
18728 if (info->cr_size)
18729 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
18731 if (info->save_size)
18732 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18734 if (info->reg_size != 4)
18735 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18737 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18739 fprintf (stderr, "\n");
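/* Return an rtx for the return address COUNT frames up the stack;
   FRAME is that frame's frame pointer.  This backs RETURN_ADDR_RTX
   and hence __builtin_return_address.  */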
18742 rtx
18743 rs6000_return_addr (int count, rtx frame)
18745 /* Currently we don't optimize very well between prologue and body
18746 code, and for PIC the generated code can actually be quite bad, so
18747 don't try to be too clever here. */
18748 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18750 cfun->machine->ra_needs_full_frame = 1;
18752 return
18753 gen_rtx_MEM
18754 (Pmode,
18755 memory_address
18756 (Pmode,
18757 plus_constant (Pmode,
18758 copy_to_reg
18759 (gen_rtx_MEM (Pmode,
18760 memory_address (Pmode, frame))),
18761 RETURN_ADDRESS_OFFSET)));
18764 cfun->machine->ra_need_lr = 1;
18765 return get_hard_reg_initial_val (Pmode, LR_REGNO);
18768 /* Say whether a function is a candidate for sibcall handling or not. */
18770 static bool
18771 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18773 tree fntype;
18775 if (decl)
18776 fntype = TREE_TYPE (decl);
18777 else
18778 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18780 /* We can't do it if the called function has more vector parameters
18781 than the current function; there's nowhere to put the VRsave code. */
18782 if (TARGET_ALTIVEC_ABI
18783 && TARGET_ALTIVEC_VRSAVE
18784 && !(decl && decl == current_function_decl))
18786 function_args_iterator args_iter;
18787 tree type;
18788 int nvreg = 0;
18790 /* Functions with vector parameters are required to have a
18791 prototype, so the argument type info must be available
18792 here. */
18793 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18794 if (TREE_CODE (type) == VECTOR_TYPE
18795 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18796 nvreg++;
18798 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18799 if (TREE_CODE (type) == VECTOR_TYPE
18800 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18801 nvreg--;
18803 if (nvreg > 0)
18804 return false;
18807 /* Under the AIX ABI we can't allow calls to non-local functions,
18808 because the callee may have a different TOC pointer from the
18809 caller's, and there's no way to ensure we restore the TOC when we
18810 return. With the secure-plt SYSV ABI we can't make non-local
18811 calls when -fpic/PIC because the PLT call stubs use r30. */
18812 if (DEFAULT_ABI == ABI_DARWIN
18813 || (DEFAULT_ABI == ABI_AIX
18814 && decl
18815 && !DECL_EXTERNAL (decl)
18816 && (*targetm.binds_local_p) (decl))
18817 || (DEFAULT_ABI == ABI_V4
18818 && (!TARGET_SECURE_PLT
18819 || !flag_pic
18820 || (decl
18821 && (*targetm.binds_local_p) (decl)))))
18823 tree attr_list = TYPE_ATTRIBUTES (fntype);
18825 if (!lookup_attribute ("longcall", attr_list)
18826 || lookup_attribute ("shortcall", attr_list))
18827 return true;
18830 return false;
18833 static int
18834 rs6000_ra_ever_killed (void)
18836 rtx top;
18837 rtx reg;
18838 rtx insn;
18840 if (cfun->is_thunk)
18841 return 0;
18843 if (cfun->machine->lr_save_state)
18844 return cfun->machine->lr_save_state - 1;
18846 /* regs_ever_live has LR marked as used if any sibcalls are present,
18847 but this should not force saving and restoring in the
18848 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18849 clobbers LR, so that is inappropriate. */
18851 /* Also, the prologue can generate a store into LR that
18852 doesn't really count, like this:
18854 move LR->R0
18855 bcl to set PIC register
18856 move LR->R31
18857 move R0->LR
18859 When we're called from the epilogue, we need to avoid counting
18860 this as a store. */
18862 push_topmost_sequence ();
18863 top = get_insns ();
18864 pop_topmost_sequence ();
18865 reg = gen_rtx_REG (Pmode, LR_REGNO);
18867 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18869 if (INSN_P (insn))
18871 if (CALL_P (insn))
18873 if (!SIBLING_CALL_P (insn))
18874 return 1;
18876 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18877 return 1;
18878 else if (set_of (reg, insn) != NULL_RTX
18879 && !prologue_epilogue_contains (insn))
18880 return 1;
18883 return 0;
18886 /* Emit instructions needed to load the TOC register.
18887 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
18888 and there is a constant pool, or for SVR4 -fpic. */
18890 void
18891 rs6000_emit_load_toc_table (int fromprolog)
18893 rtx dest;
18894 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18896 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18898 char buf[30];
18899 rtx lab, tmp1, tmp2, got;
18901 lab = gen_label_rtx ();
18902 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18903 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18904 if (flag_pic == 2)
18905 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18906 else
18907 got = rs6000_got_sym ();
18908 tmp1 = tmp2 = dest;
18909 if (!fromprolog)
18911 tmp1 = gen_reg_rtx (Pmode);
18912 tmp2 = gen_reg_rtx (Pmode);
18914 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18915 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18916 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18917 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18919 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18921 emit_insn (gen_load_toc_v4_pic_si ());
18922 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18924 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18926 char buf[30];
18927 rtx temp0 = (fromprolog
18928 ? gen_rtx_REG (Pmode, 0)
18929 : gen_reg_rtx (Pmode));
18931 if (fromprolog)
18933 rtx symF, symL;
18935 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18936 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18938 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18939 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18941 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18942 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18943 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18945 else
18947 rtx tocsym, lab;
18949 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18950 lab = gen_label_rtx ();
18951 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18952 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18953 if (TARGET_LINK_STACK)
18954 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18955 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18957 emit_insn (gen_addsi3 (dest, temp0, dest));
18959 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18961 /* This is for AIX code running in non-PIC ELF32. */
18962 char buf[30];
18963 rtx realsym;
18964 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18965 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18967 emit_insn (gen_elf_high (dest, realsym));
18968 emit_insn (gen_elf_low (dest, dest, realsym));
18970 else
18972 gcc_assert (DEFAULT_ABI == ABI_AIX);
18974 if (TARGET_32BIT)
18975 emit_insn (gen_load_toc_aix_si (dest));
18976 else
18977 emit_insn (gen_load_toc_aix_di (dest));
18981 /* Emit instructions to restore the link register after determining where
18982 its value has been stored. */
18984 void
18985 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18987 rs6000_stack_t *info = rs6000_stack_info ();
18988 rtx operands[2];
18990 operands[0] = source;
18991 operands[1] = scratch;
18993 if (info->lr_save_p)
18995 rtx frame_rtx = stack_pointer_rtx;
18996 HOST_WIDE_INT sp_offset = 0;
18997 rtx tmp;
18999 if (frame_pointer_needed
19000 || cfun->calls_alloca
19001 || info->total_size > 32767)
19003 tmp = gen_frame_mem (Pmode, frame_rtx);
19004 emit_move_insn (operands[1], tmp);
19005 frame_rtx = operands[1];
19007 else if (info->push_p)
19008 sp_offset = info->total_size;
19010 tmp = plus_constant (Pmode, frame_rtx,
19011 info->lr_save_offset + sp_offset);
19012 tmp = gen_frame_mem (Pmode, tmp);
19013 emit_move_insn (tmp, operands[0]);
19015 else
19016 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
19018 /* Freeze lr_save_p. We've just emitted rtl that depends on the
19019 state of lr_save_p so any change from here on would be a bug. In
19020 particular, stop rs6000_ra_ever_killed from considering the SET
19021 of lr we may have added just above. */
19022 cfun->machine->lr_save_state = info->lr_save_p + 1;
19025 static GTY(()) alias_set_type set = -1;
19027 alias_set_type
19028 get_TOC_alias_set (void)
19030 if (set == -1)
19031 set = new_alias_set ();
19032 return set;
19035 /* This returns nonzero if the current function uses the TOC. This is
19036 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
19037 is generated by the ABI_V4 load_toc_* patterns. */
19038 #if TARGET_ELF
19039 static int
19040 uses_TOC (void)
19042 rtx insn;
19044 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
19045 if (INSN_P (insn))
19047 rtx pat = PATTERN (insn);
19048 int i;
19050 if (GET_CODE (pat) == PARALLEL)
19051 for (i = 0; i < XVECLEN (pat, 0); i++)
19053 rtx sub = XVECEXP (pat, 0, i);
19054 if (GET_CODE (sub) == USE)
19056 sub = XEXP (sub, 0);
19057 if (GET_CODE (sub) == UNSPEC
19058 && XINT (sub, 1) == UNSPEC_TOC)
19059 return 1;
19063 return 0;
19065 #endif
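/* Create a TOC-relative reference to SYMBOL.  With -mcmodel=small,
   or while we can still create pseudos, this is a bare UNSPEC_TOCREL;
   otherwise the high part is set up separately (in LARGETOC_REG when
   one is supplied) and a LO_SUM over it is returned.  */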
19067 rtx
19068 create_TOC_reference (rtx symbol, rtx largetoc_reg)
19070 rtx tocrel, tocreg, hi;
19072 if (TARGET_DEBUG_ADDR)
19074 if (GET_CODE (symbol) == SYMBOL_REF)
19075 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
19076 XSTR (symbol, 0));
19077 else
19079 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
19080 GET_RTX_NAME (GET_CODE (symbol)));
19081 debug_rtx (symbol);
19085 if (!can_create_pseudo_p ())
19086 df_set_regs_ever_live (TOC_REGISTER, true);
19088 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
19089 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
19090 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
19091 return tocrel;
19093 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
19094 if (largetoc_reg != NULL)
19096 emit_move_insn (largetoc_reg, hi);
19097 hi = largetoc_reg;
19099 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
19102 /* Issue assembly directives that create a reference to the given DWARF
19103 FRAME_TABLE_LABEL from the current function section. */
19104 void
19105 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
19107 fprintf (asm_out_file, "\t.ref %s\n",
19108 (* targetm.strip_name_encoding) (frame_table_label));
19111 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
19112 and the change to the stack pointer. */
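/* (The tie is a parallel of BLKmode frame MEM sets to zero; it carries
   no semantics of its own and exists purely so the scheduler and alias
   analysis will not move frame accesses across the stack pointer
   update.)  */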
19114 static void
19115 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
19117 rtvec p;
19118 int i;
19119 rtx regs[3];
19121 i = 0;
19122 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19123 if (hard_frame_needed)
19124 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
19125 if (!(REGNO (fp) == STACK_POINTER_REGNUM
19126 || (hard_frame_needed
19127 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
19128 regs[i++] = fp;
19130 p = rtvec_alloc (i);
19131 while (--i >= 0)
19133 rtx mem = gen_frame_mem (BLKmode, regs[i]);
19134 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
19137 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
19140 /* Emit the correct code for allocating stack space, as insns.
19141 If COPY_REG, leave a copy of the old stack pointer (offset by
19142 COPY_OFF bytes) in it. The generated code may use hard register 0
19143 as a temporary. */
19144 static void
19145 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
19147 rtx insn;
19148 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19149 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
19150 rtx todec = gen_int_mode (-size, Pmode);
19151 rtx par, set, mem;
19153 if (INTVAL (todec) != -size)
19155 warning (0, "stack frame too large");
19156 emit_insn (gen_trap ());
19157 return;
19160 if (crtl->limit_stack)
19162 if (REG_P (stack_limit_rtx)
19163 && REGNO (stack_limit_rtx) > 1
19164 && REGNO (stack_limit_rtx) <= 31)
19166 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
19167 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
19168 const0_rtx));
19170 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
19171 && TARGET_32BIT
19172 && DEFAULT_ABI == ABI_V4)
19174 rtx toload = gen_rtx_CONST (VOIDmode,
19175 gen_rtx_PLUS (Pmode,
19176 stack_limit_rtx,
19177 GEN_INT (size)));
19179 emit_insn (gen_elf_high (tmp_reg, toload));
19180 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
19181 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
19182 const0_rtx));
19184 else
19185 warning (0, "stack limit expression is not supported");
19188 if (copy_reg)
19190 if (copy_off != 0)
19191 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
19192 else
19193 emit_move_insn (copy_reg, stack_reg);
19196 if (size > 32767)
19198 /* Need a note here so that try_split doesn't get confused. */
19199 if (get_last_insn () == NULL_RTX)
19200 emit_note (NOTE_INSN_DELETED);
19201 insn = emit_move_insn (tmp_reg, todec);
19202 try_split (PATTERN (insn), insn, 0);
19203 todec = tmp_reg;
19206 insn = emit_insn (TARGET_32BIT
19207 ? gen_movsi_update_stack (stack_reg, stack_reg,
19208 todec, stack_reg)
19209 : gen_movdi_di_update_stack (stack_reg, stack_reg,
19210 todec, stack_reg));
19211 /* Since we didn't use gen_frame_mem to generate the MEM, grab
19212 it now and set the alias set/attributes. The above gen_*_update
19213 calls will generate a PARALLEL with the MEM set being the first
19214 operation. */
19215 par = PATTERN (insn);
19216 gcc_assert (GET_CODE (par) == PARALLEL);
19217 set = XVECEXP (par, 0, 0);
19218 gcc_assert (GET_CODE (set) == SET);
19219 mem = SET_DEST (set);
19220 gcc_assert (MEM_P (mem));
19221 MEM_NOTRAP_P (mem) = 1;
19222 set_mem_alias_set (mem, get_frame_alias_set ());
19224 RTX_FRAME_RELATED_P (insn) = 1;
19225 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
19226 gen_rtx_SET (VOIDmode, stack_reg,
19227 gen_rtx_PLUS (Pmode, stack_reg,
19228 GEN_INT (-size))));
19231 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
19233 #if PROBE_INTERVAL > 32768
19234 #error Cannot use indexed addressing mode for stack probing
19235 #endif
19237 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
19238 inclusive. These are offsets from the current stack pointer. */
19240 static void
19241 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
19243 /* See if we have a constant small number of probes to generate. If so,
19244 that's the easy case. */
19245 if (first + size <= 32768)
19247 HOST_WIDE_INT i;
19249 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
19250 it exceeds SIZE. If only one probe is needed, this will not
19251 generate any code. Then probe at FIRST + SIZE. */
19252 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
19253 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
19254 -(first + i)));
19256 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
19257 -(first + size)));
19260 /* Otherwise, do the same as above, but in a loop. Note that we must be
19261 extra careful with variables wrapping around because we might be at
19262 the very top (or the very bottom) of the address space and we have
19263 to be able to handle this case properly; in particular, we use an
19264 equality test for the loop condition. */
19265 else
19267 HOST_WIDE_INT rounded_size;
19268 rtx r12 = gen_rtx_REG (Pmode, 12);
19269 rtx r0 = gen_rtx_REG (Pmode, 0);
19271 /* Sanity check for the addressing mode we're going to use. */
19272 gcc_assert (first <= 32768);
19274 /* Step 1: round SIZE to the previous multiple of the interval. */
19276 rounded_size = size & -PROBE_INTERVAL;
19279 /* Step 2: compute initial and final value of the loop counter. */
19281 /* TEST_ADDR = SP + FIRST. */
19282 emit_insn (gen_rtx_SET (VOIDmode, r12,
19283 plus_constant (Pmode, stack_pointer_rtx,
19284 -first)));
19286 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
19287 if (rounded_size > 32768)
19289 emit_move_insn (r0, GEN_INT (-rounded_size));
19290 emit_insn (gen_rtx_SET (VOIDmode, r0,
19291 gen_rtx_PLUS (Pmode, r12, r0)));
19293 else
19294 emit_insn (gen_rtx_SET (VOIDmode, r0,
19295 plus_constant (Pmode, r12, -rounded_size)));
19298 /* Step 3: the loop
19300 while (TEST_ADDR != LAST_ADDR)
19302 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
19303 probe at TEST_ADDR
19306 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
19307 until it is equal to ROUNDED_SIZE. */
19309 if (TARGET_64BIT)
19310 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
19311 else
19312 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
19315 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
19316 that SIZE is equal to ROUNDED_SIZE. */
19318 if (size != rounded_size)
19319 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
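/* Illustrative numbers: with a PROBE_INTERVAL of 4096, a call with
   FIRST == 0 and SIZE == 10000 takes the constant case above and
   emits probes at SP-4096, SP-8192 and finally SP-10000.  */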
19323 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
19324 absolute addresses. */
19326 const char *
19327 output_probe_stack_range (rtx reg1, rtx reg2)
19329 static int labelno = 0;
19330 char loop_lab[32], end_lab[32];
19331 rtx xops[2];
19333 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
19334 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
19336 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
19338 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
19339 xops[0] = reg1;
19340 xops[1] = reg2;
19341 if (TARGET_64BIT)
19342 output_asm_insn ("cmpd 0,%0,%1", xops);
19343 else
19344 output_asm_insn ("cmpw 0,%0,%1", xops);
19346 fputs ("\tbeq 0,", asm_out_file);
19347 assemble_name_raw (asm_out_file, end_lab);
19348 fputc ('\n', asm_out_file);
19350 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
19351 xops[1] = GEN_INT (-PROBE_INTERVAL);
19352 output_asm_insn ("addi %0,%0,%1", xops);
19354 /* Probe at TEST_ADDR and branch. */
19355 xops[1] = gen_rtx_REG (Pmode, 0);
19356 output_asm_insn ("stw %1,0(%0)", xops);
19357 fprintf (asm_out_file, "\tb ");
19358 assemble_name_raw (asm_out_file, loop_lab);
19359 fputc ('\n', asm_out_file);
19361 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
19363 return "";
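/* On 32-bit ELF targets the loop above assembles to something like
   the following (illustrative; r12 and r0 are what
   rs6000_emit_probe_stack_range passes in, and PROBE_INTERVAL is
   assumed to be 4096):

	.LPSRL0:
		cmpw 0,12,0
		beq 0,.LPSRE0
		addi 12,12,-4096
		stw 0,0(12)
		b .LPSRL0
	.LPSRE0:
*/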
19366 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
19367 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
19368 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
19369 deduce these equivalences by itself so it wasn't necessary to hold
19370 its hand so much. Don't be tempted to always supply d2_f_d_e with
19371 the actual CFA register, i.e. r31 when we are using a hard frame
19372 pointer. That fails when saving regs off r1, and sched moves the
19373 r31 setup past the reg saves. */
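/* For example (illustrative): if the prologue stores a register at
   [r11 + 8] after setting r11 = sp + 32, the note attached here
   records the store as [r1 + 40], so the unwinder sees the save
   purely in terms of the stack pointer.  */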
19375 static rtx
19376 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
19377 rtx reg2, rtx rreg)
19379 rtx real, temp;
19381 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
19383 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
19384 int i;
19386 gcc_checking_assert (val == 0);
19387 real = PATTERN (insn);
19388 if (GET_CODE (real) == PARALLEL)
19389 for (i = 0; i < XVECLEN (real, 0); i++)
19390 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
19392 rtx set = XVECEXP (real, 0, i);
19394 RTX_FRAME_RELATED_P (set) = 1;
19396 RTX_FRAME_RELATED_P (insn) = 1;
19397 return insn;
19400 /* copy_rtx will not make unique copies of registers, so we need to
19401 ensure we don't have unwanted sharing here. */
19402 if (reg == reg2)
19403 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
19405 if (reg == rreg)
19406 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
19408 real = copy_rtx (PATTERN (insn));
19410 if (reg2 != NULL_RTX)
19411 real = replace_rtx (real, reg2, rreg);
19413 if (REGNO (reg) == STACK_POINTER_REGNUM)
19414 gcc_checking_assert (val == 0);
19415 else
19416 real = replace_rtx (real, reg,
19417 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
19418 STACK_POINTER_REGNUM),
19419 GEN_INT (val)));
19421 /* We expect that 'real' is either a SET or a PARALLEL containing
19422 SETs (and possibly other stuff). In a PARALLEL, all the SETs
19423 are important so they all have to be marked RTX_FRAME_RELATED_P. */
19425 if (GET_CODE (real) == SET)
19427 rtx set = real;
19429 temp = simplify_rtx (SET_SRC (set));
19430 if (temp)
19431 SET_SRC (set) = temp;
19432 temp = simplify_rtx (SET_DEST (set));
19433 if (temp)
19434 SET_DEST (set) = temp;
19435 if (GET_CODE (SET_DEST (set)) == MEM)
19437 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
19438 if (temp)
19439 XEXP (SET_DEST (set), 0) = temp;
19442 else
19444 int i;
19446 gcc_assert (GET_CODE (real) == PARALLEL);
19447 for (i = 0; i < XVECLEN (real, 0); i++)
19448 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
19450 rtx set = XVECEXP (real, 0, i);
19452 temp = simplify_rtx (SET_SRC (set));
19453 if (temp)
19454 SET_SRC (set) = temp;
19455 temp = simplify_rtx (SET_DEST (set));
19456 if (temp)
19457 SET_DEST (set) = temp;
19458 if (GET_CODE (SET_DEST (set)) == MEM)
19460 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
19461 if (temp)
19462 XEXP (SET_DEST (set), 0) = temp;
19464 RTX_FRAME_RELATED_P (set) = 1;
19468 RTX_FRAME_RELATED_P (insn) = 1;
19469 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
19471 return insn;
19474 /* Returns an insn that has a vrsave set operation with the
19475 appropriate CLOBBERs. */
19477 static rtx
19478 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
19480 int nclobs, i;
19481 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
19482 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
19484 clobs[0]
19485 = gen_rtx_SET (VOIDmode,
19486 vrsave,
19487 gen_rtx_UNSPEC_VOLATILE (SImode,
19488 gen_rtvec (2, reg, vrsave),
19489 UNSPECV_SET_VRSAVE));
19491 nclobs = 1;
19493 /* We need to clobber the registers in the mask so the scheduler
19494 does not move sets to VRSAVE before sets of AltiVec registers.
19496 However, if the function receives nonlocal gotos, reload will set
19497 all call saved registers live. We will end up with:
19499 (set (reg 999) (mem))
19500 (parallel [ (set (reg vrsave) (unspec blah))
19501 (clobber (reg 999))])
19503 The clobber will cause the store into reg 999 to be dead, and
19504 flow will attempt to delete an epilogue insn. In this case, we
19505 need an unspec use/set of the register. */
19507 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19508 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19510 if (!epiloguep || call_used_regs [i])
19511 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
19512 gen_rtx_REG (V4SImode, i));
19513 else
19515 rtx reg = gen_rtx_REG (V4SImode, i);
19517 clobs[nclobs++]
19518 = gen_rtx_SET (VOIDmode,
19519 reg,
19520 gen_rtx_UNSPEC (V4SImode,
19521 gen_rtvec (1, reg), 27));
19525 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
19527 for (i = 0; i < nclobs; ++i)
19528 XVECEXP (insn, 0, i) = clobs[i];
19530 return insn;
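/* Generate a single SET moving REG to or from the frame slot at
   [FRAME_REG + OFFSET]; STORE picks the direction.  The two wrappers
   below fix the direction for loads and stores respectively.  */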
19533 static rtx
19534 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
19536 rtx addr, mem;
19538 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
19539 mem = gen_frame_mem (GET_MODE (reg), addr);
19540 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
19543 static rtx
19544 gen_frame_load (rtx reg, rtx frame_reg, int offset)
19546 return gen_frame_set (reg, frame_reg, offset, false);
19549 static rtx
19550 gen_frame_store (rtx reg, rtx frame_reg, int offset)
19552 return gen_frame_set (reg, frame_reg, offset, true);
19555 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
19556 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
19558 static rtx
19559 emit_frame_save (rtx frame_reg, enum machine_mode mode,
19560 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
19562 rtx reg, insn;
19564 /* Some cases that need register indexed addressing. */
19565 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
19566 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
19567 || (TARGET_E500_DOUBLE && mode == DFmode)
19568 || (TARGET_SPE_ABI
19569 && SPE_VECTOR_MODE (mode)
19570 && !SPE_CONST_OFFSET_OK (offset))));
19572 reg = gen_rtx_REG (mode, regno);
19573 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
19574 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
19575 NULL_RTX, NULL_RTX);
19578 /* Emit an offset memory reference suitable for a frame store, while
19579 converting to a valid addressing mode. */
19581 static rtx
19582 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
19584 rtx int_rtx, offset_rtx;
19586 int_rtx = GEN_INT (offset);
19588 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
19589 || (TARGET_E500_DOUBLE && mode == DFmode))
19591 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
19592 emit_move_insn (offset_rtx, int_rtx);
19594 else
19595 offset_rtx = int_rtx;
19597 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
19600 #ifndef TARGET_FIX_AND_CONTINUE
19601 #define TARGET_FIX_AND_CONTINUE 0
19602 #endif
19604 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
19605 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
19606 #define LAST_SAVRES_REGISTER 31
19607 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
19609 enum {
19610 SAVRES_LR = 0x1,
19611 SAVRES_SAVE = 0x2,
19612 SAVRES_REG = 0x0c,
19613 SAVRES_GPR = 0,
19614 SAVRES_FPR = 4,
19615 SAVRES_VR = 8
19618 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
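/* A SEL value is an OR of the flags above: bit 0 requests LR handling,
   bit 1 selects save over restore, and the two-bit SAVRES_REG field
   picks GPR/FPR/VR.  The largest meaningful value is therefore
   SAVRES_VR | SAVRES_SAVE | SAVRES_LR == 11, which the second
   dimension of savres_routine_syms accommodates.  */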
19620 /* Temporary holding space for an out-of-line register save/restore
19621 routine name. */
19622 static char savres_routine_name[30];
19624 /* Return the name for an out-of-line register save/restore routine.
19625 SEL (a mask of the SAVRES_* flags above) selects save vs. restore,
19626 the register class, and whether LR is also handled. */
19627 static char *
19628 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
19630 const char *prefix = "";
19631 const char *suffix = "";
19633 /* Different targets are supposed to define
19634 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
19635 routine name could be defined with:
19637 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
19639 This is a nice idea in theory, but in practice, things are
19640 complicated in several ways:
19642 - ELF targets have save/restore routines for GPRs.
19644 - SPE targets use different prefixes for 32/64-bit registers, and
19645 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
19647 - PPC64 ELF targets have routines for save/restore of GPRs that
19648 differ in what they do with the link register, so having a set
19649 prefix doesn't work. (We only use one of the save routines at
19650 the moment, though.)
19652 - PPC32 ELF targets have "exit" versions of the restore routines
19653 that restore the link register and can save some extra space.
19654 These require an extra suffix. (There are also "tail" versions
19655 of the restore routines and "GOT" versions of the save routines,
19656 but we don't generate those at present. Same problems apply,
19657 though.)
19659 We deal with all this by synthesizing our own prefix/suffix and
19660 using that for the simple sprintf call shown above. */
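/* So, illustratively, a 32-bit V.4 function whose out-of-line GPR save
   starts at r29 calls "_savegpr_29", and the matching "exit" restore
   that also reloads LR is "_restgpr_29_x".  */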
19661 if (TARGET_SPE)
19663 /* No floating point saves on the SPE. */
19664 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
19666 if ((sel & SAVRES_SAVE))
19667 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
19668 else
19669 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
19671 if ((sel & SAVRES_LR))
19672 suffix = "_x";
19674 else if (DEFAULT_ABI == ABI_V4)
19676 if (TARGET_64BIT)
19677 goto aix_names;
19679 if ((sel & SAVRES_REG) == SAVRES_GPR)
19680 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
19681 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19682 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
19683 else if ((sel & SAVRES_REG) == SAVRES_VR)
19684 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19685 else
19686 abort ();
19688 if ((sel & SAVRES_LR))
19689 suffix = "_x";
19691 else if (DEFAULT_ABI == ABI_AIX)
19693 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
19694 /* No out-of-line save/restore routines for GPRs on AIX. */
19695 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
19696 #endif
19698 aix_names:
19699 if ((sel & SAVRES_REG) == SAVRES_GPR)
19700 prefix = ((sel & SAVRES_SAVE)
19701 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
19702 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
19703 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19705 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
19706 if ((sel & SAVRES_LR))
19707 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
19708 else
19709 #endif
19711 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
19712 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
19715 else if ((sel & SAVRES_REG) == SAVRES_VR)
19716 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19717 else
19718 abort ();
19721 if (DEFAULT_ABI == ABI_DARWIN)
19723 /* The Darwin approach is (slightly) different, in order to be
19724 compatible with code generated by the system toolchain. There is a
19725 single symbol for the start of save sequence, and the code here
19726 embeds an offset into that code on the basis of the first register
19727 to be saved. */
19728 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
19729 if ((sel & SAVRES_REG) == SAVRES_GPR)
19730 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
19731 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
19732 (regno - 13) * 4, prefix, regno);
19733 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19734 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
19735 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
19736 else if ((sel & SAVRES_REG) == SAVRES_VR)
19737 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
19738 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
19739 else
19740 abort ();
19742 else
19743 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
19745 return savres_routine_name;
19748 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
19749 SEL selects the routine via the SAVRES_* flags, as for
19750 rs6000_savres_routine_name above. */
19751 static rtx
19752 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
19754 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
19755 ? info->first_gp_reg_save
19756 : (sel & SAVRES_REG) == SAVRES_FPR
19757 ? info->first_fp_reg_save - 32
19758 : (sel & SAVRES_REG) == SAVRES_VR
19759 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
19760 : -1);
19761 rtx sym;
19762 int select = sel;
19764 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
19765 versions of the gpr routines. */
19766 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
19767 && info->spe_64bit_regs_used)
19768 select ^= SAVRES_FPR ^ SAVRES_GPR;
19770 /* Don't generate bogus routine names. */
19771 gcc_assert (FIRST_SAVRES_REGISTER <= regno
19772 && regno <= LAST_SAVRES_REGISTER
19773 && select >= 0 && select <= 12);
19775 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
19777 if (sym == NULL)
19779 char *name;
19781 name = rs6000_savres_routine_name (info, regno, sel);
19783 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
19784 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
19785 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
19788 return sym;
19791 /* Emit a sequence of insns, including a stack tie if needed, for
19792 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
19793 reset the stack pointer, but move the base of the frame into
19794 reg UPDT_REGNO for use by out-of-line register restore routines. */
19796 static rtx
19797 rs6000_emit_stack_reset (rs6000_stack_t *info,
19798 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
19799 unsigned updt_regno)
19801 rtx updt_reg_rtx;
19803 /* This blockage is needed so that sched doesn't decide to move
19804 the sp change before the register restores. */
19805 if (DEFAULT_ABI == ABI_V4
19806 || (TARGET_SPE_ABI
19807 && info->spe_64bit_regs_used != 0
19808 && info->first_gp_reg_save != 32))
19809 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
19811 /* If we are restoring registers out-of-line, we will be using the
19812 "exit" variants of the restore routines, which will reset the
19813 stack for us. But we do need to point updt_reg into the
19814 right place for those routines. */
19815 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
19817 if (frame_off != 0)
19818 return emit_insn (gen_add3_insn (updt_reg_rtx,
19819 frame_reg_rtx, GEN_INT (frame_off)));
19820 else if (REGNO (frame_reg_rtx) != updt_regno)
19821 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
19823 return NULL_RTX;
19826 /* Return the register number used as a pointer by out-of-line
19827 save/restore functions. */
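/* Concretely: r1 for the AIX FPR and LR-handling variants and for
   Darwin FPR routines, r12 for the remaining AIX cases, and r11
   otherwise (V.4 and the rest of Darwin).  */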
19829 static inline unsigned
19830 ptr_regno_for_savres (int sel)
19832 if (DEFAULT_ABI == ABI_AIX)
19833 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
19834 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
19837 /* Construct a parallel rtx describing the effect of a call to an
19838 out-of-line register save/restore routine, and emit the insn
19839 or jump_insn as appropriate. */
19841 static rtx
19842 rs6000_emit_savres_rtx (rs6000_stack_t *info,
19843 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
19844 enum machine_mode reg_mode, int sel)
19846 int i;
19847 int offset, start_reg, end_reg, n_regs, use_reg;
19848 int reg_size = GET_MODE_SIZE (reg_mode);
19849 rtx sym;
19850 rtvec p;
19851 rtx par, insn;
19853 offset = 0;
19854 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19855 ? info->first_gp_reg_save
19856 : (sel & SAVRES_REG) == SAVRES_FPR
19857 ? info->first_fp_reg_save
19858 : (sel & SAVRES_REG) == SAVRES_VR
19859 ? info->first_altivec_reg_save
19860 : -1);
19861 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19862 ? 32
19863 : (sel & SAVRES_REG) == SAVRES_FPR
19864 ? 64
19865 : (sel & SAVRES_REG) == SAVRES_VR
19866 ? LAST_ALTIVEC_REGNO + 1
19867 : -1);
19868 n_regs = end_reg - start_reg;
19869 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
19870 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
19871 + n_regs);
19873 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19874 RTVEC_ELT (p, offset++) = ret_rtx;
19876 RTVEC_ELT (p, offset++)
19877 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
19879 sym = rs6000_savres_routine_sym (info, sel);
19880 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
19882 use_reg = ptr_regno_for_savres (sel);
19883 if ((sel & SAVRES_REG) == SAVRES_VR)
19885 /* Vector regs are saved/restored using [reg+reg] addressing. */
19886 RTVEC_ELT (p, offset++)
19887 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19888 RTVEC_ELT (p, offset++)
19889 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
19891 else
19892 RTVEC_ELT (p, offset++)
19893 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19895 for (i = 0; i < end_reg - start_reg; i++)
19896 RTVEC_ELT (p, i + offset)
19897 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
19898 frame_reg_rtx, save_area_offset + reg_size * i,
19899 (sel & SAVRES_SAVE) != 0);
19901 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19902 RTVEC_ELT (p, i + offset)
19903 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
19905 par = gen_rtx_PARALLEL (VOIDmode, p);
19907 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19909 insn = emit_jump_insn (par);
19910 JUMP_LABEL (insn) = ret_rtx;
19912 else
19913 insn = emit_insn (par);
19914 return insn;
19917 /* Determine whether GP register REG is really used. */
19919 static bool
19920 rs6000_reg_live_or_pic_offset_p (int reg)
19922 /* If the function calls eh_return, claim that all the registers which
19923 would otherwise be checked for liveness are used. This is required
19924 for the PIC offset register with -mminimal-toc on AIX, as it is
19925 advertised as "fixed" for register allocation purposes in this case. */
19927 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
19928 && (!call_used_regs[reg]
19929 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19930 && !TARGET_SINGLE_PIC_BASE
19931 && TARGET_TOC && TARGET_MINIMAL_TOC)))
19932 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19933 && !TARGET_SINGLE_PIC_BASE
19934 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19935 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
19938 /* Emit function prologue as insns. */
19940 void
19941 rs6000_emit_prologue (void)
19943 rs6000_stack_t *info = rs6000_stack_info ();
19944 enum machine_mode reg_mode = Pmode;
19945 int reg_size = TARGET_32BIT ? 4 : 8;
19946 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19947 rtx frame_reg_rtx = sp_reg_rtx;
19948 unsigned int cr_save_regno;
19949 rtx cr_save_rtx = NULL_RTX;
19950 rtx insn;
19951 int strategy;
19952 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19953 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19954 && call_used_regs[STATIC_CHAIN_REGNUM]);
19955 /* Offset to top of frame for frame_reg and sp respectively. */
19956 HOST_WIDE_INT frame_off = 0;
19957 HOST_WIDE_INT sp_off = 0;
19959 #ifdef ENABLE_CHECKING
19960 /* Track and check usage of r0, r11, r12. */
19961 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
19962 #define START_USE(R) do \
19964 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19965 reg_inuse |= 1 << (R); \
19966 } while (0)
19967 #define END_USE(R) do \
19969 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19970 reg_inuse &= ~(1 << (R)); \
19971 } while (0)
19972 #define NOT_INUSE(R) do \
19974 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19975 } while (0)
19976 #else
19977 #define START_USE(R) do {} while (0)
19978 #define END_USE(R) do {} while (0)
19979 #define NOT_INUSE(R) do {} while (0)
19980 #endif
19982 if (flag_stack_usage_info)
19983 current_function_static_stack_size = info->total_size;
19985 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
19986 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
19988 if (TARGET_FIX_AND_CONTINUE)
19990 /* gdb on darwin arranges to forward a function from the old
19991 address by modifying the first 5 instructions of the function
19992 to branch to the overriding function. This is necessary to
19993 permit function pointers that point to the old function to
19994 actually forward to the new function. */
19995 emit_insn (gen_nop ());
19996 emit_insn (gen_nop ());
19997 emit_insn (gen_nop ());
19998 emit_insn (gen_nop ());
19999 emit_insn (gen_nop ());
20002 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20004 reg_mode = V2SImode;
20005 reg_size = 8;
20008 /* Handle world saves specially here. */
20009 if (WORLD_SAVE_P (info))
20011 int i, j, sz;
20012 rtx treg;
20013 rtvec p;
20014 rtx reg0;
20016 /* save_world expects lr in r0. */
20017 reg0 = gen_rtx_REG (Pmode, 0);
20018 if (info->lr_save_p)
20020 insn = emit_move_insn (reg0,
20021 gen_rtx_REG (Pmode, LR_REGNO));
20022 RTX_FRAME_RELATED_P (insn) = 1;
20025 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
20026 assumptions about the offsets of various bits of the stack
20027 frame. */
20028 gcc_assert (info->gp_save_offset == -220
20029 && info->fp_save_offset == -144
20030 && info->lr_save_offset == 8
20031 && info->cr_save_offset == 4
20032 && info->push_p
20033 && info->lr_save_p
20034 && (!crtl->calls_eh_return
20035 || info->ehrd_offset == -432)
20036 && info->vrsave_save_offset == -224
20037 && info->altivec_save_offset == -416);
20039 treg = gen_rtx_REG (SImode, 11);
20040 emit_move_insn (treg, GEN_INT (-info->total_size));
20042 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
20043 in R11. It also clobbers R12, so beware! */
20045 /* Preserve CR2 for save_world prologues. */
20046 sz = 5;
20047 sz += 32 - info->first_gp_reg_save;
20048 sz += 64 - info->first_fp_reg_save;
20049 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
20050 p = rtvec_alloc (sz);
20051 j = 0;
20052 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
20053 gen_rtx_REG (SImode,
20054 LR_REGNO));
20055 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20056 gen_rtx_SYMBOL_REF (Pmode,
20057 "*save_world"));
20058 /* We do floats first so that the instruction pattern matches
20059 properly. */
20060 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20061 RTVEC_ELT (p, j++)
20062 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20063 ? DFmode : SFmode,
20064 info->first_fp_reg_save + i),
20065 frame_reg_rtx,
20066 info->fp_save_offset + frame_off + 8 * i);
20067 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20068 RTVEC_ELT (p, j++)
20069 = gen_frame_store (gen_rtx_REG (V4SImode,
20070 info->first_altivec_reg_save + i),
20071 frame_reg_rtx,
20072 info->altivec_save_offset + frame_off + 16 * i);
20073 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20074 RTVEC_ELT (p, j++)
20075 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
20076 frame_reg_rtx,
20077 info->gp_save_offset + frame_off + reg_size * i);
20079 /* CR register traditionally saved as CR2. */
20080 RTVEC_ELT (p, j++)
20081 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
20082 frame_reg_rtx, info->cr_save_offset + frame_off);
20083 /* Explain the use of R0. */
20084 if (info->lr_save_p)
20085 RTVEC_ELT (p, j++)
20086 = gen_frame_store (reg0,
20087 frame_reg_rtx, info->lr_save_offset + frame_off);
20088 /* Explain what happens to the stack pointer. */
20090 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
20091 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
20094 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20095 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20096 treg, GEN_INT (-info->total_size));
20097 sp_off = frame_off = info->total_size;
20100 strategy = info->savres_strategy;
20102 /* For V.4, update stack before we do any saving and set back pointer. */
20103 if (! WORLD_SAVE_P (info)
20104 && info->push_p
20105 && (DEFAULT_ABI == ABI_V4
20106 || crtl->calls_eh_return))
20108 bool need_r11 = (TARGET_SPE
20109 ? (!(strategy & SAVE_INLINE_GPRS)
20110 && info->spe_64bit_regs_used == 0)
20111 : (!(strategy & SAVE_INLINE_FPRS)
20112 || !(strategy & SAVE_INLINE_GPRS)
20113 || !(strategy & SAVE_INLINE_VRS)));
20114 int ptr_regno = -1;
20115 rtx ptr_reg = NULL_RTX;
20116 int ptr_off = 0;
20118 if (info->total_size < 32767)
20119 frame_off = info->total_size;
20120 else if (need_r11)
20121 ptr_regno = 11;
20122 else if (info->cr_save_p
20123 || info->lr_save_p
20124 || info->first_fp_reg_save < 64
20125 || info->first_gp_reg_save < 32
20126 || info->altivec_size != 0
20127 || info->vrsave_mask != 0
20128 || crtl->calls_eh_return)
20129 ptr_regno = 12;
20130 else
20132 /* The prologue won't be saving any regs so there is no need
20133 to set up a frame register to access any frame save area.
20134 We also won't be using frame_off anywhere below, but set
20135 the correct value anyway to protect against future
20136 changes to this function. */
20137 frame_off = info->total_size;
20139 if (ptr_regno != -1)
20141 /* Set up the frame offset to that needed by the first
20142 out-of-line save function. */
20143 START_USE (ptr_regno);
20144 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20145 frame_reg_rtx = ptr_reg;
20146 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
20147 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
20148 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
20149 ptr_off = info->gp_save_offset + info->gp_size;
20150 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
20151 ptr_off = info->altivec_save_offset + info->altivec_size;
20152 frame_off = -ptr_off;
20154 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20155 sp_off = info->total_size;
20156 if (frame_reg_rtx != sp_reg_rtx)
20157 rs6000_emit_stack_tie (frame_reg_rtx, false);
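/* Worked example of the invariant maintained from here on:
   frame_reg_rtx + frame_off always addresses the top of the frame
   (the incoming SP).  With total_size == 64 and no separate pointer
   register, the allocation leaves r1 = old_r1 - 64 and frame_off = 64,
   so a save slot at offset -8 from the old SP is addressed as 56(r1).  */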
20160 /* If we use the link register, get it into r0. */
20161 if (!WORLD_SAVE_P (info) && info->lr_save_p)
20163 rtx addr, reg, mem;
20165 reg = gen_rtx_REG (Pmode, 0);
20166 START_USE (0);
20167 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
20168 RTX_FRAME_RELATED_P (insn) = 1;
20170 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
20171 | SAVE_NOINLINE_FPRS_SAVES_LR)))
20173 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20174 GEN_INT (info->lr_save_offset + frame_off));
20175 mem = gen_rtx_MEM (Pmode, addr);
20176 /* This should not be of rs6000_sr_alias_set, because of
20177 __builtin_return_address. */
20179 insn = emit_move_insn (mem, reg);
20180 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20181 NULL_RTX, NULL_RTX);
20182 END_USE (0);
20186 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
20187 r12 will be needed by out-of-line gpr restore. */
20188 cr_save_regno = (DEFAULT_ABI == ABI_AIX
20189 && !(strategy & (SAVE_INLINE_GPRS
20190 | SAVE_NOINLINE_GPRS_SAVES_LR))
20191 ? 11 : 12);
20192 if (!WORLD_SAVE_P (info)
20193 && info->cr_save_p
20194 && REGNO (frame_reg_rtx) != cr_save_regno
20195 && !(using_static_chain_p && cr_save_regno == 11))
20197 rtx set;
20199 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
20200 START_USE (cr_save_regno);
20201 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20202 RTX_FRAME_RELATED_P (insn) = 1;
20203 /* Now, there's no way that dwarf2out_frame_debug_expr is going
20204 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
20205 But that's OK. All we have to do is specify that _one_ condition
20206 code register is saved in this stack slot. The thrower's epilogue
20207 will then restore all the call-saved registers.
20208 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
20209 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
20210 gen_rtx_REG (SImode, CR2_REGNO));
20211 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20214 /* Do any required saving of fpr's. If only one or two to save, do
20215 it ourselves. Otherwise, call a helper function. */
20216 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
20218 int i;
20219 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20220 if (save_reg_p (info->first_fp_reg_save + i))
20221 emit_frame_save (frame_reg_rtx,
20222 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20223 ? DFmode : SFmode),
20224 info->first_fp_reg_save + i,
20225 info->fp_save_offset + frame_off + 8 * i,
20226 sp_off - frame_off);
20228 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
20230 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20231 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20232 unsigned ptr_regno = ptr_regno_for_savres (sel);
20233 rtx ptr_reg = frame_reg_rtx;
20235 if (REGNO (frame_reg_rtx) == ptr_regno)
20236 gcc_checking_assert (frame_off == 0);
20237 else
20239 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20240 NOT_INUSE (ptr_regno);
20241 emit_insn (gen_add3_insn (ptr_reg,
20242 frame_reg_rtx, GEN_INT (frame_off)));
20244 insn = rs6000_emit_savres_rtx (info, ptr_reg,
20245 info->fp_save_offset,
20246 info->lr_save_offset,
20247 DFmode, sel);
20248 rs6000_frame_related (insn, ptr_reg, sp_off,
20249 NULL_RTX, NULL_RTX);
20250 if (lr)
20251 END_USE (0);
20254 /* Save GPRs. This is done as a PARALLEL if we are using
20255 the store-multiple instructions. */
20256 if (!WORLD_SAVE_P (info)
20257 && TARGET_SPE_ABI
20258 && info->spe_64bit_regs_used != 0
20259 && info->first_gp_reg_save != 32)
20261 int i;
20262 rtx spe_save_area_ptr;
20263 HOST_WIDE_INT save_off;
20264 int ool_adjust = 0;
20266 /* Determine whether we can address all of the registers that need
20267 to be saved with an offset from frame_reg_rtx that fits in
20268 the small const field for SPE memory instructions. */
20269 int spe_regs_addressable
20270 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
20271 + reg_size * (32 - info->first_gp_reg_save - 1))
20272 && (strategy & SAVE_INLINE_GPRS));
20274 if (spe_regs_addressable)
20276 spe_save_area_ptr = frame_reg_rtx;
20277 save_off = frame_off;
20279 else
20281 /* Make r11 point to the start of the SPE save area. We need
20282 to be careful here if r11 is holding the static chain. If
20283 it is, then temporarily save it in r0. */
20284 HOST_WIDE_INT offset;
20286 if (!(strategy & SAVE_INLINE_GPRS))
20287 ool_adjust = 8 * (info->first_gp_reg_save
20288 - (FIRST_SAVRES_REGISTER + 1));
20289 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
20290 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
20291 save_off = frame_off - offset;
20293 if (using_static_chain_p)
20295 rtx r0 = gen_rtx_REG (Pmode, 0);
20297 START_USE (0);
20298 gcc_assert (info->first_gp_reg_save > 11);
20300 emit_move_insn (r0, spe_save_area_ptr);
20302 else if (REGNO (frame_reg_rtx) != 11)
20303 START_USE (11);
20305 emit_insn (gen_addsi3 (spe_save_area_ptr,
20306 frame_reg_rtx, GEN_INT (offset)));
20307 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
20308 frame_off = -info->spe_gp_save_offset + ool_adjust;
20311 if ((strategy & SAVE_INLINE_GPRS))
20313 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20314 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
20315 emit_frame_save (spe_save_area_ptr, reg_mode,
20316 info->first_gp_reg_save + i,
20317 (info->spe_gp_save_offset + save_off
20318 + reg_size * i),
20319 sp_off - save_off);
20321 else
20323 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
20324 info->spe_gp_save_offset + save_off,
20325 0, reg_mode,
20326 SAVRES_SAVE | SAVRES_GPR);
20328 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
20329 NULL_RTX, NULL_RTX);
20332 /* Move the static chain pointer back. */
20333 if (!spe_regs_addressable)
20335 if (using_static_chain_p)
20337 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
20338 END_USE (0);
20340 else if (REGNO (frame_reg_rtx) != 11)
20341 END_USE (11);
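/* Worked example of the addressability test above.  The SPE loads and
   stores (evldd/evstdd) encode a 5-bit offset scaled by 8, i.e. at
   most 248 bytes from the base register (field width per the SPE ISA;
   treat the exact number as an assumption here).  With
   first_gp_reg_save == 20 the farthest slot sits at save_offset + 88,
   which fits; a frame_off in the thousands does not, which is what
   forces the r11 setup above.  */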
20344 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
20346 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
20347 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
20348 unsigned ptr_regno = ptr_regno_for_savres (sel);
20349 rtx ptr_reg = frame_reg_rtx;
20350 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
20351 int end_save = info->gp_save_offset + info->gp_size;
20352 int ptr_off;
20354 if (!ptr_set_up)
20355 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20357 /* Need to adjust r11 (r12) if we saved any FPRs. */
20358 if (end_save + frame_off != 0)
20360 rtx offset = GEN_INT (end_save + frame_off);
20362 if (ptr_set_up)
20363 frame_off = -end_save;
20364 else
20365 NOT_INUSE (ptr_regno);
20366 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20368 else if (!ptr_set_up)
20370 NOT_INUSE (ptr_regno);
20371 emit_move_insn (ptr_reg, frame_reg_rtx);
20373 ptr_off = -end_save;
20374 insn = rs6000_emit_savres_rtx (info, ptr_reg,
20375 info->gp_save_offset + ptr_off,
20376 info->lr_save_offset + ptr_off,
20377 reg_mode, sel);
20378 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
20379 NULL_RTX, NULL_RTX);
20380 if (lr)
20381 END_USE (0);
20383 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
20385 rtvec p;
20386 int i;
20387 p = rtvec_alloc (32 - info->first_gp_reg_save);
20388 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20389 RTVEC_ELT (p, i)
20390 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
20391 frame_reg_rtx,
20392 info->gp_save_offset + frame_off + reg_size * i);
20393 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20394 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20395 NULL_RTX, NULL_RTX);
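/* The PARALLEL built above is what the store-multiple pattern matches;
   on 32-bit targets it can issue as a single stmw.  An illustrative
   (made-up) instance: with first_gp_reg_save == 26 and the GP save
   area ending at the frame top,
	stmw r26,-24(r1)
   stores r26..r31 with one instruction.  The epilogue's load-multiple
   path mirrors this with lmw.  */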
20397 else if (!WORLD_SAVE_P (info))
20399 int i;
20400 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20401 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
20402 emit_frame_save (frame_reg_rtx, reg_mode,
20403 info->first_gp_reg_save + i,
20404 info->gp_save_offset + frame_off + reg_size * i,
20405 sp_off - frame_off);
20408 if (crtl->calls_eh_return)
20410 unsigned int i;
20411 rtvec p;
20413 for (i = 0; ; ++i)
20415 unsigned int regno = EH_RETURN_DATA_REGNO (i);
20416 if (regno == INVALID_REGNUM)
20417 break;
20420 p = rtvec_alloc (i);
20422 for (i = 0; ; ++i)
20424 unsigned int regno = EH_RETURN_DATA_REGNO (i);
20425 if (regno == INVALID_REGNUM)
20426 break;
20428 insn
20429 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
20430 sp_reg_rtx,
20431 info->ehrd_offset + sp_off + reg_size * (int) i);
20432 RTVEC_ELT (p, i) = insn;
20433 RTX_FRAME_RELATED_P (insn) = 1;
20436 insn = emit_insn (gen_blockage ());
20437 RTX_FRAME_RELATED_P (insn) = 1;
20438 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
20441 /* Under the AIX ABI we need to make sure r2 is really saved. */
20442 if (TARGET_AIX && crtl->calls_eh_return)
20444 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
20445 rtx save_insn, join_insn, note;
20446 long toc_restore_insn;
20448 tmp_reg = gen_rtx_REG (Pmode, 11);
20449 tmp_reg_si = gen_rtx_REG (SImode, 11);
20450 if (using_static_chain_p)
20452 START_USE (0);
20453 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
20455 else
20456 START_USE (11);
20457 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
20458 /* Peek at the instruction to which this function returns. If it's
20459 restoring r2, then we know we've already saved r2. We can't
20460 unconditionally save r2 because the value we have will already
20461 be updated if we arrived at this function via a PLT call or
20462 TOC-adjusting stub. */
20463 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
20464 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
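/* The magic numbers decode to the expected TOC restore instruction at
   the return address (a worked decode against the base opcode maps):
	0x80410014 = lwz r2,20(r1)	(32-bit)
	0xE8410028 = ld  r2,40(r1)	(64-bit)
   The xor-with-high-half then compare-with-low-half sequence below is
   simply a two-insn way of comparing a full 32-bit word against this
   constant without materializing it as one immediate.  */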
20465 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
20466 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
20467 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
20468 validate_condition_mode (EQ, CCUNSmode);
20469 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
20470 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
20471 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
20472 toc_save_done = gen_label_rtx ();
20473 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
20474 gen_rtx_EQ (VOIDmode, compare_result,
20475 const0_rtx),
20476 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
20477 pc_rtx);
20478 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
20479 JUMP_LABEL (jump) = toc_save_done;
20480 LABEL_NUSES (toc_save_done) += 1;
20482 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
20483 TOC_REGNUM, frame_off + 5 * reg_size,
20484 sp_off - frame_off);
20486 emit_label (toc_save_done);
20488 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
20489 have a CFG that has different saves along different paths.
20490 Move the note to a dummy blockage insn, which describes that
20491 R2 is unconditionally saved after the label. */
20492 /* ??? An alternate representation might be a special insn pattern
20493 containing both the branch and the store. That might let the
20494 code that minimizes the number of DW_CFA_advance opcodes better
20495 freedom in placing the annotations. */
20496 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
20497 if (note)
20498 remove_note (save_insn, note);
20499 else
20500 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
20501 copy_rtx (PATTERN (save_insn)), NULL_RTX);
20502 RTX_FRAME_RELATED_P (save_insn) = 0;
20504 join_insn = emit_insn (gen_blockage ());
20505 REG_NOTES (join_insn) = note;
20506 RTX_FRAME_RELATED_P (join_insn) = 1;
20508 if (using_static_chain_p)
20510 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
20511 END_USE (0);
20513 else
20514 END_USE (11);
20517 /* Save CR if we use any that must be preserved. */
20518 if (!WORLD_SAVE_P (info) && info->cr_save_p)
20520 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20521 GEN_INT (info->cr_save_offset + frame_off));
20522 rtx mem = gen_frame_mem (SImode, addr);
20523 /* See the large comment above about why CR2_REGNO is used. */
20524 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
20526 /* If we didn't copy cr before, do so now using r0. */
20527 if (cr_save_rtx == NULL_RTX)
20529 rtx set;
20531 START_USE (0);
20532 cr_save_rtx = gen_rtx_REG (SImode, 0);
20533 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20534 RTX_FRAME_RELATED_P (insn) = 1;
20535 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
20536 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20538 insn = emit_move_insn (mem, cr_save_rtx);
20539 END_USE (REGNO (cr_save_rtx));
20541 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20542 NULL_RTX, NULL_RTX);
20545 /* Update stack and set back pointer unless this is V.4,
20546 for which it was done previously. */
20547 if (!WORLD_SAVE_P (info) && info->push_p
20548 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
20550 rtx ptr_reg = NULL;
20551 int ptr_off = 0;
20553 /* If saving altivec regs we need to be able to address all save
20554 locations using a 16-bit offset. */
20555 if ((strategy & SAVE_INLINE_VRS) == 0
20556 || (info->altivec_size != 0
20557 && (info->altivec_save_offset + info->altivec_size - 16
20558 + info->total_size - frame_off) > 32767)
20559 || (info->vrsave_size != 0
20560 && (info->vrsave_save_offset
20561 + info->total_size - frame_off) > 32767))
20563 int sel = SAVRES_SAVE | SAVRES_VR;
20564 unsigned ptr_regno = ptr_regno_for_savres (sel);
20566 if (using_static_chain_p
20567 && ptr_regno == STATIC_CHAIN_REGNUM)
20568 ptr_regno = 12;
20569 if (REGNO (frame_reg_rtx) != ptr_regno)
20570 START_USE (ptr_regno);
20571 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20572 frame_reg_rtx = ptr_reg;
20573 ptr_off = info->altivec_save_offset + info->altivec_size;
20574 frame_off = -ptr_off;
20576 else if (REGNO (frame_reg_rtx) == 1)
20577 frame_off = info->total_size;
20578 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20579 sp_off = info->total_size;
20580 if (frame_reg_rtx != sp_reg_rtx)
20581 rs6000_emit_stack_tie (frame_reg_rtx, false);
20584 /* Set frame pointer, if needed. */
20585 if (frame_pointer_needed)
20587 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
20588 sp_reg_rtx);
20589 RTX_FRAME_RELATED_P (insn) = 1;
20592 /* Save AltiVec registers if needed. Save here because the red zone does
20593 not always include AltiVec registers. */
20594 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20595 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
20597 int end_save = info->altivec_save_offset + info->altivec_size;
20598 int ptr_off;
20599 /* Oddly, the vector save/restore functions point r0 at the end
20600 of the save area, then use r11 or r12 to load offsets for
20601 [reg+reg] addressing. */
20602 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20603 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
20604 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20606 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20607 NOT_INUSE (0);
20608 if (end_save + frame_off != 0)
20610 rtx offset = GEN_INT (end_save + frame_off);
20612 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20614 else
20615 emit_move_insn (ptr_reg, frame_reg_rtx);
20617 ptr_off = -end_save;
20618 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20619 info->altivec_save_offset + ptr_off,
20620 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
20621 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
20622 NULL_RTX, NULL_RTX);
20623 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20625 /* The oddity mentioned above clobbered our frame reg. */
20626 emit_move_insn (frame_reg_rtx, ptr_reg);
20627 frame_off = ptr_off;
20630 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20631 && info->altivec_size != 0)
20633 int i;
20635 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20636 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20638 rtx areg, savereg, mem;
20639 int offset;
20641 offset = (info->altivec_save_offset + frame_off
20642 + 16 * (i - info->first_altivec_reg_save));
20644 savereg = gen_rtx_REG (V4SImode, i);
20646 NOT_INUSE (0);
20647 areg = gen_rtx_REG (Pmode, 0);
20648 emit_move_insn (areg, GEN_INT (offset));
20650 /* AltiVec addressing mode is [reg+reg]. */
20651 mem = gen_frame_mem (V4SImode,
20652 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
20654 insn = emit_move_insn (mem, savereg);
20656 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20657 areg, GEN_INT (offset));
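/* Sketch of the resulting pair for one register (offset illustrative):
   AltiVec stores have no displacement form, hence the detour through
   r0, e.g.
	li    r0,-416
	stvx  v20,r1,r0
   which stores v20 at r1 + r0.  For offsets that do not fit a 16-bit
   immediate, the move above expands to more than one instruction.  */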
20661 /* VRSAVE is a bit vector representing which AltiVec registers
20662 are used. The OS uses this to determine which vector
20663 registers to save on a context switch. We need to save
20664 VRSAVE on the stack frame, add whatever AltiVec registers we
20665 used in this function, and do the corresponding magic in the
20666 epilogue. */
20668 if (!WORLD_SAVE_P (info)
20669 && TARGET_ALTIVEC
20670 && TARGET_ALTIVEC_VRSAVE
20671 && info->vrsave_mask != 0)
20673 rtx reg, vrsave;
20674 int offset;
20675 int save_regno;
20677 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
20678 be using r12 as frame_reg_rtx and r11 as the static chain
20679 pointer for nested functions. */
20680 save_regno = 12;
20681 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
20682 save_regno = 11;
20683 else if (REGNO (frame_reg_rtx) == 12)
20685 save_regno = 11;
20686 if (using_static_chain_p)
20687 save_regno = 0;
20690 NOT_INUSE (save_regno);
20691 reg = gen_rtx_REG (SImode, save_regno);
20692 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20693 if (TARGET_MACHO)
20694 emit_insn (gen_get_vrsave_internal (reg));
20695 else
20696 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
20698 /* Save VRSAVE. */
20699 offset = info->vrsave_save_offset + frame_off;
20700 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
20702 /* Include the registers in the mask. */
20703 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
20705 insn = emit_insn (generate_set_vrsave (reg, info, 0));
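/* Worked example of the mask update above: if this function uses v20
   and v31, info->vrsave_mask has ALTIVEC_REG_BIT set for both, and the
   ior keeps any bits the caller already had set in VRSAVE, so the OS
   continues to context-switch the caller's live vector regs too.  */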
20708 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
20709 if (!TARGET_SINGLE_PIC_BASE
20710 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
20711 || (DEFAULT_ABI == ABI_V4
20712 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
20713 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
20715 /* If emit_load_toc_table will use the link register, we need to save
20716 it. We use R12 for this purpose because emit_load_toc_table
20717 can use register 0. This allows us to use a plain 'blr' to return
20718 from the procedure more often. */
20719 int save_LR_around_toc_setup = (TARGET_ELF
20720 && DEFAULT_ABI != ABI_AIX
20721 && flag_pic
20722 && ! info->lr_save_p
20723 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
20724 if (save_LR_around_toc_setup)
20726 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20727 rtx tmp = gen_rtx_REG (Pmode, 12);
20729 insn = emit_move_insn (tmp, lr);
20730 RTX_FRAME_RELATED_P (insn) = 1;
20732 rs6000_emit_load_toc_table (TRUE);
20734 insn = emit_move_insn (lr, tmp);
20735 add_reg_note (insn, REG_CFA_RESTORE, lr);
20736 RTX_FRAME_RELATED_P (insn) = 1;
20738 else
20739 rs6000_emit_load_toc_table (TRUE);
20742 #if TARGET_MACHO
20743 if (!TARGET_SINGLE_PIC_BASE
20744 && DEFAULT_ABI == ABI_DARWIN
20745 && flag_pic && crtl->uses_pic_offset_table)
20747 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20748 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
20750 /* Save and restore LR locally around this call (in R0). */
20751 if (!info->lr_save_p)
20752 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
20754 emit_insn (gen_load_macho_picbase (src));
20756 emit_move_insn (gen_rtx_REG (Pmode,
20757 RS6000_PIC_OFFSET_TABLE_REGNUM),
20758 lr);
20760 if (!info->lr_save_p)
20761 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
20763 #endif
20765 /* If we need to, save the TOC register after doing the stack setup.
20766 Do not emit eh frame info for this save. The unwinder wants info,
20767 conceptually attached to instructions in this function, about
20768 register values in the caller of this function. This R2 may have
20769 already been changed from the value in the caller.
20770 We don't attempt to write accurate DWARF EH frame info for R2
20771 because code emitted by gcc for a (non-pointer) function call
20772 doesn't save and restore R2. Instead, R2 is managed out-of-line
20773 by a linker generated plt call stub when the function resides in
20774 a shared library. This behaviour is costly to describe in DWARF,
20775 both in terms of the size of DWARF info and the time taken in the
20776 unwinder to interpret it. R2 changes, apart from the
20777 calls_eh_return case earlier in this function, are handled by
20778 linux-unwind.h frob_update_context. */
20779 if (rs6000_save_toc_in_prologue_p ())
20781 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
20782 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
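/* Note the slot: 5 * reg_size is 20 on 32-bit and 40 on 64-bit
   targets, matching the ABI TOC save slot that the lwz/ld peek in the
   calls_eh_return code above tests for.  */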
20786 /* Write function prologue. */
20788 static void
20789 rs6000_output_function_prologue (FILE *file,
20790 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20791 {
20792 rs6000_stack_t *info = rs6000_stack_info ();
20794 if (TARGET_DEBUG_STACK)
20795 debug_stack_info (info);
20797 /* Write .extern for any function we will call to save and restore
20798 fp values. */
20799 if (info->first_fp_reg_save < 64
20800 && !TARGET_MACHO
20801 && !TARGET_ELF)
20802 {
20803 char *name;
20804 int regno = info->first_fp_reg_save - 32;
20806 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
20807 {
20808 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20809 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20810 name = rs6000_savres_routine_name (info, regno, sel);
20811 fprintf (file, "\t.extern %s\n", name);
20812 }
20813 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
20814 {
20815 bool lr = (info->savres_strategy
20816 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20817 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20818 name = rs6000_savres_routine_name (info, regno, sel);
20819 fprintf (file, "\t.extern %s\n", name);
20820 }
20821 }
20823 rs6000_pic_labelno++;
20824 }
20826 /* Non-zero if vmx regs are restored before the frame pop, zero if
20827 we restore after the pop when possible. */
20828 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
20830 /* Restoring cr is a two step process: loading a reg from the frame
20831 save, then moving the reg to cr. For ABI_V4 we must let the
20832 unwinder know that the stack location is no longer valid at or
20833 before the stack deallocation, but we can't emit a cfa_restore for
20834 cr at the stack deallocation like we do for other registers.
20835 The trouble is that it is possible for the move to cr to be
20836 scheduled after the stack deallocation. So say exactly where cr
20837 is located on each of the two insns. */
20839 static rtx
20840 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
20841 {
20842 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
20843 rtx reg = gen_rtx_REG (SImode, regno);
20844 rtx insn = emit_move_insn (reg, mem);
20846 if (!exit_func && DEFAULT_ABI == ABI_V4)
20847 {
20848 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20849 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
20851 add_reg_note (insn, REG_CFA_REGISTER, set);
20852 RTX_FRAME_RELATED_P (insn) = 1;
20853 }
20854 return reg;
20855 }
20857 /* Reload CR from REG. */
20859 static void
20860 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
20861 {
20862 int count = 0;
20863 int i;
20865 if (using_mfcr_multiple)
20866 {
20867 for (i = 0; i < 8; i++)
20868 if (save_reg_p (CR0_REGNO + i))
20869 count++;
20870 gcc_assert (count);
20871 }
20873 if (using_mfcr_multiple && count > 1)
20874 {
20875 rtvec p;
20876 int ndx;
20878 p = rtvec_alloc (count);
20880 ndx = 0;
20881 for (i = 0; i < 8; i++)
20882 if (save_reg_p (CR0_REGNO + i))
20883 {
20884 rtvec r = rtvec_alloc (2);
20885 RTVEC_ELT (r, 0) = reg;
20886 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
20887 RTVEC_ELT (p, ndx) =
20888 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
20889 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
20890 ndx++;
20891 }
20892 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20893 gcc_assert (ndx == count);
20894 }
20895 else
20896 for (i = 0; i < 8; i++)
20897 if (save_reg_p (CR0_REGNO + i))
20898 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
20899 reg));
20901 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20902 {
20903 rtx insn = get_last_insn ();
20904 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20906 add_reg_note (insn, REG_CFA_RESTORE, cr);
20907 RTX_FRAME_RELATED_P (insn) = 1;
20908 }
20909 }
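/* Worked example of the field masks built above: the mtcrf field mask
   is 8 bits with bit 7-i selecting CR field i, so restoring only CR2
   uses 1 << (7-2) == 0x20.  The multiple-field path moves all of the
   saved fields from one GPR, each SET picking its field by mask.  */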
20911 /* Like cr, the move to lr instruction can be scheduled after the
20912 stack deallocation, but unlike cr, its stack frame save is still
20913 valid. So we only need to emit the cfa_restore on the correct
20914 instruction. */
20916 static void
20917 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
20918 {
20919 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
20920 rtx reg = gen_rtx_REG (Pmode, regno);
20922 emit_move_insn (reg, mem);
20923 }
20925 static void
20926 restore_saved_lr (int regno, bool exit_func)
20927 {
20928 rtx reg = gen_rtx_REG (Pmode, regno);
20929 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20930 rtx insn = emit_move_insn (lr, reg);
20932 if (!exit_func && flag_shrink_wrap)
20933 {
20934 add_reg_note (insn, REG_CFA_RESTORE, lr);
20935 RTX_FRAME_RELATED_P (insn) = 1;
20936 }
20937 }
20939 static rtx
20940 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
20941 {
20942 if (info->cr_save_p)
20943 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20944 gen_rtx_REG (SImode, CR2_REGNO),
20945 cfa_restores);
20946 if (info->lr_save_p)
20947 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20948 gen_rtx_REG (Pmode, LR_REGNO),
20949 cfa_restores);
20950 return cfa_restores;
20951 }
20953 /* Return true if OFFSET from stack pointer can be clobbered by signals.
20954 V.4 doesn't have any stack cushion; AIX ABIs leave 220 or 288 bytes
20955 below the stack pointer not clobbered by signals. */
20957 static inline bool
20958 offset_below_red_zone_p (HOST_WIDE_INT offset)
20959 {
20960 return offset < (DEFAULT_ABI == ABI_V4
20961 ? 0
20962 : TARGET_32BIT ? -220 : -288);
20963 }
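/* Worked example: on 64-bit AIX-style ABIs the red zone is 288 bytes,
   so an AltiVec save slot at -416 (as in the world-save layout in the
   prologue) is below the red zone and must be reloaded before the
   stack pointer is popped, while an FPR slot at -144 is safe either
   way.  */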
20965 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
20967 static void
20968 emit_cfa_restores (rtx cfa_restores)
20969 {
20970 rtx insn = get_last_insn ();
20971 rtx *loc = &REG_NOTES (insn);
20973 while (*loc)
20974 loc = &XEXP (*loc, 1);
20975 *loc = cfa_restores;
20976 RTX_FRAME_RELATED_P (insn) = 1;
20977 }
20979 /* Emit function epilogue as insns. */
20981 void
20982 rs6000_emit_epilogue (int sibcall)
20984 rs6000_stack_t *info;
20985 int restoring_GPRs_inline;
20986 int restoring_FPRs_inline;
20987 int using_load_multiple;
20988 int using_mtcr_multiple;
20989 int use_backchain_to_restore_sp;
20990 int restore_lr;
20991 int strategy;
20992 HOST_WIDE_INT frame_off = 0;
20993 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
20994 rtx frame_reg_rtx = sp_reg_rtx;
20995 rtx cfa_restores = NULL_RTX;
20996 rtx insn;
20997 rtx cr_save_reg = NULL_RTX;
20998 enum machine_mode reg_mode = Pmode;
20999 int reg_size = TARGET_32BIT ? 4 : 8;
21000 int i;
21001 bool exit_func;
21002 unsigned ptr_regno;
21004 info = rs6000_stack_info ();
21006 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
21008 reg_mode = V2SImode;
21009 reg_size = 8;
21012 strategy = info->savres_strategy;
21013 using_load_multiple = strategy & SAVRES_MULTIPLE;
21014 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
21015 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
21016 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
21017 || rs6000_cpu == PROCESSOR_PPC603
21018 || rs6000_cpu == PROCESSOR_PPC750
21019 || optimize_size);
21020 /* Restore via the backchain when we have a large frame, since this
21021 is more efficient than an addis, addi pair. The second condition
21022 here will not trigger at the moment; we don't actually need a
21023 frame pointer for alloca, but the generic parts of the compiler
21024 give us one anyway. */
21025 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
21026 || (cfun->calls_alloca
21027 && !frame_pointer_needed));
21028 restore_lr = (info->lr_save_p
21029 && (restoring_FPRs_inline
21030 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
21031 && (restoring_GPRs_inline
21032 || info->first_fp_reg_save < 64));
21034 if (WORLD_SAVE_P (info))
21036 int i, j;
21037 char rname[30];
21038 const char *alloc_rname;
21039 rtvec p;
21041 /* eh_rest_world_r10 will return to the location saved in the LR
21042 stack slot (which is not likely to be our caller).
21043 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
21044 rest_world is similar, except any R10 parameter is ignored.
21045 The exception-handling stuff that was here in 2.95 is no
21046 longer necessary. */
21048 p = rtvec_alloc (9
21049 + 1
21050 + 32 - info->first_gp_reg_save
21051 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
21052 + 63 + 1 - info->first_fp_reg_save);
21054 strcpy (rname, ((crtl->calls_eh_return) ?
21055 "*eh_rest_world_r10" : "*rest_world"));
21056 alloc_rname = ggc_strdup (rname);
21058 j = 0;
21059 RTVEC_ELT (p, j++) = ret_rtx;
21060 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
21061 gen_rtx_REG (Pmode,
21062 LR_REGNO));
21063 RTVEC_ELT (p, j++)
21064 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
21065 /* The instruction pattern requires a clobber here;
21066 it is shared with the restVEC helper. */
21067 RTVEC_ELT (p, j++)
21068 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
21071 /* CR register traditionally saved as CR2. */
21072 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
21073 RTVEC_ELT (p, j++)
21074 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
21075 if (flag_shrink_wrap)
21077 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
21078 gen_rtx_REG (Pmode, LR_REGNO),
21079 cfa_restores);
21080 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21084 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21086 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21087 RTVEC_ELT (p, j++)
21088 = gen_frame_load (reg,
21089 frame_reg_rtx, info->gp_save_offset + reg_size * i);
21090 if (flag_shrink_wrap)
21091 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21093 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
21095 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
21096 RTVEC_ELT (p, j++)
21097 = gen_frame_load (reg,
21098 frame_reg_rtx, info->altivec_save_offset + 16 * i);
21099 if (flag_shrink_wrap)
21100 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21102 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
21104 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21105 ? DFmode : SFmode),
21106 info->first_fp_reg_save + i);
21107 RTVEC_ELT (p, j++)
21108 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
21109 if (flag_shrink_wrap)
21110 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21112 RTVEC_ELT (p, j++)
21113 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
21114 RTVEC_ELT (p, j++)
21115 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
21116 RTVEC_ELT (p, j++)
21117 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
21118 RTVEC_ELT (p, j++)
21119 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
21120 RTVEC_ELT (p, j++)
21121 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
21122 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21124 if (flag_shrink_wrap)
21126 REG_NOTES (insn) = cfa_restores;
21127 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21128 RTX_FRAME_RELATED_P (insn) = 1;
21130 return;
21133 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
21134 if (info->push_p)
21135 frame_off = info->total_size;
21137 /* Restore AltiVec registers if we must do so before adjusting the
21138 stack. */
21139 if (TARGET_ALTIVEC_ABI
21140 && info->altivec_size != 0
21141 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21142 || (DEFAULT_ABI != ABI_V4
21143 && offset_below_red_zone_p (info->altivec_save_offset))))
21145 int i;
21146 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
21148 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
21149 if (use_backchain_to_restore_sp)
21151 int frame_regno = 11;
21153 if ((strategy & REST_INLINE_VRS) == 0)
21155 /* Of r11 and r12, select the one not clobbered by an
21156 out-of-line restore function for the frame register. */
21157 frame_regno = 11 + 12 - scratch_regno;
21159 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
21160 emit_move_insn (frame_reg_rtx,
21161 gen_rtx_MEM (Pmode, sp_reg_rtx));
21162 frame_off = 0;
21164 else if (frame_pointer_needed)
21165 frame_reg_rtx = hard_frame_pointer_rtx;
21167 if ((strategy & REST_INLINE_VRS) == 0)
21169 int end_save = info->altivec_save_offset + info->altivec_size;
21170 int ptr_off;
21171 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
21172 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
21174 if (end_save + frame_off != 0)
21176 rtx offset = GEN_INT (end_save + frame_off);
21178 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21180 else
21181 emit_move_insn (ptr_reg, frame_reg_rtx);
21183 ptr_off = -end_save;
21184 insn = rs6000_emit_savres_rtx (info, scratch_reg,
21185 info->altivec_save_offset + ptr_off,
21186 0, V4SImode, SAVRES_VR);
21188 else
21190 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21191 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
21193 rtx addr, areg, mem, reg;
21195 areg = gen_rtx_REG (Pmode, 0);
21196 emit_move_insn
21197 (areg, GEN_INT (info->altivec_save_offset
21198 + frame_off
21199 + 16 * (i - info->first_altivec_reg_save)));
21201 /* AltiVec addressing mode is [reg+reg]. */
21202 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
21203 mem = gen_frame_mem (V4SImode, addr);
21205 reg = gen_rtx_REG (V4SImode, i);
21206 emit_move_insn (reg, mem);
21210 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21211 if (((strategy & REST_INLINE_VRS) == 0
21212 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
21213 && (flag_shrink_wrap
21214 || (offset_below_red_zone_p
21215 (info->altivec_save_offset
21216 + 16 * (i - info->first_altivec_reg_save)))))
21218 rtx reg = gen_rtx_REG (V4SImode, i);
21219 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21223 /* Restore VRSAVE if we must do so before adjusting the stack. */
21224 if (TARGET_ALTIVEC
21225 && TARGET_ALTIVEC_VRSAVE
21226 && info->vrsave_mask != 0
21227 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21228 || (DEFAULT_ABI != ABI_V4
21229 && offset_below_red_zone_p (info->vrsave_save_offset))))
21231 rtx reg;
21233 if (frame_reg_rtx == sp_reg_rtx)
21235 if (use_backchain_to_restore_sp)
21237 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21238 emit_move_insn (frame_reg_rtx,
21239 gen_rtx_MEM (Pmode, sp_reg_rtx));
21240 frame_off = 0;
21242 else if (frame_pointer_needed)
21243 frame_reg_rtx = hard_frame_pointer_rtx;
21246 reg = gen_rtx_REG (SImode, 12);
21247 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21248 info->vrsave_save_offset + frame_off));
21250 emit_insn (generate_set_vrsave (reg, info, 1));
21253 insn = NULL_RTX;
21254 /* If we have a large stack frame, restore the old stack pointer
21255 using the backchain. */
21256 if (use_backchain_to_restore_sp)
21258 if (frame_reg_rtx == sp_reg_rtx)
21260 /* Under V.4, don't reset the stack pointer until after we're done
21261 loading the saved registers. */
21262 if (DEFAULT_ABI == ABI_V4)
21263 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21265 insn = emit_move_insn (frame_reg_rtx,
21266 gen_rtx_MEM (Pmode, sp_reg_rtx));
21267 frame_off = 0;
21269 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21270 && DEFAULT_ABI == ABI_V4)
21271 /* frame_reg_rtx has been set up by the altivec restore. */
21273 else
21275 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
21276 frame_reg_rtx = sp_reg_rtx;
21279 /* If we have a frame pointer, we can restore the old stack pointer
21280 from it. */
21281 else if (frame_pointer_needed)
21283 frame_reg_rtx = sp_reg_rtx;
21284 if (DEFAULT_ABI == ABI_V4)
21285 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21286 /* Prevent reordering memory accesses against stack pointer restore. */
21287 else if (cfun->calls_alloca
21288 || offset_below_red_zone_p (-info->total_size))
21289 rs6000_emit_stack_tie (frame_reg_rtx, true);
21291 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
21292 GEN_INT (info->total_size)));
21293 frame_off = 0;
21295 else if (info->push_p
21296 && DEFAULT_ABI != ABI_V4
21297 && !crtl->calls_eh_return)
21299 /* Prevent reordering memory accesses against stack pointer restore. */
21300 if (cfun->calls_alloca
21301 || offset_below_red_zone_p (-info->total_size))
21302 rs6000_emit_stack_tie (frame_reg_rtx, false);
21303 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
21304 GEN_INT (info->total_size)));
21305 frame_off = 0;
21307 if (insn && frame_reg_rtx == sp_reg_rtx)
21309 if (cfa_restores)
21311 REG_NOTES (insn) = cfa_restores;
21312 cfa_restores = NULL_RTX;
21314 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21315 RTX_FRAME_RELATED_P (insn) = 1;
21318 /* Restore AltiVec registers if we have not done so already. */
21319 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21320 && TARGET_ALTIVEC_ABI
21321 && info->altivec_size != 0
21322 && (DEFAULT_ABI == ABI_V4
21323 || !offset_below_red_zone_p (info->altivec_save_offset)))
21325 int i;
21327 if ((strategy & REST_INLINE_VRS) == 0)
21329 int end_save = info->altivec_save_offset + info->altivec_size;
21330 int ptr_off;
21331 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
21332 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
21333 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
21335 if (end_save + frame_off != 0)
21337 rtx offset = GEN_INT (end_save + frame_off);
21339 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21341 else
21342 emit_move_insn (ptr_reg, frame_reg_rtx);
21344 ptr_off = -end_save;
21345 insn = rs6000_emit_savres_rtx (info, scratch_reg,
21346 info->altivec_save_offset + ptr_off,
21347 0, V4SImode, SAVRES_VR);
21348 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
21350 /* Frame reg was clobbered by out-of-line save. Restore it
21351 from ptr_reg, and if we are calling out-of-line gpr or
21352 fpr restore set up the correct pointer and offset. */
21353 unsigned newptr_regno = 1;
21354 if (!restoring_GPRs_inline)
21356 bool lr = info->gp_save_offset + info->gp_size == 0;
21357 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
21358 newptr_regno = ptr_regno_for_savres (sel);
21359 end_save = info->gp_save_offset + info->gp_size;
21361 else if (!restoring_FPRs_inline)
21363 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
21364 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21365 newptr_regno = ptr_regno_for_savres (sel);
21366 end_save = info->gp_save_offset + info->gp_size;
21369 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
21370 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
21372 if (end_save + ptr_off != 0)
21374 rtx offset = GEN_INT (end_save + ptr_off);
21376 frame_off = -end_save;
21377 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
21379 else
21381 frame_off = ptr_off;
21382 emit_move_insn (frame_reg_rtx, ptr_reg);
21386 else
21388 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21389 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
21391 rtx addr, areg, mem, reg;
21393 areg = gen_rtx_REG (Pmode, 0);
21394 emit_move_insn
21395 (areg, GEN_INT (info->altivec_save_offset
21396 + frame_off
21397 + 16 * (i - info->first_altivec_reg_save)));
21399 /* AltiVec addressing mode is [reg+reg]. */
21400 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
21401 mem = gen_frame_mem (V4SImode, addr);
21403 reg = gen_rtx_REG (V4SImode, i);
21404 emit_move_insn (reg, mem);
21408 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21409 if (((strategy & REST_INLINE_VRS) == 0
21410 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
21411 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
21413 rtx reg = gen_rtx_REG (V4SImode, i);
21414 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21418 /* Restore VRSAVE if we have not done so already. */
21419 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21420 && TARGET_ALTIVEC
21421 && TARGET_ALTIVEC_VRSAVE
21422 && info->vrsave_mask != 0
21423 && (DEFAULT_ABI == ABI_V4
21424 || !offset_below_red_zone_p (info->vrsave_save_offset)))
21426 rtx reg;
21428 reg = gen_rtx_REG (SImode, 12);
21429 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21430 info->vrsave_save_offset + frame_off));
21432 emit_insn (generate_set_vrsave (reg, info, 1));
21435 /* If we exit by an out-of-line restore function on ABI_V4 then that
21436 function will deallocate the stack, so we don't need to worry
21437 about the unwinder restoring cr from an invalid stack frame
21438 location. */
21439 exit_func = (!restoring_FPRs_inline
21440 || (!restoring_GPRs_inline
21441 && info->first_fp_reg_save == 64));
21443 /* Get the old lr if we saved it. If we are restoring registers
21444 out-of-line, then the out-of-line routines can do this for us. */
21445 if (restore_lr && restoring_GPRs_inline)
21446 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21448 /* Get the old cr if we saved it. */
21449 if (info->cr_save_p)
21451 unsigned cr_save_regno = 12;
21453 if (!restoring_GPRs_inline)
21455 /* Ensure we don't use the register used by the out-of-line
21456 GPR restore below. */
21457 bool lr = info->gp_save_offset + info->gp_size == 0;
21458 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
21459 int gpr_ptr_regno = ptr_regno_for_savres (sel);
21461 if (gpr_ptr_regno == 12)
21462 cr_save_regno = 11;
21463 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
21465 else if (REGNO (frame_reg_rtx) == 12)
21466 cr_save_regno = 11;
21468 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
21469 info->cr_save_offset + frame_off,
21470 exit_func);
21473 /* Set LR here to try to overlap restores below. */
21474 if (restore_lr && restoring_GPRs_inline)
21475 restore_saved_lr (0, exit_func);
21477 /* Load exception handler data registers, if needed. */
21478 if (crtl->calls_eh_return)
21480 unsigned int i, regno;
21482 if (TARGET_AIX)
21484 rtx reg = gen_rtx_REG (reg_mode, 2);
21485 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21486 frame_off + 5 * reg_size));
21489 for (i = 0; ; ++i)
21491 rtx mem;
21493 regno = EH_RETURN_DATA_REGNO (i);
21494 if (regno == INVALID_REGNUM)
21495 break;
21497 /* Note: possible use of r0 here to address SPE regs. */
21498 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
21499 info->ehrd_offset + frame_off
21500 + reg_size * (int) i);
21502 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
21506 /* Restore GPRs. This is done as a PARALLEL if we are using
21507 the load-multiple instructions. */
21508 if (TARGET_SPE_ABI
21509 && info->spe_64bit_regs_used
21510 && info->first_gp_reg_save != 32)
21512 /* Determine whether we can address all of the registers that need
21513 to be saved with an offset from frame_reg_rtx that fits in
21514 the small const field for SPE memory instructions. */
21515 int spe_regs_addressable
21516 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21517 + reg_size * (32 - info->first_gp_reg_save - 1))
21518 && restoring_GPRs_inline);
21520 if (!spe_regs_addressable)
21522 int ool_adjust = 0;
21523 rtx old_frame_reg_rtx = frame_reg_rtx;
21524 /* Make r11 point to the start of the SPE save area. We worried about
21525 not clobbering it when we were saving registers in the prologue.
21526 There's no need to worry here because the static chain is passed
21527 anew to every function. */
21529 if (!restoring_GPRs_inline)
21530 ool_adjust = 8 * (info->first_gp_reg_save
21531 - (FIRST_SAVRES_REGISTER + 1));
21532 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21533 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
21534 GEN_INT (info->spe_gp_save_offset
21535 + frame_off
21536 - ool_adjust)));
21537 /* Keep the invariant that frame_reg_rtx + frame_off points
21538 at the top of the stack frame. */
21539 frame_off = -info->spe_gp_save_offset + ool_adjust;
21542 if (restoring_GPRs_inline)
21544 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
21546 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21547 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21549 rtx offset, addr, mem, reg;
21551 /* We're doing all this to ensure that the immediate offset
21552 fits into the immediate field of 'evldd'. */
21553 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
21555 offset = GEN_INT (spe_offset + reg_size * i);
21556 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
21557 mem = gen_rtx_MEM (V2SImode, addr);
21558 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21560 emit_move_insn (reg, mem);
21563 else
21564 rs6000_emit_savres_rtx (info, frame_reg_rtx,
21565 info->spe_gp_save_offset + frame_off,
21566 info->lr_save_offset + frame_off,
21567 reg_mode,
21568 SAVRES_GPR | SAVRES_LR);
21570 else if (!restoring_GPRs_inline)
21572 /* We are jumping to an out-of-line function. */
21573 rtx ptr_reg;
21574 int end_save = info->gp_save_offset + info->gp_size;
21575 bool can_use_exit = end_save == 0;
21576 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
21577 int ptr_off;
21579 /* Emit stack reset code if we need it. */
21580 ptr_regno = ptr_regno_for_savres (sel);
21581 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21582 if (can_use_exit)
21583 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21584 else if (end_save + frame_off != 0)
21585 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
21586 GEN_INT (end_save + frame_off)));
21587 else if (REGNO (frame_reg_rtx) != ptr_regno)
21588 emit_move_insn (ptr_reg, frame_reg_rtx);
21589 if (REGNO (frame_reg_rtx) == ptr_regno)
21590 frame_off = -end_save;
21592 if (can_use_exit && info->cr_save_p)
21593 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
21595 ptr_off = -end_save;
21596 rs6000_emit_savres_rtx (info, ptr_reg,
21597 info->gp_save_offset + ptr_off,
21598 info->lr_save_offset + ptr_off,
21599 reg_mode, sel);
21601 else if (using_load_multiple)
21603 rtvec p;
21604 p = rtvec_alloc (32 - info->first_gp_reg_save);
21605 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21606 RTVEC_ELT (p, i)
21607 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21608 frame_reg_rtx,
21609 info->gp_save_offset + frame_off + reg_size * i);
21610 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21612 else
21614 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21615 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21616 emit_insn (gen_frame_load
21617 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21618 frame_reg_rtx,
21619 info->gp_save_offset + frame_off + reg_size * i));
21622 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21624 /* If the frame pointer was used then we can't delay emitting
21625 a REG_CFA_DEF_CFA note. This must happen on the insn that
21626 restores the frame pointer, r31. We may have already emitted
21627 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
21628 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
21629 be harmless if emitted. */
21630 if (frame_pointer_needed)
21632 insn = get_last_insn ();
21633 add_reg_note (insn, REG_CFA_DEF_CFA,
21634 plus_constant (Pmode, frame_reg_rtx, frame_off));
21635 RTX_FRAME_RELATED_P (insn) = 1;
21638 /* Set up cfa_restores. We always need these when
21639 shrink-wrapping. If not shrink-wrapping then we only need
21640 the cfa_restore when the stack location is no longer valid.
21641 The cfa_restores must be emitted on or before the insn that
21642 invalidates the stack, and of course must not be emitted
21643 before the insn that actually does the restore. The latter
21644 is why it is a bad idea to emit the cfa_restores as a group
21645 on the last instruction here that actually does a restore:
21646 That insn may be reordered with respect to others doing
21647 restores. */
21648 if (flag_shrink_wrap
21649 && !restoring_GPRs_inline
21650 && info->first_fp_reg_save == 64)
21651 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21653 for (i = info->first_gp_reg_save; i < 32; i++)
21654 if (!restoring_GPRs_inline
21655 || using_load_multiple
21656 || rs6000_reg_live_or_pic_offset_p (i))
21658 rtx reg = gen_rtx_REG (reg_mode, i);
21660 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21664 if (!restoring_GPRs_inline
21665 && info->first_fp_reg_save == 64)
21667 /* We are jumping to an out-of-line function. */
21668 if (cfa_restores)
21669 emit_cfa_restores (cfa_restores);
21670 return;
21673 if (restore_lr && !restoring_GPRs_inline)
21675 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21676 restore_saved_lr (0, exit_func);
21679 /* Restore fpr's if we need to do it without calling a function. */
21680 if (restoring_FPRs_inline)
21681 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21682 if (save_reg_p (info->first_fp_reg_save + i))
21684 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21685 ? DFmode : SFmode),
21686 info->first_fp_reg_save + i);
21687 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21688 info->fp_save_offset + frame_off + 8 * i));
21689 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21690 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21693 /* If we saved cr, restore it here. Just those that were used. */
21694 if (info->cr_save_p)
21695 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
21697 /* If this is V.4, unwind the stack pointer after all of the loads
21698 have been done, or set up r11 if we are restoring fp out of line. */
21699 ptr_regno = 1;
21700 if (!restoring_FPRs_inline)
21702 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21703 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21704 ptr_regno = ptr_regno_for_savres (sel);
21707 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21708 if (REGNO (frame_reg_rtx) == ptr_regno)
21709 frame_off = 0;
21711 if (insn && restoring_FPRs_inline)
21713 if (cfa_restores)
21715 REG_NOTES (insn) = cfa_restores;
21716 cfa_restores = NULL_RTX;
21718 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21719 RTX_FRAME_RELATED_P (insn) = 1;
21722 if (crtl->calls_eh_return)
21724 rtx sa = EH_RETURN_STACKADJ_RTX;
21725 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21728 if (!sibcall)
21730 rtvec p;
21731 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21732 if (! restoring_FPRs_inline)
21734 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21735 RTVEC_ELT (p, 0) = ret_rtx;
21737 else
21739 if (cfa_restores)
21741 /* We can't hang the cfa_restores off a simple return,
21742 since the shrink-wrap code sometimes uses an existing
21743 return. This means there might be a path from
21744 pre-prologue code to this return, and dwarf2cfi code
21745 wants the eh_frame unwinder state to be the same on
21746 all paths to any point. So we need to emit the
21747 cfa_restores before the return. For -m64 we really
21748 don't need epilogue cfa_restores at all, except for
21749 this irritating dwarf2cfi-with-shrink-wrap
21750 requirement; the stack red zone means eh_frame info
21751 from the prologue telling the unwinder to restore
21752 from the stack is perfectly good right to the end of
21753 the function. */
21754 emit_insn (gen_blockage ());
21755 emit_cfa_restores (cfa_restores);
21756 cfa_restores = NULL_RTX;
21758 p = rtvec_alloc (2);
21759 RTVEC_ELT (p, 0) = simple_return_rtx;
21762 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21763 ? gen_rtx_USE (VOIDmode,
21764 gen_rtx_REG (Pmode, LR_REGNO))
21765 : gen_rtx_CLOBBER (VOIDmode,
21766 gen_rtx_REG (Pmode, LR_REGNO)));
21768 /* If we have to restore more than two FP registers, branch to the
21769 restore function. It will return to our caller. */
21770 if (! restoring_FPRs_inline)
21772 int i;
21773 rtx sym;
21775 if (flag_shrink_wrap)
21776 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21778 sym = rs6000_savres_routine_sym (info,
21779 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21780 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21781 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21782 gen_rtx_REG (Pmode,
21783 DEFAULT_ABI == ABI_AIX
21784 ? 1 : 11));
21785 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21787 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21789 RTVEC_ELT (p, i + 4)
21790 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21791 if (flag_shrink_wrap)
21792 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21793 cfa_restores);
21797 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21800 if (cfa_restores)
21802 if (sibcall)
21803 /* Ensure the cfa_restores are hung off an insn that won't
21804 be reordered above other restores. */
21805 emit_insn (gen_blockage ());
21807 emit_cfa_restores (cfa_restores);
21811 /* Write function epilogue. */
21813 static void
21814 rs6000_output_function_epilogue (FILE *file,
21815 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21817 #if TARGET_MACHO
21818 macho_branch_islands ();
21819 /* Mach-O doesn't support labels at the end of objects, so if
21820 it looks like we might want one, insert a NOP. */
21822 rtx insn = get_last_insn ();
21823 rtx deleted_debug_label = NULL_RTX;
21824 while (insn
21825 && NOTE_P (insn)
21826 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21828 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
21829 notes only; instead set their CODE_LABEL_NUMBER to -1,
21830 otherwise there would be code generation differences
21831 between -g and -g0.
21832 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21833 deleted_debug_label = insn;
21834 insn = PREV_INSN (insn);
21836 if (insn
21837 && (LABEL_P (insn)
21838 || (NOTE_P (insn)
21839 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21840 fputs ("\tnop\n", file);
21841 else if (deleted_debug_label)
21842 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21843 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21844 CODE_LABEL_NUMBER (insn) = -1;
21846 #endif
21848 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21849 on its format.
21851 We don't output a traceback table if -finhibit-size-directive was
21852 used. The documentation for -finhibit-size-directive reads
21853 ``don't output a @code{.size} assembler directive, or anything
21854 else that would cause trouble if the function is split in the
21855 middle, and the two halves are placed at locations far apart in
21856 memory.'' The traceback table has this property, since it
21857 includes the offset from the start of the function to the
21858 traceback table itself.
21860 System V.4 PowerPC (and the embedded ABI derived from it) uses a
21861 different traceback table. */
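/* For illustration, a hypothetical optimized C function compiled
   with full traceback info that saves f30-f31 and r31, needs a
   frame pointer, pushes a frame, saves LR, and takes one word of
   fixed arguments would get this fixed part from the code below:

	.long 0				# all-zero word marks tbtab start
	.byte 0,0,34,97,130,1,1,0	# format 0, language C, flag bytes

   i.e. offset-stored and fp-used bits (34), name-present, alloca
   and lr-saved bits (97), push_p plus two FPRs saved (130), one GPR
   saved, one fixed parameter word, and no FP parameters.  */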
21862 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21863 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21865 const char *fname = NULL;
21866 const char *language_string = lang_hooks.name;
21867 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21868 int i;
21869 int optional_tbtab;
21870 rs6000_stack_t *info = rs6000_stack_info ();
21872 if (rs6000_traceback == traceback_full)
21873 optional_tbtab = 1;
21874 else if (rs6000_traceback == traceback_part)
21875 optional_tbtab = 0;
21876 else
21877 optional_tbtab = !optimize_size && !TARGET_ELF;
21879 if (optional_tbtab)
21881 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21882 while (*fname == '.') /* V.4 encodes . in the name */
21883 fname++;
21885 /* Need label immediately before tbtab, so we can compute
21886 its offset from the function start. */
21887 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21888 ASM_OUTPUT_LABEL (file, fname);
21891 /* The .tbtab pseudo-op can only be used for the first eight
21892 expressions, since it can't handle the possibly variable
21893 length fields that follow. However, if you omit the optional
21894 fields, the assembler outputs zeros for all optional fields
21895 anyway, giving each variable-length field its minimum length
21896 (as defined in sys/debug.h). Thus we cannot use the .tbtab
21897 pseudo-op at all. */
21899 /* An all-zero word flags the start of the tbtab, for debuggers
21900 that have to find it by searching forward from the entry
21901 point or from the current pc. */
21902 fputs ("\t.long 0\n", file);
21904 /* Tbtab format type. Use format type 0. */
21905 fputs ("\t.byte 0,", file);
21907 /* Language type. Unfortunately, there does not seem to be any
21908 official way to discover the language being compiled, so we
21909 use language_string.
21910 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21911 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21912 a number, so for now use 9. LTO and Go aren't assigned numbers
21913 either, so for now use 0. */
21914 if (! strcmp (language_string, "GNU C")
21915 || ! strcmp (language_string, "GNU GIMPLE")
21916 || ! strcmp (language_string, "GNU Go"))
21917 i = 0;
21918 else if (! strcmp (language_string, "GNU F77")
21919 || ! strcmp (language_string, "GNU Fortran"))
21920 i = 1;
21921 else if (! strcmp (language_string, "GNU Pascal"))
21922 i = 2;
21923 else if (! strcmp (language_string, "GNU Ada"))
21924 i = 3;
21925 else if (! strcmp (language_string, "GNU C++")
21926 || ! strcmp (language_string, "GNU Objective-C++"))
21927 i = 9;
21928 else if (! strcmp (language_string, "GNU Java"))
21929 i = 13;
21930 else if (! strcmp (language_string, "GNU Objective-C"))
21931 i = 14;
21932 else
21933 gcc_unreachable ();
21934 fprintf (file, "%d,", i);
21936 /* 8 single bit fields: global linkage (not set for C extern linkage,
21937 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21938 from start of procedure stored in tbtab, internal function, function
21939 has controlled storage, function has no toc, function uses fp,
21940 function logs/aborts fp operations. */
21941 /* Assume that fp operations are used if any fp reg must be saved. */
21942 fprintf (file, "%d,",
21943 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
21945 /* 6 bitfields: function is interrupt handler, name present in
21946 proc table, function calls alloca, on condition directives
21947 (controls stack walks, 3 bits), saves condition reg, saves
21948 link reg. */
21949 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21950 set up as a frame pointer, even when there is no alloca call. */
21951 fprintf (file, "%d,",
21952 ((optional_tbtab << 6)
21953 | ((optional_tbtab & frame_pointer_needed) << 5)
21954 | (info->cr_save_p << 1)
21955 | (info->lr_save_p)));
21957 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21958 (6 bits). */
21959 fprintf (file, "%d,",
21960 (info->push_p << 7) | (64 - info->first_fp_reg_save));
21962 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21963 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21965 if (optional_tbtab)
21967 /* Compute the parameter info from the function decl argument
21968 list. */
21969 tree decl;
21970 int next_parm_info_bit = 31;
21972 for (decl = DECL_ARGUMENTS (current_function_decl);
21973 decl; decl = DECL_CHAIN (decl))
21975 rtx parameter = DECL_INCOMING_RTL (decl);
21976 enum machine_mode mode = GET_MODE (parameter);
21978 if (GET_CODE (parameter) == REG)
21980 if (SCALAR_FLOAT_MODE_P (mode))
21982 int bits;
21984 float_parms++;
21986 switch (mode)
21988 case SFmode:
21989 case SDmode:
21990 bits = 0x2;
21991 break;
21993 case DFmode:
21994 case DDmode:
21995 case TFmode:
21996 case TDmode:
21997 bits = 0x3;
21998 break;
22000 default:
22001 gcc_unreachable ();
22004 /* If only one bit will fit, don't OR in this entry. */
22005 if (next_parm_info_bit > 0)
22006 parm_info |= (bits << (next_parm_info_bit - 1));
22007 next_parm_info_bit -= 2;
22009 else
22011 fixed_parms += ((GET_MODE_SIZE (mode)
22012 + (UNITS_PER_WORD - 1))
22013 / UNITS_PER_WORD);
22014 next_parm_info_bit -= 1;
22020 /* Number of fixed point parameters. */
22021 /* This is actually the number of words of fixed point parameters; thus
22022 an 8-byte struct counts as 2, and the maximum value is 8. */
22023 fprintf (file, "%d,", fixed_parms);
22025 /* 2 bitfields: number of floating point parameters (7 bits), parameters
22026 all on stack. */
22027 /* This is actually the number of fp registers that hold parameters;
22028 and thus the maximum value is 13. */
22029 /* Set parameters on stack bit if parameters are not in their original
22030 registers, regardless of whether they are on the stack? Xlc
22031 seems to set the bit when not optimizing. */
22032 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
22034 if (! optional_tbtab)
22035 return;
22037 /* Optional fields follow. Some are variable length. */
22039 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
22040 11 double float. */
22041 /* There is an entry for each parameter in a register, in the order that
22042 they occur in the parameter list. Any intervening arguments on the
22043 stack are ignored. If the list overflows a long (max possible length
22044 34 bits) then completely leave off all elements that don't fit. */
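/* For example, a hypothetical parameter list (int, float, double)
   passed entirely in registers gives fixed_parms = 1, float_parms = 2
   and, reading from bit 31 downward, parm_info = 0 10 11 0...0:
   a 0 bit for the fixed word, then 10 for the single float and
   11 for the double.  */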
22045 /* Only emit this long if there was at least one parameter. */
22046 if (fixed_parms || float_parms)
22047 fprintf (file, "\t.long %d\n", parm_info);
22049 /* Offset from start of code to tb table. */
22050 fputs ("\t.long ", file);
22051 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
22052 RS6000_OUTPUT_BASENAME (file, fname);
22053 putc ('-', file);
22054 rs6000_output_function_entry (file, fname);
22055 putc ('\n', file);
22057 /* Interrupt handler mask. */
22058 /* Omit this long, since we never set the interrupt handler bit
22059 above. */
22061 /* Number of CTL (controlled storage) anchors. */
22062 /* Omit this long, since the has_ctl bit is never set above. */
22064 /* Displacement into stack of each CTL anchor. */
22065 /* Omit this list of longs, because there are no CTL anchors. */
22067 /* Length of function name. */
22068 if (*fname == '*')
22069 ++fname;
22070 fprintf (file, "\t.short %d\n", (int) strlen (fname));
22072 /* Function name. */
22073 assemble_string (fname, strlen (fname));
22075 /* Register for alloca automatic storage; this is always reg 31.
22076 Only emit this if the alloca bit was set above. */
22077 if (frame_pointer_needed)
22078 fputs ("\t.byte 31\n", file);
22080 fputs ("\t.align 2\n", file);
22084 /* A C compound statement that outputs the assembler code for a thunk
22085 function, used to implement C++ virtual function calls with
22086 multiple inheritance. The thunk acts as a wrapper around a virtual
22087 function, adjusting the implicit object parameter before handing
22088 control off to the real function.
22090 First, emit code to add the integer DELTA to the location that
22091 contains the incoming first argument. Assume that this argument
22092 contains a pointer, and is the one used to pass the `this' pointer
22093 in C++. This is the incoming argument *before* the function
22094 prologue, e.g. `%o0' on a sparc. The addition must preserve the
22095 values of all other incoming arguments.
22097 After the addition, emit code to jump to FUNCTION, which is a
22098 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
22099 not touch the return address. Hence returning from FUNCTION will
22100 return to whoever called the current `thunk'.
22102 The effect must be as if FUNCTION had been called directly with the
22103 adjusted first argument. This macro is responsible for emitting
22104 all of the code for a thunk function; output_function_prologue()
22105 and output_function_epilogue() are not invoked.
22107 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
22108 been extracted from it.) It might possibly be useful on some
22109 targets, but probably not.
22111 If you do not define this macro, the target-independent code in the
22112 C++ frontend will generate a less efficient heavyweight thunk that
22113 calls FUNCTION instead of jumping to it. The generic approach does
22114 not support varargs. */
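/* For illustration, a hypothetical thunk with DELTA == 8 and
   VCALL_OFFSET == 0 on 32-bit AIX reduces to roughly:

	addi 3,3,8	# adjust the incoming `this' pointer
	b .target	# tail call; LR still holds the caller's address
 */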
22116 static void
22117 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
22118 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
22119 tree function)
22121 rtx this_rtx, insn, funexp;
22123 reload_completed = 1;
22124 epilogue_completed = 1;
22126 /* Mark the end of the (empty) prologue. */
22127 emit_note (NOTE_INSN_PROLOGUE_END);
22129 /* Find the "this" pointer. If the function returns a structure,
22130 the structure return pointer is in r3. */
22131 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
22132 this_rtx = gen_rtx_REG (Pmode, 4);
22133 else
22134 this_rtx = gen_rtx_REG (Pmode, 3);
22136 /* Apply the constant offset, if required. */
22137 if (delta)
22138 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
22140 /* Apply the offset from the vtable, if required. */
22141 if (vcall_offset)
22143 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
22144 rtx tmp = gen_rtx_REG (Pmode, 12);
22146 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
22147 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
22149 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
22150 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
22152 else
22154 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
22156 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
22158 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
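/* E.g. for a hypothetical in-range vcall_offset of 16, the insns
   above assemble (32-bit) to roughly:
	lwz 12,0(3)	# load the vtable pointer from *this
	lwz 12,16(12)	# load the adjustment from the vtable
	add 3,3,12	# adjust `this' */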
22161 /* Generate a tail call to the target function. */
22162 if (!TREE_USED (function))
22164 assemble_external (function);
22165 TREE_USED (function) = 1;
22167 funexp = XEXP (DECL_RTL (function), 0);
22168 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
22170 #if TARGET_MACHO
22171 if (MACHOPIC_INDIRECT)
22172 funexp = machopic_indirect_call_target (funexp);
22173 #endif
22175 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
22176 generate sibcall RTL explicitly. */
22177 insn = emit_call_insn (
22178 gen_rtx_PARALLEL (VOIDmode,
22179 gen_rtvec (4,
22180 gen_rtx_CALL (VOIDmode,
22181 funexp, const0_rtx),
22182 gen_rtx_USE (VOIDmode, const0_rtx),
22183 gen_rtx_USE (VOIDmode,
22184 gen_rtx_REG (SImode,
22185 LR_REGNO)),
22186 simple_return_rtx)));
22187 SIBLING_CALL_P (insn) = 1;
22188 emit_barrier ();
22190 /* Run just enough of rest_of_compilation to get the insns emitted.
22191 There's not really enough bulk here to make other passes such as
22192 instruction scheduling worthwhile. Note that use_thunk calls
22193 assemble_start_function and assemble_end_function. */
22194 insn = get_insns ();
22195 shorten_branches (insn);
22196 final_start_function (insn, file, 1);
22197 final (insn, file, 1);
22198 final_end_function ();
22200 reload_completed = 0;
22201 epilogue_completed = 0;
22204 /* A quick summary of the various types of 'constant-pool tables'
22205 under PowerPC:
22207 Target           Flags           Name             One table per
22208 AIX              (none)          AIX TOC          object file
22209 AIX              -mfull-toc      AIX TOC          object file
22210 AIX              -mminimal-toc   AIX minimal TOC  translation unit
22211 SVR4/EABI        (none)          SVR4 SDATA       object file
22212 SVR4/EABI        -fpic           SVR4 pic         object file
22213 SVR4/EABI        -fPIC           SVR4 PIC         translation unit
22214 SVR4/EABI        -mrelocatable   EABI TOC         function
22215 SVR4/EABI        -maix           AIX TOC          object file
22216 SVR4/EABI        -maix -mminimal-toc
22217                                  AIX minimal TOC  translation unit
22219 Name             Reg.  Set by   entries   contains:
22220                        made by  addrs?    fp?      sum?
22222 AIX TOC          2     crt0     as        Y        option   option
22223 AIX minimal TOC  30    prolog   gcc       Y        Y        option
22224 SVR4 SDATA       13    crt0     gcc       N        Y        N
22225 SVR4 pic         30    prolog   ld        Y        not yet  N
22226 SVR4 PIC         30    prolog   gcc       Y        option   option
22227 EABI TOC         30    prolog   gcc       Y        option   option
22231 /* Hash functions for the hash table. */
22233 static unsigned
22234 rs6000_hash_constant (rtx k)
22236 enum rtx_code code = GET_CODE (k);
22237 enum machine_mode mode = GET_MODE (k);
22238 unsigned result = (code << 3) ^ mode;
22239 const char *format;
22240 int flen, fidx;
22242 format = GET_RTX_FORMAT (code);
22243 flen = strlen (format);
22244 fidx = 0;
22246 switch (code)
22248 case LABEL_REF:
22249 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
22251 case CONST_DOUBLE:
22252 if (mode != VOIDmode)
22253 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
22254 flen = 2;
22255 break;
22257 case CODE_LABEL:
22258 fidx = 3;
22259 break;
22261 default:
22262 break;
22265 for (; fidx < flen; fidx++)
22266 switch (format[fidx])
22268 case 's':
22270 unsigned i, len;
22271 const char *str = XSTR (k, fidx);
22272 len = strlen (str);
22273 result = result * 613 + len;
22274 for (i = 0; i < len; i++)
22275 result = result * 613 + (unsigned) str[i];
22276 break;
22278 case 'u':
22279 case 'e':
22280 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
22281 break;
22282 case 'i':
22283 case 'n':
22284 result = result * 613 + (unsigned) XINT (k, fidx);
22285 break;
22286 case 'w':
22287 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
22288 result = result * 613 + (unsigned) XWINT (k, fidx);
22289 else
22291 size_t i;
22292 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
22293 result = result * 613 + (unsigned) (XWINT (k, fidx)
22294 >> CHAR_BIT * i);
22296 break;
22297 case '0':
22298 break;
22299 default:
22300 gcc_unreachable ();
22303 return result;
22306 static unsigned
22307 toc_hash_function (const void *hash_entry)
22309 const struct toc_hash_struct *thc =
22310 (const struct toc_hash_struct *) hash_entry;
22311 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
22314 /* Compare H1 and H2 for equivalence. */
22316 static int
22317 toc_hash_eq (const void *h1, const void *h2)
22319 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
22320 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
22322 if (((const struct toc_hash_struct *) h1)->key_mode
22323 != ((const struct toc_hash_struct *) h2)->key_mode)
22324 return 0;
22326 return rtx_equal_p (r1, r2);
22329 /* These are the names given by the C++ front-end to vtables, and
22330 vtable-like objects. Ideally, this logic should not be here;
22331 instead, there should be some programmatic way of inquiring as
22332 to whether or not an object is a vtable. */
22334 #define VTABLE_NAME_P(NAME) \
22335 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
22336 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
22337 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
22338 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
22339 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
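/* For example, "_ZTV4Base" (the Itanium-ABI mangled name of Base's
   vtable) and "_ZTI4Base" (its type_info) match; an ordinary
   function symbol such as "_Z3foov" does not.  */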
22341 #ifdef NO_DOLLAR_IN_LABEL
22342 /* Return a GGC-allocated character string translating dollar signs in
22343 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
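/* E.g. a hypothetical name "foo$bar$baz" becomes "foo_bar_baz";
   a name containing no '$', or one starting with '$', is returned
   unchanged.  */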
22345 const char *
22346 rs6000_xcoff_strip_dollar (const char *name)
22348 char *strip, *p;
22349 const char *q;
22350 size_t len;
22352 q = (const char *) strchr (name, '$');
22354 if (q == 0 || q == name)
22355 return name;
22357 len = strlen (name);
22358 strip = XALLOCAVEC (char, len + 1);
22359 strcpy (strip, name);
22360 p = strip + (q - name);
22361 while (p)
22363 *p = '_';
22364 p = strchr (p + 1, '$');
22367 return ggc_alloc_string (strip, len);
22369 #endif
22371 void
22372 rs6000_output_symbol_ref (FILE *file, rtx x)
22374 /* Currently C++ toc references to vtables can be emitted before it
22375 is decided whether the vtable is public or private. If this is
22376 the case, then the linker will eventually complain that there is
22377 a reference to an unknown section. Thus, for vtables only,
22378 we emit the TOC reference to reference the symbol and not the
22379 section. */
22380 const char *name = XSTR (x, 0);
22382 if (VTABLE_NAME_P (name))
22384 RS6000_OUTPUT_BASENAME (file, name);
22386 else
22387 assemble_name (file, name);
22390 /* Output a TOC entry. We derive the entry name from what is being
22391 written. */
22393 void
22394 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
22396 char buf[256];
22397 const char *name = buf;
22398 rtx base = x;
22399 HOST_WIDE_INT offset = 0;
22401 gcc_assert (!TARGET_NO_TOC);
22403 /* When the linker won't eliminate them, don't output duplicate
22404 TOC entries (this happens on AIX if there is any kind of TOC,
22405 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
22406 CODE_LABELs. */
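/* E.g. if the constant for label 12 duplicates the one already
   recorded for label 5, the code below emits (with the usual XCOFF
   "LC.." prefix) just:
	.set LC..12,LC..5  */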
22407 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
22409 struct toc_hash_struct *h;
22410 void * * found;
22412 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
22413 time because GGC is not initialized at that point. */
22414 if (toc_hash_table == NULL)
22415 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
22416 toc_hash_eq, NULL);
22418 h = ggc_alloc_toc_hash_struct ();
22419 h->key = x;
22420 h->key_mode = mode;
22421 h->labelno = labelno;
22423 found = htab_find_slot (toc_hash_table, h, INSERT);
22424 if (*found == NULL)
22425 *found = h;
22426 else /* This is indeed a duplicate.
22427 Set this label equal to that label. */
22429 fputs ("\t.set ", file);
22430 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
22431 fprintf (file, "%d,", labelno);
22432 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
22433 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
22434 found)->labelno));
22436 #ifdef HAVE_AS_TLS
22437 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
22438 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
22439 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
22441 fputs ("\t.set ", file);
22442 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
22443 fprintf (file, "%d,", labelno);
22444 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
22445 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
22446 found)->labelno));
22448 #endif
22449 return;
22453 /* If we're going to put a double constant in the TOC, make sure it's
22454 aligned properly when strict alignment is on. */
22455 if (GET_CODE (x) == CONST_DOUBLE
22456 && STRICT_ALIGNMENT
22457 && GET_MODE_BITSIZE (mode) >= 64
22458 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
22459 ASM_OUTPUT_ALIGN (file, 3);
22462 (*targetm.asm_out.internal_label) (file, "LC", labelno);
22464 /* Handle FP constants specially. Note that if we have a minimal
22465 TOC, things we put here aren't actually in the TOC, so we can allow
22466 FP constants. */
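/* E.g. the DFmode constant 1.0 (big-endian words k[0] = 0x3ff00000,
   k[1] = 0) on 64-bit AIX would come out of the DFmode case below as:
	.tc FD_3ff00000_0[TC],0x3ff0000000000000  */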
22467 if (GET_CODE (x) == CONST_DOUBLE
22468 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
22470 REAL_VALUE_TYPE rv;
22471 long k[4];
22473 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22474 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22475 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
22476 else
22477 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
22479 if (TARGET_64BIT)
22481 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22482 fputs (DOUBLE_INT_ASM_OP, file);
22483 else
22484 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
22485 k[0] & 0xffffffff, k[1] & 0xffffffff,
22486 k[2] & 0xffffffff, k[3] & 0xffffffff);
22487 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
22488 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
22489 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
22490 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
22491 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
22492 return;
22494 else
22496 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22497 fputs ("\t.long ", file);
22498 else
22499 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
22500 k[0] & 0xffffffff, k[1] & 0xffffffff,
22501 k[2] & 0xffffffff, k[3] & 0xffffffff);
22502 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
22503 k[0] & 0xffffffff, k[1] & 0xffffffff,
22504 k[2] & 0xffffffff, k[3] & 0xffffffff);
22505 return;
22508 else if (GET_CODE (x) == CONST_DOUBLE
22509 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
22511 REAL_VALUE_TYPE rv;
22512 long k[2];
22514 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22516 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22517 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
22518 else
22519 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
22521 if (TARGET_64BIT)
22523 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22524 fputs (DOUBLE_INT_ASM_OP, file);
22525 else
22526 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22527 k[0] & 0xffffffff, k[1] & 0xffffffff);
22528 fprintf (file, "0x%lx%08lx\n",
22529 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
22530 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
22531 return;
22533 else
22535 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22536 fputs ("\t.long ", file);
22537 else
22538 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22539 k[0] & 0xffffffff, k[1] & 0xffffffff);
22540 fprintf (file, "0x%lx,0x%lx\n",
22541 k[0] & 0xffffffff, k[1] & 0xffffffff);
22542 return;
22545 else if (GET_CODE (x) == CONST_DOUBLE
22546 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
22548 REAL_VALUE_TYPE rv;
22549 long l;
22551 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22552 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22553 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
22554 else
22555 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
22557 if (TARGET_64BIT)
22559 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22560 fputs (DOUBLE_INT_ASM_OP, file);
22561 else
22562 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22563 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
22564 return;
22566 else
22568 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22569 fputs ("\t.long ", file);
22570 else
22571 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22572 fprintf (file, "0x%lx\n", l & 0xffffffff);
22573 return;
22576 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
22578 unsigned HOST_WIDE_INT low;
22579 HOST_WIDE_INT high;
22581 low = INTVAL (x) & 0xffffffff;
22582 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
22584 /* TOC entries are always Pmode-sized, so when big-endian
22585 smaller integer constants in the TOC need to be padded.
22586 (This is still a win over putting the constants in
22587 a separate constant pool, because then we'd have
22588 to have both a TOC entry _and_ the actual constant.)
22590 For a 32-bit target, CONST_INT values are loaded and shifted
22591 entirely within `low' and can be stored in one TOC entry. */
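/* E.g. padding the SImode constant 0x12345678 on a big-endian
   64-bit target: low is shifted into the high half, so the entry
   below is emitted as 0x1234567800000000.  */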
22593 /* It would be easy to make this work, but it doesn't now. */
22594 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
22596 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
22598 low |= high << 32;
22599 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
22600 high = (HOST_WIDE_INT) low >> 32;
22601 low &= 0xffffffff;
22604 if (TARGET_64BIT)
22606 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22607 fputs (DOUBLE_INT_ASM_OP, file);
22608 else
22609 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22610 (long) high & 0xffffffff, (long) low & 0xffffffff);
22611 fprintf (file, "0x%lx%08lx\n",
22612 (long) high & 0xffffffff, (long) low & 0xffffffff);
22613 return;
22615 else
22617 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
22619 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22620 fputs ("\t.long ", file);
22621 else
22622 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22623 (long) high & 0xffffffff, (long) low & 0xffffffff);
22624 fprintf (file, "0x%lx,0x%lx\n",
22625 (long) high & 0xffffffff, (long) low & 0xffffffff);
22627 else
22629 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22630 fputs ("\t.long ", file);
22631 else
22632 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
22633 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
22635 return;
22639 if (GET_CODE (x) == CONST)
22641 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
22642 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
22644 base = XEXP (XEXP (x, 0), 0);
22645 offset = INTVAL (XEXP (XEXP (x, 0), 1));
22648 switch (GET_CODE (base))
22650 case SYMBOL_REF:
22651 name = XSTR (base, 0);
22652 break;
22654 case LABEL_REF:
22655 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
22656 CODE_LABEL_NUMBER (XEXP (base, 0)));
22657 break;
22659 case CODE_LABEL:
22660 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
22661 break;
22663 default:
22664 gcc_unreachable ();
22667 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22668 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
22669 else
22671 fputs ("\t.tc ", file);
22672 RS6000_OUTPUT_BASENAME (file, name);
22674 if (offset < 0)
22675 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
22676 else if (offset)
22677 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
22679 /* Mark large TOC symbols on AIX with [TE] so they are mapped
22680 after other TOC symbols, reducing overflow of small TOC access
22681 to [TC] symbols. */
22682 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
22683 ? "[TE]," : "[TC],", file);
22686 /* Currently C++ toc references to vtables can be emitted before it
22687 is decided whether the vtable is public or private. If this is
22688 the case, then the linker will eventually complain that there is
22689 a TOC reference to an unknown section. Thus, for vtables only,
22690 we emit the TOC reference to reference the symbol and not the
22691 section. */
22692 if (VTABLE_NAME_P (name))
22694 RS6000_OUTPUT_BASENAME (file, name);
22695 if (offset < 0)
22696 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
22697 else if (offset > 0)
22698 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
22700 else
22701 output_addr_const (file, x);
22703 #if HAVE_AS_TLS
22704 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
22705 && SYMBOL_REF_TLS_MODEL (base) != 0)
22707 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
22708 fputs ("@le", file);
22709 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
22710 fputs ("@ie", file);
22711 /* Use global-dynamic for local-dynamic. */
22712 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
22713 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
22715 putc ('\n', file);
22716 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
22717 fputs ("\t.tc .", file);
22718 RS6000_OUTPUT_BASENAME (file, name);
22719 fputs ("[TC],", file);
22720 output_addr_const (file, x);
22721 fputs ("@m", file);
22724 #endif
22726 putc ('\n', file);
22729 /* Output an assembler pseudo-op to write an ASCII string of N characters
22730 starting at P to FILE.
22732 On the RS/6000, we have to do this using the .byte operation and
22733 write out special characters outside the quoted string.
22734 Also, the assembler is broken; very long strings are truncated,
22735 so we must artificially break them up early. */
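/* E.g. a hypothetical call output_ascii (file, "a\"b\n", 4) emits:
	.byte "a""b"
	.byte 10
   with the embedded quote doubled and the newline written as a
   decimal byte outside the quoted string.  */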
22737 void
22738 output_ascii (FILE *file, const char *p, int n)
22740 char c;
22741 int i, count_string;
22742 const char *for_string = "\t.byte \"";
22743 const char *for_decimal = "\t.byte ";
22744 const char *to_close = NULL;
22746 count_string = 0;
22747 for (i = 0; i < n; i++)
22749 c = *p++;
22750 if (c >= ' ' && c < 0177)
22752 if (for_string)
22753 fputs (for_string, file);
22754 putc (c, file);
22756 /* Write two quotes to get one. */
22757 if (c == '"')
22759 putc (c, file);
22760 ++count_string;
22763 for_string = NULL;
22764 for_decimal = "\"\n\t.byte ";
22765 to_close = "\"\n";
22766 ++count_string;
22768 if (count_string >= 512)
22770 fputs (to_close, file);
22772 for_string = "\t.byte \"";
22773 for_decimal = "\t.byte ";
22774 to_close = NULL;
22775 count_string = 0;
22778 else
22780 if (for_decimal)
22781 fputs (for_decimal, file);
22782 fprintf (file, "%d", c);
22784 for_string = "\n\t.byte \"";
22785 for_decimal = ", ";
22786 to_close = "\n";
22787 count_string = 0;
22791 /* Now close the string if we have written one. Then end the line. */
22792 if (to_close)
22793 fputs (to_close, file);
22796 /* Generate a unique section name for FILENAME for a section type
22797 represented by SECTION_DESC. Output goes into BUF.
22799 SECTION_DESC can be any string, as long as it is different for each
22800 possible section type.
22802 We name the section in the same manner as xlc. The name begins with an
22803 underscore followed by the filename (after stripping any leading directory
22804 names) with the last period replaced by the string SECTION_DESC. If
22805 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22806 the name. */
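/* E.g. a hypothetical FILENAME "src/foo.c" with SECTION_DESC "bss"
   yields "_foobss": the directory part is stripped and everything
   from the last period on is replaced by SECTION_DESC.  */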
22808 void
22809 rs6000_gen_section_name (char **buf, const char *filename,
22810 const char *section_desc)
22812 const char *q, *after_last_slash, *last_period = 0;
22813 char *p;
22814 int len;
22816 after_last_slash = filename;
22817 for (q = filename; *q; q++)
22819 if (*q == '/')
22820 after_last_slash = q + 1;
22821 else if (*q == '.')
22822 last_period = q;
22825 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22826 *buf = (char *) xmalloc (len);
22828 p = *buf;
22829 *p++ = '_';
22831 for (q = after_last_slash; *q; q++)
22833 if (q == last_period)
22835 strcpy (p, section_desc);
22836 p += strlen (section_desc);
22837 break;
22840 else if (ISALNUM (*q))
22841 *p++ = *q;
22844 if (last_period == 0)
22845 strcpy (p, section_desc);
22846 else
22847 *p = '\0';
22850 /* Emit profile function. */
22852 void
22853 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22855 /* Non-standard profiling for kernels, which just saves LR then calls
22856 _mcount without worrying about arg saves. The idea is to change
22857 the function prologue as little as possible as it isn't easy to
22858 account for arg save/restore code added just for _mcount. */
22859 if (TARGET_PROFILE_KERNEL)
22860 return;
22862 if (DEFAULT_ABI == ABI_AIX)
22864 #ifndef NO_PROFILE_COUNTERS
22865 # define NO_PROFILE_COUNTERS 0
22866 #endif
22867 if (NO_PROFILE_COUNTERS)
22868 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22869 LCT_NORMAL, VOIDmode, 0);
22870 else
22872 char buf[30];
22873 const char *label_name;
22874 rtx fun;
22876 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22877 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22878 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22880 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22881 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22884 else if (DEFAULT_ABI == ABI_DARWIN)
22886 const char *mcount_name = RS6000_MCOUNT;
22887 int caller_addr_regno = LR_REGNO;
22889 /* Be conservative and always set this, at least for now. */
22890 crtl->uses_pic_offset_table = 1;
22892 #if TARGET_MACHO
22893 /* For PIC code, set up a stub and collect the caller's address
22894 from r0, which is where the prologue puts it. */
22895 if (MACHOPIC_INDIRECT
22896 && crtl->uses_pic_offset_table)
22897 caller_addr_regno = 0;
22898 #endif
22899 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22900 LCT_NORMAL, VOIDmode, 1,
22901 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22905 /* Write function profiler code. */
22907 void
22908 output_function_profiler (FILE *file, int labelno)
22910 char buf[100];
22912 switch (DEFAULT_ABI)
22914 default:
22915 gcc_unreachable ();
22917 case ABI_V4:
22918 if (!TARGET_32BIT)
22920 warning (0, "no profiling of 64-bit code for this ABI");
22921 return;
22923 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22924 fprintf (file, "\tmflr %s\n", reg_names[0]);
22925 if (NO_PROFILE_COUNTERS)
22927 asm_fprintf (file, "\tstw %s,4(%s)\n",
22928 reg_names[0], reg_names[1]);
22930 else if (TARGET_SECURE_PLT && flag_pic)
22932 if (TARGET_LINK_STACK)
22934 char name[32];
22935 get_ppc476_thunk_name (name);
22936 asm_fprintf (file, "\tbl %s\n", name);
22938 else
22939 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22940 asm_fprintf (file, "\tstw %s,4(%s)\n",
22941 reg_names[0], reg_names[1]);
22942 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22943 asm_fprintf (file, "\taddis %s,%s,",
22944 reg_names[12], reg_names[12]);
22945 assemble_name (file, buf);
22946 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
22947 assemble_name (file, buf);
22948 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22950 else if (flag_pic == 1)
22952 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22953 asm_fprintf (file, "\tstw %s,4(%s)\n",
22954 reg_names[0], reg_names[1]);
22955 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22956 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
22957 assemble_name (file, buf);
22958 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22960 else if (flag_pic > 1)
22962 asm_fprintf (file, "\tstw %s,4(%s)\n",
22963 reg_names[0], reg_names[1]);
22964 /* Now, we need to get the address of the label. */
22965 if (TARGET_LINK_STACK)
22967 char name[32];
22968 get_ppc476_thunk_name (name);
22969 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22970 assemble_name (file, buf);
22971 fputs ("-.\n1:", file);
22972 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22973 asm_fprintf (file, "\taddi %s,%s,4\n",
22974 reg_names[11], reg_names[11]);
22976 else
22978 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22979 assemble_name (file, buf);
22980 fputs ("-.\n1:", file);
22981 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22983 asm_fprintf (file, "\tlwz %s,0(%s)\n",
22984 reg_names[0], reg_names[11]);
22985 asm_fprintf (file, "\tadd %s,%s,%s\n",
22986 reg_names[0], reg_names[0], reg_names[11]);
22988 else
22990 asm_fprintf (file, "\tlis %s,", reg_names[12]);
22991 assemble_name (file, buf);
22992 fputs ("@ha\n", file);
22993 asm_fprintf (file, "\tstw %s,4(%s)\n",
22994 reg_names[0], reg_names[1]);
22995 asm_fprintf (file, "\tla %s,", reg_names[0]);
22996 assemble_name (file, buf);
22997 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
23000 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
23001 fprintf (file, "\tbl %s%s\n",
23002 RS6000_MCOUNT, flag_pic ? "@plt" : "");
23003 break;
23005 case ABI_AIX:
23006 case ABI_DARWIN:
23007 if (!TARGET_PROFILE_KERNEL)
23009 /* Don't do anything, done in output_profile_hook (). */
23011 else
23013 gcc_assert (!TARGET_32BIT);
23015 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
23016 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
23018 if (cfun->static_chain_decl != NULL)
23020 asm_fprintf (file, "\tstd %s,24(%s)\n",
23021 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23022 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23023 asm_fprintf (file, "\tld %s,24(%s)\n",
23024 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23026 else
23027 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23029 break;
23035 /* The following variable holds the last issued insn. */
23037 static rtx last_scheduled_insn;
23039 /* The following variable helps to balance issuing of load and
23040 store instructions. */
23042 static int load_store_pendulum;
23044 /* Power4 load update and store update instructions are cracked into a
23045 load or store and an integer insn which are executed in the same cycle.
23046 Branches have their own dispatch slot which does not count against the
23047 GCC issue rate, but it changes the program flow so there are no other
23048 instructions to issue in this cycle. */
23050 static int
23051 rs6000_variable_issue_1 (rtx insn, int more)
23053 last_scheduled_insn = insn;
23054 if (GET_CODE (PATTERN (insn)) == USE
23055 || GET_CODE (PATTERN (insn)) == CLOBBER)
23057 cached_can_issue_more = more;
23058 return cached_can_issue_more;
23061 if (insn_terminates_group_p (insn, current_group))
23063 cached_can_issue_more = 0;
23064 return cached_can_issue_more;
23067 /* If the insn has no reservation but we reach here anyway, issue normally. */
23068 if (recog_memoized (insn) < 0)
23069 return more;
23071 if (rs6000_sched_groups)
23073 if (is_microcoded_insn (insn))
23074 cached_can_issue_more = 0;
23075 else if (is_cracked_insn (insn))
23076 cached_can_issue_more = more > 2 ? more - 2 : 0;
23077 else
23078 cached_can_issue_more = more - 1;
23080 return cached_can_issue_more;
23083 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
23084 return 0;
23086 cached_can_issue_more = more - 1;
23087 return cached_can_issue_more;
23090 static int
23091 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
23093 int r = rs6000_variable_issue_1 (insn, more);
23094 if (verbose)
23095 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
23096 return r;
23099 /* Adjust the cost of a scheduling dependency. Return the new cost of
23100 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
23102 static int
23103 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
23105 enum attr_type attr_type;
23107 if (! recog_memoized (insn))
23108 return 0;
23110 switch (REG_NOTE_KIND (link))
23112 case REG_DEP_TRUE:
23114 /* Data dependency; DEP_INSN writes a register that INSN reads
23115 some cycles later. */
23117 /* Separate a load from a narrower, dependent store. */
23118 if (rs6000_sched_groups
23119 && GET_CODE (PATTERN (insn)) == SET
23120 && GET_CODE (PATTERN (dep_insn)) == SET
23121 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
23122 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
23123 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
23124 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
23125 return cost + 14;
23127 attr_type = get_attr_type (insn);
23129 switch (attr_type)
23131 case TYPE_JMPREG:
23132 /* Tell the first scheduling pass about the latency between
23133 a mtctr and bctr (and mtlr and br/blr). The first
23134 scheduling pass will not know about this latency since
23135 the mtctr instruction, which has the latency associated
23136 to it, will be generated by reload. */
23137 return 4;
23138 case TYPE_BRANCH:
23139 /* Leave some extra cycles between a compare and its
23140 dependent branch, to inhibit expensive mispredicts. */
23141 if ((rs6000_cpu_attr == CPU_PPC603
23142 || rs6000_cpu_attr == CPU_PPC604
23143 || rs6000_cpu_attr == CPU_PPC604E
23144 || rs6000_cpu_attr == CPU_PPC620
23145 || rs6000_cpu_attr == CPU_PPC630
23146 || rs6000_cpu_attr == CPU_PPC750
23147 || rs6000_cpu_attr == CPU_PPC7400
23148 || rs6000_cpu_attr == CPU_PPC7450
23149 || rs6000_cpu_attr == CPU_PPCE5500
23150 || rs6000_cpu_attr == CPU_PPCE6500
23151 || rs6000_cpu_attr == CPU_POWER4
23152 || rs6000_cpu_attr == CPU_POWER5
23153 || rs6000_cpu_attr == CPU_POWER7
23154 || rs6000_cpu_attr == CPU_POWER8
23155 || rs6000_cpu_attr == CPU_CELL)
23156 && recog_memoized (dep_insn)
23157 && (INSN_CODE (dep_insn) >= 0))
23159 switch (get_attr_type (dep_insn))
23161 case TYPE_CMP:
23162 case TYPE_COMPARE:
23163 case TYPE_DELAYED_COMPARE:
23164 case TYPE_IMUL_COMPARE:
23165 case TYPE_LMUL_COMPARE:
23166 case TYPE_FPCOMPARE:
23167 case TYPE_CR_LOGICAL:
23168 case TYPE_DELAYED_CR:
23169 return cost + 2;
23170 default:
23171 break;
23173 break;
23175 case TYPE_STORE:
23176 case TYPE_STORE_U:
23177 case TYPE_STORE_UX:
23178 case TYPE_FPSTORE:
23179 case TYPE_FPSTORE_U:
23180 case TYPE_FPSTORE_UX:
23181 if ((rs6000_cpu == PROCESSOR_POWER6)
23182 && recog_memoized (dep_insn)
23183 && (INSN_CODE (dep_insn) >= 0))
23186 if (GET_CODE (PATTERN (insn)) != SET)
23187 /* If this happens, we have to extend this to schedule
23188 optimally. Return default for now. */
23189 return cost;
23191 /* Adjust the cost for the case where the value written
23192 by a fixed point operation is used as the address
23193 gen value on a store. */
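/* E.g. on Power6 an integer-typed insn (such as an addi) whose
   result feeds the address of a dependent store would get a
   latency of 3 from the TYPE_INTEGER case below, unless the
   store-data bypass applies.  */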
23194 switch (get_attr_type (dep_insn))
23196 case TYPE_LOAD:
23197 case TYPE_LOAD_U:
23198 case TYPE_LOAD_UX:
23199 case TYPE_CNTLZ:
23201 if (! store_data_bypass_p (dep_insn, insn))
23202 return 4;
23203 break;
23205 case TYPE_LOAD_EXT:
23206 case TYPE_LOAD_EXT_U:
23207 case TYPE_LOAD_EXT_UX:
23208 case TYPE_VAR_SHIFT_ROTATE:
23209 case TYPE_VAR_DELAYED_COMPARE:
23211 if (! store_data_bypass_p (dep_insn, insn))
23212 return 6;
23213 break;
23215 case TYPE_INTEGER:
23216 case TYPE_COMPARE:
23217 case TYPE_FAST_COMPARE:
23218 case TYPE_EXTS:
23219 case TYPE_SHIFT:
23220 case TYPE_INSERT_WORD:
23221 case TYPE_INSERT_DWORD:
23222 case TYPE_FPLOAD_U:
23223 case TYPE_FPLOAD_UX:
23224 case TYPE_STORE_U:
23225 case TYPE_STORE_UX:
23226 case TYPE_FPSTORE_U:
23227 case TYPE_FPSTORE_UX:
23229 if (! store_data_bypass_p (dep_insn, insn))
23230 return 3;
23231 break;
23233 case TYPE_IMUL:
23234 case TYPE_IMUL2:
23235 case TYPE_IMUL3:
23236 case TYPE_LMUL:
23237 case TYPE_IMUL_COMPARE:
23238 case TYPE_LMUL_COMPARE:
23240 if (! store_data_bypass_p (dep_insn, insn))
23241 return 17;
23242 break;
23244 case TYPE_IDIV:
23246 if (! store_data_bypass_p (dep_insn, insn))
23247 return 45;
23248 break;
23250 case TYPE_LDIV:
23252 if (! store_data_bypass_p (dep_insn, insn))
23253 return 57;
23254 break;
23256 default:
23257 break;
23260 break;
23262 case TYPE_LOAD:
23263 case TYPE_LOAD_U:
23264 case TYPE_LOAD_UX:
23265 case TYPE_LOAD_EXT:
23266 case TYPE_LOAD_EXT_U:
23267 case TYPE_LOAD_EXT_UX:
23268 if ((rs6000_cpu == PROCESSOR_POWER6)
23269 && recog_memoized (dep_insn)
23270 && (INSN_CODE (dep_insn) >= 0))
23273 /* Adjust the cost for the case where the value written
23274 by a fixed point instruction is used within the address
23275 gen portion of a subsequent load(u)(x) */
23276 switch (get_attr_type (dep_insn))
23278 case TYPE_LOAD:
23279 case TYPE_LOAD_U:
23280 case TYPE_LOAD_UX:
23281 case TYPE_CNTLZ:
23283 if (set_to_load_agen (dep_insn, insn))
23284 return 4;
23285 break;
23287 case TYPE_LOAD_EXT:
23288 case TYPE_LOAD_EXT_U:
23289 case TYPE_LOAD_EXT_UX:
23290 case TYPE_VAR_SHIFT_ROTATE:
23291 case TYPE_VAR_DELAYED_COMPARE:
23293 if (set_to_load_agen (dep_insn, insn))
23294 return 6;
23295 break;
23297 case TYPE_INTEGER:
23298 case TYPE_COMPARE:
23299 case TYPE_FAST_COMPARE:
23300 case TYPE_EXTS:
23301 case TYPE_SHIFT:
23302 case TYPE_INSERT_WORD:
23303 case TYPE_INSERT_DWORD:
23304 case TYPE_FPLOAD_U:
23305 case TYPE_FPLOAD_UX:
23306 case TYPE_STORE_U:
23307 case TYPE_STORE_UX:
23308 case TYPE_FPSTORE_U:
23309 case TYPE_FPSTORE_UX:
23311 if (set_to_load_agen (dep_insn, insn))
23312 return 3;
23313 break;
23315 case TYPE_IMUL:
23316 case TYPE_IMUL2:
23317 case TYPE_IMUL3:
23318 case TYPE_LMUL:
23319 case TYPE_IMUL_COMPARE:
23320 case TYPE_LMUL_COMPARE:
23322 if (set_to_load_agen (dep_insn, insn))
23323 return 17;
23324 break;
23326 case TYPE_IDIV:
23328 if (set_to_load_agen (dep_insn, insn))
23329 return 45;
23330 break;
23332 case TYPE_LDIV:
23334 if (set_to_load_agen (dep_insn, insn))
23335 return 57;
23336 break;
23338 default:
23339 break;
23342 break;
23344 case TYPE_FPLOAD:
23345 if ((rs6000_cpu == PROCESSOR_POWER6)
23346 && recog_memoized (dep_insn)
23347 && (INSN_CODE (dep_insn) >= 0)
23348 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
23349 return 2;
23351 default:
23352 break;
23355 /* Fall out to return default cost. */
23357 break;
23359 case REG_DEP_OUTPUT:
23360 /* Output dependency; DEP_INSN writes a register that INSN writes some
23361 cycles later. */
23362 if ((rs6000_cpu == PROCESSOR_POWER6)
23363 && recog_memoized (dep_insn)
23364 && (INSN_CODE (dep_insn) >= 0))
23366 attr_type = get_attr_type (insn);
23368 switch (attr_type)
23370 case TYPE_FP:
23371 if (get_attr_type (dep_insn) == TYPE_FP)
23372 return 1;
23373 break;
23374 case TYPE_FPLOAD:
23375 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
23376 return 2;
23377 break;
23378 default:
23379 break;
23382 case REG_DEP_ANTI:
23383 /* Anti dependency; DEP_INSN reads a register that INSN writes some
23384 cycles later. */
23385 return 0;
23387 default:
23388 gcc_unreachable ();
23391 return cost;
23394 /* Debug version of rs6000_adjust_cost. */
23396 static int
23397 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
23399 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
23401 if (ret != cost)
23403 const char *dep;
23405 switch (REG_NOTE_KIND (link))
23407 default: dep = "unknown dependency"; break;
23408 case REG_DEP_TRUE: dep = "data dependency"; break;
23409 case REG_DEP_OUTPUT: dep = "output dependency"; break;
23410 case REG_DEP_ANTI: dep = "anti dependency"; break;
23413 fprintf (stderr,
23414 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
23415 "%s, insn:\n", ret, cost, dep);
23417 debug_rtx (insn);
23420 return ret;
23423 /* Return true if INSN is microcoded.
23424 Return false otherwise. */
23426 static bool
23427 is_microcoded_insn (rtx insn)
23429 if (!insn || !NONDEBUG_INSN_P (insn)
23430 || GET_CODE (PATTERN (insn)) == USE
23431 || GET_CODE (PATTERN (insn)) == CLOBBER)
23432 return false;
23434 if (rs6000_cpu_attr == CPU_CELL)
23435 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
23437 if (rs6000_sched_groups)
23439 enum attr_type type = get_attr_type (insn);
23440 if (type == TYPE_LOAD_EXT_U
23441 || type == TYPE_LOAD_EXT_UX
23442 || type == TYPE_LOAD_UX
23443 || type == TYPE_STORE_UX
23444 || type == TYPE_MFCR)
23445 return true;
23448 return false;
23451 /* The function returns true if INSN is cracked into 2 instructions
23452 by the processor (and therefore occupies 2 issue slots). */
23454 static bool
23455 is_cracked_insn (rtx insn)
23457 if (!insn || !NONDEBUG_INSN_P (insn)
23458 || GET_CODE (PATTERN (insn)) == USE
23459 || GET_CODE (PATTERN (insn)) == CLOBBER)
23460 return false;
23462 if (rs6000_sched_groups)
23464 enum attr_type type = get_attr_type (insn);
23465 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
23466 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
23467 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
23468 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
23469 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
23470 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
23471 || type == TYPE_IDIV || type == TYPE_LDIV
23472 || type == TYPE_INSERT_WORD)
23473 return true;
23476 return false;
23479 /* The function returns true if INSN can be issued only from
23480 the branch slot. */
23482 static bool
23483 is_branch_slot_insn (rtx insn)
23485 if (!insn || !NONDEBUG_INSN_P (insn)
23486 || GET_CODE (PATTERN (insn)) == USE
23487 || GET_CODE (PATTERN (insn)) == CLOBBER)
23488 return false;
23490 if (rs6000_sched_groups)
23492 enum attr_type type = get_attr_type (insn);
23493 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
23494 return true;
23495 return false;
23498 return false;
23501 /* Return true if OUT_INSN sets a value that is used in the
23502 address generation computation of IN_INSN. */
23503 static bool
23504 set_to_load_agen (rtx out_insn, rtx in_insn)
23506 rtx out_set, in_set;
23508 /* For performance reasons, only handle the simple case where
23509 both insns are a single_set. */
23510 out_set = single_set (out_insn);
23511 if (out_set)
23513 in_set = single_set (in_insn);
23514 if (in_set)
23515 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
23518 return false;
23521 /* Try to determine base/offset/size parts of the given MEM.
23522 Return true if successful, false if not all of the values
23523 could be determined.
23525 This function only looks for REG or REG+CONST address forms.
23526 REG+REG address form will return false. */
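/* E.g. for (mem:SI (plus:SI (reg 9) (const_int 16))) this returns
   *base = (reg 9), *offset = 16 and, assuming the MEM's size is
   known, *size = 4.  */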
23528 static bool
23529 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
23530 HOST_WIDE_INT *size)
23532 rtx addr_rtx;
23533 if (MEM_SIZE_KNOWN_P (mem))
23534 *size = MEM_SIZE (mem);
23535 else
23536 return false;
23538 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
23539 addr_rtx = XEXP (XEXP (mem, 0), 1);
23540 else
23541 addr_rtx = (XEXP (mem, 0));
23543 if (GET_CODE (addr_rtx) == REG)
23545 *base = addr_rtx;
23546 *offset = 0;
23548 else if (GET_CODE (addr_rtx) == PLUS
23549 && CONST_INT_P (XEXP (addr_rtx, 1)))
23551 *base = XEXP (addr_rtx, 0);
23552 *offset = INTVAL (XEXP (addr_rtx, 1));
23554 else
23555 return false;
23557 return true;
23560 /* Return true if the target storage location of MEM1 is adjacent
23561 to the target storage location of MEM2. */
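/* E.g. two 4-byte accesses off the same base register at offsets 0
   and 4 are adjacent; at offsets 0 and 8 they are not.  */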
23564 static bool
23565 adjacent_mem_locations (rtx mem1, rtx mem2)
23567 rtx reg1, reg2;
23568 HOST_WIDE_INT off1, size1, off2, size2;
23570 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23571 && get_memref_parts (mem2, &reg2, &off2, &size2))
23572 return ((REGNO (reg1) == REGNO (reg2))
23573 && ((off1 + size1 == off2)
23574 || (off2 + size2 == off1)));
23576 return false;
23579 /* This function returns true if it can be determined that the two MEM
23580 locations overlap by at least 1 byte based on base reg/offset/size. */
23582 static bool
23583 mem_locations_overlap (rtx mem1, rtx mem2)
23585 rtx reg1, reg2;
23586 HOST_WIDE_INT off1, size1, off2, size2;
23588 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23589 && get_memref_parts (mem2, &reg2, &off2, &size2))
23590 return ((REGNO (reg1) == REGNO (reg2))
23591 && (((off1 <= off2) && (off1 + size1 > off2))
23592 || ((off2 <= off1) && (off2 + size2 > off1))));
23594 return false;
23597 /* A C statement (sans semicolon) to update the integer scheduling
23598 priority INSN_PRIORITY (INSN). Increase the priority to execute
23599 INSN earlier; reduce the priority to execute INSN later. Do not
23600 define this macro if you do not need to adjust the scheduling
23601 priorities of insns. */
23603 static int
23604 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
23606 rtx load_mem, str_mem;
23607 /* On machines (like the 750) which have asymmetric integer units,
23608 where one integer unit can do multiply and divides and the other
23609 can't, reduce the priority of multiply/divide so it is scheduled
23610 after other integer operations. */
23612 #if 0
23613 if (! INSN_P (insn))
23614 return priority;
23616 if (GET_CODE (PATTERN (insn)) == USE)
23617 return priority;
23619 switch (rs6000_cpu_attr) {
23620 case CPU_PPC750:
23621 switch (get_attr_type (insn))
23623 default:
23624 break;
23626 case TYPE_IMUL:
23627 case TYPE_IDIV:
23628 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
23629 priority, priority);
23630 if (priority >= 0 && priority < 0x01000000)
23631 priority >>= 3;
23632 break;
23635 #endif
23637 if (insn_must_be_first_in_group (insn)
23638 && reload_completed
23639 && current_sched_info->sched_max_insns_priority
23640 && rs6000_sched_restricted_insns_priority)
23643 /* Prioritize insns that can be dispatched only in the first
23644 dispatch slot. */
23645 if (rs6000_sched_restricted_insns_priority == 1)
23646 /* Attach highest priority to insn. This means that in
23647 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
23648 precede 'priority' (critical path) considerations. */
23649 return current_sched_info->sched_max_insns_priority;
23650 else if (rs6000_sched_restricted_insns_priority == 2)
23651 /* Increase priority of insn by a minimal amount. This means that in
23652 haifa-sched.c:ready_sort(), only 'priority' (critical path)
23653 considerations precede dispatch-slot restriction considerations. */
23654 return (priority + 1);
23657 if (rs6000_cpu == PROCESSOR_POWER6
23658 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
23659 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
23660 /* Attach highest priority to insn if the scheduler has just issued two
23661 stores and this instruction is a load, or two loads and this instruction
23662 is a store. Power6 wants loads and stores scheduled alternately
23663 when possible. */
23664 return current_sched_info->sched_max_insns_priority;
23666 return priority;
23669 /* Return true if the instruction is nonpipelined on the Cell. */
23670 static bool
23671 is_nonpipeline_insn (rtx insn)
23673 enum attr_type type;
23674 if (!insn || !NONDEBUG_INSN_P (insn)
23675 || GET_CODE (PATTERN (insn)) == USE
23676 || GET_CODE (PATTERN (insn)) == CLOBBER)
23677 return false;
23679 type = get_attr_type (insn);
23680 if (type == TYPE_IMUL
23681 || type == TYPE_IMUL2
23682 || type == TYPE_IMUL3
23683 || type == TYPE_LMUL
23684 || type == TYPE_IDIV
23685 || type == TYPE_LDIV
23686 || type == TYPE_SDIV
23687 || type == TYPE_DDIV
23688 || type == TYPE_SSQRT
23689 || type == TYPE_DSQRT
23690 || type == TYPE_MFCR
23691 || type == TYPE_MFCRF
23692 || type == TYPE_MFJMPR)
23694 return true;
23696 return false;
23700 /* Return how many instructions the machine can issue per cycle. */
23702 static int
23703 rs6000_issue_rate (void)
23705 /* Unless scheduling for register pressure, use issue rate of 1 for
23706 first scheduling pass to decrease degradation. */
23707 if (!reload_completed && !flag_sched_pressure)
23708 return 1;
23710 switch (rs6000_cpu_attr) {
23711 case CPU_RS64A:
23712 case CPU_PPC601: /* ? */
23713 case CPU_PPC7450:
23714 return 3;
23715 case CPU_PPC440:
23716 case CPU_PPC603:
23717 case CPU_PPC750:
23718 case CPU_PPC7400:
23719 case CPU_PPC8540:
23720 case CPU_PPC8548:
23721 case CPU_CELL:
23722 case CPU_PPCE300C2:
23723 case CPU_PPCE300C3:
23724 case CPU_PPCE500MC:
23725 case CPU_PPCE500MC64:
23726 case CPU_PPCE5500:
23727 case CPU_PPCE6500:
23728 case CPU_TITAN:
23729 return 2;
23730 case CPU_PPC476:
23731 case CPU_PPC604:
23732 case CPU_PPC604E:
23733 case CPU_PPC620:
23734 case CPU_PPC630:
23735 return 4;
23736 case CPU_POWER4:
23737 case CPU_POWER5:
23738 case CPU_POWER6:
23739 case CPU_POWER7:
23740 return 5;
23741 case CPU_POWER8:
23742 return 7;
23743 default:
23744 return 1;
23748 /* Return how many instructions to look ahead for better insn
23749 scheduling. */
23751 static int
23752 rs6000_use_sched_lookahead (void)
23754 switch (rs6000_cpu_attr)
23756 case CPU_PPC8540:
23757 case CPU_PPC8548:
23758 return 4;
23760 case CPU_CELL:
23761 return (reload_completed ? 8 : 0);
23763 default:
23764 return 0;
23768 /* We are choosing insn from the ready queue. Return nonzero if INSN can be chosen. */
23769 static int
23770 rs6000_use_sched_lookahead_guard (rtx insn)
23772 if (rs6000_cpu_attr != CPU_CELL)
23773 return 1;
23775 if (insn == NULL_RTX || !INSN_P (insn))
23776 abort ();
23778 if (!reload_completed
23779 || is_nonpipeline_insn (insn)
23780 || is_microcoded_insn (insn))
23781 return 0;
23783 return 1;
23786 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23787 and return true. */
23789 static bool
23790 find_mem_ref (rtx pat, rtx *mem_ref)
23792 const char * fmt;
23793 int i, j;
23795 /* stack_tie does not produce any real memory traffic. */
23796 if (tie_operand (pat, VOIDmode))
23797 return false;
23799 if (GET_CODE (pat) == MEM)
23801 *mem_ref = pat;
23802 return true;
23805 /* Recursively process the pattern. */
23806 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23808 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23810 if (fmt[i] == 'e')
23812 if (find_mem_ref (XEXP (pat, i), mem_ref))
23813 return true;
23815 else if (fmt[i] == 'E')
23816 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23818 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23819 return true;
23823 return false;
23826 /* Determine if PAT is a PATTERN of a load insn. */
23828 static bool
23829 is_load_insn1 (rtx pat, rtx *load_mem)
23831 if (!pat)
23832 return false;
23834 if (GET_CODE (pat) == SET)
23835 return find_mem_ref (SET_SRC (pat), load_mem);
23837 if (GET_CODE (pat) == PARALLEL)
23839 int i;
23841 for (i = 0; i < XVECLEN (pat, 0); i++)
23842 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23843 return true;
23846 return false;
23849 /* Determine if INSN loads from memory. */
23851 static bool
23852 is_load_insn (rtx insn, rtx *load_mem)
23854 if (!insn || !INSN_P (insn))
23855 return false;
23857 if (CALL_P (insn))
23858 return false;
23860 return is_load_insn1 (PATTERN (insn), load_mem);
23863 /* Determine if PAT is a PATTERN of a store insn. */
23865 static bool
23866 is_store_insn1 (rtx pat, rtx *str_mem)
23868 if (!pat)
23869 return false;
23871 if (GET_CODE (pat) == SET)
23872 return find_mem_ref (SET_DEST (pat), str_mem);
23874 if (GET_CODE (pat) == PARALLEL)
23876 int i;
23878 for (i = 0; i < XVECLEN (pat, 0); i++)
23879 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23880 return true;
23883 return false;
23886 /* Determine if INSN stores to memory. */
23888 static bool
23889 is_store_insn (rtx insn, rtx *str_mem)
23891 if (!insn || !INSN_P (insn))
23892 return false;
23894 return is_store_insn1 (PATTERN (insn), str_mem);
23897 /* Returns whether the dependence between INSN and NEXT is considered
23898 costly by the given target. */
23900 static bool
23901 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23903 rtx insn;
23904 rtx next;
23905 rtx load_mem, str_mem;
23907 /* If the flag is not enabled, no dependence is considered costly;
23908 allow all dependent insns in the same group.
23909 This is the most aggressive option. */
23910 if (rs6000_sched_costly_dep == no_dep_costly)
23911 return false;
23913 /* If the flag is set to 1, a dependence is always considered costly;
23914 do not allow dependent instructions in the same group.
23915 This is the most conservative option. */
23916 if (rs6000_sched_costly_dep == all_deps_costly)
23917 return true;
23919 insn = DEP_PRO (dep);
23920 next = DEP_CON (dep);
23922 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23923 && is_load_insn (next, &load_mem)
23924 && is_store_insn (insn, &str_mem))
23925 /* Prevent load after store in the same group. */
23926 return true;
23928 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23929 && is_load_insn (next, &load_mem)
23930 && is_store_insn (insn, &str_mem)
23931 && DEP_TYPE (dep) == REG_DEP_TRUE
23932 && mem_locations_overlap (str_mem, load_mem))
23933 /* Prevent load after store in the same group if it is a true
23934 dependence. */
23935 return true;
23937 /* The flag is set to X; dependences with latency >= X are considered costly,
23938 and will not be scheduled in the same group. */
23939 if (rs6000_sched_costly_dep <= max_dep_latency
23940 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23941 return true;
23943 return false;
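/* For example (hypothetical numbers): with -msched-costly-dep=2, a
   dependence with cost 3 at distance 0 gives (cost - distance) >= 2,
   so the two dependent insns are kept in separate dispatch groups.  */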
23946 /* Return the next insn after INSN that is found before TAIL is reached,
23947 skipping any "non-active" insns, i.e. insns that will not actually occupy
23948 an issue slot. Return NULL_RTX if such an insn is not found. */
23950 static rtx
23951 get_next_active_insn (rtx insn, rtx tail)
23953 if (insn == NULL_RTX || insn == tail)
23954 return NULL_RTX;
23956 while (1)
23958 insn = NEXT_INSN (insn);
23959 if (insn == NULL_RTX || insn == tail)
23960 return NULL_RTX;
23962 if (CALL_P (insn)
23963 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
23964 || (NONJUMP_INSN_P (insn)
23965 && GET_CODE (PATTERN (insn)) != USE
23966 && GET_CODE (PATTERN (insn)) != CLOBBER
23967 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23968 break;
23970 return insn;
23973 /* We are about to begin issuing insns for this clock cycle. */
23975 static int
23976 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23977 rtx *ready ATTRIBUTE_UNUSED,
23978 int *pn_ready ATTRIBUTE_UNUSED,
23979 int clock_var ATTRIBUTE_UNUSED)
23981 int n_ready = *pn_ready;
23983 if (sched_verbose)
23984 fprintf (dump, "// rs6000_sched_reorder :\n");
23986 /* Reorder the ready list, if the next insn to issue (the last entry
23987 in the ready array) is a nonpipelined insn. */
23988 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23990 if (is_nonpipeline_insn (ready[n_ready - 1])
23991 && (recog_memoized (ready[n_ready - 2]) > 0))
23992 /* Simply swap first two insns. */
23994 rtx tmp = ready[n_ready - 1];
23995 ready[n_ready - 1] = ready[n_ready - 2];
23996 ready[n_ready - 2] = tmp;
24000 if (rs6000_cpu == PROCESSOR_POWER6)
24001 load_store_pendulum = 0;
24003 return rs6000_issue_rate ();
24006 /* Like rs6000_sched_reorder, but called after issuing each insn. */
24008 static int
24009 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
24010 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
24012 if (sched_verbose)
24013 fprintf (dump, "// rs6000_sched_reorder2 :\n");
24015 /* For Power6, we need to handle some special cases to try and keep the
24016 store queue from overflowing and triggering expensive flushes.
24018 This code monitors how load and store instructions are being issued
24019 and skews the ready list one way or the other to increase the likelihood
24020 that a desired instruction is issued at the proper time.
24022 A couple of things are done. First, we maintain a "load_store_pendulum"
24023 to track the current state of load/store issue.
24025 - If the pendulum is at zero, then no loads or stores have been
24026 issued in the current cycle so we do nothing.
24028 - If the pendulum is 1, then a single load has been issued in this
24029 cycle and we attempt to locate another load in the ready list to
24030 issue with it.
24032 - If the pendulum is -2, then two stores have already been
24033 issued in this cycle, so we increase the priority of the first load
24034 in the ready list to increase its likelihood of being chosen first
24035 in the next cycle.
24037 - If the pendulum is -1, then a single store has been issued in this
24038 cycle and we attempt to locate another store in the ready list to
24039 issue with it, preferring a store to an adjacent memory location to
24040 facilitate store pairing in the store queue.
24042 - If the pendulum is 2, then two loads have already been
24043 issued in this cycle, so we increase the priority of the first store
24044 in the ready list to increase its likelihood of being chosen first
24045 in the next cycle.
24047 - If the pendulum < -2 or > 2, then do nothing.
24049 Note: This code covers the most common scenarios. There exist non-
24050 load/store instructions which make use of the LSU and which
24051 would need to be accounted for to strictly model the behavior
24052 of the machine. Those instructions are currently unaccounted
24053 for to help minimize compile time overhead of this code.
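/* A hypothetical trace of the pendulum: it starts at 0; issuing a
   load moves it to 1, and we try to pair a second load with it;
   issuing that second load moves it to 2, and the first store on the
   ready list gets a priority boost; a subsequent store then swings
   the pendulum back toward 0.  */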
24055 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
24057 int pos;
24058 int i;
24059 rtx tmp, load_mem, str_mem;
24061 if (is_store_insn (last_scheduled_insn, &str_mem))
24062 /* Issuing a store, swing the load_store_pendulum to the left */
24063 load_store_pendulum--;
24064 else if (is_load_insn (last_scheduled_insn, &load_mem))
24065 /* Issuing a load, swing the load_store_pendulum to the right */
24066 load_store_pendulum++;
24067 else
24068 return cached_can_issue_more;
24070 /* If the pendulum is balanced, or there is only one instruction on
24071 the ready list, then all is well, so return. */
24072 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
24073 return cached_can_issue_more;
24075 if (load_store_pendulum == 1)
24077 /* A load has been issued in this cycle. Scan the ready list
24078 for another load to issue with it */
24079 pos = *pn_ready - 1;
24081 while (pos >= 0)
24083 if (is_load_insn (ready[pos], &load_mem))
24085 /* Found a load. Move it to the head of the ready list,
24086 and adjust its priority so that it is more likely to
24087 stay there */
24088 tmp = ready[pos];
24089 for (i = pos; i < *pn_ready - 1; i++)
24090 ready[i] = ready[i + 1];
24091 ready[*pn_ready-1] = tmp;
24093 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
24094 INSN_PRIORITY (tmp)++;
24095 break;
24097 pos--;
24100 else if (load_store_pendulum == -2)
24102 /* Two stores have been issued in this cycle. Increase the
24103 priority of the first load in the ready list to favor it for
24104 issuing in the next cycle. */
24105 pos = *pn_ready - 1;
24107 while (pos >= 0)
24109 if (is_load_insn (ready[pos], &load_mem)
24110 && !sel_sched_p ()
24111 && INSN_PRIORITY_KNOWN (ready[pos]))
24113 INSN_PRIORITY (ready[pos])++;
24115 /* Adjust the pendulum to account for the fact that a load
24116 was found and increased in priority. This is to prevent
24117 increasing the priority of multiple loads */
24118 load_store_pendulum--;
24120 break;
24122 pos--;
24125 else if (load_store_pendulum == -1)
24127 /* A store has been issued in this cycle. Scan the ready list for
24128 another store to issue with it, preferring a store to an adjacent
24129 memory location */
24130 int first_store_pos = -1;
24132 pos = *pn_ready - 1;
24134 while (pos >= 0)
24136 if (is_store_insn (ready[pos], &str_mem))
24138 rtx str_mem2;
24139 /* Maintain the index of the first store found on the
24140 list */
24141 if (first_store_pos == -1)
24142 first_store_pos = pos;
24144 if (is_store_insn (last_scheduled_insn, &str_mem2)
24145 && adjacent_mem_locations (str_mem, str_mem2))
24147 /* Found an adjacent store. Move it to the head of the
24148 ready list, and adjust its priority so that it is
24149 more likely to stay there */
24150 tmp = ready[pos];
24151 for (i = pos; i < *pn_ready - 1; i++)
24152 ready[i] = ready[i + 1];
24153 ready[*pn_ready-1] = tmp;
24155 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
24156 INSN_PRIORITY (tmp)++;
24158 first_store_pos = -1;
24160 break;
24163 pos--;
24166 if (first_store_pos >= 0)
24168 /* An adjacent store wasn't found, but a non-adjacent store was,
24169 so move the non-adjacent store to the front of the ready
24170 list, and adjust its priority so that it is more likely to
24171 stay there. */
24172 tmp = ready[first_store_pos];
24173 for (i = first_store_pos; i < *pn_ready - 1; i++)
24174 ready[i] = ready[i + 1];
24175 ready[*pn_ready-1] = tmp;
24176 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
24177 INSN_PRIORITY (tmp)++;
24180 else if (load_store_pendulum == 2)
24182 /* Two loads have been issued in this cycle. Increase the priority
24183 of the first store in the ready list to favor it for issuing in
24184 the next cycle. */
24185 pos = *pn_ready - 1;
24187 while (pos >= 0)
24189 if (is_store_insn (ready[pos], &str_mem)
24190 && !sel_sched_p ()
24191 && INSN_PRIORITY_KNOWN (ready[pos]))
24193 INSN_PRIORITY (ready[pos])++;
24195 /* Adjust the pendulum to account for the fact that a store
24196 was found and increased in priority. This is to prevent
24197 increasing the priority of multiple stores */
24198 load_store_pendulum++;
24200 break;
24202 pos--;
24207 return cached_can_issue_more;
24210 /* Return whether the presence of INSN causes a dispatch group termination
24211 of group WHICH_GROUP.
24213 If WHICH_GROUP == current_group, this function will return true if INSN
24214 causes the termination of the current group (i.e., the dispatch group to
24215 which INSN belongs). This means that INSN will be the last insn in the
24216 group it belongs to.
24218 If WHICH_GROUP == previous_group, this function will return true if INSN
24219 causes the termination of the previous group (i.e., the dispatch group that
24220 precedes the group to which INSN belongs). This means that INSN will be
24221 the first insn in the group it belongs to. */
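/* For example, on Power4 a microcoded insn must be first in its
   dispatch group, so for such an insn this function returns true
   when WHICH_GROUP == previous_group: the insn terminates the group
   that precedes it.  */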
24223 static bool
24224 insn_terminates_group_p (rtx insn, enum group_termination which_group)
24226 bool first, last;
24228 if (! insn)
24229 return false;
24231 first = insn_must_be_first_in_group (insn);
24232 last = insn_must_be_last_in_group (insn);
24234 if (first && last)
24235 return true;
24237 if (which_group == current_group)
24238 return last;
24239 else if (which_group == previous_group)
24240 return first;
24242 return false;
24246 static bool
24247 insn_must_be_first_in_group (rtx insn)
24249 enum attr_type type;
24251 if (!insn
24252 || NOTE_P (insn)
24253 || DEBUG_INSN_P (insn)
24254 || GET_CODE (PATTERN (insn)) == USE
24255 || GET_CODE (PATTERN (insn)) == CLOBBER)
24256 return false;
24258 switch (rs6000_cpu)
24260 case PROCESSOR_POWER5:
24261 if (is_cracked_insn (insn))
24262 return true;
24263 case PROCESSOR_POWER4:
24264 if (is_microcoded_insn (insn))
24265 return true;
24267 if (!rs6000_sched_groups)
24268 return false;
24270 type = get_attr_type (insn);
24272 switch (type)
24274 case TYPE_MFCR:
24275 case TYPE_MFCRF:
24276 case TYPE_MTCR:
24277 case TYPE_DELAYED_CR:
24278 case TYPE_CR_LOGICAL:
24279 case TYPE_MTJMPR:
24280 case TYPE_MFJMPR:
24281 case TYPE_IDIV:
24282 case TYPE_LDIV:
24283 case TYPE_LOAD_L:
24284 case TYPE_STORE_C:
24285 case TYPE_ISYNC:
24286 case TYPE_SYNC:
24287 return true;
24288 default:
24289 break;
24291 break;
24292 case PROCESSOR_POWER6:
24293 type = get_attr_type (insn);
24295 switch (type)
24297 case TYPE_INSERT_DWORD:
24298 case TYPE_EXTS:
24299 case TYPE_CNTLZ:
24300 case TYPE_SHIFT:
24301 case TYPE_VAR_SHIFT_ROTATE:
24302 case TYPE_TRAP:
24303 case TYPE_IMUL:
24304 case TYPE_IMUL2:
24305 case TYPE_IMUL3:
24306 case TYPE_LMUL:
24307 case TYPE_IDIV:
24308 case TYPE_INSERT_WORD:
24309 case TYPE_DELAYED_COMPARE:
24310 case TYPE_IMUL_COMPARE:
24311 case TYPE_LMUL_COMPARE:
24312 case TYPE_FPCOMPARE:
24313 case TYPE_MFCR:
24314 case TYPE_MTCR:
24315 case TYPE_MFJMPR:
24316 case TYPE_MTJMPR:
24317 case TYPE_ISYNC:
24318 case TYPE_SYNC:
24319 case TYPE_LOAD_L:
24320 case TYPE_STORE_C:
24321 case TYPE_LOAD_U:
24322 case TYPE_LOAD_UX:
24323 case TYPE_LOAD_EXT_UX:
24324 case TYPE_STORE_U:
24325 case TYPE_STORE_UX:
24326 case TYPE_FPLOAD_U:
24327 case TYPE_FPLOAD_UX:
24328 case TYPE_FPSTORE_U:
24329 case TYPE_FPSTORE_UX:
24330 return true;
24331 default:
24332 break;
24334 break;
24335 case PROCESSOR_POWER7:
24336 case PROCESSOR_POWER8: /* FIXME */
24337 type = get_attr_type (insn);
24339 switch (type)
24341 case TYPE_CR_LOGICAL:
24342 case TYPE_MFCR:
24343 case TYPE_MFCRF:
24344 case TYPE_MTCR:
24345 case TYPE_IDIV:
24346 case TYPE_LDIV:
24347 case TYPE_COMPARE:
24348 case TYPE_DELAYED_COMPARE:
24349 case TYPE_VAR_DELAYED_COMPARE:
24350 case TYPE_ISYNC:
24351 case TYPE_LOAD_L:
24352 case TYPE_STORE_C:
24353 case TYPE_LOAD_U:
24354 case TYPE_LOAD_UX:
24355 case TYPE_LOAD_EXT:
24356 case TYPE_LOAD_EXT_U:
24357 case TYPE_LOAD_EXT_UX:
24358 case TYPE_STORE_U:
24359 case TYPE_STORE_UX:
24360 case TYPE_FPLOAD_U:
24361 case TYPE_FPLOAD_UX:
24362 case TYPE_FPSTORE_U:
24363 case TYPE_FPSTORE_UX:
24364 case TYPE_MFJMPR:
24365 case TYPE_MTJMPR:
24366 return true;
24367 default:
24368 break;
24370 break;
24371 default:
24372 break;
24375 return false;
24378 static bool
24379 insn_must_be_last_in_group (rtx insn)
24381 enum attr_type type;
24383 if (!insn
24384 || NOTE_P (insn)
24385 || DEBUG_INSN_P (insn)
24386 || GET_CODE (PATTERN (insn)) == USE
24387 || GET_CODE (PATTERN (insn)) == CLOBBER)
24388 return false;
24390 switch (rs6000_cpu) {
24391 case PROCESSOR_POWER4:
24392 case PROCESSOR_POWER5:
24393 if (is_microcoded_insn (insn))
24394 return true;
24396 if (is_branch_slot_insn (insn))
24397 return true;
24399 break;
24400 case PROCESSOR_POWER6:
24401 type = get_attr_type (insn);
24403 switch (type)
24405 case TYPE_EXTS:
24406 case TYPE_CNTLZ:
24407 case TYPE_SHIFT:
24408 case TYPE_VAR_SHIFT_ROTATE:
24409 case TYPE_TRAP:
24410 case TYPE_IMUL:
24411 case TYPE_IMUL2:
24412 case TYPE_IMUL3:
24413 case TYPE_LMUL:
24414 case TYPE_IDIV:
24415 case TYPE_DELAYED_COMPARE:
24416 case TYPE_IMUL_COMPARE:
24417 case TYPE_LMUL_COMPARE:
24418 case TYPE_FPCOMPARE:
24419 case TYPE_MFCR:
24420 case TYPE_MTCR:
24421 case TYPE_MFJMPR:
24422 case TYPE_MTJMPR:
24423 case TYPE_ISYNC:
24424 case TYPE_SYNC:
24425 case TYPE_LOAD_L:
24426 case TYPE_STORE_C:
24427 return true;
24428 default:
24429 break;
24431 break;
24432 case PROCESSOR_POWER7:
24433 case PROCESSOR_POWER8: /* FIXME */
24434 type = get_attr_type (insn);
24436 switch (type)
24438 case TYPE_ISYNC:
24439 case TYPE_SYNC:
24440 case TYPE_LOAD_L:
24441 case TYPE_STORE_C:
24442 case TYPE_LOAD_EXT_U:
24443 case TYPE_LOAD_EXT_UX:
24444 case TYPE_STORE_UX:
24445 return true;
24446 default:
24447 break;
24449 break;
24450 default:
24451 break;
24454 return false;
24457 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
24458 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
24460 static bool
24461 is_costly_group (rtx *group_insns, rtx next_insn)
24463 int i;
24464 int issue_rate = rs6000_issue_rate ();
24466 for (i = 0; i < issue_rate; i++)
24468 sd_iterator_def sd_it;
24469 dep_t dep;
24470 rtx insn = group_insns[i];
24472 if (!insn)
24473 continue;
24475 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
24477 rtx next = DEP_CON (dep);
24479 if (next == next_insn
24480 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
24481 return true;
24485 return false;
24488 /* Utility of the function redefine_groups.
24489 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
24490 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
24491 to keep it "far" (in a separate group) from GROUP_INSNS, following
24492 one of the following schemes, depending on the value of the flag
24493 -minsert_sched_nops = X:
24494 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
24495 in order to force NEXT_INSN into a separate group.
24496 (2) X < sched_finish_regroup_exact: insert exactly X nops.
24497 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
24498 insertion (has a group just ended, how many vacant issue slots remain in the
24499 last group, and how many dispatch groups were encountered so far). */
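/* A sketch of scheme (2) with assumed values: given
   -minsert-sched-nops=2, exactly two nops are emitted before
   NEXT_INSN whether or not they actually push it into a new group;
   GROUP_COUNT is bumped each time the nops fill up a group.  */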
24501 static int
24502 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
24503 rtx next_insn, bool *group_end, int can_issue_more,
24504 int *group_count)
24506 rtx nop;
24507 bool force;
24508 int issue_rate = rs6000_issue_rate ();
24509 bool end = *group_end;
24510 int i;
24512 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
24513 return can_issue_more;
24515 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
24516 return can_issue_more;
24518 force = is_costly_group (group_insns, next_insn);
24519 if (!force)
24520 return can_issue_more;
24522 if (sched_verbose > 6)
24523 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
24524 *group_count ,can_issue_more);
24526 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
24528 if (*group_end)
24529 can_issue_more = 0;
24531 /* Since only a branch can be issued in the last issue_slot, it is
24532 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
24533 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
24534 in this case the last nop will start a new group and the branch
24535 will be forced to the new group. */
24536 if (can_issue_more && !is_branch_slot_insn (next_insn))
24537 can_issue_more--;
24539 /* Power6, Power7 and Power8 have a special group-ending nop. */
24540 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
24541 || rs6000_cpu_attr == CPU_POWER8)
24543 nop = gen_group_ending_nop ();
24544 emit_insn_before (nop, next_insn);
24545 can_issue_more = 0;
24547 else
24548 while (can_issue_more > 0)
24550 nop = gen_nop ();
24551 emit_insn_before (nop, next_insn);
24552 can_issue_more--;
24555 *group_end = true;
24556 return 0;
24559 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
24561 int n_nops = rs6000_sched_insert_nops;
24563 /* Nops can't be issued from the branch slot, so the effective
24564 issue_rate for nops is 'issue_rate - 1'. */
24565 if (can_issue_more == 0)
24566 can_issue_more = issue_rate;
24567 can_issue_more--;
24568 if (can_issue_more == 0)
24570 can_issue_more = issue_rate - 1;
24571 (*group_count)++;
24572 end = true;
24573 for (i = 0; i < issue_rate; i++)
24575 group_insns[i] = 0;
24579 while (n_nops > 0)
24581 nop = gen_nop ();
24582 emit_insn_before (nop, next_insn);
24583 if (can_issue_more == issue_rate - 1) /* new group begins */
24584 end = false;
24585 can_issue_more--;
24586 if (can_issue_more == 0)
24588 can_issue_more = issue_rate - 1;
24589 (*group_count)++;
24590 end = true;
24591 for (i = 0; i < issue_rate; i++)
24593 group_insns[i] = 0;
24596 n_nops--;
24599 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
24600 can_issue_more++;
24602 /* Is next_insn going to start a new group? */
24603 *group_end
24604 = (end
24605 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24606 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24607 || (can_issue_more < issue_rate &&
24608 insn_terminates_group_p (next_insn, previous_group)));
24609 if (*group_end && end)
24610 (*group_count)--;
24612 if (sched_verbose > 6)
24613 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
24614 *group_count, can_issue_more);
24615 return can_issue_more;
24618 return can_issue_more;
24621 /* This function tries to synchronize the dispatch groups that the compiler "sees"
24622 with the dispatch groups that the processor dispatcher is expected to
24623 form in practice. It tries to achieve this synchronization by forcing the
24624 estimated processor grouping on the compiler (as opposed to the function
24625 'pad_groups' which tries to force the scheduler's grouping on the processor).
24627 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
24628 examines the (estimated) dispatch groups that will be formed by the processor
24629 dispatcher. It marks these group boundaries to reflect the estimated
24630 processor grouping, overriding the grouping that the scheduler had marked.
24631 Depending on the value of the flag '-minsert-sched-nops' this function can
24632 force certain insns into separate groups or force a certain distance between
24633 them by inserting nops, for example, if there exists a "costly dependence"
24634 between the insns.
24636 The function estimates the group boundaries that the processor will form as
24637 follows: It keeps track of how many vacant issue slots are available after
24638 each insn. A subsequent insn will start a new group if one of the following
24639 4 cases applies:
24640 - no more vacant issue slots remain in the current dispatch group.
24641 - only the last issue slot, which is the branch slot, is vacant, but the next
24642 insn is not a branch.
24643 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
24644 which means that a cracked insn (which occupies two issue slots) can't be
24645 issued in this group.
24646 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
24647 start a new group. */
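/* A worked example, assuming a 4-wide dispatch group: after three
   non-branch insns only the branch slot remains vacant, so if the
   next insn is not a branch it starts a new group (the second case
   above).  */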
24649 static int
24650 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24652 rtx insn, next_insn;
24653 int issue_rate;
24654 int can_issue_more;
24655 int slot, i;
24656 bool group_end;
24657 int group_count = 0;
24658 rtx *group_insns;
24660 /* Initialize. */
24661 issue_rate = rs6000_issue_rate ();
24662 group_insns = XALLOCAVEC (rtx, issue_rate);
24663 for (i = 0; i < issue_rate; i++)
24665 group_insns[i] = 0;
24667 can_issue_more = issue_rate;
24668 slot = 0;
24669 insn = get_next_active_insn (prev_head_insn, tail);
24670 group_end = false;
24672 while (insn != NULL_RTX)
24674 slot = (issue_rate - can_issue_more);
24675 group_insns[slot] = insn;
24676 can_issue_more =
24677 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24678 if (insn_terminates_group_p (insn, current_group))
24679 can_issue_more = 0;
24681 next_insn = get_next_active_insn (insn, tail);
24682 if (next_insn == NULL_RTX)
24683 return group_count + 1;
24685 /* Is next_insn going to start a new group? */
24686 group_end
24687 = (can_issue_more == 0
24688 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24689 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24690 || (can_issue_more < issue_rate &&
24691 insn_terminates_group_p (next_insn, previous_group)));
24693 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
24694 next_insn, &group_end, can_issue_more,
24695 &group_count);
24697 if (group_end)
24699 group_count++;
24700 can_issue_more = 0;
24701 for (i = 0; i < issue_rate; i++)
24703 group_insns[i] = 0;
24707 if (GET_MODE (next_insn) == TImode && can_issue_more)
24708 PUT_MODE (next_insn, VOIDmode);
24709 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
24710 PUT_MODE (next_insn, TImode);
24712 insn = next_insn;
24713 if (can_issue_more == 0)
24714 can_issue_more = issue_rate;
24715 } /* while */
24717 return group_count;
24720 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
24721 dispatch group boundaries that the scheduler had marked. Pad with nops
24722 any dispatch groups which have vacant issue slots, in order to force the
24723 scheduler's grouping on the processor dispatcher. The function
24724 returns the number of dispatch groups found. */
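/* For instance (hypothetical), with an issue rate of 4 and a
   scheduler-marked boundary after a group of two insns, two slots
   are vacant; one is reserved for a branch when the next insn is
   not a branch, so a single nop is emitted to terminate the group.  */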
24726 static int
24727 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24729 rtx insn, next_insn;
24730 rtx nop;
24731 int issue_rate;
24732 int can_issue_more;
24733 int group_end;
24734 int group_count = 0;
24736 /* Initialize issue_rate. */
24737 issue_rate = rs6000_issue_rate ();
24738 can_issue_more = issue_rate;
24740 insn = get_next_active_insn (prev_head_insn, tail);
24741 next_insn = get_next_active_insn (insn, tail);
24743 while (insn != NULL_RTX)
24745 can_issue_more =
24746 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24748 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24750 if (next_insn == NULL_RTX)
24751 break;
24753 if (group_end)
24755 /* If the scheduler had marked group termination at this location
24756 (between insn and next_insn), and neither insn nor next_insn will
24757 force group termination, pad the group with nops to force group
24758 termination. */
24759 if (can_issue_more
24760 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24761 && !insn_terminates_group_p (insn, current_group)
24762 && !insn_terminates_group_p (next_insn, previous_group))
24764 if (!is_branch_slot_insn (next_insn))
24765 can_issue_more--;
24767 while (can_issue_more)
24769 nop = gen_nop ();
24770 emit_insn_before (nop, next_insn);
24771 can_issue_more--;
24775 can_issue_more = issue_rate;
24776 group_count++;
24779 insn = next_insn;
24780 next_insn = get_next_active_insn (insn, tail);
24783 return group_count;
24786 /* We're beginning a new block. Initialize data structures as necessary. */
24788 static void
24789 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24790 int sched_verbose ATTRIBUTE_UNUSED,
24791 int max_ready ATTRIBUTE_UNUSED)
24793 last_scheduled_insn = NULL_RTX;
24794 load_store_pendulum = 0;
24797 /* The following function is called at the end of scheduling BB.
24798 After reload, it inserts nops to enforce insn group bundling. */
24800 static void
24801 rs6000_sched_finish (FILE *dump, int sched_verbose)
24803 int n_groups;
24805 if (sched_verbose)
24806 fprintf (dump, "=== Finishing schedule.\n");
24808 if (reload_completed && rs6000_sched_groups)
24810 /* Do not run the sched_finish hook when selective scheduling is enabled. */
24811 if (sel_sched_p ())
24812 return;
24814 if (rs6000_sched_insert_nops == sched_finish_none)
24815 return;
24817 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24818 n_groups = pad_groups (dump, sched_verbose,
24819 current_sched_info->prev_head,
24820 current_sched_info->next_tail);
24821 else
24822 n_groups = redefine_groups (dump, sched_verbose,
24823 current_sched_info->prev_head,
24824 current_sched_info->next_tail);
24826 if (sched_verbose >= 6)
24828 fprintf (dump, "ngroups = %d\n", n_groups);
24829 print_rtl (dump, current_sched_info->prev_head);
24830 fprintf (dump, "Done finish_sched\n");
24835 struct _rs6000_sched_context
24837 short cached_can_issue_more;
24838 rtx last_scheduled_insn;
24839 int load_store_pendulum;
24842 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24843 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24845 /* Allocate storage for a new scheduling context. */
24846 static void *
24847 rs6000_alloc_sched_context (void)
24849 return xmalloc (sizeof (rs6000_sched_context_def));
24852 /* If CLEAN_P is true, initialize _SC with clean data;
24853 otherwise initialize it from the global context. */
24854 static void
24855 rs6000_init_sched_context (void *_sc, bool clean_p)
24857 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24859 if (clean_p)
24861 sc->cached_can_issue_more = 0;
24862 sc->last_scheduled_insn = NULL_RTX;
24863 sc->load_store_pendulum = 0;
24865 else
24867 sc->cached_can_issue_more = cached_can_issue_more;
24868 sc->last_scheduled_insn = last_scheduled_insn;
24869 sc->load_store_pendulum = load_store_pendulum;
24873 /* Sets the global scheduling context to the one pointed to by _SC. */
24874 static void
24875 rs6000_set_sched_context (void *_sc)
24877 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24879 gcc_assert (sc != NULL);
24881 cached_can_issue_more = sc->cached_can_issue_more;
24882 last_scheduled_insn = sc->last_scheduled_insn;
24883 load_store_pendulum = sc->load_store_pendulum;
24886 /* Free _SC. */
24887 static void
24888 rs6000_free_sched_context (void *_sc)
24890 gcc_assert (_sc != NULL);
24892 free (_sc);
24896 /* Length in units (bytes) of the trampoline for entering a nested function. */
24899 rs6000_trampoline_size (void)
24901 int ret = 0;
24903 switch (DEFAULT_ABI)
24905 default:
24906 gcc_unreachable ();
24908 case ABI_AIX:
24909 ret = (TARGET_32BIT) ? 12 : 24;
24910 break;
24912 case ABI_DARWIN:
24913 case ABI_V4:
24914 ret = (TARGET_32BIT) ? 40 : 48;
24915 break;
24918 return ret;
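/* E.g. 32-bit AIX uses a 12-byte (3-word) function-descriptor
   trampoline, while 32-bit V.4/Darwin reserves 40 bytes for the
   __trampoline_setup scheme used below.  */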
24921 /* Emit RTL insns to initialize the variable parts of a trampoline.
24922 FNADDR is an RTX for the address of the function's pure code.
24923 CXT is an RTX for the static chain value for the function. */
24925 static void
24926 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24928 int regsize = (TARGET_32BIT) ? 4 : 8;
24929 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24930 rtx ctx_reg = force_reg (Pmode, cxt);
24931 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24933 switch (DEFAULT_ABI)
24935 default:
24936 gcc_unreachable ();
24938 /* Under AIX, just build the 3-word function descriptor. */
24939 case ABI_AIX:
24941 rtx fnmem, fn_reg, toc_reg;
24943 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24944 error ("You cannot take the address of a nested function if you use "
24945 "the -mno-pointers-to-nested-functions option.");
24947 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24948 fn_reg = gen_reg_rtx (Pmode);
24949 toc_reg = gen_reg_rtx (Pmode);
24951 /* Macro to shorten the code expansions below. */
24952 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24954 m_tramp = replace_equiv_address (m_tramp, addr);
24956 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24957 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24958 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24959 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24960 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24962 # undef MEM_PLUS
24964 break;
24966 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24967 case ABI_DARWIN:
24968 case ABI_V4:
24969 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24970 LCT_NORMAL, VOIDmode, 4,
24971 addr, Pmode,
24972 GEN_INT (rs6000_trampoline_size ()), SImode,
24973 fnaddr, Pmode,
24974 ctx_reg, Pmode);
24975 break;
24980 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24981 identifier as an argument, so the front end shouldn't look it up. */
24983 static bool
24984 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24986 return is_attribute_p ("altivec", attr_id);
24989 /* Handle the "altivec" attribute. The attribute may have
24990 arguments as follows:
24992 __attribute__((altivec(vector__)))
24993 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24994 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24996 and may appear more than once (e.g., 'vector bool char') in a
24997 given declaration. */
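/* For instance, the AltiVec declaration "vector unsigned int"
   arrives here as __attribute__((altivec(vector__))) applied to
   unsigned int, and is mapped to unsigned_V4SI_type_node below.  */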
24999 static tree
25000 rs6000_handle_altivec_attribute (tree *node,
25001 tree name ATTRIBUTE_UNUSED,
25002 tree args,
25003 int flags ATTRIBUTE_UNUSED,
25004 bool *no_add_attrs)
25006 tree type = *node, result = NULL_TREE;
25007 enum machine_mode mode;
25008 int unsigned_p;
25009 char altivec_type
25010 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
25011 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
25012 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
25013 : '?');
25015 while (POINTER_TYPE_P (type)
25016 || TREE_CODE (type) == FUNCTION_TYPE
25017 || TREE_CODE (type) == METHOD_TYPE
25018 || TREE_CODE (type) == ARRAY_TYPE)
25019 type = TREE_TYPE (type);
25021 mode = TYPE_MODE (type);
25023 /* Check for invalid AltiVec type qualifiers. */
25024 if (type == long_double_type_node)
25025 error ("use of %<long double%> in AltiVec types is invalid");
25026 else if (type == boolean_type_node)
25027 error ("use of boolean types in AltiVec types is invalid");
25028 else if (TREE_CODE (type) == COMPLEX_TYPE)
25029 error ("use of %<complex%> in AltiVec types is invalid");
25030 else if (DECIMAL_FLOAT_MODE_P (mode))
25031 error ("use of decimal floating point types in AltiVec types is invalid");
25032 else if (!TARGET_VSX)
25034 if (type == long_unsigned_type_node || type == long_integer_type_node)
25036 if (TARGET_64BIT)
25037 error ("use of %<long%> in AltiVec types is invalid for "
25038 "64-bit code without -mvsx");
25039 else if (rs6000_warn_altivec_long)
25040 warning (0, "use of %<long%> in AltiVec types is deprecated; "
25041 "use %<int%>");
25043 else if (type == long_long_unsigned_type_node
25044 || type == long_long_integer_type_node)
25045 error ("use of %<long long%> in AltiVec types is invalid without "
25046 "-mvsx");
25047 else if (type == double_type_node)
25048 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
25051 switch (altivec_type)
25053 case 'v':
25054 unsigned_p = TYPE_UNSIGNED (type);
25055 switch (mode)
25057 case DImode:
25058 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
25059 break;
25060 case SImode:
25061 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
25062 break;
25063 case HImode:
25064 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
25065 break;
25066 case QImode:
25067 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
25068 break;
25069 case SFmode: result = V4SF_type_node; break;
25070 case DFmode: result = V2DF_type_node; break;
25071 /* If the user says 'vector int bool', we may be handed the 'bool'
25072 attribute _before_ the 'vector' attribute, and so select the
25073 proper type in the 'b' case below. */
25074 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
25075 case V2DImode: case V2DFmode:
25076 result = type;
25077 default: break;
25079 break;
25080 case 'b':
25081 switch (mode)
25083 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
25084 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
25085 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
25086 case QImode: case V16QImode: result = bool_V16QI_type_node;
25087 default: break;
25089 break;
25090 case 'p':
25091 switch (mode)
25093 case V8HImode: result = pixel_V8HI_type_node;
25094 default: break;
25096 default: break;
25099 /* Propagate qualifiers attached to the element type
25100 onto the vector type. */
25101 if (result && result != type && TYPE_QUALS (type))
25102 result = build_qualified_type (result, TYPE_QUALS (type));
25104 *no_add_attrs = true; /* No need to hang on to the attribute. */
25106 if (result)
25107 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
25109 return NULL_TREE;
25112 /* AltiVec defines four built-in scalar types that serve as vector
25113 elements; we must teach the compiler how to mangle them. */
25115 static const char *
25116 rs6000_mangle_type (const_tree type)
25118 type = TYPE_MAIN_VARIANT (type);
25120 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
25121 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
25122 return NULL;
25124 if (type == bool_char_type_node) return "U6__boolc";
25125 if (type == bool_short_type_node) return "U6__bools";
25126 if (type == pixel_type_node) return "u7__pixel";
25127 if (type == bool_int_type_node) return "U6__booli";
25128 if (type == bool_long_type_node) return "U6__booll";
25130 /* Mangle IBM extended float long double as `g' (__float128) on
25131 powerpc*-linux where long-double-64 previously was the default. */
25132 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
25133 && TARGET_ELF
25134 && TARGET_LONG_DOUBLE_128
25135 && !TARGET_IEEEQUAD)
25136 return "g";
25138 /* For all other types, use normal C++ mangling. */
25139 return NULL;
25142 /* Handle a "longcall" or "shortcall" attribute; arguments as in
25143 struct attribute_spec.handler. */
25145 static tree
25146 rs6000_handle_longcall_attribute (tree *node, tree name,
25147 tree args ATTRIBUTE_UNUSED,
25148 int flags ATTRIBUTE_UNUSED,
25149 bool *no_add_attrs)
25151 if (TREE_CODE (*node) != FUNCTION_TYPE
25152 && TREE_CODE (*node) != FIELD_DECL
25153 && TREE_CODE (*node) != TYPE_DECL)
25155 warning (OPT_Wattributes, "%qE attribute only applies to functions",
25156 name);
25157 *no_add_attrs = true;
25160 return NULL_TREE;
25163 /* Set longcall attributes on all functions declared when
25164 rs6000_default_long_calls is true. */
25165 static void
25166 rs6000_set_default_type_attributes (tree type)
25168 if (rs6000_default_long_calls
25169 && (TREE_CODE (type) == FUNCTION_TYPE
25170 || TREE_CODE (type) == METHOD_TYPE))
25171 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
25172 NULL_TREE,
25173 TYPE_ATTRIBUTES (type));
25175 #if TARGET_MACHO
25176 darwin_set_default_type_attributes (type);
25177 #endif
25180 /* Return a reference suitable for calling a function with the
25181 longcall attribute. */
25184 rs6000_longcall_ref (rtx call_ref)
25186 const char *call_name;
25187 tree node;
25189 if (GET_CODE (call_ref) != SYMBOL_REF)
25190 return call_ref;
25192 /* System V adds '.' to the internal name, so skip them. */
25193 call_name = XSTR (call_ref, 0);
25194 if (*call_name == '.')
25196 while (*call_name == '.')
25197 call_name++;
25199 node = get_identifier (call_name);
25200 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
25203 return force_reg (Pmode, call_ref);
25206 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
25207 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
25208 #endif
25210 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
25211 struct attribute_spec.handler. */
25212 static tree
25213 rs6000_handle_struct_attribute (tree *node, tree name,
25214 tree args ATTRIBUTE_UNUSED,
25215 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25217 tree *type = NULL;
25218 if (DECL_P (*node))
25220 if (TREE_CODE (*node) == TYPE_DECL)
25221 type = &TREE_TYPE (*node);
25223 else
25224 type = node;
25226 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
25227 || TREE_CODE (*type) == UNION_TYPE)))
25229 warning (OPT_Wattributes, "%qE attribute ignored", name);
25230 *no_add_attrs = true;
25233 else if ((is_attribute_p ("ms_struct", name)
25234 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
25235 || ((is_attribute_p ("gcc_struct", name)
25236 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
25238 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
25239 name);
25240 *no_add_attrs = true;
25243 return NULL_TREE;
25246 static bool
25247 rs6000_ms_bitfield_layout_p (const_tree record_type)
25249 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
25250 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
25251 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
25254 #ifdef USING_ELFOS_H
25256 /* A get_unnamed_section callback, used for switching to toc_section. */
25258 static void
25259 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25261 if (DEFAULT_ABI == ABI_AIX
25262 && TARGET_MINIMAL_TOC
25263 && !TARGET_RELOCATABLE)
25265 if (!toc_initialized)
25267 toc_initialized = 1;
25268 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
25269 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
25270 fprintf (asm_out_file, "\t.tc ");
25271 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
25272 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
25273 fprintf (asm_out_file, "\n");
25275 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25276 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
25277 fprintf (asm_out_file, " = .+32768\n");
25279 else
25280 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25282 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
25283 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
25284 else
25286 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25287 if (!toc_initialized)
25289 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
25290 fprintf (asm_out_file, " = .+32768\n");
25291 toc_initialized = 1;
25296 /* Implement TARGET_ASM_INIT_SECTIONS. */
25298 static void
25299 rs6000_elf_asm_init_sections (void)
25301 toc_section
25302 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
25304 sdata2_section
25305 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
25306 SDATA2_SECTION_ASM_OP);
25309 /* Implement TARGET_SELECT_RTX_SECTION. */
25311 static section *
25312 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
25313 unsigned HOST_WIDE_INT align)
25315 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25316 return toc_section;
25317 else
25318 return default_elf_select_rtx_section (mode, x, align);
25321 /* For a SYMBOL_REF, set generic flags and then perform some
25322 target-specific processing.
25324 When the AIX ABI is requested on a non-AIX system, replace the
25325 function name with the real name (with a leading .) rather than the
25326 function descriptor name. This saves a lot of overriding code to
25327 read the prefixes. */
25329 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
25330 static void
25331 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
25333 default_encode_section_info (decl, rtl, first);
25335 if (first
25336 && TREE_CODE (decl) == FUNCTION_DECL
25337 && !TARGET_AIX
25338 && DEFAULT_ABI == ABI_AIX)
25340 rtx sym_ref = XEXP (rtl, 0);
25341 size_t len = strlen (XSTR (sym_ref, 0));
25342 char *str = XALLOCAVEC (char, len + 2);
25343 str[0] = '.';
25344 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
25345 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
25349 static inline bool
25350 compare_section_name (const char *section, const char *templ)
25352 int len;
25354 len = strlen (templ);
25355 return (strncmp (section, templ, len) == 0
25356 && (section[len] == 0 || section[len] == '.'));
25359 bool
25360 rs6000_elf_in_small_data_p (const_tree decl)
25362 if (rs6000_sdata == SDATA_NONE)
25363 return false;
25365 /* We want to merge strings, so we never consider them small data. */
25366 if (TREE_CODE (decl) == STRING_CST)
25367 return false;
25369 /* Functions are never in the small data area. */
25370 if (TREE_CODE (decl) == FUNCTION_DECL)
25371 return false;
25373 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
25375 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
25376 if (compare_section_name (section, ".sdata")
25377 || compare_section_name (section, ".sdata2")
25378 || compare_section_name (section, ".gnu.linkonce.s")
25379 || compare_section_name (section, ".sbss")
25380 || compare_section_name (section, ".sbss2")
25381 || compare_section_name (section, ".gnu.linkonce.sb")
25382 || strcmp (section, ".PPC.EMB.sdata0") == 0
25383 || strcmp (section, ".PPC.EMB.sbss0") == 0)
25384 return true;
25386 else
25388 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
25390 if (size > 0
25391 && size <= g_switch_value
25392 /* If it's not public, and we're not going to reference it there,
25393 there's no need to put it in the small data section. */
25394 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
25395 return true;
25398 return false;
25401 #endif /* USING_ELFOS_H */
25403 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
25405 static bool
25406 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
25408 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
25411 /* Do not place thread-local symbol refs in the object blocks. */
25413 static bool
25414 rs6000_use_blocks_for_decl_p (const_tree decl)
25416 return !DECL_THREAD_LOCAL_P (decl);
25419 /* Return a REG that occurs in ADDR with coefficient 1.
25420 ADDR can be effectively incremented by incrementing REG.
25422 r0 is special and we must not select it as an address
25423 register by this routine since our caller will try to
25424 increment the returned register via an "la" instruction. */
25427 find_addr_reg (rtx addr)
25429 while (GET_CODE (addr) == PLUS)
25431 if (GET_CODE (XEXP (addr, 0)) == REG
25432 && REGNO (XEXP (addr, 0)) != 0)
25433 addr = XEXP (addr, 0);
25434 else if (GET_CODE (XEXP (addr, 1)) == REG
25435 && REGNO (XEXP (addr, 1)) != 0)
25436 addr = XEXP (addr, 1);
25437 else if (CONSTANT_P (XEXP (addr, 0)))
25438 addr = XEXP (addr, 1);
25439 else if (CONSTANT_P (XEXP (addr, 1)))
25440 addr = XEXP (addr, 0);
25441 else
25442 gcc_unreachable ();
25444 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
25445 return addr;
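/* E.g. for ADDR == (plus (reg 9) (const_int 16)) (a hypothetical
   address) this returns (reg 9); r0 is rejected because "la" reads
   it as the constant zero rather than as a base register.  */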
25448 void
25449 rs6000_fatal_bad_address (rtx op)
25451 fatal_insn ("bad address", op);
25454 #if TARGET_MACHO
25456 typedef struct branch_island_d {
25457 tree function_name;
25458 tree label_name;
25459 int line_number;
25460 } branch_island;
25463 static vec<branch_island, va_gc> *branch_islands;
25465 /* Remember to generate a branch island for far calls to the given
25466 function. */
25468 static void
25469 add_compiler_branch_island (tree label_name, tree function_name,
25470 int line_number)
25472 branch_island bi = {function_name, label_name, line_number};
25473 vec_safe_push (branch_islands, bi);
25476 /* Generate far-jump branch islands for everything recorded in
25477 branch_islands. Invoked immediately after the last instruction of
25478 the epilogue has been emitted; the branch islands must be appended
25479 to, and contiguous with, the function body. Mach-O stubs are
25480 generated in machopic_output_stub(). */
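/* For a hypothetical non-PIC call to "_foo" via island "L42", the
   code below emits roughly:
   L42:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr  */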
25482 static void
25483 macho_branch_islands (void)
25485 char tmp_buf[512];
25487 while (!vec_safe_is_empty (branch_islands))
25489 branch_island *bi = &branch_islands->last ();
25490 const char *label = IDENTIFIER_POINTER (bi->label_name);
25491 const char *name = IDENTIFIER_POINTER (bi->function_name);
25492 char name_buf[512];
25493 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
25494 if (name[0] == '*' || name[0] == '&')
25495 strcpy (name_buf, name+1);
25496 else
25498 name_buf[0] = '_';
25499 strcpy (name_buf+1, name);
25501 strcpy (tmp_buf, "\n");
25502 strcat (tmp_buf, label);
25503 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25504 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25505 dbxout_stabd (N_SLINE, bi->line_number);
25506 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25507 if (flag_pic)
25509 if (TARGET_LINK_STACK)
25511 char name[32];
25512 get_ppc476_thunk_name (name);
25513 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
25514 strcat (tmp_buf, name);
25515 strcat (tmp_buf, "\n");
25516 strcat (tmp_buf, label);
25517 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
25519 else
25521 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
25522 strcat (tmp_buf, label);
25523 strcat (tmp_buf, "_pic\n");
25524 strcat (tmp_buf, label);
25525 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
25528 strcat (tmp_buf, "\taddis r11,r11,ha16(");
25529 strcat (tmp_buf, name_buf);
25530 strcat (tmp_buf, " - ");
25531 strcat (tmp_buf, label);
25532 strcat (tmp_buf, "_pic)\n");
25534 strcat (tmp_buf, "\tmtlr r0\n");
25536 strcat (tmp_buf, "\taddi r12,r11,lo16(");
25537 strcat (tmp_buf, name_buf);
25538 strcat (tmp_buf, " - ");
25539 strcat (tmp_buf, label);
25540 strcat (tmp_buf, "_pic)\n");
25542 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
25544 else
25546 strcat (tmp_buf, ":\nlis r12,hi16(");
25547 strcat (tmp_buf, name_buf);
25548 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
25549 strcat (tmp_buf, name_buf);
25550 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
25552 output_asm_insn (tmp_buf, 0);
25553 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25554 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25555 dbxout_stabd (N_SLINE, bi->line_number);
25556 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25557 branch_islands->pop ();
25561 /* NO_PREVIOUS_DEF checks the branch-island list to see whether the
25562 function name has already been recorded. */
25564 static int
25565 no_previous_def (tree function_name)
25567 branch_island *bi;
25568 unsigned ix;
25570 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
25571 if (function_name == bi->function_name)
25572 return 0;
25573 return 1;
25576 /* GET_PREV_LABEL gets the label name from the previous definition of
25577 the function. */
25579 static tree
25580 get_prev_label (tree function_name)
25582 branch_island *bi;
25583 unsigned ix;
25585 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
25586 if (function_name == bi->function_name)
25587 return bi->label_name;
25588 return NULL_TREE;
25591 /* INSN is either a function call or a millicode call. It may have an
25592 unconditional jump in its delay slot.
25594 CALL_DEST is the routine we are calling. */
25596 char *
25597 output_call (rtx insn, rtx *operands, int dest_operand_number,
25598 int cookie_operand_number)
25600 static char buf[256];
25601 if (darwin_emit_branch_islands
25602 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
25603 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
25605 tree labelname;
25606 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
25608 if (no_previous_def (funname))
25610 rtx label_rtx = gen_label_rtx ();
25611 char *label_buf, temp_buf[256];
25612 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
25613 CODE_LABEL_NUMBER (label_rtx));
25614 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
25615 labelname = get_identifier (label_buf);
25616 add_compiler_branch_island (labelname, funname, insn_line (insn));
25618 else
25619 labelname = get_prev_label (funname);
25621 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
25622 instruction will reach 'foo', otherwise link as 'bl L42'".
25623 "L42" should be a 'branch island', that will do a far jump to
25624 'foo'. Branch islands are generated in
25625 macho_branch_islands(). */
25626 sprintf (buf, "jbsr %%z%d,%.246s",
25627 dest_operand_number, IDENTIFIER_POINTER (labelname));
25629 else
25630 sprintf (buf, "bl %%z%d", dest_operand_number);
25631 return buf;
25634 /* Generate PIC and indirect symbol stubs. */
25636 void
25637 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25639 unsigned int length;
25640 char *symbol_name, *lazy_ptr_name;
25641 char *local_label_0;
25642 static int label = 0;
25644 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25645 symb = (*targetm.strip_name_encoding) (symb);
25648 length = strlen (symb);
25649 symbol_name = XALLOCAVEC (char, length + 32);
25650 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25652 lazy_ptr_name = XALLOCAVEC (char, length + 32);
25653 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
25655 if (flag_pic == 2)
25656 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
25657 else
25658 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
25660 if (flag_pic == 2)
25662 fprintf (file, "\t.align 5\n");
25664 fprintf (file, "%s:\n", stub);
25665 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25667 label++;
25668 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
25669 sprintf (local_label_0, "\"L%011d$spb\"", label);
25671 fprintf (file, "\tmflr r0\n");
25672 if (TARGET_LINK_STACK)
25674 char name[32];
25675 get_ppc476_thunk_name (name);
25676 fprintf (file, "\tbl %s\n", name);
25677 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25679 else
25681 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
25682 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25684 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
25685 lazy_ptr_name, local_label_0);
25686 fprintf (file, "\tmtlr r0\n");
25687 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
25688 (TARGET_64BIT ? "ldu" : "lwzu"),
25689 lazy_ptr_name, local_label_0);
25690 fprintf (file, "\tmtctr r12\n");
25691 fprintf (file, "\tbctr\n");
25693 else
25695 fprintf (file, "\t.align 4\n");
25697 fprintf (file, "%s:\n", stub);
25698 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25700 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
25701 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
25702 (TARGET_64BIT ? "ldu" : "lwzu"),
25703 lazy_ptr_name);
25704 fprintf (file, "\tmtctr r12\n");
25705 fprintf (file, "\tbctr\n");
25708 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25709 fprintf (file, "%s:\n", lazy_ptr_name);
25710 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25711 fprintf (file, "%sdyld_stub_binding_helper\n",
25712 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
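/* For a hypothetical symbol _foo on 32-bit Darwin without full PIC
   (flag_pic != 2), the stub printed above looks roughly like:
	.align 4
   L_foo$stub:
	.indirect_symbol _foo
	lis r11,ha16(L_foo$lazy_ptr)
	lwzu r12,lo16(L_foo$lazy_ptr)(r11)
	mtctr r12
	bctr  */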
25715 /* Legitimize PIC addresses. If the address is already
25716 position-independent, we return ORIG. Newly generated
25717 position-independent addresses go into a reg. This is REG if non
25718 zero, otherwise we allocate register(s) as necessary. */
25720 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
25723 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
25724 rtx reg)
25726 rtx base, offset;
25728 if (reg == NULL && ! reload_in_progress && ! reload_completed)
25729 reg = gen_reg_rtx (Pmode);
25731 if (GET_CODE (orig) == CONST)
25733 rtx reg_temp;
25735 if (GET_CODE (XEXP (orig, 0)) == PLUS
25736 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
25737 return orig;
25739 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
25741 /* Use a different reg for the intermediate value, as
25742 it will be marked UNCHANGING. */
25743 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25744 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25745 Pmode, reg_temp);
25746 offset =
25747 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25748 Pmode, reg);
25750 if (GET_CODE (offset) == CONST_INT)
25752 if (SMALL_INT (offset))
25753 return plus_constant (Pmode, base, INTVAL (offset));
25754 else if (! reload_in_progress && ! reload_completed)
25755 offset = force_reg (Pmode, offset);
25756 else
25758 rtx mem = force_const_mem (Pmode, orig);
25759 return machopic_legitimize_pic_address (mem, Pmode, reg);
25762 return gen_rtx_PLUS (Pmode, base, offset);
25765 /* Fall back on generic machopic code. */
25766 return machopic_legitimize_pic_address (orig, mode, reg);
25769 /* Output a .machine directive for the Darwin assembler, and call
25770 the generic start_file routine. */
25772 static void
25773 rs6000_darwin_file_start (void)
25775 static const struct
25777 const char *arg;
25778 const char *name;
25779 HOST_WIDE_INT if_set;
25780 } mapping[] = {
25781 { "ppc64", "ppc64", MASK_64BIT },
25782 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25783 { "power4", "ppc970", 0 },
25784 { "G5", "ppc970", 0 },
25785 { "7450", "ppc7450", 0 },
25786 { "7400", "ppc7400", MASK_ALTIVEC },
25787 { "G4", "ppc7400", 0 },
25788 { "750", "ppc750", 0 },
25789 { "740", "ppc750", 0 },
25790 { "G3", "ppc750", 0 },
25791 { "604e", "ppc604e", 0 },
25792 { "604", "ppc604", 0 },
25793 { "603e", "ppc603", 0 },
25794 { "603", "ppc603", 0 },
25795 { "601", "ppc601", 0 },
25796 { NULL, "ppc", 0 } };
25797 const char *cpu_id = "";
25798 size_t i;
25800 rs6000_file_start ();
25801 darwin_file_start ();
25803 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25805 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25806 cpu_id = rs6000_default_cpu;
25808 if (global_options_set.x_rs6000_cpu_index)
25809 cpu_id = processor_target_table[rs6000_cpu_index].name;
25811 /* Look through the mapping array. Pick the first name that either
25812 matches the argument, has a bit set in IF_SET that is also set
25813 in the target flags, or has a NULL name. */
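/* For example, -mcpu=G5 matches the "G5" entry and emits
   ".machine ppc970"; a 64-bit compile with no -mcpu matches "ppc64"
   through MASK_64BIT; anything else falls through to the NULL
   sentinel and emits ".machine ppc". */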
25815 i = 0;
25816 while (mapping[i].arg != NULL
25817 && strcmp (mapping[i].arg, cpu_id) != 0
25818 && (mapping[i].if_set & rs6000_isa_flags) == 0)
25819 i++;
25821 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
25824 #endif /* TARGET_MACHO */
25826 #if TARGET_ELF
25827 static int
25828 rs6000_elf_reloc_rw_mask (void)
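/* The mask is consumed by the generic section-selection code: bit 0
   forces data whose relocations reference local symbols out of
   read-only sections, and bit 1 does the same for global symbols
   (that reading of the bits is an inference from how varasm uses the
   hook). So PIC keeps all relocated data writable, while AIX ABI
   code only needs that for global references. */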
25830 if (flag_pic)
25831 return 3;
25832 else if (DEFAULT_ABI == ABI_AIX)
25833 return 2;
25834 else
25835 return 0;
25838 /* Record an element in the table of global constructors. SYMBOL is
25839 a SYMBOL_REF of the function to be called; PRIORITY is a number
25840 between 0 and MAX_INIT_PRIORITY.
25842 This differs from default_named_section_asm_out_constructor in
25843 that we have special handling for -mrelocatable. */
25845 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25846 static void
25847 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25849 const char *section = ".ctors";
25850 char buf[16];
25852 if (priority != DEFAULT_INIT_PRIORITY)
25854 sprintf (buf, ".ctors.%.5u",
25855 /* Invert the numbering so the linker puts us in the proper
25856 order; constructors are run from right to left, and the
25857 linker sorts in increasing order. */
25858 MAX_INIT_PRIORITY - priority);
25859 section = buf;
25862 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25863 assemble_align (POINTER_SIZE);
25865 if (TARGET_RELOCATABLE)
25867 fputs ("\t.long (", asm_out_file);
25868 output_addr_const (asm_out_file, symbol);
25869 fputs (")@fixup\n", asm_out_file);
25871 else
25872 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25875 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25876 static void
25877 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25879 const char *section = ".dtors";
25880 char buf[16];
25882 if (priority != DEFAULT_INIT_PRIORITY)
25884 sprintf (buf, ".dtors.%.5u",
25885 /* Invert the numbering so the linker puts us in the proper
25886 order; destructors are run from left to right, and lower
25887 priorities must run later in the linker's increasing sort. */
25888 MAX_INIT_PRIORITY - priority);
25889 section = buf;
25892 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25893 assemble_align (POINTER_SIZE);
25895 if (TARGET_RELOCATABLE)
25897 fputs ("\t.long (", asm_out_file);
25898 output_addr_const (asm_out_file, symbol);
25899 fputs (")@fixup\n", asm_out_file);
25901 else
25902 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25905 void
25906 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25908 if (TARGET_64BIT)
25910 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25911 ASM_OUTPUT_LABEL (file, name);
25912 fputs (DOUBLE_INT_ASM_OP, file);
25913 rs6000_output_function_entry (file, name);
25914 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25915 if (DOT_SYMBOLS)
25917 fputs ("\t.size\t", file);
25918 assemble_name (file, name);
25919 fputs (",24\n\t.type\t.", file);
25920 assemble_name (file, name);
25921 fputs (",@function\n", file);
25922 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25924 fputs ("\t.globl\t.", file);
25925 assemble_name (file, name);
25926 putc ('\n', file);
25929 else
25930 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25931 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25932 rs6000_output_function_entry (file, name);
25933 fputs (":\n", file);
25934 return;
25937 if (TARGET_RELOCATABLE
25938 && !TARGET_SECURE_PLT
25939 && (get_pool_size () != 0 || crtl->profile)
25940 && uses_TOC ())
25942 char buf[256];
25944 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25946 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25947 fprintf (file, "\t.long ");
25948 assemble_name (file, buf);
25949 putc ('-', file);
25950 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25951 assemble_name (file, buf);
25952 putc ('\n', file);
25955 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25956 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25958 if (DEFAULT_ABI == ABI_AIX)
25960 const char *desc_name, *orig_name;
25962 orig_name = (*targetm.strip_name_encoding) (name);
25963 desc_name = orig_name;
25964 while (*desc_name == '.')
25965 desc_name++;
25967 if (TREE_PUBLIC (decl))
25968 fprintf (file, "\t.globl %s\n", desc_name);
25970 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25971 fprintf (file, "%s:\n", desc_name);
25972 fprintf (file, "\t.long %s\n", orig_name);
25973 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25974 if (DEFAULT_ABI == ABI_AIX)
25975 fputs ("\t.long 0\n", file);
25976 fprintf (file, "\t.previous\n");
25978 ASM_OUTPUT_LABEL (file, name);
25981 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25982 static void
25983 rs6000_elf_file_end (void)
25985 #ifdef HAVE_AS_GNU_ATTRIBUTE
25986 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
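/* Values below mirror the Power GNU attribute encoding used here:
   tag 4 (FP ABI) is 1 for hard-float double, 3 for hard-float
   single, 2 for soft-float; tag 8 (vector ABI) is 2 for AltiVec,
   3 for SPE, 1 otherwise; tag 12 (struct return) is 2 when
   aix_struct_return and 1 otherwise. */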
25988 if (rs6000_passes_float)
25989 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25990 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25991 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25992 : 2));
25993 if (rs6000_passes_vector)
25994 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25995 (TARGET_ALTIVEC_ABI ? 2
25996 : TARGET_SPE_ABI ? 3
25997 : 1));
25998 if (rs6000_returns_struct)
25999 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
26000 aix_struct_return ? 2 : 1);
26002 #endif
26003 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26004 if (TARGET_32BIT)
26005 file_end_indicate_exec_stack ();
26006 #endif
26008 #endif
26010 #if TARGET_XCOFF
26011 static void
26012 rs6000_xcoff_asm_output_anchor (rtx symbol)
26014 char buffer[100];
26016 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
26017 SYMBOL_REF_BLOCK_OFFSET (symbol));
26018 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
26021 static void
26022 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
26024 fputs (GLOBAL_ASM_OP, stream);
26025 RS6000_OUTPUT_BASENAME (stream, name);
26026 putc ('\n', stream);
26029 /* A get_unnamed_decl callback, used for read-only sections. DIRECTIVE
26030 points to the section string variable. */
26032 static void
26033 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
26035 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
26036 *(const char *const *) directive,
26037 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
26040 /* Likewise for read-write sections. */
26042 static void
26043 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
26045 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
26046 *(const char *const *) directive,
26047 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
26050 static void
26051 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
26053 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
26054 *(const char *const *) directive,
26055 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
26058 /* A get_unnamed_section callback, used for switching to toc_section. */
26060 static void
26061 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
26063 if (TARGET_MINIMAL_TOC)
26065 /* toc_section is always selected at least once from
26066 rs6000_xcoff_file_start, so this is guaranteed to
26067 be defined exactly once in each file. */
26068 if (!toc_initialized)
26070 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
26071 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
26072 toc_initialized = 1;
26074 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
26075 (TARGET_32BIT ? "" : ",3"));
26077 else
26078 fputs ("\t.toc\n", asm_out_file);
26081 /* Implement TARGET_ASM_INIT_SECTIONS. */
26083 static void
26084 rs6000_xcoff_asm_init_sections (void)
26086 read_only_data_section
26087 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
26088 &xcoff_read_only_section_name);
26090 private_data_section
26091 = get_unnamed_section (SECTION_WRITE,
26092 rs6000_xcoff_output_readwrite_section_asm_op,
26093 &xcoff_private_data_section_name);
26095 tls_data_section
26096 = get_unnamed_section (SECTION_TLS,
26097 rs6000_xcoff_output_tls_section_asm_op,
26098 &xcoff_tls_data_section_name);
26100 tls_private_data_section
26101 = get_unnamed_section (SECTION_TLS,
26102 rs6000_xcoff_output_tls_section_asm_op,
26103 &xcoff_private_data_section_name);
26105 read_only_private_data_section
26106 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
26107 &xcoff_private_data_section_name);
26109 toc_section
26110 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
26112 readonly_data_section = read_only_data_section;
26113 exception_section = data_section;
26116 static int
26117 rs6000_xcoff_reloc_rw_mask (void)
26119 return 3;
26122 static void
26123 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
26124 tree decl ATTRIBUTE_UNUSED)
26126 int smclass;
26127 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
26129 if (flags & SECTION_CODE)
26130 smclass = 0;
26131 else if (flags & SECTION_TLS)
26132 smclass = 3;
26133 else if (flags & SECTION_WRITE)
26134 smclass = 2;
26135 else
26136 smclass = 1;
26138 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
26139 (flags & SECTION_CODE) ? "." : "",
26140 name, suffix[smclass], flags & SECTION_ENTSIZE);
26143 static section *
26144 rs6000_xcoff_select_section (tree decl, int reloc,
26145 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
26147 if (decl_readonly_section (decl, reloc))
26149 if (TREE_PUBLIC (decl))
26150 return read_only_data_section;
26151 else
26152 return read_only_private_data_section;
26154 else
26156 #if HAVE_AS_TLS
26157 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
26159 if (TREE_PUBLIC (decl))
26160 return tls_data_section;
26161 else if (bss_initializer_p (decl))
26163 /* Convert to COMMON to emit in BSS. */
26164 DECL_COMMON (decl) = 1;
26165 return tls_comm_section;
26167 else
26168 return tls_private_data_section;
26170 else
26171 #endif
26172 if (TREE_PUBLIC (decl))
26173 return data_section;
26174 else
26175 return private_data_section;
26179 static void
26180 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
26182 const char *name;
26184 /* Use select_section for private and uninitialized data. */
26185 if (!TREE_PUBLIC (decl)
26186 || DECL_COMMON (decl)
26187 || DECL_INITIAL (decl) == NULL_TREE
26188 || DECL_INITIAL (decl) == error_mark_node
26189 || (flag_zero_initialized_in_bss
26190 && initializer_zerop (DECL_INITIAL (decl))))
26191 return;
26193 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
26194 name = (*targetm.strip_name_encoding) (name);
26195 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
26198 /* Select section for constant in constant pool.
26200 On RS/6000, all constants are in the private read-only data area.
26201 However, if this is being placed in the TOC it must be output as a
26202 toc entry. */
26204 static section *
26205 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
26206 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
26208 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
26209 return toc_section;
26210 else
26211 return read_only_private_data_section;
26214 /* Remove any trailing [DS] or the like from the symbol name. */
26216 static const char *
26217 rs6000_xcoff_strip_name_encoding (const char *name)
26219 size_t len;
26220 if (*name == '*')
26221 name++;
26222 len = strlen (name);
26223 if (name[len - 1] == ']')
26224 return ggc_alloc_string (name, len - 4);
26225 else
26226 return name;
26229 /* Section attributes. AIX is always PIC. */
26231 static unsigned int
26232 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
26234 unsigned int align;
26235 unsigned int flags = default_section_type_flags (decl, name, reloc);
26237 /* Align to at least UNIT size. */
26238 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
26239 align = MIN_UNITS_PER_WORD;
26240 else
26241 /* Increase alignment of large objects if not already stricter. */
26242 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
26243 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
26244 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
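/* As a worked example: a 16-byte-aligned decl larger than a word
   yields align = max (16, UNITS_PER_FP_WORD) = 16, and
   exact_log2 (16) = 4 is what ends up encoded in the
   SECTION_ENTSIZE bits of the returned flags. */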
26246 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
26249 /* Output at beginning of assembler file.
26251 Initialize the section names for the RS/6000 at this point.
26253 Specify filename, including full path, to assembler.
26255 We want to go into the TOC section so at least one .toc will be emitted.
26256 Also, in order to output proper .bs/.es pairs, we need at least one static
26257 [RW] section emitted.
26259 Finally, declare mcount when profiling to make the assembler happy. */
26261 static void
26262 rs6000_xcoff_file_start (void)
26264 rs6000_gen_section_name (&xcoff_bss_section_name,
26265 main_input_filename, ".bss_");
26266 rs6000_gen_section_name (&xcoff_private_data_section_name,
26267 main_input_filename, ".rw_");
26268 rs6000_gen_section_name (&xcoff_read_only_section_name,
26269 main_input_filename, ".ro_");
26270 rs6000_gen_section_name (&xcoff_tls_data_section_name,
26271 main_input_filename, ".tls_");
26272 rs6000_gen_section_name (&xcoff_tbss_section_name,
26273 main_input_filename, ".tbss_[UL]");
26275 fputs ("\t.file\t", asm_out_file);
26276 output_quoted_string (asm_out_file, main_input_filename);
26277 fputc ('\n', asm_out_file);
26278 if (write_symbols != NO_DEBUG)
26279 switch_to_section (private_data_section);
26280 switch_to_section (text_section);
26281 if (profile_flag)
26282 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
26283 rs6000_file_start ();
26286 /* Output at end of assembler file.
26287 On the RS/6000, referencing data should automatically pull in text. */
26289 static void
26290 rs6000_xcoff_file_end (void)
26292 switch_to_section (text_section);
26293 fputs ("_section_.text:\n", asm_out_file);
26294 switch_to_section (data_section);
26295 fputs (TARGET_32BIT
26296 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
26297 asm_out_file);
26300 #ifdef HAVE_AS_TLS
26301 static void
26302 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
26304 rtx symbol;
26305 int flags;
26307 default_encode_section_info (decl, rtl, first);
26309 /* Careful not to prod global register variables. */
26310 if (!MEM_P (rtl))
26311 return;
26312 symbol = XEXP (rtl, 0);
26313 if (GET_CODE (symbol) != SYMBOL_REF)
26314 return;
26316 flags = SYMBOL_REF_FLAGS (symbol);
26318 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
26319 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
26321 SYMBOL_REF_FLAGS (symbol) = flags;
26323 #endif /* HAVE_AS_TLS */
26324 #endif /* TARGET_XCOFF */
26326 /* Compute a (partial) cost for rtx X. Return true if the complete
26327 cost has been computed, and false if subexpressions should be
26328 scanned. In either case, *TOTAL contains the cost result. */
26330 static bool
26331 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
26332 int *total, bool speed)
26334 enum machine_mode mode = GET_MODE (x);
26336 switch (code)
26338 /* On the RS/6000, if it is valid in the insn, it is free. */
26339 case CONST_INT:
26340 if (((outer_code == SET
26341 || outer_code == PLUS
26342 || outer_code == MINUS)
26343 && (satisfies_constraint_I (x)
26344 || satisfies_constraint_L (x)))
26345 || (outer_code == AND
26346 && (satisfies_constraint_K (x)
26347 || (mode == SImode
26348 ? satisfies_constraint_L (x)
26349 : satisfies_constraint_J (x))
26350 || mask_operand (x, mode)
26351 || (mode == DImode
26352 && mask64_operand (x, DImode))))
26353 || ((outer_code == IOR || outer_code == XOR)
26354 && (satisfies_constraint_K (x)
26355 || (mode == SImode
26356 ? satisfies_constraint_L (x)
26357 : satisfies_constraint_J (x))))
26358 || outer_code == ASHIFT
26359 || outer_code == ASHIFTRT
26360 || outer_code == LSHIFTRT
26361 || outer_code == ROTATE
26362 || outer_code == ROTATERT
26363 || outer_code == ZERO_EXTRACT
26364 || (outer_code == MULT
26365 && satisfies_constraint_I (x))
26366 || ((outer_code == DIV || outer_code == UDIV
26367 || outer_code == MOD || outer_code == UMOD)
26368 && exact_log2 (INTVAL (x)) >= 0)
26369 || (outer_code == COMPARE
26370 && (satisfies_constraint_I (x)
26371 || satisfies_constraint_K (x)))
26372 || ((outer_code == EQ || outer_code == NE)
26373 && (satisfies_constraint_I (x)
26374 || satisfies_constraint_K (x)
26375 || (mode == SImode
26376 ? satisfies_constraint_L (x)
26377 : satisfies_constraint_J (x))))
26378 || (outer_code == GTU
26379 && satisfies_constraint_I (x))
26380 || (outer_code == LTU
26381 && satisfies_constraint_P (x)))
26383 *total = 0;
26384 return true;
26386 else if ((outer_code == PLUS
26387 && reg_or_add_cint_operand (x, VOIDmode))
26388 || (outer_code == MINUS
26389 && reg_or_sub_cint_operand (x, VOIDmode))
26390 || ((outer_code == SET
26391 || outer_code == IOR
26392 || outer_code == XOR)
26393 && (INTVAL (x)
26394 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
26396 *total = COSTS_N_INSNS (1);
26397 return true;
26399 /* FALLTHRU */
26401 case CONST_DOUBLE:
26402 case CONST:
26403 case HIGH:
26404 case SYMBOL_REF:
26405 case MEM:
26406 /* When optimizing for size, MEM should be slightly more expensive
26407 than generating the address, e.g., (plus (reg) (const)).
26408 L1 cache latency is about two instructions. */
26409 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
26410 return true;
26412 case LABEL_REF:
26413 *total = 0;
26414 return true;
26416 case PLUS:
26417 case MINUS:
26418 if (FLOAT_MODE_P (mode))
26419 *total = rs6000_cost->fp;
26420 else
26421 *total = COSTS_N_INSNS (1);
26422 return false;
26424 case MULT:
26425 if (GET_CODE (XEXP (x, 1)) == CONST_INT
26426 && satisfies_constraint_I (XEXP (x, 1)))
26428 if (INTVAL (XEXP (x, 1)) >= -256
26429 && INTVAL (XEXP (x, 1)) <= 255)
26430 *total = rs6000_cost->mulsi_const9;
26431 else
26432 *total = rs6000_cost->mulsi_const;
26434 else if (mode == SFmode)
26435 *total = rs6000_cost->fp;
26436 else if (FLOAT_MODE_P (mode))
26437 *total = rs6000_cost->dmul;
26438 else if (mode == DImode)
26439 *total = rs6000_cost->muldi;
26440 else
26441 *total = rs6000_cost->mulsi;
26442 return false;
26444 case FMA:
26445 if (mode == SFmode)
26446 *total = rs6000_cost->fp;
26447 else
26448 *total = rs6000_cost->dmul;
26449 break;
26451 case DIV:
26452 case MOD:
26453 if (FLOAT_MODE_P (mode))
26455 *total = mode == DFmode ? rs6000_cost->ddiv
26456 : rs6000_cost->sdiv;
26457 return false;
26459 /* FALLTHRU */
26461 case UDIV:
26462 case UMOD:
26463 if (GET_CODE (XEXP (x, 1)) == CONST_INT
26464 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
26466 if (code == DIV || code == MOD)
26467 /* Shift, addze */
26468 *total = COSTS_N_INSNS (2);
26469 else
26470 /* Shift */
26471 *total = COSTS_N_INSNS (1);
26473 else
26475 if (GET_MODE (XEXP (x, 1)) == DImode)
26476 *total = rs6000_cost->divdi;
26477 else
26478 *total = rs6000_cost->divsi;
26480 /* Add in shift and subtract for MOD. */
26481 if (code == MOD || code == UMOD)
26482 *total += COSTS_N_INSNS (2);
26483 return false;
26485 case CTZ:
26486 case FFS:
26487 *total = COSTS_N_INSNS (4);
26488 return false;
26490 case POPCOUNT:
26491 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
26492 return false;
26494 case PARITY:
26495 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
26496 return false;
26498 case NOT:
26499 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
26501 *total = 0;
26502 return false;
26504 /* FALLTHRU */
26506 case AND:
26507 case CLZ:
26508 case IOR:
26509 case XOR:
26510 case ZERO_EXTRACT:
26511 *total = COSTS_N_INSNS (1);
26512 return false;
26514 case ASHIFT:
26515 case ASHIFTRT:
26516 case LSHIFTRT:
26517 case ROTATE:
26518 case ROTATERT:
26519 /* Handle mul_highpart. */
26520 if (outer_code == TRUNCATE
26521 && GET_CODE (XEXP (x, 0)) == MULT)
26523 if (mode == DImode)
26524 *total = rs6000_cost->muldi;
26525 else
26526 *total = rs6000_cost->mulsi;
26527 return true;
26529 else if (outer_code == AND)
26530 *total = 0;
26531 else
26532 *total = COSTS_N_INSNS (1);
26533 return false;
26535 case SIGN_EXTEND:
26536 case ZERO_EXTEND:
26537 if (GET_CODE (XEXP (x, 0)) == MEM)
26538 *total = 0;
26539 else
26540 *total = COSTS_N_INSNS (1);
26541 return false;
26543 case COMPARE:
26544 case NEG:
26545 case ABS:
26546 if (!FLOAT_MODE_P (mode))
26548 *total = COSTS_N_INSNS (1);
26549 return false;
26551 /* FALLTHRU */
26553 case FLOAT:
26554 case UNSIGNED_FLOAT:
26555 case FIX:
26556 case UNSIGNED_FIX:
26557 case FLOAT_TRUNCATE:
26558 *total = rs6000_cost->fp;
26559 return false;
26561 case FLOAT_EXTEND:
26562 if (mode == DFmode)
26563 *total = 0;
26564 else
26565 *total = rs6000_cost->fp;
26566 return false;
26568 case UNSPEC:
26569 switch (XINT (x, 1))
26571 case UNSPEC_FRSP:
26572 *total = rs6000_cost->fp;
26573 return true;
26575 default:
26576 break;
26578 break;
26580 case CALL:
26581 case IF_THEN_ELSE:
26582 if (!speed)
26584 *total = COSTS_N_INSNS (1);
26585 return true;
26587 else if (FLOAT_MODE_P (mode)
26588 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
26590 *total = rs6000_cost->fp;
26591 return false;
26593 break;
26595 case EQ:
26596 case GTU:
26597 case LTU:
26598 /* Carry bit requires mode == Pmode.
26599 NEG or PLUS already counted, so only add one. */
26600 if (mode == Pmode
26601 && (outer_code == NEG || outer_code == PLUS))
26603 *total = COSTS_N_INSNS (1);
26604 return true;
26606 if (outer_code == SET)
26608 if (XEXP (x, 1) == const0_rtx)
26610 if (TARGET_ISEL && !TARGET_MFCRF)
26611 *total = COSTS_N_INSNS (8);
26612 else
26613 *total = COSTS_N_INSNS (2);
26614 return true;
26616 else if (mode == Pmode)
26618 *total = COSTS_N_INSNS (3);
26619 return false;
26622 /* FALLTHRU */
26624 case GT:
26625 case LT:
26626 case UNORDERED:
26627 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
26629 if (TARGET_ISEL && !TARGET_MFCRF)
26630 *total = COSTS_N_INSNS (8);
26631 else
26632 *total = COSTS_N_INSNS (2);
26633 return true;
26635 /* CC COMPARE. */
26636 if (outer_code == COMPARE)
26638 *total = 0;
26639 return true;
26641 break;
26643 default:
26644 break;
26647 return false;
26650 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
26652 static bool
26653 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
26654 bool speed)
26656 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
26658 fprintf (stderr,
26659 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
26660 "opno = %d, total = %d, speed = %s, x:\n",
26661 ret ? "complete" : "scan inner",
26662 GET_RTX_NAME (code),
26663 GET_RTX_NAME (outer_code),
26664 opno,
26665 *total,
26666 speed ? "true" : "false");
26668 debug_rtx (x);
26670 return ret;
26673 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
26675 static int
26676 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
26677 addr_space_t as, bool speed)
26679 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
26681 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
26682 ret, speed ? "true" : "false");
26683 debug_rtx (x);
26685 return ret;
26689 /* A C expression returning the cost of moving data from a register of class
26690 FROM to one of class TO. */
26692 static int
26693 rs6000_register_move_cost (enum machine_mode mode,
26694 reg_class_t from, reg_class_t to)
26696 int ret;
26698 if (TARGET_DEBUG_COST)
26699 dbg_cost_ctrl++;
26701 /* Moves from/to GENERAL_REGS. */
26702 if (reg_classes_intersect_p (to, GENERAL_REGS)
26703 || reg_classes_intersect_p (from, GENERAL_REGS))
26705 reg_class_t rclass = from;
26707 if (! reg_classes_intersect_p (to, GENERAL_REGS))
26708 rclass = to;
26710 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
26711 ret = (rs6000_memory_move_cost (mode, rclass, false)
26712 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
26714 /* It's more expensive to move CR_REGS than CR0_REGS because of the
26715 shift. */
26716 else if (rclass == CR_REGS)
26717 ret = 4;
26719 /* For those processors that have slow LR/CTR moves, make them more
26720 expensive than memory in order to bias spills to memory. */
26721 else if ((rs6000_cpu == PROCESSOR_POWER6
26722 || rs6000_cpu == PROCESSOR_POWER7
26723 || rs6000_cpu == PROCESSOR_POWER8)
26724 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
26725 ret = 6 * hard_regno_nregs[0][mode];
26727 else
26728 /* A move will cost one instruction per GPR moved. */
26729 ret = 2 * hard_regno_nregs[0][mode];
26732 /* If we have VSX, we can easily move between FPR or Altivec registers. */
26733 else if (VECTOR_MEM_VSX_P (mode)
26734 && reg_classes_intersect_p (to, VSX_REGS)
26735 && reg_classes_intersect_p (from, VSX_REGS))
26736 ret = 2 * hard_regno_nregs[32][mode];
26738 /* Moving between two similar registers is just one instruction. */
26739 else if (reg_classes_intersect_p (to, from))
26740 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
26742 /* Everything else has to go through GENERAL_REGS. */
26743 else
26744 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
26745 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
26747 if (TARGET_DEBUG_COST)
26749 if (dbg_cost_ctrl == 1)
26750 fprintf (stderr,
26751 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
26752 ret, GET_MODE_NAME (mode), reg_class_names[from],
26753 reg_class_names[to]);
26754 dbg_cost_ctrl--;
26757 return ret;
26760 /* A C expression returning the cost of moving data of mode MODE from a
26761 register to or from memory. */
26763 static int
26764 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
26765 bool in ATTRIBUTE_UNUSED)
26767 int ret;
26769 if (TARGET_DEBUG_COST)
26770 dbg_cost_ctrl++;
26772 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
26773 ret = 4 * hard_regno_nregs[0][mode];
26774 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
26775 || reg_classes_intersect_p (rclass, VSX_REGS)))
26776 ret = 4 * hard_regno_nregs[32][mode];
26777 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
26778 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
26779 else
26780 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
26782 if (TARGET_DEBUG_COST)
26784 if (dbg_cost_ctrl == 1)
26785 fprintf (stderr,
26786 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
26787 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
26788 dbg_cost_ctrl--;
26791 return ret;
26794 /* Returns a code for a target-specific builtin that implements the
26795 reciprocal of the function, or NULL_TREE if not available. */
26797 static tree
26798 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
26799 bool sqrt ATTRIBUTE_UNUSED)
26801 if (optimize_insn_for_size_p ())
26802 return NULL_TREE;
26804 if (md_fn)
26805 switch (fn)
26807 case VSX_BUILTIN_XVSQRTDP:
26808 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
26809 return NULL_TREE;
26811 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
26813 case VSX_BUILTIN_XVSQRTSP:
26814 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
26815 return NULL_TREE;
26817 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
26819 default:
26820 return NULL_TREE;
26823 else
26824 switch (fn)
26826 case BUILT_IN_SQRT:
26827 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
26828 return NULL_TREE;
26830 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
26832 case BUILT_IN_SQRTF:
26833 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
26834 return NULL_TREE;
26836 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
26838 default:
26839 return NULL_TREE;
26843 /* Load up a constant. If the mode is a vector mode, splat the value across
26844 all of the vector elements. */
26846 static rtx
26847 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
26849 rtx reg;
26851 if (mode == SFmode || mode == DFmode)
26853 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
26854 reg = force_reg (mode, d);
26856 else if (mode == V4SFmode)
26858 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
26859 rtvec v = gen_rtvec (4, d, d, d, d);
26860 reg = gen_reg_rtx (mode);
26861 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26863 else if (mode == V2DFmode)
26865 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
26866 rtvec v = gen_rtvec (2, d, d);
26867 reg = gen_reg_rtx (mode);
26868 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26870 else
26871 gcc_unreachable ();
26873 return reg;
26876 /* Generate an FMA instruction. */
26878 static void
26879 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
26881 enum machine_mode mode = GET_MODE (target);
26882 rtx dst;
26884 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26885 gcc_assert (dst != NULL);
26887 if (dst != target)
26888 emit_move_insn (target, dst);
26891 /* Generate an FMSUB instruction: dst = fma(m1, m2, -a). */
26893 static void
26894 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
26896 enum machine_mode mode = GET_MODE (target);
26897 rtx dst;
26899 /* Altivec does not support fms directly;
26900 generate in terms of fma in that case. */
26901 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
26902 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
26903 else
26905 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
26906 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26908 gcc_assert (dst != NULL);
26910 if (dst != target)
26911 emit_move_insn (target, dst);
26914 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
26916 static void
26917 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
26919 enum machine_mode mode = GET_MODE (dst);
26920 rtx r;
26922 /* This is a tad more complicated, since the fnma_optab is for
26923 a different expression: fma(-m1, m2, a), which is the same
26924 thing except in the case of signed zeros.
26926 Fortunately we know that if FMA is supported that FNMSUB is
26927 also supported in the ISA. Just expand it directly. */
26929 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
26931 r = gen_rtx_NEG (mode, a);
26932 r = gen_rtx_FMA (mode, m1, m2, r);
26933 r = gen_rtx_NEG (mode, r);
26934 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
26937 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
26938 add a reg_note saying that this was a division. Support both scalar and
26939 vector divide. Assumes no trapping math and finite arguments. */
26941 void
26942 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
26944 enum machine_mode mode = GET_MODE (dst);
26945 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
26946 int i;
26948 /* Low precision estimates guarantee 5 bits of accuracy. High
26949 precision estimates guarantee 14 bits of accuracy. SFmode
26950 requires 23 bits of accuracy. DFmode requires 52 bits of
26951 accuracy. Each pass at least doubles the accuracy, leading
26952 to the following. */
26953 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
26954 if (mode == DFmode || mode == V2DFmode)
26955 passes++;
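/* Concretely: each pass squares the relative error, doubling the
   bits of accuracy. A 5-bit estimate reaches 10/20/40 bits after
   1/2/3 passes, so 3 passes cover SFmode's 23 bits and the extra
   pass reaches 80 bits for DFmode's 52. With TARGET_RECIP_PRECISION
   the 14-bit estimate needs only 1 pass for SFmode (28 bits) and
   2 for DFmode (56 bits). */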
26957 enum insn_code code = optab_handler (smul_optab, mode);
26958 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26960 gcc_assert (code != CODE_FOR_nothing);
26962 one = rs6000_load_constant_and_splat (mode, dconst1);
26964 /* x0 = 1./d estimate */
26965 x0 = gen_reg_rtx (mode);
26966 emit_insn (gen_rtx_SET (VOIDmode, x0,
26967 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26968 UNSPEC_FRES)));
26970 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
26971 if (passes > 1)
26973 /* e0 = 1. - d * x0 */
26974 e0 = gen_reg_rtx (mode);
26975 rs6000_emit_nmsub (e0, d, x0, one);
26977 /* x1 = x0 + e0 * x0 */
26978 x1 = gen_reg_rtx (mode);
26979 rs6000_emit_madd (x1, e0, x0, x0);
26981 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
26982 ++i, xprev = xnext, eprev = enext)
26984 /* enext = eprev * eprev */
26985 enext = gen_reg_rtx (mode);
26986 emit_insn (gen_mul (enext, eprev, eprev));
26988 /* xnext = xprev + enext * xprev */
26989 xnext = gen_reg_rtx (mode);
26990 rs6000_emit_madd (xnext, enext, xprev, xprev);
26993 else
26994 xprev = x0;
26996 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
26998 /* u = n * xprev */
26999 u = gen_reg_rtx (mode);
27000 emit_insn (gen_mul (u, n, xprev));
27002 /* v = n - (d * u) */
27003 v = gen_reg_rtx (mode);
27004 rs6000_emit_nmsub (v, d, u, n);
27006 /* dst = (v * xprev) + u */
27007 rs6000_emit_madd (dst, v, xprev, u);
27009 if (note_p)
27010 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
27013 /* Newton-Raphson approximation of single/double-precision floating point
27014 rsqrt. Assumes no trapping math and finite arguments. */
27016 void
27017 rs6000_emit_swrsqrt (rtx dst, rtx src)
27019 enum machine_mode mode = GET_MODE (src);
27020 rtx x0 = gen_reg_rtx (mode);
27021 rtx y = gen_reg_rtx (mode);
27023 /* Low precision estimates guarantee 5 bits of accuracy. High
27024 precision estimates guarantee 14 bits of accuracy. SFmode
27025 requires 23 bits of accuracy. DFmode requires 52 bits of
27026 accuracy. Each pass at least doubles the accuracy, leading
27027 to the following. */
27028 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
27029 if (mode == DFmode || mode == V2DFmode)
27030 passes++;
27032 REAL_VALUE_TYPE dconst3_2;
27033 int i;
27034 rtx halfthree;
27035 enum insn_code code = optab_handler (smul_optab, mode);
27036 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
27038 gcc_assert (code != CODE_FOR_nothing);
27040 /* Load up the constant 1.5 either as a scalar, or as a vector. */
27041 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
27042 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
27044 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
27046 /* x0 = rsqrt estimate */
27047 emit_insn (gen_rtx_SET (VOIDmode, x0,
27048 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
27049 UNSPEC_RSQRT)));
27051 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
27052 rs6000_emit_msub (y, src, halfthree, src);
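/* Each pass below computes the classic refinement
   x1 = x0 * (1.5 - (0.5 * src) * x0 * x0), with y holding the
   precomputed 0.5 * src. Writing y as 1.5*src - src reuses the 1.5
   already loaded into halfthree rather than materializing a
   separate 0.5 constant. */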
27054 for (i = 0; i < passes; i++)
27056 rtx x1 = gen_reg_rtx (mode);
27057 rtx u = gen_reg_rtx (mode);
27058 rtx v = gen_reg_rtx (mode);
27060 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
27061 emit_insn (gen_mul (u, x0, x0));
27062 rs6000_emit_nmsub (v, y, u, halfthree);
27063 emit_insn (gen_mul (x1, x0, v));
27064 x0 = x1;
27067 emit_move_insn (dst, x0);
27068 return;
27071 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
27072 (Power7) targets. DST is the target, and SRC is the argument operand. */
27074 void
27075 rs6000_emit_popcount (rtx dst, rtx src)
27077 enum machine_mode mode = GET_MODE (dst);
27078 rtx tmp1, tmp2;
27080 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
27081 if (TARGET_POPCNTD)
27083 if (mode == SImode)
27084 emit_insn (gen_popcntdsi2 (dst, src));
27085 else
27086 emit_insn (gen_popcntddi2 (dst, src));
27087 return;
27090 tmp1 = gen_reg_rtx (mode);
27092 if (mode == SImode)
27094 emit_insn (gen_popcntbsi2 (tmp1, src));
27095 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
27096 NULL_RTX, 0);
27097 tmp2 = force_reg (SImode, tmp2);
27098 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
27100 else
27102 emit_insn (gen_popcntbdi2 (tmp1, src));
27103 tmp2 = expand_mult (DImode, tmp1,
27104 GEN_INT ((HOST_WIDE_INT)
27105 0x01010101 << 32 | 0x01010101),
27106 NULL_RTX, 0);
27107 tmp2 = force_reg (DImode, tmp2);
27108 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
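/* The multiply gathers the per-byte counts left by popcntb: for a
   32-bit value with byte counts b3..b0, multiplying by 0x01010101
   places b3+b2+b1+b0 (at most 32, so no carry between lanes) in the
   top byte, which the shift by 24 extracts; the 64-bit path does
   the same with the replicated constant and a shift by 56. */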
27113 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
27114 target, and SRC is the argument operand. */
27116 void
27117 rs6000_emit_parity (rtx dst, rtx src)
27119 enum machine_mode mode = GET_MODE (dst);
27120 rtx tmp;
27122 tmp = gen_reg_rtx (mode);
27124 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
27125 if (TARGET_CMPB)
27127 if (mode == SImode)
27129 emit_insn (gen_popcntbsi2 (tmp, src));
27130 emit_insn (gen_paritysi2_cmpb (dst, tmp));
27132 else
27134 emit_insn (gen_popcntbdi2 (tmp, src));
27135 emit_insn (gen_paritydi2_cmpb (dst, tmp));
27137 return;
27140 if (mode == SImode)
27142 /* Is mult+shift >= shift+xor+shift+xor? */
27143 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
27145 rtx tmp1, tmp2, tmp3, tmp4;
27147 tmp1 = gen_reg_rtx (SImode);
27148 emit_insn (gen_popcntbsi2 (tmp1, src));
27150 tmp2 = gen_reg_rtx (SImode);
27151 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
27152 tmp3 = gen_reg_rtx (SImode);
27153 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
27155 tmp4 = gen_reg_rtx (SImode);
27156 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
27157 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
27159 else
27160 rs6000_emit_popcount (tmp, src);
27161 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
27163 else
27165 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
27166 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
27168 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
27170 tmp1 = gen_reg_rtx (DImode);
27171 emit_insn (gen_popcntbdi2 (tmp1, src));
27173 tmp2 = gen_reg_rtx (DImode);
27174 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
27175 tmp3 = gen_reg_rtx (DImode);
27176 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
27178 tmp4 = gen_reg_rtx (DImode);
27179 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
27180 tmp5 = gen_reg_rtx (DImode);
27181 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
27183 tmp6 = gen_reg_rtx (DImode);
27184 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
27185 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
27187 else
27188 rs6000_emit_popcount (tmp, src);
27189 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
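/* The shift/xor ladder relies on parity being preserved by folding:
   xoring the upper half of the popcntb byte counts onto the lower
   half keeps the low bit of the total intact, so after folding down
   to a single byte the final and-with-1 is the parity. The cost
   checks above merely pick this over the multiply when the multiply
   is slow. */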
27193 /* Expand an Altivec constant permutation. Return true if we match
27194 an efficient implementation; false to fall back to VPERM. */
27196 bool
27197 altivec_expand_vec_perm_const (rtx operands[4])
27199 struct altivec_perm_insn {
27200 enum insn_code impl;
27201 unsigned char perm[16];
27203 static const struct altivec_perm_insn patterns[] = {
27204 { CODE_FOR_altivec_vpkuhum,
27205 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
27206 { CODE_FOR_altivec_vpkuwum,
27207 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
27208 { CODE_FOR_altivec_vmrghb,
27209 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
27210 { CODE_FOR_altivec_vmrghh,
27211 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
27212 { CODE_FOR_altivec_vmrghw,
27213 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
27214 { CODE_FOR_altivec_vmrglb,
27215 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
27216 { CODE_FOR_altivec_vmrglh,
27217 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
27218 { CODE_FOR_altivec_vmrglw,
27219 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
27222 unsigned int i, j, elt, which;
27223 unsigned char perm[16];
27224 rtx target, op0, op1, sel, x;
27225 bool one_vec;
27227 target = operands[0];
27228 op0 = operands[1];
27229 op1 = operands[2];
27230 sel = operands[3];
27232 /* Unpack the constant selector. */
27233 for (i = which = 0; i < 16; ++i)
27235 rtx e = XVECEXP (sel, 0, i);
27236 elt = INTVAL (e) & 31;
27237 which |= (elt < 16 ? 1 : 2);
27238 perm[i] = elt;
27241 /* Simplify the constant selector based on operands. */
27242 switch (which)
27244 default:
27245 gcc_unreachable ();
27247 case 3:
27248 one_vec = false;
27249 if (!rtx_equal_p (op0, op1))
27250 break;
27251 /* FALLTHRU */
27253 case 2:
27254 for (i = 0; i < 16; ++i)
27255 perm[i] &= 15;
27256 op0 = op1;
27257 one_vec = true;
27258 break;
27260 case 1:
27261 op1 = op0;
27262 one_vec = true;
27263 break;
27266 /* Look for splat patterns. */
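/* For example, selector {3,3,...,3} becomes vspltb 3; {4,5,4,5,...}
   splats half-word 2 via vsplth; and a repeating {8,9,10,11} group
   splats word 2 via vspltw. */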
27267 if (one_vec)
27269 elt = perm[0];
27271 for (i = 0; i < 16; ++i)
27272 if (perm[i] != elt)
27273 break;
27274 if (i == 16)
27276 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
27277 return true;
27280 if (elt % 2 == 0)
27282 for (i = 0; i < 16; i += 2)
27283 if (perm[i] != elt || perm[i + 1] != elt + 1)
27284 break;
27285 if (i == 16)
27287 x = gen_reg_rtx (V8HImode);
27288 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
27289 GEN_INT (elt / 2)));
27290 emit_move_insn (target, gen_lowpart (V16QImode, x));
27291 return true;
27295 if (elt % 4 == 0)
27297 for (i = 0; i < 16; i += 4)
27298 if (perm[i] != elt
27299 || perm[i + 1] != elt + 1
27300 || perm[i + 2] != elt + 2
27301 || perm[i + 3] != elt + 3)
27302 break;
27303 if (i == 16)
27305 x = gen_reg_rtx (V4SImode);
27306 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
27307 GEN_INT (elt / 4)));
27308 emit_move_insn (target, gen_lowpart (V16QImode, x));
27309 return true;
27314 /* Look for merge and pack patterns. */
27315 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
27317 bool swapped;
27319 elt = patterns[j].perm[0];
27320 if (perm[0] == elt)
27321 swapped = false;
27322 else if (perm[0] == elt + 16)
27323 swapped = true;
27324 else
27325 continue;
27326 for (i = 1; i < 16; ++i)
27328 elt = patterns[j].perm[i];
27329 if (swapped)
27330 elt = (elt >= 16 ? elt - 16 : elt + 16);
27331 else if (one_vec && elt >= 16)
27332 elt -= 16;
27333 if (perm[i] != elt)
27334 break;
27336 if (i == 16)
27338 enum insn_code icode = patterns[j].impl;
27339 enum machine_mode omode = insn_data[icode].operand[0].mode;
27340 enum machine_mode imode = insn_data[icode].operand[1].mode;
27342 if (swapped)
27343 x = op0, op0 = op1, op1 = x;
27344 if (imode != V16QImode)
27346 op0 = gen_lowpart (imode, op0);
27347 op1 = gen_lowpart (imode, op1);
27349 if (omode == V16QImode)
27350 x = target;
27351 else
27352 x = gen_reg_rtx (omode);
27353 emit_insn (GEN_FCN (icode) (x, op0, op1));
27354 if (omode != V16QImode)
27355 emit_move_insn (target, gen_lowpart (V16QImode, x));
27356 return true;
27360 return false;
27363 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
27364 Return true if we match an efficient implementation. */
27366 static bool
27367 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
27368 unsigned char perm0, unsigned char perm1)
27370 rtx x;
27372 /* If both selectors come from the same operand, fold to single op. */
27373 if ((perm0 & 2) == (perm1 & 2))
27375 if (perm0 & 2)
27376 op0 = op1;
27377 else
27378 op1 = op0;
27380 /* If both operands are equal, fold to simpler permutation. */
27381 if (rtx_equal_p (op0, op1))
27383 perm0 = perm0 & 1;
27384 perm1 = (perm1 & 1) + 2;
27386 /* If the first selector comes from the second operand, swap. */
27387 else if (perm0 & 2)
27389 if (perm1 & 2)
27390 return false;
27391 perm0 -= 2;
27392 perm1 += 2;
27393 x = op0, op0 = op1, op1 = x;
27395 /* If the second selector does not come from the second operand, fail. */
27396 else if ((perm1 & 2) == 0)
27397 return false;
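/* At this point perm0 and perm1 each name one of the four input
   doublewords: bit 1 selects the operand (0 = op0, 1 = op1) and
   bit 0 the element within it. The normalization above leaves
   perm0 drawing from op0 and perm1 from op1, or fails. */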
27399 /* Success! */
27400 if (target != NULL)
27402 enum machine_mode vmode, dmode;
27403 rtvec v;
27405 vmode = GET_MODE (target);
27406 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
27407 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
27409 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
27410 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
27411 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
27412 emit_insn (gen_rtx_SET (VOIDmode, target, x));
27414 return true;
27417 bool
27418 rs6000_expand_vec_perm_const (rtx operands[4])
27420 rtx target, op0, op1, sel;
27421 unsigned char perm0, perm1;
27423 target = operands[0];
27424 op0 = operands[1];
27425 op1 = operands[2];
27426 sel = operands[3];
27428 /* Unpack the constant selector. */
27429 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
27430 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
27432 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
27435 /* Test whether a constant permutation is supported. */
27437 static bool
27438 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
27439 const unsigned char *sel)
27441 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
27442 if (TARGET_ALTIVEC)
27443 return true;
27445 /* Check for ps_merge* or evmerge* insns. */
27446 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
27447 || (TARGET_SPE && vmode == V2SImode))
27449 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
27450 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
27451 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
27454 return false;
27457 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
27459 static void
27460 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
27461 enum machine_mode vmode, unsigned nelt, rtx perm[])
27463 enum machine_mode imode;
27464 rtx x;
27466 imode = vmode;
27467 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
27469 imode = GET_MODE_INNER (vmode);
27470 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
27471 imode = mode_for_vector (imode, nelt);
27474 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
27475 x = expand_vec_perm (vmode, op0, op1, x, target);
27476 if (x != target)
27477 emit_move_insn (target, x);
27480 /* Expand an extract even operation. */
27482 void
27483 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
27485 enum machine_mode vmode = GET_MODE (target);
27486 unsigned i, nelt = GET_MODE_NUNITS (vmode);
27487 rtx perm[16];
27489 for (i = 0; i < nelt; i++)
27490 perm[i] = GEN_INT (i * 2);
27492 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
27495 /* Expand a vector interleave operation. */
27497 void
27498 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
27500 enum machine_mode vmode = GET_MODE (target);
27501 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
27502 rtx perm[16];
27504 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
27505 for (i = 0; i < nelt / 2; i++)
27507 perm[i * 2] = GEN_INT (i + high);
27508 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
27511 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
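/* For example, with nelt = 4 and highp on a big-endian target the
   selector built here is {0, 4, 1, 5}, interleaving the high halves
   of op0 and op1; the low-half variant starts at nelt / 2 and gives
   {2, 6, 3, 7}. */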
27514 /* Return an RTX representing where to find the function value of a
27515 function returning MODE. */
27516 static rtx
27517 rs6000_complex_function_value (enum machine_mode mode)
27519 unsigned int regno;
27520 rtx r1, r2;
27521 enum machine_mode inner = GET_MODE_INNER (mode);
27522 unsigned int inner_bytes = GET_MODE_SIZE (inner);
27524 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27525 regno = FP_ARG_RETURN;
27526 else
27528 regno = GP_ARG_RETURN;
27530 /* 32-bit is OK since it'll go in r3/r4. */
27531 if (TARGET_32BIT && inner_bytes >= 4)
27532 return gen_rtx_REG (mode, regno);
27535 if (inner_bytes >= 8)
27536 return gen_rtx_REG (mode, regno);
27538 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
27539 const0_rtx);
27540 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
27541 GEN_INT (inner_bytes));
27542 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
27545 /* Target hook for TARGET_FUNCTION_VALUE.
27547 On the SPE, both FPs and vectors are returned in r3.
27549 On RS/6000 an integer value is in r3 and a floating-point value is in
27550 fp1, unless -msoft-float. */
27552 static rtx
27553 rs6000_function_value (const_tree valtype,
27554 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
27555 bool outgoing ATTRIBUTE_UNUSED)
27557 enum machine_mode mode;
27558 unsigned int regno;
27560 /* Special handling for structs in darwin64. */
27561 if (TARGET_MACHO
27562 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
27564 CUMULATIVE_ARGS valcum;
27565 rtx valret;
27567 valcum.words = 0;
27568 valcum.fregno = FP_ARG_MIN_REG;
27569 valcum.vregno = ALTIVEC_ARG_MIN_REG;
27570 /* Do a trial code generation as if this were going to be passed as
27571 an argument; if any part goes in memory, we return NULL. */
27572 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
27573 if (valret)
27574 return valret;
27575 /* Otherwise fall through to standard ABI rules. */
27578 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
27580 /* Long long return values need to be split under -mpowerpc64 with the 32-bit ABI. */
27581 return gen_rtx_PARALLEL (DImode,
27582 gen_rtvec (2,
27583 gen_rtx_EXPR_LIST (VOIDmode,
27584 gen_rtx_REG (SImode, GP_ARG_RETURN),
27585 const0_rtx),
27586 gen_rtx_EXPR_LIST (VOIDmode,
27587 gen_rtx_REG (SImode,
27588 GP_ARG_RETURN + 1),
27589 GEN_INT (4))));
27591 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
27593 return gen_rtx_PARALLEL (DCmode,
27594 gen_rtvec (4,
27595 gen_rtx_EXPR_LIST (VOIDmode,
27596 gen_rtx_REG (SImode, GP_ARG_RETURN),
27597 const0_rtx),
27598 gen_rtx_EXPR_LIST (VOIDmode,
27599 gen_rtx_REG (SImode,
27600 GP_ARG_RETURN + 1),
27601 GEN_INT (4)),
27602 gen_rtx_EXPR_LIST (VOIDmode,
27603 gen_rtx_REG (SImode,
27604 GP_ARG_RETURN + 2),
27605 GEN_INT (8)),
27606 gen_rtx_EXPR_LIST (VOIDmode,
27607 gen_rtx_REG (SImode,
27608 GP_ARG_RETURN + 3),
27609 GEN_INT (12))));
27612 mode = TYPE_MODE (valtype);
27613 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
27614 || POINTER_TYPE_P (valtype))
27615 mode = TARGET_32BIT ? SImode : DImode;
27617 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27618 /* _Decimal128 must use an even/odd register pair. */
27619 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27620 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
27621 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
27622 regno = FP_ARG_RETURN;
27623 else if (TREE_CODE (valtype) == COMPLEX_TYPE
27624 && targetm.calls.split_complex_arg)
27625 return rs6000_complex_function_value (mode);
27626 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27627 return register is used in both cases, and we won't see V2DImode/V2DFmode
27628 for pure altivec, combine the two cases. */
27629 else if (TREE_CODE (valtype) == VECTOR_TYPE
27630 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
27631 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
27632 regno = ALTIVEC_ARG_RETURN;
27633 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27634 && (mode == DFmode || mode == DCmode
27635 || mode == TFmode || mode == TCmode))
27636 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27637 else
27638 regno = GP_ARG_RETURN;
27640 return gen_rtx_REG (mode, regno);
27643 /* Define how to find the value returned by a library function
27644 assuming the value has mode MODE. */
27645 static rtx
27646 rs6000_libcall_value (enum machine_mode mode)
27648 unsigned int regno;
27650 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
27652 /* Long long return values need to be split under -mpowerpc64 with the 32-bit ABI. */
27653 return gen_rtx_PARALLEL (DImode,
27654 gen_rtvec (2,
27655 gen_rtx_EXPR_LIST (VOIDmode,
27656 gen_rtx_REG (SImode, GP_ARG_RETURN),
27657 const0_rtx),
27658 gen_rtx_EXPR_LIST (VOIDmode,
27659 gen_rtx_REG (SImode,
27660 GP_ARG_RETURN + 1),
27661 GEN_INT (4))));
27664 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27665 /* _Decimal128 must use an even/odd register pair. */
27666 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27667 else if (SCALAR_FLOAT_MODE_P (mode)
27668 && TARGET_HARD_FLOAT && TARGET_FPRS
27669 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
27670 regno = FP_ARG_RETURN;
27671 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27672 return register is used in both cases, and we won't see V2DImode/V2DFmode
27673 for pure altivec, combine the two cases. */
27674 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
27675 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
27676 regno = ALTIVEC_ARG_RETURN;
27677 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
27678 return rs6000_complex_function_value (mode);
27679 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27680 && (mode == DFmode || mode == DCmode
27681 || mode == TFmode || mode == TCmode))
27682 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27683 else
27684 regno = GP_ARG_RETURN;
27686 return gen_rtx_REG (mode, regno);
27690 /* Given FROM and TO register numbers, say whether this elimination is allowed.
27691 Frame pointer elimination is automatically handled.
27693 For the RS/6000, if frame pointer elimination is being done, we would like
27694 to convert ap into fp, not sp.
27696 We need r30 if -mminimal-toc was specified, and there are constant pool
27697 references. */
27699 static bool
27700 rs6000_can_eliminate (const int from, const int to)
27702 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
27703 ? ! frame_pointer_needed
27704 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
27705 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
27706 : true);
27709 /* Define the offset between two registers, FROM to be eliminated and its
27710 replacement TO, at the start of a routine. */
27711 HOST_WIDE_INT
27712 rs6000_initial_elimination_offset (int from, int to)
27714 rs6000_stack_t *info = rs6000_stack_info ();
27715 HOST_WIDE_INT offset;
27717 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27718 offset = info->push_p ? 0 : -info->total_size;
27719 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27721 offset = info->push_p ? 0 : -info->total_size;
27722 if (FRAME_GROWS_DOWNWARD)
27723 offset += info->fixed_size + info->vars_size + info->parm_size;
27725 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27726 offset = FRAME_GROWS_DOWNWARD
27727 ? info->fixed_size + info->vars_size + info->parm_size
27728 : 0;
27729 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27730 offset = info->total_size;
27731 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27732 offset = info->push_p ? info->total_size : 0;
27733 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
27734 offset = 0;
27735 else
27736 gcc_unreachable ();
27738 return offset;
27741 static rtx
27742 rs6000_dwarf_register_span (rtx reg)
27744 rtx parts[8];
27745 int i, words;
27746 unsigned regno = REGNO (reg);
27747 enum machine_mode mode = GET_MODE (reg);
27749 if (TARGET_SPE
27750 && regno < 32
27751 && (SPE_VECTOR_MODE (GET_MODE (reg))
27752 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
27753 && mode != SFmode && mode != SDmode && mode != SCmode)))
27754 ;
27755 else
27756 return NULL_RTX;
27758 regno = REGNO (reg);
27760 /* The duality of the SPE register size wreaks all kinds of havoc.
27761 This is a way of distinguishing r0 in 32-bits from r0 in
27762 64-bits. */
27763 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
27764 gcc_assert (words <= 4);
27765 for (i = 0; i < words; i++, regno++)
27767 if (BYTES_BIG_ENDIAN)
27769 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
27770 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
27772 else
27774 parts[2 * i] = gen_rtx_REG (SImode, regno);
27775 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
27779 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
27782 /* Fill in sizes for SPE register high parts in the table used by the unwinder. */
27784 static void
27785 rs6000_init_dwarf_reg_sizes_extra (tree address)
27787 if (TARGET_SPE)
27789 int i;
27790 enum machine_mode mode = TYPE_MODE (char_type_node);
27791 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
27792 rtx mem = gen_rtx_MEM (BLKmode, addr);
27793 rtx value = gen_int_mode (4, mode);
27795 for (i = 1201; i < 1232; i++)
27797 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
27798 HOST_WIDE_INT offset
27799 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
27801 emit_move_insn (adjust_address (mem, mode, offset), value);
27806 /* Map internal gcc register numbers to DWARF2 register numbers. */
27808 unsigned int
27809 rs6000_dbx_register_number (unsigned int regno)
27811 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
27812 return regno;
27813 if (regno == LR_REGNO)
27814 return 108;
27815 if (regno == CTR_REGNO)
27816 return 109;
27817 if (CR_REGNO_P (regno))
27818 return regno - CR0_REGNO + 86;
27819 if (regno == CA_REGNO)
27820 return 101; /* XER */
27821 if (ALTIVEC_REGNO_P (regno))
27822 return regno - FIRST_ALTIVEC_REGNO + 1124;
27823 if (regno == VRSAVE_REGNO)
27824 return 356;
27825 if (regno == VSCR_REGNO)
27826 return 67;
27827 if (regno == SPE_ACC_REGNO)
27828 return 99;
27829 if (regno == SPEFSCR_REGNO)
27830 return 612;
27831 /* SPE high reg number. We get these values of regno from
27832 rs6000_dwarf_register_span. */
27833 gcc_assert (regno >= 1200 && regno < 1232);
27834 return regno;
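/* Sample mappings produced above (DWARF2 debug info only): GPRs and
   FPRs 0-63 map to themselves, the link register maps to 108, CR2
   maps to 86 + 2 == 88, and the first AltiVec register maps to
   1124.  */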
27837 /* target hook eh_return_filter_mode */
27838 static enum machine_mode
27839 rs6000_eh_return_filter_mode (void)
27841 return TARGET_32BIT ? SImode : word_mode;
27844 /* Target hook for scalar_mode_supported_p. */
27845 static bool
27846 rs6000_scalar_mode_supported_p (enum machine_mode mode)
27848 if (DECIMAL_FLOAT_MODE_P (mode))
27849 return default_decimal_float_supported_p ();
27850 else
27851 return default_scalar_mode_supported_p (mode);
27854 /* Target hook for vector_mode_supported_p. */
27855 static bool
27856 rs6000_vector_mode_supported_p (enum machine_mode mode)
27859 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
27860 return true;
27862 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
27863 return true;
27865 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
27866 return true;
27868 else
27869 return false;
27872 /* Target hook for invalid_arg_for_unprototyped_fn. */
27873 static const char *
27874 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
27876 return (!rs6000_darwin64_abi
27877 && typelist == 0
27878 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
27879 && (funcdecl == NULL_TREE
27880 || (TREE_CODE (funcdecl) == FUNCTION_DECL
27881 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
27882 ? N_("AltiVec argument passed to unprototyped function")
27883 : NULL;
27886 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
27887 setup by calling the hidden function __stack_chk_fail_local instead
27888 of __stack_chk_fail directly. Otherwise it is better to call
27889 __stack_chk_fail directly. */
27891 static tree ATTRIBUTE_UNUSED
27892 rs6000_stack_protect_fail (void)
27894 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
27895 ? default_hidden_stack_protect_fail ()
27896 : default_external_stack_protect_fail ();
27899 void
27900 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
27901 int num_operands ATTRIBUTE_UNUSED)
27903 if (rs6000_warn_cell_microcode)
27905 const char *temp;
27906 int insn_code_number = recog_memoized (insn);
27907 location_t location = INSN_LOCATION (insn);
27909 /* Punt on insns we cannot recognize. */
27910 if (insn_code_number < 0)
27911 return;
27913 temp = get_insn_template (insn_code_number, insn);
27915 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
27916 warning_at (location, OPT_mwarn_cell_microcode,
27917 "emitting microcode insn %s\t[%s] #%d",
27918 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27919 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
27920 warning_at (location, OPT_mwarn_cell_microcode,
27921 "emitting conditional microcode insn %s\t[%s] #%d",
27922 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27926 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
27928 #if TARGET_ELF
27929 static unsigned HOST_WIDE_INT
27930 rs6000_asan_shadow_offset (void)
27932 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
27934 #endif
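/* For reference, AddressSanitizer computes a shadow address as
   (addr >> 3) + offset, so the values above put the shadow region at
   1 << 41 for 64-bit and 1 << 29 for 32-bit code.  A rough sketch of
   the check the instrumented code performs, for accesses of up to 8
   bytes (illustrative only, not the RTL GCC actually emits):

     signed char k = *(signed char *) ((addr >> 3) + offset);
     if (k != 0 && (signed char) ((addr & 7) + size - 1) >= k)
       ...report the bad access...
*/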
27936 /* Mask options that we want to support inside of attribute((target)) and
27937 #pragma GCC target operations. Note, we do not include things like
27938 64/32-bit, endianness, hard/soft floating point, etc. that would have
27939 different calling sequences. */
27941 struct rs6000_opt_mask {
27942 const char *name; /* option name */
27943 HOST_WIDE_INT mask; /* mask to set */
27944 bool invert; /* invert sense of mask */
27945 bool valid_target; /* option is a target option */
27948 static struct rs6000_opt_mask const rs6000_opt_masks[] =
27950 { "altivec", OPTION_MASK_ALTIVEC, false, true },
27951 { "cmpb", OPTION_MASK_CMPB, false, true },
27952 { "crypto", OPTION_MASK_CRYPTO, false, true },
27953 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
27954 { "dlmzb", OPTION_MASK_DLMZB, false, true },
27955 { "fprnd", OPTION_MASK_FPRND, false, true },
27956 { "hard-dfp", OPTION_MASK_DFP, false, true },
27957 { "isel", OPTION_MASK_ISEL, false, true },
27958 { "mfcrf", OPTION_MASK_MFCRF, false, true },
27959 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
27960 { "mulhw", OPTION_MASK_MULHW, false, true },
27961 { "multiple", OPTION_MASK_MULTIPLE, false, true },
27962 { "popcntb", OPTION_MASK_POPCNTB, false, true },
27963 { "popcntd", OPTION_MASK_POPCNTD, false, true },
27964 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
27965 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
27966 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
27967 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
27968 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
27969 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
27970 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
27971 { "string", OPTION_MASK_STRING, false, true },
27972 { "update", OPTION_MASK_NO_UPDATE, true , true },
27973 { "vsx", OPTION_MASK_VSX, false, true },
27974 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
27975 #ifdef OPTION_MASK_64BIT
27976 #if TARGET_AIX_OS
27977 { "aix64", OPTION_MASK_64BIT, false, false },
27978 { "aix32", OPTION_MASK_64BIT, true, false },
27979 #else
27980 { "64", OPTION_MASK_64BIT, false, false },
27981 { "32", OPTION_MASK_64BIT, true, false },
27982 #endif
27983 #endif
27984 #ifdef OPTION_MASK_EABI
27985 { "eabi", OPTION_MASK_EABI, false, false },
27986 #endif
27987 #ifdef OPTION_MASK_LITTLE_ENDIAN
27988 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
27989 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
27990 #endif
27991 #ifdef OPTION_MASK_RELOCATABLE
27992 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
27993 #endif
27994 #ifdef OPTION_MASK_STRICT_ALIGN
27995 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
27996 #endif
27997 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
27998 { "string", OPTION_MASK_STRING, false, false },
28001 /* Builtin mask mapping for printing the flags. */
28002 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
28004 { "altivec", RS6000_BTM_ALTIVEC, false, false },
28005 { "vsx", RS6000_BTM_VSX, false, false },
28006 { "spe", RS6000_BTM_SPE, false, false },
28007 { "paired", RS6000_BTM_PAIRED, false, false },
28008 { "fre", RS6000_BTM_FRE, false, false },
28009 { "fres", RS6000_BTM_FRES, false, false },
28010 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
28011 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
28012 { "popcntd", RS6000_BTM_POPCNTD, false, false },
28013 { "cell", RS6000_BTM_CELL, false, false },
28014 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
28015 { "crypto", RS6000_BTM_CRYPTO, false, false },
28018 /* Option variables that we want to support inside attribute((target)) and
28019 #pragma GCC target operations. */
28021 struct rs6000_opt_var {
28022 const char *name; /* option name */
28023 size_t global_offset; /* offset of the option in global_options. */
28024 size_t target_offset; /* offset of the option in target options. */
28027 static struct rs6000_opt_var const rs6000_opt_vars[] =
28029 { "friz",
28030 offsetof (struct gcc_options, x_TARGET_FRIZ),
28031 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
28032 { "avoid-indexed-addresses",
28033 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
28034 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
28035 { "paired",
28036 offsetof (struct gcc_options, x_rs6000_paired_float),
28037 offsetof (struct cl_target_option, x_rs6000_paired_float), },
28038 { "longcall",
28039 offsetof (struct gcc_options, x_rs6000_default_long_calls),
28040 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
28043 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
28044 parsing. Return true if there were no errors. */
28046 static bool
28047 rs6000_inner_target_options (tree args, bool attr_p)
28049 bool ret = true;
28051 if (args == NULL_TREE)
28052 ;
28054 else if (TREE_CODE (args) == STRING_CST)
28056 char *p = ASTRDUP (TREE_STRING_POINTER (args));
28057 char *q;
28059 while ((q = strtok (p, ",")) != NULL)
28061 bool error_p = false;
28062 bool not_valid_p = false;
28063 const char *cpu_opt = NULL;
28065 p = NULL;
28066 if (strncmp (q, "cpu=", 4) == 0)
28068 int cpu_index = rs6000_cpu_name_lookup (q+4);
28069 if (cpu_index >= 0)
28070 rs6000_cpu_index = cpu_index;
28071 else
28072 {
28073 error_p = true;
28074 cpu_opt = q+4;
28075 }
28077 else if (strncmp (q, "tune=", 5) == 0)
28079 int tune_index = rs6000_cpu_name_lookup (q+5);
28080 if (tune_index >= 0)
28081 rs6000_tune_index = tune_index;
28082 else
28083 {
28084 error_p = true;
28085 cpu_opt = q+5;
28086 }
28088 else
28090 size_t i;
28091 bool invert = false;
28092 char *r = q;
28094 error_p = true;
28095 if (strncmp (r, "no-", 3) == 0)
28097 invert = true;
28098 r += 3;
28101 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
28102 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
28103 {
28104 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
28106 if (!rs6000_opt_masks[i].valid_target)
28107 not_valid_p = true;
28108 else
28109 {
28110 error_p = false;
28111 rs6000_isa_flags_explicit |= mask;
28113 /* VSX needs altivec, so -mvsx automagically sets
28114 altivec. */
28115 if (mask == OPTION_MASK_VSX && !invert)
28116 mask |= OPTION_MASK_ALTIVEC;
28118 if (rs6000_opt_masks[i].invert)
28119 invert = !invert;
28121 if (invert)
28122 rs6000_isa_flags &= ~mask;
28123 else
28124 rs6000_isa_flags |= mask;
28125 }
28126 break;
28127 }
28129 if (error_p && !not_valid_p)
28130 {
28131 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
28132 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
28133 {
28134 size_t j = rs6000_opt_vars[i].global_offset;
28135 *((int *) ((char *)&global_options + j)) = !invert;
28136 error_p = false;
28137 break;
28138 }
28139 }
28142 if (error_p)
28144 const char *eprefix, *esuffix;
28146 ret = false;
28147 if (attr_p)
28149 eprefix = "__attribute__((__target__(";
28150 esuffix = ")))";
28152 else
28154 eprefix = "#pragma GCC target ";
28155 esuffix = "";
28158 if (cpu_opt)
28159 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
28160 q, esuffix);
28161 else if (not_valid_p)
28162 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
28163 else
28164 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
28169 else if (TREE_CODE (args) == TREE_LIST)
28170 {
28171 do
28172 {
28173 tree value = TREE_VALUE (args);
28174 if (value)
28176 bool ret2 = rs6000_inner_target_options (value, attr_p);
28177 if (!ret2)
28178 ret = false;
28180 args = TREE_CHAIN (args);
28181 }
28182 while (args != NULL_TREE);
28183 }
28185 else
28186 gcc_unreachable ();
28188 return ret;
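/* Examples of strings the parser above accepts (illustrative):
   "cpu=power7" sets rs6000_cpu_index; "no-vsx" clears
   OPTION_MASK_VSX; "vsx" sets both OPTION_MASK_VSX and
   OPTION_MASK_ALTIVEC because of the implication coded above;
   "update" clears OPTION_MASK_NO_UPDATE since that table entry is
   marked invert; and "friz" goes through rs6000_opt_vars, storing
   directly into global_options.  */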
28191 /* Print out the target options as a list for -mdebug=target. */
28193 static void
28194 rs6000_debug_target_options (tree args, const char *prefix)
28196 if (args == NULL_TREE)
28197 fprintf (stderr, "%s<NULL>", prefix);
28199 else if (TREE_CODE (args) == STRING_CST)
28201 char *p = ASTRDUP (TREE_STRING_POINTER (args));
28202 char *q;
28204 while ((q = strtok (p, ",")) != NULL)
28206 p = NULL;
28207 fprintf (stderr, "%s\"%s\"", prefix, q);
28208 prefix = ", ";
28212 else if (TREE_CODE (args) == TREE_LIST)
28213 {
28214 do
28215 {
28216 tree value = TREE_VALUE (args);
28217 if (value)
28219 rs6000_debug_target_options (value, prefix);
28220 prefix = ", ";
28222 args = TREE_CHAIN (args);
28223 }
28224 while (args != NULL_TREE);
28225 }
28227 else
28228 gcc_unreachable ();
28230 return;
28234 /* Hook to validate attribute((target("..."))). */
28236 static bool
28237 rs6000_valid_attribute_p (tree fndecl,
28238 tree ARG_UNUSED (name),
28239 tree args,
28240 int flags)
28242 struct cl_target_option cur_target;
28243 bool ret;
28244 tree old_optimize = build_optimization_node ();
28245 tree new_target, new_optimize;
28246 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
28248 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
28250 if (TARGET_DEBUG_TARGET)
28252 tree tname = DECL_NAME (fndecl);
28253 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
28254 if (tname)
28255 fprintf (stderr, "function: %.*s\n",
28256 (int) IDENTIFIER_LENGTH (tname),
28257 IDENTIFIER_POINTER (tname));
28258 else
28259 fprintf (stderr, "function: unknown\n");
28261 fprintf (stderr, "args:");
28262 rs6000_debug_target_options (args, " ");
28263 fprintf (stderr, "\n");
28265 if (flags)
28266 fprintf (stderr, "flags: 0x%x\n", flags);
28268 fprintf (stderr, "--------------------\n");
28271 old_optimize = build_optimization_node ();
28272 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
28274 /* If the function changed the optimization levels as well as setting target
28275 options, start with the optimizations specified. */
28276 if (func_optimize && func_optimize != old_optimize)
28277 cl_optimization_restore (&global_options,
28278 TREE_OPTIMIZATION (func_optimize));
28280 /* The target attributes may also change some optimization flags, so update
28281 the optimization options if necessary. */
28282 cl_target_option_save (&cur_target, &global_options);
28283 rs6000_cpu_index = rs6000_tune_index = -1;
28284 ret = rs6000_inner_target_options (args, true);
28286 /* Set up any additional state. */
28287 if (ret)
28289 ret = rs6000_option_override_internal (false);
28290 new_target = build_target_option_node ();
28292 else
28293 new_target = NULL;
28295 new_optimize = build_optimization_node ();
28297 if (!new_target)
28298 ret = false;
28300 else if (fndecl)
28302 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
28304 if (old_optimize != new_optimize)
28305 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
28308 cl_target_option_restore (&global_options, &cur_target);
28310 if (old_optimize != new_optimize)
28311 cl_optimization_restore (&global_options,
28312 TREE_OPTIMIZATION (old_optimize));
28314 return ret;
28318 /* Hook to validate the current #pragma GCC target and set the state, and
28319 update the macros based on what was changed. If ARGS is NULL, then
28320 POP_TARGET is used to reset the options. */
28322 bool
28323 rs6000_pragma_target_parse (tree args, tree pop_target)
28325 tree prev_tree = build_target_option_node ();
28326 tree cur_tree;
28327 struct cl_target_option *prev_opt, *cur_opt;
28328 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
28329 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
28331 if (TARGET_DEBUG_TARGET)
28333 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
28334 fprintf (stderr, "args:");
28335 rs6000_debug_target_options (args, " ");
28336 fprintf (stderr, "\n");
28338 if (pop_target)
28340 fprintf (stderr, "pop_target:\n");
28341 debug_tree (pop_target);
28343 else
28344 fprintf (stderr, "pop_target: <NULL>\n");
28346 fprintf (stderr, "--------------------\n");
28349 if (! args)
28351 cur_tree = ((pop_target)
28352 ? pop_target
28353 : target_option_default_node);
28354 cl_target_option_restore (&global_options,
28355 TREE_TARGET_OPTION (cur_tree));
28357 else
28359 rs6000_cpu_index = rs6000_tune_index = -1;
28360 if (!rs6000_inner_target_options (args, false)
28361 || !rs6000_option_override_internal (false)
28362 || (cur_tree = build_target_option_node ()) == NULL_TREE)
28363 {
28364 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
28365 fprintf (stderr, "invalid pragma\n");
28367 return false;
28368 }
28371 target_option_current_node = cur_tree;
28373 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
28374 change the macros that are defined. */
28375 if (rs6000_target_modify_macros_ptr)
28377 prev_opt = TREE_TARGET_OPTION (prev_tree);
28378 prev_bumask = prev_opt->x_rs6000_builtin_mask;
28379 prev_flags = prev_opt->x_rs6000_isa_flags;
28381 cur_opt = TREE_TARGET_OPTION (cur_tree);
28382 cur_flags = cur_opt->x_rs6000_isa_flags;
28383 cur_bumask = cur_opt->x_rs6000_builtin_mask;
28385 diff_bumask = (prev_bumask ^ cur_bumask);
28386 diff_flags = (prev_flags ^ cur_flags);
28388 if ((diff_flags != 0) || (diff_bumask != 0))
28390 /* Delete old macros. */
28391 rs6000_target_modify_macros_ptr (false,
28392 prev_flags & diff_flags,
28393 prev_bumask & diff_bumask);
28395 /* Define new macros. */
28396 rs6000_target_modify_macros_ptr (true,
28397 cur_flags & diff_flags,
28398 cur_bumask & diff_bumask);
28402 return true;
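/* Worked example of the macro bookkeeping above (made-up masks): if
   prev_flags enables only ALTIVEC while cur_flags enables ALTIVEC and
   VSX, diff_flags is just the VSX bit; the first call then undefines
   the macros for prev_flags & diff_flags (nothing), and the second
   defines those for cur_flags & diff_flags (VSX alone), so __VSX__
   appears without __ALTIVEC__ being touched.  */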
28406 /* Remember the last fndecl passed to rs6000_set_current_function. */
28407 static GTY(()) tree rs6000_previous_fndecl;
28409 /* Establish appropriate back-end context for processing the function
28410 FNDECL. The argument might be NULL to indicate processing at top
28411 level, outside of any function scope. */
28412 static void
28413 rs6000_set_current_function (tree fndecl)
28415 tree old_tree = (rs6000_previous_fndecl
28416 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
28417 : NULL_TREE);
28419 tree new_tree = (fndecl
28420 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
28421 : NULL_TREE);
28423 if (TARGET_DEBUG_TARGET)
28425 bool print_final = false;
28426 fprintf (stderr, "\n==================== rs6000_set_current_function");
28428 if (fndecl)
28429 fprintf (stderr, ", fndecl %s (%p)",
28430 (DECL_NAME (fndecl)
28431 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
28432 : "<unknown>"), (void *)fndecl);
28434 if (rs6000_previous_fndecl)
28435 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
28437 fprintf (stderr, "\n");
28438 if (new_tree)
28440 fprintf (stderr, "\nnew fndecl target specific options:\n");
28441 debug_tree (new_tree);
28442 print_final = true;
28445 if (old_tree)
28447 fprintf (stderr, "\nold fndecl target specific options:\n");
28448 debug_tree (old_tree);
28449 print_final = true;
28452 if (print_final)
28453 fprintf (stderr, "--------------------\n");
28456 /* Only change the context if the function changes. This hook is called
28457 several times in the course of compiling a function, and we don't want to
28458 slow things down too much or call target_reinit when it isn't safe. */
28459 if (fndecl && fndecl != rs6000_previous_fndecl)
28461 rs6000_previous_fndecl = fndecl;
28462 if (old_tree == new_tree)
28463 ;
28465 else if (new_tree)
28467 cl_target_option_restore (&global_options,
28468 TREE_TARGET_OPTION (new_tree));
28469 target_reinit ();
28472 else if (old_tree)
28474 struct cl_target_option *def
28475 = TREE_TARGET_OPTION (target_option_current_node);
28477 cl_target_option_restore (&global_options, def);
28478 target_reinit ();
28484 /* Save the current options */
28486 static void
28487 rs6000_function_specific_save (struct cl_target_option *ptr)
28489 ptr->x_rs6000_isa_flags = rs6000_isa_flags;
28490 ptr->x_rs6000_isa_flags_explicit = rs6000_isa_flags_explicit;
28493 /* Restore the current options */
28495 static void
28496 rs6000_function_specific_restore (struct cl_target_option *ptr)
28498 rs6000_isa_flags = ptr->x_rs6000_isa_flags;
28499 rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
28500 (void) rs6000_option_override_internal (false);
28503 /* Print the current options */
28505 static void
28506 rs6000_function_specific_print (FILE *file, int indent,
28507 struct cl_target_option *ptr)
28509 rs6000_print_isa_options (file, indent, "Isa options set",
28510 ptr->x_rs6000_isa_flags);
28512 rs6000_print_isa_options (file, indent, "Isa options explicit",
28513 ptr->x_rs6000_isa_flags_explicit);
28516 /* Helper function to print the current isa or misc options on a line. */
28518 static void
28519 rs6000_print_options_internal (FILE *file,
28520 int indent,
28521 const char *string,
28522 HOST_WIDE_INT flags,
28523 const char *prefix,
28524 const struct rs6000_opt_mask *opts,
28525 size_t num_elements)
28527 size_t i;
28528 size_t start_column = 0;
28529 size_t cur_column;
28530 size_t max_column = 76;
28531 const char *comma = "";
28532 const char *nl = "\n";
28534 if (indent)
28535 start_column += fprintf (file, "%*s", indent, "");
28537 if (!flags)
28539 fprintf (file, DEBUG_FMT_S, string, "<none>");
28540 return;
28543 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
28545 /* Print the various mask options. */
28546 cur_column = start_column;
28547 for (i = 0; i < num_elements; i++)
28549 if ((flags & opts[i].mask) != 0)
28551 const char *no_str = opts[i].invert ? "no-" : "";
28552 size_t len = (strlen (comma)
28553 + strlen (prefix)
28554 + strlen (no_str)
28555 + strlen (opts[i].name));
28557 cur_column += len;
28558 if (cur_column > max_column)
28559 {
28560 fprintf (file, ", \\\n%*s", (int)start_column, "");
28561 cur_column = start_column + len;
28562 comma = "";
28563 nl = "\n\n";
28564 }
28566 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
28567 opts[i].name);
28568 flags &= ~ opts[i].mask;
28569 comma = ", ";
28573 fputs (nl, file);
28576 /* Helper function to print the current isa options on a line. */
28578 static void
28579 rs6000_print_isa_options (FILE *file, int indent, const char *string,
28580 HOST_WIDE_INT flags)
28582 rs6000_print_options_internal (file, indent, string, flags, "-m",
28583 &rs6000_opt_masks[0],
28584 ARRAY_SIZE (rs6000_opt_masks));
28587 static void
28588 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
28589 HOST_WIDE_INT flags)
28591 rs6000_print_options_internal (file, indent, string, flags, "",
28592 &rs6000_builtin_mask_names[0],
28593 ARRAY_SIZE (rs6000_builtin_mask_names));
28597 /* Hook to determine if one function can safely inline another. */
28599 static bool
28600 rs6000_can_inline_p (tree caller, tree callee)
28602 bool ret = false;
28603 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
28604 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
28606 /* If callee has no option attributes, then it is ok to inline. */
28607 if (!callee_tree)
28608 ret = true;
28610 /* If caller has no option attributes, but callee does then it is not ok to
28611 inline. */
28612 else if (!caller_tree)
28613 ret = false;
28615 else
28617 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
28618 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
28620 /* Callee's options should be a subset of the caller's, i.e. a vsx function
28621 can inline an altivec function but a non-vsx function can't inline a
28622 vsx function. */
28623 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
28624 == callee_opts->x_rs6000_isa_flags)
28625 ret = true;
28628 if (TARGET_DEBUG_TARGET)
28629 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
28630 (DECL_NAME (caller)
28631 ? IDENTIFIER_POINTER (DECL_NAME (caller))
28632 : "<unknown>"),
28633 (DECL_NAME (callee)
28634 ? IDENTIFIER_POINTER (DECL_NAME (callee))
28635 : "<unknown>"),
28636 (ret ? "can" : "cannot"));
28638 return ret;
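/* Concretely (illustrative masks): a caller compiled with
   OPTION_MASK_VSX | OPTION_MASK_ALTIVEC can inline a callee compiled
   with only OPTION_MASK_ALTIVEC, because ANDing the two flag sets
   reproduces the callee's flags; with the roles swapped the AND drops
   OPTION_MASK_VSX, the equality fails, and we refuse to inline.  */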
28641 /* Allocate a stack temp and fix up the address so it meets the particular
28642 memory requirements (either offsettable or REG+REG addressing). */
28644 rtx
28645 rs6000_allocate_stack_temp (enum machine_mode mode,
28646 bool offsettable_p,
28647 bool reg_reg_p)
28649 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
28650 rtx addr = XEXP (stack, 0);
28651 int strict_p = (reload_in_progress || reload_completed);
28653 if (!legitimate_indirect_address_p (addr, strict_p))
28655 if (offsettable_p
28656 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
28657 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
28659 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
28660 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
28663 return stack;
28666 /* Given a memory reference, if it does not use reg or reg+reg addressing,
28667 convert it to such a form, to deal with memory reference instructions like
28668 STFIWX that only take reg+reg addressing. */
28670 rtx
28671 rs6000_address_for_fpconvert (rtx x)
28673 int strict_p = (reload_in_progress || reload_completed);
28674 rtx addr;
28676 gcc_assert (MEM_P (x));
28677 addr = XEXP (x, 0);
28678 if (! legitimate_indirect_address_p (addr, strict_p)
28679 && ! legitimate_indexed_address_p (addr, strict_p))
28681 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
28683 rtx reg = XEXP (addr, 0);
28684 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
28685 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
28686 gcc_assert (REG_P (reg));
28687 emit_insn (gen_add3_insn (reg, reg, size_rtx));
28688 addr = reg;
28690 else if (GET_CODE (addr) == PRE_MODIFY)
28692 rtx reg = XEXP (addr, 0);
28693 rtx expr = XEXP (addr, 1);
28694 gcc_assert (REG_P (reg));
28695 gcc_assert (GET_CODE (expr) == PLUS);
28696 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
28697 addr = reg;
28700 x = replace_equiv_address (x, copy_addr_to_reg (addr));
28703 return x;
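/* Example of the PRE_INC rewrite above (illustrative operands): a
   DFmode reference through (pre_inc (reg r9)) is turned into an
   explicit add of 8 to r9 followed by a plain indirection through a
   copy of r9, which indexed-only instructions such as stfiwx can
   then accept.  */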
28706 /* Given a memory reference, if it is not in the form for altivec memory
28707 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
28708 convert to the altivec format. */
28710 rtx
28711 rs6000_address_for_altivec (rtx x)
28713 gcc_assert (MEM_P (x));
28714 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
28716 rtx addr = XEXP (x, 0);
28717 int strict_p = (reload_in_progress || reload_completed);
28719 if (!legitimate_indexed_address_p (addr, strict_p)
28720 && !legitimate_indirect_address_p (addr, strict_p))
28721 addr = copy_to_mode_reg (Pmode, addr);
28723 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
28724 x = change_address (x, GET_MODE (x), addr);
28727 return x;
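/* Illustrative result: an indexed address such as (plus r3 r4) is
   wrapped directly as (and (plus r3 r4) (const_int -16)), matching
   the way lvx/stvx ignore the low four bits of the effective
   address; a more complicated address is first forced into a
   register and then masked the same way.  */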
28730 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
28732 On the RS/6000, all integer constants are acceptable, though most won't
28733 be valid for particular insns. Only easy FP constants are acceptable. */
28735 static bool
28736 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
28738 if (TARGET_ELF && rs6000_tls_referenced_p (x))
28739 return false;
28741 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
28742 || GET_MODE (x) == VOIDmode
28743 || (TARGET_POWERPC64 && mode == DImode)
28744 || easy_fp_constant (x, mode)
28745 || easy_vector_constant (x, mode));
28749 /* A function pointer under AIX is a pointer to a data area whose first word
28750 contains the actual address of the function, whose second word contains a
28751 pointer to its TOC, and whose third word contains a value to place in the
28752 static chain register (r11). Note that if we load the static chain, our
28753 "trampoline" need not have any executable code. */
28755 void
28756 rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
28758 rtx func_addr;
28759 rtx toc_reg;
28760 rtx sc_reg;
28761 rtx stack_ptr;
28762 rtx stack_toc_offset;
28763 rtx stack_toc_mem;
28764 rtx func_toc_offset;
28765 rtx func_toc_mem;
28766 rtx func_sc_offset;
28767 rtx func_sc_mem;
28768 rtx insn;
28769 rtx (*call_func) (rtx, rtx, rtx, rtx);
28770 rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);
28772 stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28773 toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
28775 /* Load up address of the actual function. */
28776 func_desc = force_reg (Pmode, func_desc);
28777 func_addr = gen_reg_rtx (Pmode);
28778 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
28780 if (TARGET_32BIT)
28783 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
28784 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
28785 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
28786 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28788 call_func = gen_call_indirect_aix32bit;
28789 call_value_func = gen_call_value_indirect_aix32bit;
28791 else
28793 call_func = gen_call_indirect_aix32bit_nor11;
28794 call_value_func = gen_call_value_indirect_aix32bit_nor11;
28797 else
28799 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
28800 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
28801 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
28802 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28804 call_func = gen_call_indirect_aix64bit;
28805 call_value_func = gen_call_value_indirect_aix64bit;
28807 else
28809 call_func = gen_call_indirect_aix64bit_nor11;
28810 call_value_func = gen_call_value_indirect_aix64bit_nor11;
28814 /* Reserved spot to store the TOC. */
28815 stack_toc_mem = gen_frame_mem (Pmode,
28816 gen_rtx_PLUS (Pmode,
28817 stack_ptr,
28818 stack_toc_offset));
28820 gcc_assert (cfun);
28821 gcc_assert (cfun->machine);
28823 /* Can we optimize saving the TOC in the prologue or do we need to do it at
28824 every call? */
28825 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
28826 cfun->machine->save_toc_in_prologue = true;
28828 else
28830 MEM_VOLATILE_P (stack_toc_mem) = 1;
28831 emit_move_insn (stack_toc_mem, toc_reg);
28834 /* Calculate the address to load the TOC of the called function. We don't
28835 actually load this until the split after reload. */
28836 func_toc_mem = gen_rtx_MEM (Pmode,
28837 gen_rtx_PLUS (Pmode,
28838 func_desc,
28839 func_toc_offset));
28841 /* If we have a static chain, load it up. */
28842 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28844 func_sc_mem = gen_rtx_MEM (Pmode,
28845 gen_rtx_PLUS (Pmode,
28846 func_desc,
28847 func_sc_offset));
28849 sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
28850 emit_move_insn (sc_reg, func_sc_mem);
28853 /* Create the call. */
28854 if (value)
28855 insn = call_value_func (value, func_addr, flag, func_toc_mem,
28856 stack_toc_mem);
28857 else
28858 insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);
28860 emit_call_insn (insn);
28863 /* Return whether we need to always update the saved TOC pointer when we update
28864 the stack pointer. */
28866 static bool
28867 rs6000_save_toc_in_prologue_p (void)
28869 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
28872 #ifdef HAVE_GAS_HIDDEN
28873 # define USE_HIDDEN_LINKONCE 1
28874 #else
28875 # define USE_HIDDEN_LINKONCE 0
28876 #endif
28878 /* Fills in the label name that should be used for a 476 link stack thunk. */
28880 void
28881 get_ppc476_thunk_name (char name[32])
28883 gcc_assert (TARGET_LINK_STACK);
28885 if (USE_HIDDEN_LINKONCE)
28886 sprintf (name, "__ppc476.get_thunk");
28887 else
28888 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
28891 /* This function emits the simple thunk routine that is used to preserve
28892 the link stack on the 476 cpu. */
28894 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
28895 static void
28896 rs6000_code_end (void)
28898 char name[32];
28899 tree decl;
28901 if (!TARGET_LINK_STACK)
28902 return;
28904 get_ppc476_thunk_name (name);
28906 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
28907 build_function_type_list (void_type_node, NULL_TREE));
28908 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
28909 NULL_TREE, void_type_node);
28910 TREE_PUBLIC (decl) = 1;
28911 TREE_STATIC (decl) = 1;
28913 #if RS6000_WEAK
28914 if (USE_HIDDEN_LINKONCE)
28916 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
28917 targetm.asm_out.unique_section (decl, 0);
28918 switch_to_section (get_named_section (decl, NULL, 0));
28919 DECL_WEAK (decl) = 1;
28920 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
28921 targetm.asm_out.globalize_label (asm_out_file, name);
28922 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
28923 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
28925 else
28926 #endif
28928 switch_to_section (text_section);
28929 ASM_OUTPUT_LABEL (asm_out_file, name);
28932 DECL_INITIAL (decl) = make_node (BLOCK);
28933 current_function_decl = decl;
28934 init_function_start (decl);
28935 first_function_block_is_cold = false;
28936 /* Make sure unwind info is emitted for the thunk if needed. */
28937 final_start_function (emit_barrier (), asm_out_file, 1);
28939 fputs ("\tblr\n", asm_out_file);
28941 final_end_function ();
28942 init_insn_lengths ();
28943 free_after_compilation (cfun);
28944 set_cfun (NULL);
28945 current_function_decl = NULL;
28948 /* Add r30 to hard reg set if the prologue sets it up and it is not
28949 pic_offset_table_rtx. */
28951 static void
28952 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
28954 if (!TARGET_SINGLE_PIC_BASE
28955 && TARGET_TOC
28956 && TARGET_MINIMAL_TOC
28957 && get_pool_size () != 0)
28958 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
28961 struct gcc_target targetm = TARGET_INITIALIZER;
28963 #include "gt-rs6000.h"