/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
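
/* rs6000_stack_info (declared below) fills this structure in.  As a rough,
   illustrative relationship only: total_size is approximately vars_size
   + parm_size + save_size + fixed_size, rounded up to the ABI stack
   alignment, and the *_offset fields are all measured from the incoming
   stack pointer.  */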
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
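
/* One of these is allocated per function by rs6000_init_machine_status
   (forward-declared below).  A minimal sketch of that allocator, assuming
   the usual GTY-generated cleared-allocation helper name:

     static struct machine_function *
     rs6000_init_machine_status (void)
     {
       return ggc_alloc_cleared_machine_function ();
     }
*/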
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);
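
/* For example, a generated emitter such as gen_adddf3 (dest, op1, op2)
   matches this signature; callers can stash any two-operand generator in a
   gen_2arg_fn_t and invoke it without caring which pattern it expands.  */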
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
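
/* All of the entries in the tables below are scaled by COSTS_N_INSNS from
   rtl.h, which is defined as ((N) * 4); that is, costs are expressed in
   quarter-instruction units relative to a single add, COSTS_N_INSNS (1).  */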
/* Processor costs (relative to an add) */

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,
  0,
  0,
  0,
};
/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,
  0,
  0,
  0,
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
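
/* The code above is the classic "X macro" technique: rs6000-builtin.def
   invokes RS6000_BUILTIN_<kind> once per builtin, and each inclusion site
   redefines those macros to extract a different slice of the same data.
   A self-contained sketch of the idea, with hypothetical names:

     #define MY_BUILTINS  X(FOO, "foo")  X(BAR, "bar")

     #define X(e, n) e,
     enum my_code { MY_BUILTINS MY_MAX };           // FOO, BAR, MY_MAX
     #undef X

     #define X(e, n) n,
     static const char *my_names[] = { MY_BUILTINS };  // "foo", "bar"
     #undef X
*/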
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
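
/* Both tables use the libiberty htab_t interface; lookups go through
   htab_find_slot with INSERT to get-or-create an entry.  An illustrative
   fragment only (the hash and equality callbacks for these tables are
   defined later in this file):

     void **slot = htab_find_slot (toc_hash_table, &tmp, INSERT);
     if (*slot == NULL)
       *slot = new_entry;
*/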
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
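
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001, matching the
   VRSAVE convention of %v0 in the most significant bit.  */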
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_7400_MASK = MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs,
   we don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user
   wants either, the user must explicitly specify them and we won't interfere
   with the user's specification.  */

enum {
  POWERPC_MASKS = (MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};
/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't
     add ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04,
     fsel, fre, fsqrt, etc. were no longer documented as optional.  Group
     masks by server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
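
/* For instance, rs6000_cpu_name_lookup ("power7") returns the index of the
   "power7" entry generated from rs6000-cpus.def, while a NULL pointer or an
   unknown name yields -1.  */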
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
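
/* Worked example: DFmode is 8 bytes, so with 32-bit GPRs (reg_size == 4) the
   rounded-up division above yields (8 + 4 - 1) / 4 == 2 consecutive
   registers, while a 64-bit FPR (reg_size == 8) holds it in one.  */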
1579 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1580 MODE. */
1581 static int
1582 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1584 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1586 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1587 implementations. Don't allow an item to be split between a FP register
1588 and an Altivec register. */
1589 if (VECTOR_MEM_VSX_P (mode))
1591 if (FP_REGNO_P (regno))
1592 return FP_REGNO_P (last_regno);
1594 if (ALTIVEC_REGNO_P (regno))
1595 return ALTIVEC_REGNO_P (last_regno);
1598 /* The GPRs can hold any mode, but values bigger than one register
1599 cannot go past R31. */
1600 if (INT_REGNO_P (regno))
1601 return INT_REGNO_P (last_regno);
1603 /* The float registers (except for VSX vector modes) can only hold floating
1604 modes and DImode. This excludes the 32-bit decimal float mode for
1605 now. */
1606 if (FP_REGNO_P (regno))
1608 if (SCALAR_FLOAT_MODE_P (mode)
1609 && (mode != TDmode || (regno % 2) == 0)
1610 && FP_REGNO_P (last_regno))
1611 return 1;
1613 if (GET_MODE_CLASS (mode) == MODE_INT
1614 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1615 return 1;
1617 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1618 && PAIRED_VECTOR_MODE (mode))
1619 return 1;
1621 return 0;
1624 /* The CR register can only hold CC modes. */
1625 if (CR_REGNO_P (regno))
1626 return GET_MODE_CLASS (mode) == MODE_CC;
1628 if (CA_REGNO_P (regno))
1629 return mode == BImode;
1631   /* AltiVec modes can go only in AltiVec registers.  */
1632 if (ALTIVEC_REGNO_P (regno))
1633 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1635 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1636 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1637 return 1;
1639   /* We cannot put TImode anywhere except the general registers, and it must
1640      fit within the register set.  In the future, allow TImode in the
1641 Altivec or VSX registers. */
1643 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1646 /* Print interesting facts about registers. */
1647 static void
1648 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1650 int r, m;
1652 for (r = first_regno; r <= last_regno; ++r)
1654 const char *comma = "";
1655 int len;
1657 if (first_regno == last_regno)
1658 fprintf (stderr, "%s:\t", reg_name);
1659 else
1660 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1662 len = 8;
1663 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1664 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1666 if (len > 70)
1668 fprintf (stderr, ",\n\t");
1669 len = 8;
1670 comma = "";
1673 if (rs6000_hard_regno_nregs[m][r] > 1)
1674 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1675 rs6000_hard_regno_nregs[m][r]);
1676 else
1677 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1679 comma = ", ";
1682 if (call_used_regs[r])
1684 if (len > 70)
1686 fprintf (stderr, ",\n\t");
1687 len = 8;
1688 comma = "";
1691 len += fprintf (stderr, "%s%s", comma, "call-used");
1692 comma = ", ";
1695 if (fixed_regs[r])
1697 if (len > 70)
1699 fprintf (stderr, ",\n\t");
1700 len = 8;
1701 comma = "";
1704 len += fprintf (stderr, "%s%s", comma, "fixed");
1705 comma = ", ";
1708 if (len > 70)
1710 fprintf (stderr, ",\n\t");
1711 comma = "";
1714 fprintf (stderr, "%sregno = %d\n", comma, r);
1718 #define DEBUG_FMT_D "%-32s= %d\n"
1719 #define DEBUG_FMT_X "%-32s= 0x%x\n"
1720 #define DEBUG_FMT_S "%-32s= %s\n"
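/* For example,

     fprintf (stderr, DEBUG_FMT_S, "abi", "aix");

   prints "abi" left-justified in a 32-column field followed by "= aix", so
   the values of successive -mdebug=reg lines line up.  */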
1722 /* Print various interesting information with -mdebug=reg. */
1723 static void
1724 rs6000_debug_reg_global (void)
1726 static const char *const tf[2] = { "false", "true" };
1727 const char *nl = (const char *)0;
1728 int m;
1729 char costly_num[20];
1730 char nop_num[20];
1731 const char *costly_str;
1732 const char *nop_str;
1733 const char *trace_str;
1734 const char *abi_str;
1735 const char *cmodel_str;
1737 /* Map enum rs6000_vector to string. */
1738 static const char *rs6000_debug_vector_unit[] = {
1739 "none",
1740 "altivec",
1741 "vsx",
1742 "paired",
1743 "spe",
1744 "other"
1747 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1748 LAST_VIRTUAL_REGISTER);
1749 rs6000_debug_reg_print (0, 31, "gr");
1750 rs6000_debug_reg_print (32, 63, "fp");
1751 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1752 LAST_ALTIVEC_REGNO,
1753 "vs");
1754 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1755 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1756 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1757 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1758 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1759 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1760 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1761 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1763 fprintf (stderr,
1764 "\n"
1765 "d reg_class = %s\n"
1766 "f reg_class = %s\n"
1767 "v reg_class = %s\n"
1768 "wa reg_class = %s\n"
1769 "wd reg_class = %s\n"
1770 "wf reg_class = %s\n"
1771 "ws reg_class = %s\n\n",
1772 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1773 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1774 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1775 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1776 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1777 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1778 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1780 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1781 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1783 nl = "\n";
1784 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1785 GET_MODE_NAME (m),
1786 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1787 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1790 if (nl)
1791 fputs (nl, stderr);
1793 if (rs6000_recip_control)
1795 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1797 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1798 if (rs6000_recip_bits[m])
1800 fprintf (stderr,
1801 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1802 GET_MODE_NAME (m),
1803 (RS6000_RECIP_AUTO_RE_P (m)
1804 ? "auto"
1805 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1806 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1807 ? "auto"
1808 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1811 fputs ("\n", stderr);
1814 if (rs6000_cpu_index >= 0)
1815 fprintf (stderr, DEBUG_FMT_S, "cpu",
1816 processor_target_table[rs6000_cpu_index].name);
1818 if (rs6000_tune_index >= 0)
1819 fprintf (stderr, DEBUG_FMT_S, "tune",
1820 processor_target_table[rs6000_tune_index].name);
1822 switch (rs6000_sched_costly_dep)
1824 case max_dep_latency:
1825 costly_str = "max_dep_latency";
1826 break;
1828 case no_dep_costly:
1829 costly_str = "no_dep_costly";
1830 break;
1832 case all_deps_costly:
1833 costly_str = "all_deps_costly";
1834 break;
1836 case true_store_to_load_dep_costly:
1837 costly_str = "true_store_to_load_dep_costly";
1838 break;
1840 case store_to_load_dep_costly:
1841 costly_str = "store_to_load_dep_costly";
1842 break;
1844 default:
1845 costly_str = costly_num;
1846 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1847 break;
1850 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
1852 switch (rs6000_sched_insert_nops)
1854 case sched_finish_regroup_exact:
1855 nop_str = "sched_finish_regroup_exact";
1856 break;
1858 case sched_finish_pad_groups:
1859 nop_str = "sched_finish_pad_groups";
1860 break;
1862 case sched_finish_none:
1863 nop_str = "sched_finish_none";
1864 break;
1866 default:
1867 nop_str = nop_num;
1868 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1869 break;
1872 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
1874 switch (rs6000_sdata)
1876 default:
1877 case SDATA_NONE:
1878 break;
1880 case SDATA_DATA:
1881 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
1882 break;
1884 case SDATA_SYSV:
1885 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
1886 break;
1888 case SDATA_EABI:
1889 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
1890 break;
1894 switch (rs6000_traceback)
1896 case traceback_default: trace_str = "default"; break;
1897 case traceback_none: trace_str = "none"; break;
1898 case traceback_part: trace_str = "part"; break;
1899 case traceback_full: trace_str = "full"; break;
1900 default: trace_str = "unknown"; break;
1903 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
1905 switch (rs6000_current_cmodel)
1907 case CMODEL_SMALL: cmodel_str = "small"; break;
1908 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
1909 case CMODEL_LARGE: cmodel_str = "large"; break;
1910 default: cmodel_str = "unknown"; break;
1913 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
1915 switch (rs6000_current_abi)
1917 case ABI_NONE: abi_str = "none"; break;
1918 case ABI_AIX: abi_str = "aix"; break;
1919 case ABI_V4: abi_str = "V4"; break;
1920 case ABI_DARWIN: abi_str = "darwin"; break;
1921 default: abi_str = "unknown"; break;
1924 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
1926 if (rs6000_altivec_abi)
1927 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
1929 if (rs6000_spe_abi)
1930 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
1932 if (rs6000_darwin64_abi)
1933 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
1935 if (rs6000_float_gprs)
1936 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
1938 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
1939 fprintf (stderr, DEBUG_FMT_S, "align_branch",
1940 tf[!!rs6000_align_branch_targets]);
1941 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
1942 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
1943 rs6000_long_double_type_size);
1944 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
1945 (int)rs6000_sched_restricted_insns_priority);
1946 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
1947 (int)END_BUILTINS);
1948 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
1949 (int)RS6000_BUILTIN_COUNT);
1950 fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
1953 /* Initialize the various global tables that are based on register size. */
1954 static void
1955 rs6000_init_hard_regno_mode_ok (bool global_init_p)
1957 int r, m, c;
1958 int align64;
1959 int align32;
1961 /* Precalculate REGNO_REG_CLASS. */
1962 rs6000_regno_regclass[0] = GENERAL_REGS;
1963 for (r = 1; r < 32; ++r)
1964 rs6000_regno_regclass[r] = BASE_REGS;
1966 for (r = 32; r < 64; ++r)
1967 rs6000_regno_regclass[r] = FLOAT_REGS;
1969 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1970 rs6000_regno_regclass[r] = NO_REGS;
1972 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1973 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1975 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1976 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1977 rs6000_regno_regclass[r] = CR_REGS;
1979 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1980 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1981 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1982 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1983 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1984 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1985 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1986 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1987 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1989   /* Precalculate vector information; this must be set up before the
1990 rs6000_hard_regno_nregs_internal below. */
1991 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1993 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1994 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1995 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
1998   for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
1999 rs6000_constraints[c] = NO_REGS;
2001   /* The VSX hardware allows native alignment for vectors; control here
2002      whether the compiler believes it can use that or must keep 128-bit alignment.  */
2003 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2005 align64 = 64;
2006 align32 = 32;
2008 else
2010 align64 = 128;
2011 align32 = 128;
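  /* With these defaults, a VSX target uses element alignment (64 bits for
     V2DF below, 32 bits for V4SF and the integer vector modes), while a
     target with TARGET_VSX_ALIGN_128 set, or without VSX, keeps the
     traditional 128-bit vector alignment.  */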
2014 /* V2DF mode, VSX only. */
2015 if (TARGET_VSX)
2017 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2018 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2019 rs6000_vector_align[V2DFmode] = align64;
2022 /* V4SF mode, either VSX or Altivec. */
2023 if (TARGET_VSX)
2025 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2026 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2027 rs6000_vector_align[V4SFmode] = align32;
2029 else if (TARGET_ALTIVEC)
2031 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2032 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2033 rs6000_vector_align[V4SFmode] = align32;
2036 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2037 and stores. */
2038 if (TARGET_ALTIVEC)
2040 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2041 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2042 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2043 rs6000_vector_align[V4SImode] = align32;
2044 rs6000_vector_align[V8HImode] = align32;
2045 rs6000_vector_align[V16QImode] = align32;
2047 if (TARGET_VSX)
2049 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2050 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2051 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2053 else
2055 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2056 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2057 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2061 /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
2062 Altivec doesn't have 64-bit support. */
2063 if (TARGET_VSX)
2065 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2066 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
2067 rs6000_vector_align[V2DImode] = align64;
2070 /* DFmode, see if we want to use the VSX unit. */
2071 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2073 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2074 rs6000_vector_mem[DFmode]
2075 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2076 rs6000_vector_align[DFmode] = align64;
2079 /* TODO add SPE and paired floating point vector support. */
2081   /* Set up the register class constraints that depend on compile-time
2082      switches.  */
2083 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2084 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2086 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2087 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2089 if (TARGET_VSX)
2091 /* At present, we just use VSX_REGS, but we have different constraints
2092 based on the use, in case we want to fine tune the default register
2093 class used. wa = any VSX register, wf = register class to use for
2094	 V4SF, wd = register class to use for V2DF, and ws = register class to
2095 use for DF scalars. */
2096 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2097 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2098 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2099 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2100 ? VSX_REGS
2101 : FLOAT_REGS);
2104 if (TARGET_ALTIVEC)
2105 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2107 /* Set up the reload helper functions. */
2108 if (TARGET_VSX || TARGET_ALTIVEC)
2110 if (TARGET_64BIT)
2112 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2113 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2114 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2115 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2116 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2117 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2118 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2119 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2120 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2121 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2122 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2123 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2124 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2126 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2127 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2130 else
2132 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2133 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2134 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2135 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2136 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2137 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2138 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2139 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2140 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2141 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2142 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2143 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2144 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2146 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2147 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2152 /* Precalculate HARD_REGNO_NREGS. */
2153 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2154 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2155 rs6000_hard_regno_nregs[m][r]
2156 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2158 /* Precalculate HARD_REGNO_MODE_OK. */
2159 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2160 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2161 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2162 rs6000_hard_regno_mode_ok_p[m][r] = true;
2164 /* Precalculate CLASS_MAX_NREGS sizes. */
2165 for (c = 0; c < LIM_REG_CLASSES; ++c)
2167 int reg_size;
2169 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2170 reg_size = UNITS_PER_VSX_WORD;
2172 else if (c == ALTIVEC_REGS)
2173 reg_size = UNITS_PER_ALTIVEC_WORD;
2175 else if (c == FLOAT_REGS)
2176 reg_size = UNITS_PER_FP_WORD;
2178 else
2179 reg_size = UNITS_PER_WORD;
2181 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2182 rs6000_class_max_nregs[m][c]
2183 = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
2186 if (TARGET_E500_DOUBLE)
2187 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2189   /* Calculate the modes for which to automatically generate code that uses
2190      the reciprocal divide and square root instructions.  In the future,
2191      possibly generate them automatically even if the user did not specify
2192      -mrecip.  The double precision reciprocal sqrt estimate on older
2193      machines is not accurate enough.  */
2194 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2195 if (TARGET_FRES)
2196 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2197 if (TARGET_FRE)
2198 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2199 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2200 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2201 if (VECTOR_UNIT_VSX_P (V2DFmode))
2202 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2204 if (TARGET_FRSQRTES)
2205 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2206 if (TARGET_FRSQRTE)
2207 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2208 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2209 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2210 if (VECTOR_UNIT_VSX_P (V2DFmode))
2211 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2213 if (rs6000_recip_control)
2215 if (!flag_finite_math_only)
2216 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2217 if (flag_trapping_math)
2218 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2219 if (!flag_reciprocal_math)
2220 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2221 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2223 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2224 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2225 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2227 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2228 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2229 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2231 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2232 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2233 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2235 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2236 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2237 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2239 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2240 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2241 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2243 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2244 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2245 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2247 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2248 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2249 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2251 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2252 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2253 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2257 if (global_init_p || TARGET_DEBUG_TARGET)
2259 if (TARGET_DEBUG_REG)
2260 rs6000_debug_reg_global ();
2262 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2263 fprintf (stderr,
2264 "SImode variable mult cost = %d\n"
2265 "SImode constant mult cost = %d\n"
2266 "SImode short constant mult cost = %d\n"
2267 "DImode multipliciation cost = %d\n"
2268 "SImode division cost = %d\n"
2269 "DImode division cost = %d\n"
2270 "Simple fp operation cost = %d\n"
2271 "DFmode multiplication cost = %d\n"
2272 "SFmode division cost = %d\n"
2273 "DFmode division cost = %d\n"
2274 "cache line size = %d\n"
2275 "l1 cache size = %d\n"
2276 "l2 cache size = %d\n"
2277 "simultaneous prefetches = %d\n"
2278 "\n",
2279 rs6000_cost->mulsi,
2280 rs6000_cost->mulsi_const,
2281 rs6000_cost->mulsi_const9,
2282 rs6000_cost->muldi,
2283 rs6000_cost->divsi,
2284 rs6000_cost->divdi,
2285 rs6000_cost->fp,
2286 rs6000_cost->dmul,
2287 rs6000_cost->sdiv,
2288 rs6000_cost->ddiv,
2289 rs6000_cost->cache_line_size,
2290 rs6000_cost->l1_cache_size,
2291 rs6000_cost->l2_cache_size,
2292 rs6000_cost->simultaneous_prefetches);
2296 #if TARGET_MACHO
2297 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2299 static void
2300 darwin_rs6000_override_options (void)
2302 /* The Darwin ABI always includes AltiVec, can't be (validly) turned
2303 off. */
2304 rs6000_altivec_abi = 1;
2305 TARGET_ALTIVEC_VRSAVE = 1;
2306 rs6000_current_abi = ABI_DARWIN;
2308 if (DEFAULT_ABI == ABI_DARWIN
2309 && TARGET_64BIT)
2310 darwin_one_byte_bool = 1;
2312 if (TARGET_64BIT && ! TARGET_POWERPC64)
2314 target_flags |= MASK_POWERPC64;
2315 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2317 if (flag_mkernel)
2319 rs6000_default_long_calls = 1;
2320 target_flags |= MASK_SOFT_FLOAT;
2323 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2324 Altivec. */
2325 if (!flag_mkernel && !flag_apple_kext
2326 && TARGET_64BIT
2327 && ! (target_flags_explicit & MASK_ALTIVEC))
2328 target_flags |= MASK_ALTIVEC;
2330 /* Unless the user (not the configurer) has explicitly overridden
2331 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
2332 G4 unless targeting the kernel. */
2333 if (!flag_mkernel
2334 && !flag_apple_kext
2335 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2336 && ! (target_flags_explicit & MASK_ALTIVEC)
2337 && ! global_options_set.x_rs6000_cpu_index)
2339 target_flags |= MASK_ALTIVEC;
2342 #endif
2344 /* If not otherwise specified by a target, make 'long double' equivalent to
2345 'double'. */
2347 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2348 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2349 #endif
2351  /* Return the builtin mask for the various options that could affect which
2352     builtins are enabled.  In the past we used target_flags, but we've run out of
2353 bits, and some options like SPE and PAIRED are no longer in
2354 target_flags. */
2356 unsigned
2357 rs6000_builtin_mask_calculate (void)
2359 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2360 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2361 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2362 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2363 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2364 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2365 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2366 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2367 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2368 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
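/* For instance, on a power7-style target where Altivec, VSX, POPCNTD and
   all four reciprocal-estimate instructions are enabled, the result is the
   bitwise OR of the corresponding RS6000_BTM_* bits.  */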
2371 /* Override command line options. Mostly we process the processor type and
2372 sometimes adjust other TARGET_ options. */
2374 static bool
2375 rs6000_option_override_internal (bool global_init_p)
2377 bool ret = true;
2378 bool have_cpu = false;
2380 /* The default cpu requested at configure time, if any. */
2381 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2383 int set_masks;
2384 int cpu_index;
2385 int tune_index;
2386 struct cl_target_option *main_target_opt
2387 = ((global_init_p || target_option_default_node == NULL)
2388 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2390 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2391 library functions, so warn about it. The flag may be useful for
2392 performance studies from time to time though, so don't disable it
2393 entirely. */
2394 if (global_options_set.x_rs6000_alignment_flags
2395 && rs6000_alignment_flags == MASK_ALIGN_POWER
2396 && DEFAULT_ABI == ABI_DARWIN
2397 && TARGET_64BIT)
2398 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2399 " it is incompatible with the installed C and C++ libraries");
2401   /* Numerous experiments show that IRA-based loop pressure calculation
2402      works better for RTL loop invariant motion on targets with enough
2403      (>= 32) registers.  It is an expensive optimization, so it is enabled
2404      only when optimizing for peak performance.  */
2405 if (optimize >= 3 && global_init_p)
2406 flag_ira_loop_pressure = 1;
2408 /* Set the pointer size. */
2409 if (TARGET_64BIT)
2411 rs6000_pmode = (int)DImode;
2412 rs6000_pointer_size = 64;
2414 else
2416 rs6000_pmode = (int)SImode;
2417 rs6000_pointer_size = 32;
2420 set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
2421 #ifdef OS_MISSING_POWERPC64
2422 if (OS_MISSING_POWERPC64)
2423 set_masks &= ~MASK_POWERPC64;
2424 #endif
2425 #ifdef OS_MISSING_ALTIVEC
2426 if (OS_MISSING_ALTIVEC)
2427 set_masks &= ~MASK_ALTIVEC;
2428 #endif
2430 /* Don't override by the processor default if given explicitly. */
2431 set_masks &= ~target_flags_explicit;
2433   /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments.  If the user changed
2434 the cpu in a target attribute or pragma, but did not specify a tuning
2435 option, use the cpu for the tuning option rather than the option specified
2436 with -mtune on the command line. Process a '--with-cpu' configuration
2437 request as an implicit --cpu. */
2438 if (rs6000_cpu_index >= 0)
2440 cpu_index = rs6000_cpu_index;
2441 have_cpu = true;
2443 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2445 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2446 have_cpu = true;
2448 else
2450 const char *default_cpu =
2451 (implicit_cpu ? implicit_cpu
2452 : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));
2454 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2455 have_cpu = implicit_cpu != 0;
2458 gcc_assert (cpu_index >= 0);
2460 target_flags &= ~set_masks;
2461 target_flags |= (processor_target_table[cpu_index].target_enable
2462 & set_masks);
2464 if (rs6000_tune_index >= 0)
2465 tune_index = rs6000_tune_index;
2466 else if (have_cpu)
2467 rs6000_tune_index = tune_index = cpu_index;
2468 else
2470 size_t i;
2471 enum processor_type tune_proc
2472 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2474 tune_index = -1;
2475 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2476 if (processor_target_table[i].processor == tune_proc)
2478 rs6000_tune_index = tune_index = i;
2479 break;
2483 gcc_assert (tune_index >= 0);
2484 rs6000_cpu = processor_target_table[tune_index].processor;
2486 /* Pick defaults for SPE related control flags. Do this early to make sure
2487 that the TARGET_ macros are representative ASAP. */
2489 int spe_capable_cpu =
2490 (rs6000_cpu == PROCESSOR_PPC8540
2491 || rs6000_cpu == PROCESSOR_PPC8548);
2493 if (!global_options_set.x_rs6000_spe_abi)
2494 rs6000_spe_abi = spe_capable_cpu;
2496 if (!global_options_set.x_rs6000_spe)
2497 rs6000_spe = spe_capable_cpu;
2499 if (!global_options_set.x_rs6000_float_gprs)
2500 rs6000_float_gprs =
2501 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2502 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2503 : 0);
2506 if (global_options_set.x_rs6000_spe_abi
2507 && rs6000_spe_abi
2508 && !TARGET_SPE_ABI)
2509 error ("not configured for SPE ABI");
2511 if (global_options_set.x_rs6000_spe
2512 && rs6000_spe
2513 && !TARGET_SPE)
2514 error ("not configured for SPE instruction set");
2516 if (main_target_opt != NULL
2517 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2518 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2519 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2520 error ("target attribute or pragma changes SPE ABI");
2522 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2523 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2524 || rs6000_cpu == PROCESSOR_PPCE5500)
2526 if (TARGET_ALTIVEC)
2527 error ("AltiVec not supported in this target");
2528 if (TARGET_SPE)
2529 error ("SPE not supported in this target");
2531 if (rs6000_cpu == PROCESSOR_PPCE6500)
2533 if (TARGET_SPE)
2534 error ("SPE not supported in this target");
2537 /* Disable Cell microcode if we are optimizing for the Cell
2538 and not optimizing for size. */
2539 if (rs6000_gen_cell_microcode == -1)
2540 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2541 && !optimize_size);
2543 /* If we are optimizing big endian systems for space and it's OK to
2544 use instructions that would be microcoded on the Cell, use the
2545 load/store multiple and string instructions. */
2546 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2547 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
2549 /* Don't allow -mmultiple or -mstring on little endian systems
2550 unless the cpu is a 750, because the hardware doesn't support the
2551 instructions used in little endian mode, and causes an alignment
2552 trap. The 750 does not cause an alignment trap (except when the
2553 target is unaligned). */
2555 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2557 if (TARGET_MULTIPLE)
2559 target_flags &= ~MASK_MULTIPLE;
2560 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
2561 warning (0, "-mmultiple is not supported on little endian systems");
2564 if (TARGET_STRING)
2566 target_flags &= ~MASK_STRING;
2567 if ((target_flags_explicit & MASK_STRING) != 0)
2568 warning (0, "-mstring is not supported on little endian systems");
2572 /* Add some warnings for VSX. */
2573 if (TARGET_VSX)
2575 const char *msg = NULL;
2576 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2577 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2579 if (target_flags_explicit & MASK_VSX)
2580 msg = N_("-mvsx requires hardware floating point");
2581 else
2582 target_flags &= ~ MASK_VSX;
2584 else if (TARGET_PAIRED_FLOAT)
2585 msg = N_("-mvsx and -mpaired are incompatible");
2586	 /* The hardware will allow VSX and little endian, but until we make sure
2587	    things like vector select, etc. work, don't allow VSX on little endian
2588	    systems at this point.  */
2589 else if (!BYTES_BIG_ENDIAN)
2590 msg = N_("-mvsx used with little endian code");
2591 else if (TARGET_AVOID_XFORM > 0)
2592 msg = N_("-mvsx needs indexed addressing");
2593 else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
2595 if (target_flags_explicit & MASK_VSX)
2596 msg = N_("-mvsx and -mno-altivec are incompatible");
2597 else
2598 msg = N_("-mno-altivec disables vsx");
2601 if (msg)
2603 warning (0, msg);
2604 target_flags &= ~ MASK_VSX;
2605 target_flags_explicit |= MASK_VSX;
2609 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2610      unless the user explicitly used -mno-<option> to disable the code.  */
2611 if (TARGET_VSX)
2612 target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
2613 else if (TARGET_POPCNTD)
2614 target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
2615 else if (TARGET_DFP)
2616 target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
2617 else if (TARGET_CMPB)
2618 target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
2619 else if (TARGET_FPRND)
2620 target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
2621 else if (TARGET_POPCNTB)
2622 target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
2623 else if (TARGET_ALTIVEC)
2624 target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);
2626 /* E500mc does "better" if we inline more aggressively. Respect the
2627 user's opinion, though. */
2628 if (rs6000_block_move_inline_limit == 0
2629 && (rs6000_cpu == PROCESSOR_PPCE500MC
2630 || rs6000_cpu == PROCESSOR_PPCE500MC64
2631 || rs6000_cpu == PROCESSOR_PPCE5500
2632 || rs6000_cpu == PROCESSOR_PPCE6500))
2633 rs6000_block_move_inline_limit = 128;
2635 /* store_one_arg depends on expand_block_move to handle at least the
2636 size of reg_parm_stack_space. */
2637 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2638 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
2640 if (global_init_p)
2642 /* If the appropriate debug option is enabled, replace the target hooks
2643 with debug versions that call the real version and then prints
2644 debugging information. */
2645 if (TARGET_DEBUG_COST)
2647 targetm.rtx_costs = rs6000_debug_rtx_costs;
2648 targetm.address_cost = rs6000_debug_address_cost;
2649 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2652 if (TARGET_DEBUG_ADDR)
2654 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2655 targetm.legitimize_address = rs6000_debug_legitimize_address;
2656 rs6000_secondary_reload_class_ptr
2657 = rs6000_debug_secondary_reload_class;
2658 rs6000_secondary_memory_needed_ptr
2659 = rs6000_debug_secondary_memory_needed;
2660 rs6000_cannot_change_mode_class_ptr
2661 = rs6000_debug_cannot_change_mode_class;
2662 rs6000_preferred_reload_class_ptr
2663 = rs6000_debug_preferred_reload_class;
2664 rs6000_legitimize_reload_address_ptr
2665 = rs6000_debug_legitimize_reload_address;
2666 rs6000_mode_dependent_address_ptr
2667 = rs6000_debug_mode_dependent_address;
2670 if (rs6000_veclibabi_name)
2672 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2673 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2674 else
2676 error ("unknown vectorization library ABI type (%s) for "
2677 "-mveclibabi= switch", rs6000_veclibabi_name);
2678 ret = false;
2683 if (!global_options_set.x_rs6000_long_double_type_size)
2685 if (main_target_opt != NULL
2686 && (main_target_opt->x_rs6000_long_double_type_size
2687 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2688 error ("target attribute or pragma changes long double size");
2689 else
2690 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2693 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2694 if (!global_options_set.x_rs6000_ieeequad)
2695 rs6000_ieeequad = 1;
2696 #endif
2698 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2699 target attribute or pragma which automatically enables both options,
2700 unless the altivec ABI was set. This is set by default for 64-bit, but
2701 not for 32-bit. */
2702 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2703 target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);
2705 /* Enable Altivec ABI for AIX -maltivec. */
2706 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2708 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2709 error ("target attribute or pragma changes AltiVec ABI");
2710 else
2711 rs6000_altivec_abi = 1;
2714 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2715 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2716 be explicitly overridden in either case. */
2717 if (TARGET_ELF)
2719 if (!global_options_set.x_rs6000_altivec_abi
2720 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2722 if (main_target_opt != NULL &&
2723 !main_target_opt->x_rs6000_altivec_abi)
2724 error ("target attribute or pragma changes AltiVec ABI");
2725 else
2726 rs6000_altivec_abi = 1;
2729 /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
2730 if (!global_options_set.x_TARGET_ALTIVEC_VRSAVE)
2731 TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
2734 /* Set the Darwin64 ABI as default for 64-bit Darwin.
2735 So far, the only darwin64 targets are also MACH-O. */
2736 if (TARGET_MACHO
2737 && DEFAULT_ABI == ABI_DARWIN
2738 && TARGET_64BIT)
2740 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
2741 error ("target attribute or pragma changes darwin64 ABI");
2742 else
2744 rs6000_darwin64_abi = 1;
2745 /* Default to natural alignment, for better performance. */
2746 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2750 /* Place FP constants in the constant pool instead of TOC
2751      if section anchors are enabled.  */
2752 if (flag_section_anchors)
2753 TARGET_NO_FP_IN_TOC = 1;
2755 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2756 SUBTARGET_OVERRIDE_OPTIONS;
2757 #endif
2758 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2759 SUBSUBTARGET_OVERRIDE_OPTIONS;
2760 #endif
2761 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2762 SUB3TARGET_OVERRIDE_OPTIONS;
2763 #endif
2765 /* For the E500 family of cores, reset the single/double FP flags to let us
2766 check that they remain constant across attributes or pragmas. Also,
2767      clear a possible request for string instructions, which are not supported
2768      and which we might have silently set above for -Os.
2770      For other families, clear ISEL in case it was set implicitly.  */
2773 switch (rs6000_cpu)
2775 case PROCESSOR_PPC8540:
2776 case PROCESSOR_PPC8548:
2777 case PROCESSOR_PPCE500MC:
2778 case PROCESSOR_PPCE500MC64:
2779 case PROCESSOR_PPCE5500:
2780 case PROCESSOR_PPCE6500:
2782 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
2783 rs6000_double_float = TARGET_E500_DOUBLE;
2785 target_flags &= ~MASK_STRING;
2787 break;
2789 default:
2791 if (have_cpu && !(target_flags_explicit & MASK_ISEL))
2792 target_flags &= ~MASK_ISEL;
2794 break;
2797 if (main_target_opt)
2799 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
2800 error ("target attribute or pragma changes single precision floating "
2801 "point");
2802 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
2803 error ("target attribute or pragma changes double precision floating "
2804 "point");
2807 /* Detect invalid option combinations with E500. */
2808 CHECK_E500_OPTIONS;
2810 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2811 && rs6000_cpu != PROCESSOR_POWER5
2812 && rs6000_cpu != PROCESSOR_POWER6
2813 && rs6000_cpu != PROCESSOR_POWER7
2814 && rs6000_cpu != PROCESSOR_PPCA2
2815 && rs6000_cpu != PROCESSOR_CELL
2816 && rs6000_cpu != PROCESSOR_PPC476);
2817 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2818 || rs6000_cpu == PROCESSOR_POWER5
2819 || rs6000_cpu == PROCESSOR_POWER7);
2820 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2821 || rs6000_cpu == PROCESSOR_POWER5
2822 || rs6000_cpu == PROCESSOR_POWER6
2823 || rs6000_cpu == PROCESSOR_POWER7
2824 || rs6000_cpu == PROCESSOR_PPCE500MC
2825 || rs6000_cpu == PROCESSOR_PPCE500MC64
2826 || rs6000_cpu == PROCESSOR_PPCE5500
2827 || rs6000_cpu == PROCESSOR_PPCE6500);
2829 /* Allow debug switches to override the above settings. These are set to -1
2830 in rs6000.opt to indicate the user hasn't directly set the switch. */
2831 if (TARGET_ALWAYS_HINT >= 0)
2832 rs6000_always_hint = TARGET_ALWAYS_HINT;
2834 if (TARGET_SCHED_GROUPS >= 0)
2835 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2837 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
2838 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2840 rs6000_sched_restricted_insns_priority
2841 = (rs6000_sched_groups ? 1 : 0);
2843 /* Handle -msched-costly-dep option. */
2844 rs6000_sched_costly_dep
2845 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
2847 if (rs6000_sched_costly_dep_str)
2849 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2850 rs6000_sched_costly_dep = no_dep_costly;
2851 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2852 rs6000_sched_costly_dep = all_deps_costly;
2853 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2854 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2855 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2856 rs6000_sched_costly_dep = store_to_load_dep_costly;
2857 else
2858 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2859 atoi (rs6000_sched_costly_dep_str));
2862 /* Handle -minsert-sched-nops option. */
2863 rs6000_sched_insert_nops
2864 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2866 if (rs6000_sched_insert_nops_str)
2868 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2869 rs6000_sched_insert_nops = sched_finish_none;
2870 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2871 rs6000_sched_insert_nops = sched_finish_pad_groups;
2872 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2873 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2874 else
2875 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2876 atoi (rs6000_sched_insert_nops_str));
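  /* So -minsert-sched-nops=pad selects sched_finish_pad_groups, while a
     numeric argument such as -minsert-sched-nops=3 falls through to atoi
     and is used directly as the enumeration value.  */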
2879 if (global_init_p)
2881 #ifdef TARGET_REGNAMES
2882 /* If the user desires alternate register names, copy in the
2883 alternate names now. */
2884 if (TARGET_REGNAMES)
2885 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2886 #endif
2888 /* Set aix_struct_return last, after the ABI is determined.
2889 If -maix-struct-return or -msvr4-struct-return was explicitly
2890 used, don't override with the ABI default. */
2891 if (!global_options_set.x_aix_struct_return)
2892 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2894 #if 0
2895 /* IBM XL compiler defaults to unsigned bitfields. */
2896 if (TARGET_XL_COMPAT)
2897 flag_signed_bitfields = 0;
2898 #endif
2900 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2901 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2903 if (TARGET_TOC)
2904 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2906 /* We can only guarantee the availability of DI pseudo-ops when
2907 assembling for 64-bit targets. */
2908 if (!TARGET_64BIT)
2910 targetm.asm_out.aligned_op.di = NULL;
2911 targetm.asm_out.unaligned_op.di = NULL;
2915 /* Set branch target alignment, if not optimizing for size. */
2916 if (!optimize_size)
2918	  /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
2919	     aligned 8-byte to avoid misprediction by the branch predictor.  */
2920 if (rs6000_cpu == PROCESSOR_TITAN
2921 || rs6000_cpu == PROCESSOR_CELL)
2923 if (align_functions <= 0)
2924 align_functions = 8;
2925 if (align_jumps <= 0)
2926 align_jumps = 8;
2927 if (align_loops <= 0)
2928 align_loops = 8;
2930 if (rs6000_align_branch_targets)
2932 if (align_functions <= 0)
2933 align_functions = 16;
2934 if (align_jumps <= 0)
2935 align_jumps = 16;
2936 if (align_loops <= 0)
2938 can_override_loop_align = 1;
2939 align_loops = 16;
2942 if (align_jumps_max_skip <= 0)
2943 align_jumps_max_skip = 15;
2944 if (align_loops_max_skip <= 0)
2945 align_loops_max_skip = 15;
2948 /* Arrange to save and restore machine status around nested functions. */
2949 init_machine_status = rs6000_init_machine_status;
2951 /* We should always be splitting complex arguments, but we can't break
2952 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2953 if (DEFAULT_ABI != ABI_AIX)
2954 targetm.calls.split_complex_arg = NULL;
2957 /* Initialize rs6000_cost with the appropriate target costs. */
2958 if (optimize_size)
2959 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2960 else
2961 switch (rs6000_cpu)
2963 case PROCESSOR_RS64A:
2964 rs6000_cost = &rs64a_cost;
2965 break;
2967 case PROCESSOR_MPCCORE:
2968 rs6000_cost = &mpccore_cost;
2969 break;
2971 case PROCESSOR_PPC403:
2972 rs6000_cost = &ppc403_cost;
2973 break;
2975 case PROCESSOR_PPC405:
2976 rs6000_cost = &ppc405_cost;
2977 break;
2979 case PROCESSOR_PPC440:
2980 rs6000_cost = &ppc440_cost;
2981 break;
2983 case PROCESSOR_PPC476:
2984 rs6000_cost = &ppc476_cost;
2985 break;
2987 case PROCESSOR_PPC601:
2988 rs6000_cost = &ppc601_cost;
2989 break;
2991 case PROCESSOR_PPC603:
2992 rs6000_cost = &ppc603_cost;
2993 break;
2995 case PROCESSOR_PPC604:
2996 rs6000_cost = &ppc604_cost;
2997 break;
2999 case PROCESSOR_PPC604e:
3000 rs6000_cost = &ppc604e_cost;
3001 break;
3003 case PROCESSOR_PPC620:
3004 rs6000_cost = &ppc620_cost;
3005 break;
3007 case PROCESSOR_PPC630:
3008 rs6000_cost = &ppc630_cost;
3009 break;
3011 case PROCESSOR_CELL:
3012 rs6000_cost = &ppccell_cost;
3013 break;
3015 case PROCESSOR_PPC750:
3016 case PROCESSOR_PPC7400:
3017 rs6000_cost = &ppc750_cost;
3018 break;
3020 case PROCESSOR_PPC7450:
3021 rs6000_cost = &ppc7450_cost;
3022 break;
3024 case PROCESSOR_PPC8540:
3025 case PROCESSOR_PPC8548:
3026 rs6000_cost = &ppc8540_cost;
3027 break;
3029 case PROCESSOR_PPCE300C2:
3030 case PROCESSOR_PPCE300C3:
3031 rs6000_cost = &ppce300c2c3_cost;
3032 break;
3034 case PROCESSOR_PPCE500MC:
3035 rs6000_cost = &ppce500mc_cost;
3036 break;
3038 case PROCESSOR_PPCE500MC64:
3039 rs6000_cost = &ppce500mc64_cost;
3040 break;
3042 case PROCESSOR_PPCE5500:
3043 rs6000_cost = &ppce5500_cost;
3044 break;
3046 case PROCESSOR_PPCE6500:
3047 rs6000_cost = &ppce6500_cost;
3048 break;
3050 case PROCESSOR_TITAN:
3051 rs6000_cost = &titan_cost;
3052 break;
3054 case PROCESSOR_POWER4:
3055 case PROCESSOR_POWER5:
3056 rs6000_cost = &power4_cost;
3057 break;
3059 case PROCESSOR_POWER6:
3060 rs6000_cost = &power6_cost;
3061 break;
3063 case PROCESSOR_POWER7:
3064 rs6000_cost = &power7_cost;
3065 break;
3067 case PROCESSOR_PPCA2:
3068 rs6000_cost = &ppca2_cost;
3069 break;
3071 default:
3072 gcc_unreachable ();
3075 if (global_init_p)
3077 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3078 rs6000_cost->simultaneous_prefetches,
3079 global_options.x_param_values,
3080 global_options_set.x_param_values);
3081 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3082 global_options.x_param_values,
3083 global_options_set.x_param_values);
3084 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3085 rs6000_cost->cache_line_size,
3086 global_options.x_param_values,
3087 global_options_set.x_param_values);
3088 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3089 global_options.x_param_values,
3090 global_options_set.x_param_values);
3092 /* If using typedef char *va_list, signal that
3093 __builtin_va_start (&ap, 0) can be optimized to
3094 ap = __builtin_next_arg (0). */
3095 if (DEFAULT_ABI != ABI_V4)
3096 targetm.expand_builtin_va_start = NULL;
3099 /* Set up single/double float flags.
3100      If TARGET_HARD_FLOAT is set but neither single nor double is set,
3101 then set both flags. */
3102 if (TARGET_HARD_FLOAT && TARGET_FPRS
3103 && rs6000_single_float == 0 && rs6000_double_float == 0)
3104 rs6000_single_float = rs6000_double_float = 1;
3106 /* If not explicitly specified via option, decide whether to generate indexed
3107 load/store instructions. */
3108 if (TARGET_AVOID_XFORM == -1)
3109 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3110      DERAT mispredict penalty.  However, the LVE and STVE altivec instructions
3111 need indexed accesses and the type used is the scalar type of the element
3112 being loaded or stored. */
3113 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3114 && !TARGET_ALTIVEC);
3116 /* Set the -mrecip options. */
3117 if (rs6000_recip_name)
3119 char *p = ASTRDUP (rs6000_recip_name);
3120 char *q;
3121 unsigned int mask, i;
3122 bool invert;
3124 while ((q = strtok (p, ",")) != NULL)
3126 p = NULL;
3127 if (*q == '!')
3129 invert = true;
3130 q++;
3132 else
3133 invert = false;
3135 if (!strcmp (q, "default"))
3136 mask = ((TARGET_RECIP_PRECISION)
3137 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3138 else
3140 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3141 if (!strcmp (q, recip_options[i].string))
3143 mask = recip_options[i].mask;
3144 break;
3147 if (i == ARRAY_SIZE (recip_options))
3149 error ("unknown option for -mrecip=%s", q);
3150 invert = false;
3151 mask = 0;
3152 ret = false;
3156 if (invert)
3157 rs6000_recip_control &= ~mask;
3158 else
3159 rs6000_recip_control |= mask;
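  /* As an example of the syntax handled above, -mrecip=all,!rsqrtd first
     sets every bit in the mask of the "all" entry of recip_options and then
     clears the bits of the "rsqrtd" entry, because the '!' prefix inverts
     the named mask.  (The valid names are the entries of recip_options,
     defined elsewhere in this file.)  */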
3163   /* Set the builtin mask for the various options that could affect which
3164      builtins are enabled.  In the past we used target_flags, but we've run out
3165 of bits, and some options like SPE and PAIRED are no longer in
3166 target_flags. */
3167 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3168 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3169 fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
3170 (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
3171 (rs6000_builtin_mask & RS6000_BTM_VSX) ? ", vsx" : "",
3172 (rs6000_builtin_mask & RS6000_BTM_PAIRED) ? ", paired" : "",
3173 (rs6000_builtin_mask & RS6000_BTM_SPE) ? ", spe" : "");
3175 /* Initialize all of the registers. */
3176 rs6000_init_hard_regno_mode_ok (global_init_p);
3178   /* Save the initial options in case the user does function-specific options.  */
3179 if (global_init_p)
3180 target_option_default_node = target_option_current_node
3181 = build_target_option_node ();
3183 /* If not explicitly specified via option, decide whether to generate the
3184 extra blr's required to preserve the link stack on some cpus (eg, 476). */
3185 if (TARGET_LINK_STACK == -1)
3186 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3188 return ret;
3191 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3192 define the target cpu type. */
3194 static void
3195 rs6000_option_override (void)
3197 (void) rs6000_option_override_internal (true);
3201 /* Implement targetm.vectorize.builtin_mask_for_load. */
3202 static tree
3203 rs6000_builtin_mask_for_load (void)
3205 if (TARGET_ALTIVEC || TARGET_VSX)
3206 return altivec_builtin_mask_for_load;
3207 else
3208 return 0;
3211 /* Implement LOOP_ALIGN. */
3212 int
3213 rs6000_loop_align (rtx label)
3215 basic_block bb;
3216 int ninsns;
3218 /* Don't override loop alignment if -falign-loops was specified. */
3219 if (!can_override_loop_align)
3220 return align_loops_log;
3222 bb = BLOCK_FOR_INSN (label);
3223   ninsns = num_loop_insns (bb->loop_father);
3225   /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default.  */
3226 if (ninsns > 4 && ninsns <= 8
3227 && (rs6000_cpu == PROCESSOR_POWER4
3228 || rs6000_cpu == PROCESSOR_POWER5
3229 || rs6000_cpu == PROCESSOR_POWER6
3230 || rs6000_cpu == PROCESSOR_POWER7))
3231 return 5;
3232 else
3233 return align_loops_log;
3236 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3237 static int
3238 rs6000_loop_align_max_skip (rtx label)
3240 return (1 << rs6000_loop_align (label)) - 1;
3243 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3244    after applying N iterations.  This routine does not determine how many
3245    iterations are required to reach the desired alignment.  */
3247 static bool
3248 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3250 if (is_packed)
3251 return false;
3253 if (TARGET_32BIT)
3255 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3256 return true;
3258 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3259 return true;
3261 return false;
3263 else
3265 if (TARGET_MACHO)
3266 return false;
3268       /* Assume that all other types are naturally aligned.  CHECKME!  */
3269 return true;
3273 /* Return true if the vector misalignment factor is supported by the
3274 target. */
3275 static bool
3276 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3277 const_tree type,
3278 int misalignment,
3279 bool is_packed)
3281 if (TARGET_VSX)
3283       /* Return false if the movmisalign pattern is not supported for this mode.  */
3284 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3285 return false;
3287 if (misalignment == -1)
3289 /* Misalignment factor is unknown at compile time but we know
3290 it's word aligned. */
3291 if (rs6000_vector_alignment_reachable (type, is_packed))
3293 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3295 if (element_size == 64 || element_size == 32)
3296 return true;
3299 return false;
3302       /* VSX supports word-aligned vectors.  */
3303 if (misalignment % 4 == 0)
3304 return true;
3306 return false;
3309 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3310 static int
3311 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3312 tree vectype, int misalign)
3314 unsigned elements;
3315 tree elem_type;
3317 switch (type_of_cost)
3319 case scalar_stmt:
3320 case scalar_load:
3321 case scalar_store:
3322 case vector_stmt:
3323 case vector_load:
3324 case vector_store:
3325 case vec_to_scalar:
3326 case scalar_to_vec:
3327 case cond_branch_not_taken:
3328 return 1;
3330 case vec_perm:
3331 if (TARGET_VSX)
3332 return 3;
3333 else
3334 return 1;
3336 case vec_promote_demote:
3337 if (TARGET_VSX)
3338 return 4;
3339 else
3340 return 1;
3342 case cond_branch_taken:
3343 return 3;
3345 case unaligned_load:
3346 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3348 elements = TYPE_VECTOR_SUBPARTS (vectype);
3349 if (elements == 2)
3350 /* Double word aligned. */
3351 return 2;
3353 if (elements == 4)
3355 switch (misalign)
3357 case 8:
3358 /* Double word aligned. */
3359 return 2;
3361 case -1:
3362 /* Unknown misalignment. */
3363 case 4:
3364 case 12:
3365 /* Word aligned. */
3366 return 22;
3368 default:
3369 gcc_unreachable ();
3374 if (TARGET_ALTIVEC)
3375 /* Misaligned loads are not supported. */
3376 gcc_unreachable ();
3378 return 2;
3380 case unaligned_store:
3381 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3383 elements = TYPE_VECTOR_SUBPARTS (vectype);
3384 if (elements == 2)
3385 /* Double word aligned. */
3386 return 2;
3388 if (elements == 4)
3390 switch (misalign)
3392 case 8:
3393 /* Double word aligned. */
3394 return 2;
3396 case -1:
3397 /* Unknown misalignment. */
3398 case 4:
3399 case 12:
3400 /* Word aligned. */
3401 return 23;
3403 default:
3404 gcc_unreachable ();
3409 if (TARGET_ALTIVEC)
3410 /* Misaligned stores are not supported. */
3411 gcc_unreachable ();
3413 return 2;
3415 case vec_construct:
3416 elements = TYPE_VECTOR_SUBPARTS (vectype);
3417 elem_type = TREE_TYPE (vectype);
3418 /* 32-bit vectors loaded into registers are stored as double
3419 precision, so we need n/2 converts in addition to the usual
3420 n/2 merges to construct a vector of short floats from them. */
3421 if (SCALAR_FLOAT_TYPE_P (elem_type)
3422 && TYPE_PRECISION (elem_type) == 32)
3423 return elements + 1;
3424 else
3425 return elements / 2 + 1;
3427 default:
3428 gcc_unreachable ();
3432 /* Implement targetm.vectorize.preferred_simd_mode. */
3434 static enum machine_mode
3435 rs6000_preferred_simd_mode (enum machine_mode mode)
3437 if (TARGET_VSX)
3438 switch (mode)
3440 case DFmode:
3441 return V2DFmode;
3442 default:;
3444 if (TARGET_ALTIVEC || TARGET_VSX)
3445 switch (mode)
3447 case SFmode:
3448 return V4SFmode;
3449 case DImode:
3450 return V2DImode;
3451 case SImode:
3452 return V4SImode;
3453 case HImode:
3454 return V8HImode;
3455 case QImode:
3456 return V16QImode;
3457 default:;
3459 if (TARGET_SPE)
3460 switch (mode)
3462 case SFmode:
3463 return V2SFmode;
3464 case SImode:
3465 return V2SImode;
3466 default:;
3468 if (TARGET_PAIRED_FLOAT
3469 && mode == SFmode)
3470 return V2SFmode;
3471 return word_mode;
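/* For instance, with -mvsx, DFmode vectorizes as V2DFmode; with Altivec
   alone, SFmode maps to V4SFmode and SImode to V4SImode; with no vector
   unit at all the function falls back to word_mode.  */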
3474 typedef struct _rs6000_cost_data
3476 struct loop *loop_info;
3477 unsigned cost[3];
3478 } rs6000_cost_data;
3480 /* Test for likely overcommitment of vector hardware resources. If a
3481 loop iteration is relatively large, and too large a percentage of
3482 instructions in the loop are vectorized, the cost model may not
3483 adequately reflect delays from unavailable vector resources.
3484 Penalize the loop body cost for this case. */
3486 static void
3487 rs6000_density_test (rs6000_cost_data *data)
3489 const int DENSITY_PCT_THRESHOLD = 85;
3490 const int DENSITY_SIZE_THRESHOLD = 70;
3491 const int DENSITY_PENALTY = 10;
3492 struct loop *loop = data->loop_info;
3493 basic_block *bbs = get_loop_body (loop);
3494 int nbbs = loop->num_nodes;
3495 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3496 int i, density_pct;
3498 for (i = 0; i < nbbs; i++)
3500 basic_block bb = bbs[i];
3501 gimple_stmt_iterator gsi;
3503 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3505 gimple stmt = gsi_stmt (gsi);
3506 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3508 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3509 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3510 not_vec_cost++;
3514 free (bbs);
3515 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3517 if (density_pct > DENSITY_PCT_THRESHOLD
3518 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3520 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3521 if (vect_print_dump_info (REPORT_DETAILS))
3522 fprintf (vect_dump,
3523 "density %d%%, cost %d exceeds threshold, penalizing "
3524 "loop body cost by %d%%", density_pct,
3525 vec_cost + not_vec_cost, DENSITY_PENALTY);
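/* Worked example: with vec_cost == 90 and not_vec_cost == 10, the
   density is 90% and the body size is 100, so both thresholds are
   exceeded and the body cost becomes 90 * 110 / 100 == 99.  */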
3529 /* Implement targetm.vectorize.init_cost. */
3531 static void *
3532 rs6000_init_cost (struct loop *loop_info)
3534 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3535 data->loop_info = loop_info;
3536 data->cost[vect_prologue] = 0;
3537 data->cost[vect_body] = 0;
3538 data->cost[vect_epilogue] = 0;
3539 return data;
3542 /* Implement targetm.vectorize.add_stmt_cost. */
3544 static unsigned
3545 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3546 struct _stmt_vec_info *stmt_info, int misalign,
3547 enum vect_cost_model_location where)
3549 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3550 unsigned retval = 0;
3552 if (flag_vect_cost_model)
3554 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
3555 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
3556 misalign);
3557 /* Statements in an inner loop relative to the loop being
3558 vectorized are weighted more heavily. The value here is
3559 arbitrary and could potentially be improved with analysis. */
3560 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
3561 count *= 50; /* FIXME. */
3563 retval = (unsigned) (count * stmt_cost);
3564 cost_data->cost[where] += retval;
3567 return retval;
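/* Worked example: one unaligned_load of a word-aligned 4-element
   vector under VSX has stmt_cost 22 (see
   rs6000_builtin_vectorization_cost), so count == 1 in the main body
   adds 22 to cost[vect_body]; the same statement in an inner loop
   would instead be weighted to 50 * 22 == 1100.  */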
3570 /* Implement targetm.vectorize.finish_cost. */
3572 static void
3573 rs6000_finish_cost (void *data, unsigned *prologue_cost,
3574 unsigned *body_cost, unsigned *epilogue_cost)
3576 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3578 if (cost_data->loop_info)
3579 rs6000_density_test (cost_data);
3581 *prologue_cost = cost_data->cost[vect_prologue];
3582 *body_cost = cost_data->cost[vect_body];
3583 *epilogue_cost = cost_data->cost[vect_epilogue];
3586 /* Implement targetm.vectorize.destroy_cost_data. */
3588 static void
3589 rs6000_destroy_cost_data (void *data)
3591 free (data);
3594 /* Handler for the Mathematical Acceleration Subsystem (MASS) interface to a
3595 library with vectorized intrinsics. */
3597 static tree
3598 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3600 char name[32];
3601 const char *suffix = NULL;
3602 tree fntype, new_fndecl, bdecl = NULL_TREE;
3603 int n_args = 1;
3604 const char *bname;
3605 enum machine_mode el_mode, in_mode;
3606 int n, in_n;
3608 /* Libmass is suitable only for unsafe math, as it does not correctly support
3609 parts of IEEE (such as denormals) with the required precision. Only support
3610 it if we have VSX to use the simd d2 or f4 functions.
3611 XXX: Add variable length support. */
3612 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3613 return NULL_TREE;
3615 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3616 n = TYPE_VECTOR_SUBPARTS (type_out);
3617 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3618 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3619 if (el_mode != in_mode
3620 || n != in_n)
3621 return NULL_TREE;
3623 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3625 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3626 switch (fn)
3628 case BUILT_IN_ATAN2:
3629 case BUILT_IN_HYPOT:
3630 case BUILT_IN_POW:
3631 n_args = 2;
3632 /* fall through */
3634 case BUILT_IN_ACOS:
3635 case BUILT_IN_ACOSH:
3636 case BUILT_IN_ASIN:
3637 case BUILT_IN_ASINH:
3638 case BUILT_IN_ATAN:
3639 case BUILT_IN_ATANH:
3640 case BUILT_IN_CBRT:
3641 case BUILT_IN_COS:
3642 case BUILT_IN_COSH:
3643 case BUILT_IN_ERF:
3644 case BUILT_IN_ERFC:
3645 case BUILT_IN_EXP2:
3646 case BUILT_IN_EXP:
3647 case BUILT_IN_EXPM1:
3648 case BUILT_IN_LGAMMA:
3649 case BUILT_IN_LOG10:
3650 case BUILT_IN_LOG1P:
3651 case BUILT_IN_LOG2:
3652 case BUILT_IN_LOG:
3653 case BUILT_IN_SIN:
3654 case BUILT_IN_SINH:
3655 case BUILT_IN_SQRT:
3656 case BUILT_IN_TAN:
3657 case BUILT_IN_TANH:
3658 bdecl = builtin_decl_implicit (fn);
3659 suffix = "d2"; /* pow -> powd2 */
3660 if (el_mode != DFmode
3661 || n != 2)
3662 return NULL_TREE;
3663 break;
3665 case BUILT_IN_ATAN2F:
3666 case BUILT_IN_HYPOTF:
3667 case BUILT_IN_POWF:
3668 n_args = 2;
3669 /* fall through */
3671 case BUILT_IN_ACOSF:
3672 case BUILT_IN_ACOSHF:
3673 case BUILT_IN_ASINF:
3674 case BUILT_IN_ASINHF:
3675 case BUILT_IN_ATANF:
3676 case BUILT_IN_ATANHF:
3677 case BUILT_IN_CBRTF:
3678 case BUILT_IN_COSF:
3679 case BUILT_IN_COSHF:
3680 case BUILT_IN_ERFF:
3681 case BUILT_IN_ERFCF:
3682 case BUILT_IN_EXP2F:
3683 case BUILT_IN_EXPF:
3684 case BUILT_IN_EXPM1F:
3685 case BUILT_IN_LGAMMAF:
3686 case BUILT_IN_LOG10F:
3687 case BUILT_IN_LOG1PF:
3688 case BUILT_IN_LOG2F:
3689 case BUILT_IN_LOGF:
3690 case BUILT_IN_SINF:
3691 case BUILT_IN_SINHF:
3692 case BUILT_IN_SQRTF:
3693 case BUILT_IN_TANF:
3694 case BUILT_IN_TANHF:
3695 bdecl = builtin_decl_implicit (fn);
3696 suffix = "4"; /* powf -> powf4 */
3697 if (el_mode != SFmode
3698 || n != 4)
3699 return NULL_TREE;
3700 break;
3702 default:
3703 return NULL_TREE;
3706 else
3707 return NULL_TREE;
3709 gcc_assert (suffix != NULL);
3710 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
3711 strcpy (name, bname + sizeof ("__builtin_") - 1);
3712 strcat (name, suffix);
3714 if (n_args == 1)
3715 fntype = build_function_type_list (type_out, type_in, NULL);
3716 else if (n_args == 2)
3717 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
3718 else
3719 gcc_unreachable ();
3721 /* Build a function declaration for the vectorized function. */
3722 new_fndecl = build_decl (BUILTINS_LOCATION,
3723 FUNCTION_DECL, get_identifier (name), fntype);
3724 TREE_PUBLIC (new_fndecl) = 1;
3725 DECL_EXTERNAL (new_fndecl) = 1;
3726 DECL_IS_NOVOPS (new_fndecl) = 1;
3727 TREE_READONLY (new_fndecl) = 1;
3729 return new_fndecl;
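/* Example: vectorizing sinf for V4SFmode strips "__builtin_" from
   "__builtin_sinf" and appends "4", yielding a call to "sinf4", while
   pow for V2DFmode becomes the two-argument "powd2".  */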
3732 /* Returns a function decl for a vectorized version of the builtin function
3733 with declaration FNDECL, output vector type TYPE_OUT and input vector type
3734 TYPE_IN, or NULL_TREE if it is not available. */
3736 static tree
3737 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3738 tree type_in)
3740 enum machine_mode in_mode, out_mode;
3741 int in_n, out_n;
3743 if (TARGET_DEBUG_BUILTIN)
3744 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
3745 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
3746 GET_MODE_NAME (TYPE_MODE (type_out)),
3747 GET_MODE_NAME (TYPE_MODE (type_in)));
3749 if (TREE_CODE (type_out) != VECTOR_TYPE
3750 || TREE_CODE (type_in) != VECTOR_TYPE
3751 || !TARGET_VECTORIZE_BUILTINS)
3752 return NULL_TREE;
3754 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3755 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3756 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3757 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3759 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3761 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3762 switch (fn)
3764 case BUILT_IN_COPYSIGN:
3765 if (VECTOR_UNIT_VSX_P (V2DFmode)
3766 && out_mode == DFmode && out_n == 2
3767 && in_mode == DFmode && in_n == 2)
3768 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3769 break;
3770 case BUILT_IN_COPYSIGNF:
3771 if (out_mode != SFmode || out_n != 4
3772 || in_mode != SFmode || in_n != 4)
3773 break;
3774 if (VECTOR_UNIT_VSX_P (V4SFmode))
3775 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3776 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3777 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3778 break;
3779 case BUILT_IN_SQRT:
3780 if (VECTOR_UNIT_VSX_P (V2DFmode)
3781 && out_mode == DFmode && out_n == 2
3782 && in_mode == DFmode && in_n == 2)
3783 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3784 break;
3785 case BUILT_IN_SQRTF:
3786 if (VECTOR_UNIT_VSX_P (V4SFmode)
3787 && out_mode == SFmode && out_n == 4
3788 && in_mode == SFmode && in_n == 4)
3789 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3790 break;
3791 case BUILT_IN_CEIL:
3792 if (VECTOR_UNIT_VSX_P (V2DFmode)
3793 && out_mode == DFmode && out_n == 2
3794 && in_mode == DFmode && in_n == 2)
3795 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3796 break;
3797 case BUILT_IN_CEILF:
3798 if (out_mode != SFmode || out_n != 4
3799 || in_mode != SFmode || in_n != 4)
3800 break;
3801 if (VECTOR_UNIT_VSX_P (V4SFmode))
3802 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3803 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3804 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3805 break;
3806 case BUILT_IN_FLOOR:
3807 if (VECTOR_UNIT_VSX_P (V2DFmode)
3808 && out_mode == DFmode && out_n == 2
3809 && in_mode == DFmode && in_n == 2)
3810 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3811 break;
3812 case BUILT_IN_FLOORF:
3813 if (out_mode != SFmode || out_n != 4
3814 || in_mode != SFmode || in_n != 4)
3815 break;
3816 if (VECTOR_UNIT_VSX_P (V4SFmode))
3817 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3818 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3819 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3820 break;
3821 case BUILT_IN_FMA:
3822 if (VECTOR_UNIT_VSX_P (V2DFmode)
3823 && out_mode == DFmode && out_n == 2
3824 && in_mode == DFmode && in_n == 2)
3825 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
3826 break;
3827 case BUILT_IN_FMAF:
3828 if (VECTOR_UNIT_VSX_P (V4SFmode)
3829 && out_mode == SFmode && out_n == 4
3830 && in_mode == SFmode && in_n == 4)
3831 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
3832 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
3833 && out_mode == SFmode && out_n == 4
3834 && in_mode == SFmode && in_n == 4)
3835 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
3836 break;
3837 case BUILT_IN_TRUNC:
3838 if (VECTOR_UNIT_VSX_P (V2DFmode)
3839 && out_mode == DFmode && out_n == 2
3840 && in_mode == DFmode && in_n == 2)
3841 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3842 break;
3843 case BUILT_IN_TRUNCF:
3844 if (out_mode != SFmode || out_n != 4
3845 || in_mode != SFmode || in_n != 4)
3846 break;
3847 if (VECTOR_UNIT_VSX_P (V4SFmode))
3848 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3849 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3850 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3851 break;
3852 case BUILT_IN_NEARBYINT:
3853 if (VECTOR_UNIT_VSX_P (V2DFmode)
3854 && flag_unsafe_math_optimizations
3855 && out_mode == DFmode && out_n == 2
3856 && in_mode == DFmode && in_n == 2)
3857 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3858 break;
3859 case BUILT_IN_NEARBYINTF:
3860 if (VECTOR_UNIT_VSX_P (V4SFmode)
3861 && flag_unsafe_math_optimizations
3862 && out_mode == SFmode && out_n == 4
3863 && in_mode == SFmode && in_n == 4)
3864 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3865 break;
3866 case BUILT_IN_RINT:
3867 if (VECTOR_UNIT_VSX_P (V2DFmode)
3868 && !flag_trapping_math
3869 && out_mode == DFmode && out_n == 2
3870 && in_mode == DFmode && in_n == 2)
3871 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3872 break;
3873 case BUILT_IN_RINTF:
3874 if (VECTOR_UNIT_VSX_P (V4SFmode)
3875 && !flag_trapping_math
3876 && out_mode == SFmode && out_n == 4
3877 && in_mode == SFmode && in_n == 4)
3878 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
3879 break;
3880 default:
3881 break;
3885 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
3887 enum rs6000_builtins fn
3888 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
3889 switch (fn)
3891 case RS6000_BUILTIN_RSQRTF:
3892 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3893 && out_mode == SFmode && out_n == 4
3894 && in_mode == SFmode && in_n == 4)
3895 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
3896 break;
3897 case RS6000_BUILTIN_RSQRT:
3898 if (VECTOR_UNIT_VSX_P (V2DFmode)
3899 && out_mode == DFmode && out_n == 2
3900 && in_mode == DFmode && in_n == 2)
3901 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
3902 break;
3903 case RS6000_BUILTIN_RECIPF:
3904 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3905 && out_mode == SFmode && out_n == 4
3906 && in_mode == SFmode && in_n == 4)
3907 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
3908 break;
3909 case RS6000_BUILTIN_RECIP:
3910 if (VECTOR_UNIT_VSX_P (V2DFmode)
3911 && out_mode == DFmode && out_n == 2
3912 && in_mode == DFmode && in_n == 2)
3913 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
3914 break;
3915 default:
3916 break;
3920 /* Generate calls to libmass if appropriate. */
3921 if (rs6000_veclib_handler)
3922 return rs6000_veclib_handler (fndecl, type_out, type_in);
3924 return NULL_TREE;
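/* Example: a call to the sqrt builtin with V2DFmode input and output
   under VSX maps to VSX_BUILTIN_XVSQRTDP, letting the vectorizer use
   a single xvsqrtdp rather than a scalar libcall per element.  */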
3927 /* Default CPU string for rs6000*_file_start functions. */
3928 static const char *rs6000_default_cpu;
3930 /* Do anything needed at the start of the asm file. */
3932 static void
3933 rs6000_file_start (void)
3935 char buffer[80];
3936 const char *start = buffer;
3937 FILE *file = asm_out_file;
3939 rs6000_default_cpu = TARGET_CPU_DEFAULT;
3941 default_file_start ();
3943 if (flag_verbose_asm)
3945 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3947 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
3949 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
3950 start = "";
3953 if (global_options_set.x_rs6000_cpu_index)
3955 fprintf (file, "%s -mcpu=%s", start,
3956 processor_target_table[rs6000_cpu_index].name);
3957 start = "";
3960 if (global_options_set.x_rs6000_tune_index)
3962 fprintf (file, "%s -mtune=%s", start,
3963 processor_target_table[rs6000_tune_index].name);
3964 start = "";
3967 if (PPC405_ERRATUM77)
3969 fprintf (file, "%s PPC405CR_ERRATUM77", start);
3970 start = "";
3973 #ifdef USING_ELFOS_H
3974 switch (rs6000_sdata)
3976 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
3977 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
3978 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
3979 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
3982 if (rs6000_sdata && g_switch_value)
3984 fprintf (file, "%s -G %d", start,
3985 g_switch_value);
3986 start = "";
3988 #endif
3990 if (*start == '\0')
3991 putc ('\n', file);
3994 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
3996 switch_to_section (toc_section);
3997 switch_to_section (text_section);
4002 /* Return nonzero if this function is known to have a null epilogue. */
4004 int
4005 direct_return (void)
4007 if (reload_completed)
4009 rs6000_stack_t *info = rs6000_stack_info ();
4011 if (info->first_gp_reg_save == 32
4012 && info->first_fp_reg_save == 64
4013 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4014 && ! info->lr_save_p
4015 && ! info->cr_save_p
4016 && info->vrsave_mask == 0
4017 && ! info->push_p)
4018 return 1;
4021 return 0;
4024 /* Return the number of instructions it takes to form a constant in an
4025 integer register. */
4027 static int
4028 num_insns_constant_wide (HOST_WIDE_INT value)
4030 /* signed constant loadable with addi */
4031 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4032 return 1;
4034 /* constant loadable with addis */
4035 else if ((value & 0xffff) == 0
4036 && (value >> 31 == -1 || value >> 31 == 0))
4037 return 1;
4039 #if HOST_BITS_PER_WIDE_INT == 64
4040 else if (TARGET_POWERPC64)
4042 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4043 HOST_WIDE_INT high = value >> 31;
4045 if (high == 0 || high == -1)
4046 return 2;
4048 high >>= 1;
4050 if (low == 0)
4051 return num_insns_constant_wide (high) + 1;
4052 else if (high == 0)
4053 return num_insns_constant_wide (low) + 1;
4054 else
4055 return (num_insns_constant_wide (high)
4056 + num_insns_constant_wide (low) + 1);
4058 #endif
4060 else
4061 return 2;
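/* Worked examples: 0x7fff loads with one addi and 0x12340000 with one
   addis, so both return 1; the 64-bit value 0x100000000 has low == 0
   and high == 1, so the recursion returns
   num_insns_constant_wide (1) + 1 == 2 (e.g. li 1 then sldi 32).  */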
4064 int
4065 num_insns_constant (rtx op, enum machine_mode mode)
4067 HOST_WIDE_INT low, high;
4069 switch (GET_CODE (op))
4071 case CONST_INT:
4072 #if HOST_BITS_PER_WIDE_INT == 64
4073 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4074 && mask64_operand (op, mode))
4075 return 2;
4076 else
4077 #endif
4078 return num_insns_constant_wide (INTVAL (op));
4080 case CONST_DOUBLE:
4081 if (mode == SFmode || mode == SDmode)
4083 long l;
4084 REAL_VALUE_TYPE rv;
4086 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4087 if (DECIMAL_FLOAT_MODE_P (mode))
4088 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4089 else
4090 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4091 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4094 if (mode == VOIDmode || mode == DImode)
4096 high = CONST_DOUBLE_HIGH (op);
4097 low = CONST_DOUBLE_LOW (op);
4099 else
4101 long l[2];
4102 REAL_VALUE_TYPE rv;
4104 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4105 if (DECIMAL_FLOAT_MODE_P (mode))
4106 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4107 else
4108 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4109 high = l[WORDS_BIG_ENDIAN == 0];
4110 low = l[WORDS_BIG_ENDIAN != 0];
4113 if (TARGET_32BIT)
4114 return (num_insns_constant_wide (low)
4115 + num_insns_constant_wide (high));
4116 else
4118 if ((high == 0 && low >= 0)
4119 || (high == -1 && low < 0))
4120 return num_insns_constant_wide (low);
4122 else if (mask64_operand (op, mode))
4123 return 2;
4125 else if (low == 0)
4126 return num_insns_constant_wide (high) + 1;
4128 else
4129 return (num_insns_constant_wide (high)
4130 + num_insns_constant_wide (low) + 1);
4133 default:
4134 gcc_unreachable ();
4138 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4139 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4140 corresponding element of the vector, but for V4SFmode and V2SFmode,
4141 the corresponding "float" is interpreted as an SImode integer. */
4143 HOST_WIDE_INT
4144 const_vector_elt_as_int (rtx op, unsigned int elt)
4146 rtx tmp;
4148 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4149 gcc_assert (GET_MODE (op) != V2DImode
4150 && GET_MODE (op) != V2DFmode);
4152 tmp = CONST_VECTOR_ELT (op, elt);
4153 if (GET_MODE (op) == V4SFmode
4154 || GET_MODE (op) == V2SFmode)
4155 tmp = gen_lowpart (SImode, tmp);
4156 return INTVAL (tmp);
4159 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4160 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4161 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4162 all items are set to the same value and contain COPIES replicas of the
4163 vsplt's operand; if STEP > 1, every STEP-th element is set to the vsplt's
4164 operand and the others are set to the value of the operand's msb. */
4166 static bool
4167 vspltis_constant (rtx op, unsigned step, unsigned copies)
4169 enum machine_mode mode = GET_MODE (op);
4170 enum machine_mode inner = GET_MODE_INNER (mode);
4172 unsigned i;
4173 unsigned nunits;
4174 unsigned bitsize;
4175 unsigned mask;
4177 HOST_WIDE_INT val;
4178 HOST_WIDE_INT splat_val;
4179 HOST_WIDE_INT msb_val;
4181 if (mode == V2DImode || mode == V2DFmode)
4182 return false;
4184 nunits = GET_MODE_NUNITS (mode);
4185 bitsize = GET_MODE_BITSIZE (inner);
4186 mask = GET_MODE_MASK (inner);
4188 val = const_vector_elt_as_int (op, nunits - 1);
4189 splat_val = val;
4190 msb_val = val > 0 ? 0 : -1;
4192 /* Construct the value to be splatted, if possible. If not, return 0. */
4193 for (i = 2; i <= copies; i *= 2)
4195 HOST_WIDE_INT small_val;
4196 bitsize /= 2;
4197 small_val = splat_val >> bitsize;
4198 mask >>= bitsize;
4199 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4200 return false;
4201 splat_val = small_val;
4204 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4205 if (EASY_VECTOR_15 (splat_val))
4208 /* Also check if we can splat, and then add the result to itself. Do so if
4209 the value is positive, or if the splat instruction is using OP's mode;
4210 for splat_val < 0, the splat and the add should use the same mode. */
4211 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4212 && (splat_val >= 0 || (step == 1 && copies == 1)))
4215 /* Also check if we are loading up the most significant bit, which can be done by
4216 loading up -1 and shifting the value left by -1. */
4217 else if (EASY_VECTOR_MSB (splat_val, inner))
4220 else
4221 return false;
4223 /* Check if VAL is present in every STEP-th element, and the
4224 other elements are filled with its most significant bit. */
4225 for (i = 0; i < nunits - 1; ++i)
4227 HOST_WIDE_INT desired_val;
4228 if (((i + 1) & (step - 1)) == 0)
4229 desired_val = val;
4230 else
4231 desired_val = msb_val;
4233 if (desired_val != const_vector_elt_as_int (op, i))
4234 return false;
4237 return true;
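/* Worked example: the V8HImode constant { 0x0303, ..., 0x0303 } with
   copies == 2 folds each 0x0303 halfword down to splat_val == 3;
   EASY_VECTOR_15 (3) holds and every element matches, so the constant
   can be formed by vspltisb 3 reinterpreted as halfwords.  */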
4241 /* Return true if OP is of the given MODE and can be synthesized
4242 with a vspltisb, vspltish or vspltisw. */
4244 bool
4245 easy_altivec_constant (rtx op, enum machine_mode mode)
4247 unsigned step, copies;
4249 if (mode == VOIDmode)
4250 mode = GET_MODE (op);
4251 else if (mode != GET_MODE (op))
4252 return false;
4254 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4255 constants. */
4256 if (mode == V2DFmode)
4257 return zero_constant (op, mode);
4259 if (mode == V2DImode)
4261 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4262 easy. */
4263 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4264 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4265 return false;
4267 if (zero_constant (op, mode))
4268 return true;
4270 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4271 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4272 return true;
4274 return false;
4277 /* Start with a vspltisw. */
4278 step = GET_MODE_NUNITS (mode) / 4;
4279 copies = 1;
4281 if (vspltis_constant (op, step, copies))
4282 return true;
4284 /* Then try with a vspltish. */
4285 if (step == 1)
4286 copies <<= 1;
4287 else
4288 step >>= 1;
4290 if (vspltis_constant (op, step, copies))
4291 return true;
4293 /* And finally a vspltisb. */
4294 if (step == 1)
4295 copies <<= 1;
4296 else
4297 step >>= 1;
4299 if (vspltis_constant (op, step, copies))
4300 return true;
4302 return false;
4305 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4306 result is OP. Abort if it is not possible. */
4308 rtx
4309 gen_easy_altivec_constant (rtx op)
4311 enum machine_mode mode = GET_MODE (op);
4312 int nunits = GET_MODE_NUNITS (mode);
4313 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4314 unsigned step = nunits / 4;
4315 unsigned copies = 1;
4317 /* Start with a vspltisw. */
4318 if (vspltis_constant (op, step, copies))
4319 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4321 /* Then try with a vspltish. */
4322 if (step == 1)
4323 copies <<= 1;
4324 else
4325 step >>= 1;
4327 if (vspltis_constant (op, step, copies))
4328 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4330 /* And finally a vspltisb. */
4331 if (step == 1)
4332 copies <<= 1;
4333 else
4334 step >>= 1;
4336 if (vspltis_constant (op, step, copies))
4337 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4339 gcc_unreachable ();
4342 const char *
4343 output_vec_const_move (rtx *operands)
4345 int cst, cst2;
4346 enum machine_mode mode;
4347 rtx dest, vec;
4349 dest = operands[0];
4350 vec = operands[1];
4351 mode = GET_MODE (dest);
4353 if (TARGET_VSX)
4355 if (zero_constant (vec, mode))
4356 return "xxlxor %x0,%x0,%x0";
4358 if (mode == V2DImode
4359 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4360 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4361 return "vspltisw %0,-1";
4364 if (TARGET_ALTIVEC)
4366 rtx splat_vec;
4367 if (zero_constant (vec, mode))
4368 return "vxor %0,%0,%0";
4370 splat_vec = gen_easy_altivec_constant (vec);
4371 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4372 operands[1] = XEXP (splat_vec, 0);
4373 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4374 return "#";
4376 switch (GET_MODE (splat_vec))
4378 case V4SImode:
4379 return "vspltisw %0,%1";
4381 case V8HImode:
4382 return "vspltish %0,%1";
4384 case V16QImode:
4385 return "vspltisb %0,%1";
4387 default:
4388 gcc_unreachable ();
4392 gcc_assert (TARGET_SPE);
4394 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4395 pattern of V1DI, V4HI, and V2SF.
4397 FIXME: We should probably return # and add post reload
4398 splitters for these, but this way is so easy ;-). */
4399 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4400 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4401 operands[1] = CONST_VECTOR_ELT (vec, 0);
4402 operands[2] = CONST_VECTOR_ELT (vec, 1);
4403 if (cst == cst2)
4404 return "li %0,%1\n\tevmergelo %0,%0,%0";
4405 else
4406 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4409 /* Initialize TARGET of vector PAIRED to VALS. */
4411 void
4412 paired_expand_vector_init (rtx target, rtx vals)
4414 enum machine_mode mode = GET_MODE (target);
4415 int n_elts = GET_MODE_NUNITS (mode);
4416 int n_var = 0;
4417 rtx x, new_rtx, tmp, constant_op, op1, op2;
4418 int i;
4420 for (i = 0; i < n_elts; ++i)
4422 x = XVECEXP (vals, 0, i);
4423 if (!(CONST_INT_P (x)
4424 || GET_CODE (x) == CONST_DOUBLE
4425 || GET_CODE (x) == CONST_FIXED))
4426 ++n_var;
4428 if (n_var == 0)
4430 /* Load from constant pool. */
4431 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4432 return;
4435 if (n_var == 2)
4437 /* The vector is initialized only with non-constants. */
4438 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4439 XVECEXP (vals, 0, 1));
4441 emit_move_insn (target, new_rtx);
4442 return;
4445 /* One field is non-constant and the other one is a constant. Load the
4446 constant from the constant pool and use ps_merge instruction to
4447 construct the whole vector. */
4448 op1 = XVECEXP (vals, 0, 0);
4449 op2 = XVECEXP (vals, 0, 1);
4451 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4453 tmp = gen_reg_rtx (GET_MODE (constant_op));
4454 emit_move_insn (tmp, constant_op);
4456 if (CONSTANT_P (op1))
4457 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4458 else
4459 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4461 emit_move_insn (target, new_rtx);
4464 void
4465 paired_expand_vector_move (rtx operands[])
4467 rtx op0 = operands[0], op1 = operands[1];
4469 emit_move_insn (op0, op1);
4472 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4473 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4474 operands for the relation operation COND. This is a recursive
4475 function. */
4477 static void
4478 paired_emit_vector_compare (enum rtx_code rcode,
4479 rtx dest, rtx op0, rtx op1,
4480 rtx cc_op0, rtx cc_op1)
4482 rtx tmp = gen_reg_rtx (V2SFmode);
4483 rtx tmp1, max, min;
4485 gcc_assert (TARGET_PAIRED_FLOAT);
4486 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4488 switch (rcode)
4490 case LT:
4491 case LTU:
4492 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4493 return;
4494 case GE:
4495 case GEU:
4496 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4497 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4498 return;
4499 case LE:
4500 case LEU:
4501 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4502 return;
4503 case GT:
4504 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4505 return;
4506 case EQ:
4507 tmp1 = gen_reg_rtx (V2SFmode);
4508 max = gen_reg_rtx (V2SFmode);
4509 min = gen_reg_rtx (V2SFmode);
4512 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4513 emit_insn (gen_selv2sf4
4514 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4515 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4516 emit_insn (gen_selv2sf4
4517 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4518 emit_insn (gen_subv2sf3 (tmp1, min, max));
4519 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4520 return;
4521 case NE:
4522 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4523 return;
4524 case UNLE:
4525 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4526 return;
4527 case UNLT:
4528 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4529 return;
4530 case UNGE:
4531 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4532 return;
4533 case UNGT:
4534 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4535 return;
4536 default:
4537 gcc_unreachable ();
4540 return;
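/* Example: an LT comparison recurses as GE with the selection
   operands swapped; GE itself computes tmp = cc_op0 - cc_op1 and uses
   the paired-single select pattern, which effectively picks op0 where
   the corresponding element of tmp is non-negative, else op1.  */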
4543 /* Emit vector conditional expression.
4544 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4545 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4547 int
4548 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4549 rtx cond, rtx cc_op0, rtx cc_op1)
4551 enum rtx_code rcode = GET_CODE (cond);
4553 if (!TARGET_PAIRED_FLOAT)
4554 return 0;
4556 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4558 return 1;
4561 /* Initialize vector TARGET to VALS. */
4563 void
4564 rs6000_expand_vector_init (rtx target, rtx vals)
4566 enum machine_mode mode = GET_MODE (target);
4567 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4568 int n_elts = GET_MODE_NUNITS (mode);
4569 int n_var = 0, one_var = -1;
4570 bool all_same = true, all_const_zero = true;
4571 rtx x, mem;
4572 int i;
4574 for (i = 0; i < n_elts; ++i)
4576 x = XVECEXP (vals, 0, i);
4577 if (!(CONST_INT_P (x)
4578 || GET_CODE (x) == CONST_DOUBLE
4579 || GET_CODE (x) == CONST_FIXED))
4580 ++n_var, one_var = i;
4581 else if (x != CONST0_RTX (inner_mode))
4582 all_const_zero = false;
4584 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4585 all_same = false;
4588 if (n_var == 0)
4590 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4591 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4592 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4594 /* Zero register. */
4595 emit_insn (gen_rtx_SET (VOIDmode, target,
4596 gen_rtx_XOR (mode, target, target)));
4597 return;
4599 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4601 /* Splat immediate. */
4602 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4603 return;
4605 else
4607 /* Load from constant pool. */
4608 emit_move_insn (target, const_vec);
4609 return;
4613 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4614 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4616 rtx op0 = XVECEXP (vals, 0, 0);
4617 rtx op1 = XVECEXP (vals, 0, 1);
4618 if (all_same)
4620 if (!MEM_P (op0) && !REG_P (op0))
4621 op0 = force_reg (inner_mode, op0);
4622 if (mode == V2DFmode)
4623 emit_insn (gen_vsx_splat_v2df (target, op0));
4624 else
4625 emit_insn (gen_vsx_splat_v2di (target, op0));
4627 else
4629 op0 = force_reg (inner_mode, op0);
4630 op1 = force_reg (inner_mode, op1);
4631 if (mode == V2DFmode)
4632 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4633 else
4634 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4636 return;
4639 /* With single precision floating point on VSX, we know that internally single
4640 precision is actually represented as a double, so we either make two V2DF
4641 vectors and convert them to single precision, or do one conversion and
4642 splat the result to the other elements. */
4643 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4645 if (all_same)
4647 rtx freg = gen_reg_rtx (V4SFmode);
4648 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4650 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4651 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4653 else
4655 rtx dbl_even = gen_reg_rtx (V2DFmode);
4656 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4657 rtx flt_even = gen_reg_rtx (V4SFmode);
4658 rtx flt_odd = gen_reg_rtx (V4SFmode);
4659 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4660 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4661 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4662 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4664 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4665 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4666 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4667 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4668 rs6000_expand_extract_even (target, flt_even, flt_odd);
4670 return;
4673 /* Store value to stack temp. Load vector element. Splat. However, splat
4674 of 64-bit items is not supported on AltiVec. */
4675 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4677 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4678 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4679 XVECEXP (vals, 0, 0));
4680 x = gen_rtx_UNSPEC (VOIDmode,
4681 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4682 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4683 gen_rtvec (2,
4684 gen_rtx_SET (VOIDmode,
4685 target, mem),
4686 x)));
4687 x = gen_rtx_VEC_SELECT (inner_mode, target,
4688 gen_rtx_PARALLEL (VOIDmode,
4689 gen_rtvec (1, const0_rtx)));
4690 emit_insn (gen_rtx_SET (VOIDmode, target,
4691 gen_rtx_VEC_DUPLICATE (mode, x)));
4692 return;
4695 /* One field is non-constant. Load constant then overwrite
4696 varying field. */
4697 if (n_var == 1)
4699 rtx copy = copy_rtx (vals);
4701 /* Load constant part of vector, substitute neighboring value for
4702 varying element. */
4703 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4704 rs6000_expand_vector_init (target, copy);
4706 /* Insert variable. */
4707 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4708 return;
4711 /* Construct the vector in memory one field at a time
4712 and load the whole vector. */
4713 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4714 for (i = 0; i < n_elts; i++)
4715 emit_move_insn (adjust_address_nv (mem, inner_mode,
4716 i * GET_MODE_SIZE (inner_mode)),
4717 XVECEXP (vals, 0, i));
4718 emit_move_insn (target, mem);
4721 /* Set field ELT of TARGET to VAL. */
4723 void
4724 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4726 enum machine_mode mode = GET_MODE (target);
4727 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4728 rtx reg = gen_reg_rtx (mode);
4729 rtx mask, mem, x;
4730 int width = GET_MODE_SIZE (inner_mode);
4731 int i;
4733 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4735 rtx (*set_func) (rtx, rtx, rtx, rtx)
4736 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4737 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4738 return;
4741 /* Load single variable value. */
4742 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4743 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4744 x = gen_rtx_UNSPEC (VOIDmode,
4745 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4746 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4747 gen_rtvec (2,
4748 gen_rtx_SET (VOIDmode,
4749 reg, mem),
4750 x)));
4752 /* Linear sequence. */
4753 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4754 for (i = 0; i < 16; ++i)
4755 XVECEXP (mask, 0, i) = GEN_INT (i);
4757 /* Set permute mask to insert element into target. */
4758 for (i = 0; i < width; ++i)
4759 XVECEXP (mask, 0, elt*width + i)
4760 = GEN_INT (i + 0x10);
4761 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4762 x = gen_rtx_UNSPEC (mode,
4763 gen_rtvec (3, target, reg,
4764 force_reg (V16QImode, x)),
4765 UNSPEC_VPERM);
4766 emit_insn (gen_rtx_SET (VOIDmode, target, x));
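/* Worked example: setting element 1 of a V4SImode vector (width == 4)
   rewrites mask bytes 4..7 to 0x10..0x13; since vperm takes bytes
   0-15 from its first source and 16-31 from its second, the result
   keeps TARGET's bytes except word 1, which comes from REG.  */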
4769 /* Extract field ELT from VEC into TARGET. */
4771 void
4772 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4774 enum machine_mode mode = GET_MODE (vec);
4775 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4776 rtx mem;
4778 if (VECTOR_MEM_VSX_P (mode))
4780 switch (mode)
4782 default:
4783 break;
4784 case V2DFmode:
4785 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
4786 return;
4787 case V2DImode:
4788 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
4789 return;
4790 case V4SFmode:
4791 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
4792 return;
4796 /* Allocate mode-sized buffer. */
4797 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4799 emit_move_insn (mem, vec);
4801 /* Add offset to field within buffer matching vector element. */
4802 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
4804 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
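/* Example: without VSX, extracting element 2 of a V4SImode vector
   spills the vector to the stack and reloads the SImode word at byte
   offset 2 * 4 == 8.  */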
4807 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
4808 implement ANDing by the mask IN. */
4809 void
4810 build_mask64_2_operands (rtx in, rtx *out)
4812 #if HOST_BITS_PER_WIDE_INT >= 64
4813 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4814 int shift;
4816 gcc_assert (GET_CODE (in) == CONST_INT);
4818 c = INTVAL (in);
4819 if (c & 1)
4821 /* Assume c initially something like 0x00fff000000fffff. The idea
4822 is to rotate the word so that the middle ^^^^^^ group of zeros
4823 is at the MS end and can be cleared with an rldicl mask. We then
4824 rotate back and clear off the MS ^^ group of zeros with a
4825 second rldicl. */
4826 c = ~c; /* c == 0xff000ffffff00000 */
4827 lsb = c & -c; /* lsb == 0x0000000000100000 */
4828 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4829 c = ~c; /* c == 0x00fff000000fffff */
4830 c &= -lsb; /* c == 0x00fff00000000000 */
4831 lsb = c & -c; /* lsb == 0x0000100000000000 */
4832 c = ~c; /* c == 0xff000fffffffffff */
4833 c &= -lsb; /* c == 0xff00000000000000 */
4834 shift = 0;
4835 while ((lsb >>= 1) != 0)
4836 shift++; /* shift == 44 on exit from loop */
4837 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4838 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4839 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4841 else
4843 /* Assume c initially something like 0xff000f0000000000. The idea
4844 is to rotate the word so that the ^^^ middle group of zeros
4845 is at the LS end and can be cleared with an rldicr mask. We then
4846 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4847 a second rldicr. */
4848 lsb = c & -c; /* lsb == 0x0000010000000000 */
4849 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4850 c = ~c; /* c == 0x00fff0ffffffffff */
4851 c &= -lsb; /* c == 0x00fff00000000000 */
4852 lsb = c & -c; /* lsb == 0x0000100000000000 */
4853 c = ~c; /* c == 0xff000fffffffffff */
4854 c &= -lsb; /* c == 0xff00000000000000 */
4855 shift = 0;
4856 while ((lsb >>= 1) != 0)
4857 shift++; /* shift == 44 on exit from loop */
4858 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4859 m1 >>= shift; /* m1 == 0x0000000000000fff */
4860 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4863 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4864 masks will be all 1's. We are guaranteed more than one transition. */
4865 out[0] = GEN_INT (64 - shift);
4866 out[1] = GEN_INT (m1);
4867 out[2] = GEN_INT (shift);
4868 out[3] = GEN_INT (m2);
4869 #else
4870 (void)in;
4871 (void)out;
4872 gcc_unreachable ();
4873 #endif
4876 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4878 bool
4879 invalid_e500_subreg (rtx op, enum machine_mode mode)
4881 if (TARGET_E500_DOUBLE)
4883 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4884 subreg:TI and reg:TF. Decimal float modes are like integer
4885 modes (only low part of each register used) for this
4886 purpose. */
4887 if (GET_CODE (op) == SUBREG
4888 && (mode == SImode || mode == DImode || mode == TImode
4889 || mode == DDmode || mode == TDmode)
4890 && REG_P (SUBREG_REG (op))
4891 && (GET_MODE (SUBREG_REG (op)) == DFmode
4892 || GET_MODE (SUBREG_REG (op)) == TFmode))
4893 return true;
4895 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4896 reg:TI. */
4897 if (GET_CODE (op) == SUBREG
4898 && (mode == DFmode || mode == TFmode)
4899 && REG_P (SUBREG_REG (op))
4900 && (GET_MODE (SUBREG_REG (op)) == DImode
4901 || GET_MODE (SUBREG_REG (op)) == TImode
4902 || GET_MODE (SUBREG_REG (op)) == DDmode
4903 || GET_MODE (SUBREG_REG (op)) == TDmode))
4904 return true;
4907 if (TARGET_SPE
4908 && GET_CODE (op) == SUBREG
4909 && mode == SImode
4910 && REG_P (SUBREG_REG (op))
4911 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4912 return true;
4914 return false;
4917 /* AIX increases natural record alignment to doubleword if the first
4918 field is an FP double while the FP fields remain word aligned. */
4920 unsigned int
4921 rs6000_special_round_type_align (tree type, unsigned int computed,
4922 unsigned int specified)
4924 unsigned int align = MAX (computed, specified);
4925 tree field = TYPE_FIELDS (type);
4927 /* Skip all non field decls */
4928 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4929 field = DECL_CHAIN (field);
4931 if (field != NULL && field != type)
4933 type = TREE_TYPE (field);
4934 while (TREE_CODE (type) == ARRAY_TYPE)
4935 type = TREE_TYPE (type);
4937 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4938 align = MAX (align, 64);
4941 return align;
4944 /* Darwin increases record alignment to the natural alignment of
4945 the first field. */
4947 unsigned int
4948 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
4949 unsigned int specified)
4951 unsigned int align = MAX (computed, specified);
4953 if (TYPE_PACKED (type))
4954 return align;
4956 /* Find the first field, looking down into aggregates. */
4957 do {
4958 tree field = TYPE_FIELDS (type);
4959 /* Skip all non field decls */
4960 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4961 field = DECL_CHAIN (field);
4962 if (! field)
4963 break;
4964 /* A packed field does not contribute any extra alignment. */
4965 if (DECL_PACKED (field))
4966 return align;
4967 type = TREE_TYPE (field);
4968 while (TREE_CODE (type) == ARRAY_TYPE)
4969 type = TREE_TYPE (type);
4970 } while (AGGREGATE_TYPE_P (type));
4972 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
4973 align = MAX (align, TYPE_ALIGN (type));
4975 return align;
4978 /* Return 1 for an operand in small memory on V.4/eabi. */
4980 int
4981 small_data_operand (rtx op ATTRIBUTE_UNUSED,
4982 enum machine_mode mode ATTRIBUTE_UNUSED)
4984 #if TARGET_ELF
4985 rtx sym_ref;
4987 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
4988 return 0;
4990 if (DEFAULT_ABI != ABI_V4)
4991 return 0;
4993 /* Vector and float memory instructions have a limited offset on the
4994 SPE, so using a vector or float variable directly as an operand is
4995 not useful. */
4996 if (TARGET_SPE
4997 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
4998 return 0;
5000 if (GET_CODE (op) == SYMBOL_REF)
5001 sym_ref = op;
5003 else if (GET_CODE (op) != CONST
5004 || GET_CODE (XEXP (op, 0)) != PLUS
5005 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5006 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5007 return 0;
5009 else
5011 rtx sum = XEXP (op, 0);
5012 HOST_WIDE_INT summand;
5014 /* We have to be careful here, because it is the referenced address
5015 that must be 32k from _SDA_BASE_, not just the symbol. */
5016 summand = INTVAL (XEXP (sum, 1));
5017 if (summand < 0 || summand > g_switch_value)
5018 return 0;
5020 sym_ref = XEXP (sum, 0);
5023 return SYMBOL_REF_SMALL_P (sym_ref);
5024 #else
5025 return 0;
5026 #endif
5029 /* Return true if either operand is a general purpose register. */
5031 bool
5032 gpr_or_gpr_p (rtx op0, rtx op1)
5034 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5035 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5038 /* Given an address, return a constant offset term if one exists. */
5040 static rtx
5041 address_offset (rtx op)
5043 if (GET_CODE (op) == PRE_INC
5044 || GET_CODE (op) == PRE_DEC)
5045 op = XEXP (op, 0);
5046 else if (GET_CODE (op) == PRE_MODIFY
5047 || GET_CODE (op) == LO_SUM)
5048 op = XEXP (op, 1);
5050 if (GET_CODE (op) == CONST)
5051 op = XEXP (op, 0);
5053 if (GET_CODE (op) == PLUS)
5054 op = XEXP (op, 1);
5056 if (CONST_INT_P (op))
5057 return op;
5059 return NULL_RTX;
5062 /* Return true if the MEM operand is a memory operand suitable for use
5063 with a (full width, possibly multiple) gpr load/store. On
5064 powerpc64 this means the offset must be divisible by 4.
5065 Implements 'Y' constraint.
5067 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5068 a constraint function we know the operand has satisfied a suitable
5069 memory predicate. Also accept some odd rtl generated by reload
5070 (see rs6000_legitimize_reload_address for various forms). It is
5071 important that reload rtl be accepted by appropriate constraints
5072 but not by the operand predicate.
5074 Offsetting a lo_sum should not be allowed, except where we know by
5075 alignment that a 32k boundary is not crossed, but see the ???
5076 comment in rs6000_legitimize_reload_address. Note that by
5077 "offsetting" here we mean a further offset to access parts of the
5078 MEM. It's fine to have a lo_sum where the inner address is offset
5079 from a sym, since the same sym+offset will appear in the high part
5080 of the address calculation. */
5082 bool
5083 mem_operand_gpr (rtx op, enum machine_mode mode)
5085 unsigned HOST_WIDE_INT offset;
5086 int extra;
5087 rtx addr = XEXP (op, 0);
5089 op = address_offset (addr);
5090 if (op == NULL_RTX)
5091 return true;
5093 offset = INTVAL (op);
5094 if (TARGET_POWERPC64 && (offset & 3) != 0)
5095 return false;
5097 if (GET_CODE (addr) == LO_SUM)
5098 /* We know by alignment that ABI_AIX medium/large model toc refs
5099 will not cross a 32k boundary, since all entries in the
5100 constant pool are naturally aligned and we check alignment for
5101 other medium model toc-relative addresses. For ABI_V4 and
5102 ABI_DARWIN lo_sum addresses, we just check that 64-bit
5103 offsets are 4-byte aligned. */
5104 return true;
5106 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5107 gcc_assert (extra >= 0);
5108 return offset + 0x8000 < 0x10000u - extra;
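/* Worked example: for DImode on a 32-bit target, extra == 4, so an
   offset passes when -0x8000 <= offset < 0x8000 - 4, leaving room for
   the second word access at offset + 4.  */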
5111 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5113 static bool
5114 reg_offset_addressing_ok_p (enum machine_mode mode)
5116 switch (mode)
5118 case V16QImode:
5119 case V8HImode:
5120 case V4SFmode:
5121 case V4SImode:
5122 case V2DFmode:
5123 case V2DImode:
5124 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
5125 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5126 return false;
5127 break;
5129 case V4HImode:
5130 case V2SImode:
5131 case V1DImode:
5132 case V2SFmode:
5133 /* Paired vector modes. Only reg+reg addressing is valid. */
5134 if (TARGET_PAIRED_FLOAT)
5135 return false;
5136 break;
5138 default:
5139 break;
5142 return true;
5145 static bool
5146 virtual_stack_registers_memory_p (rtx op)
5148 int regnum;
5150 if (GET_CODE (op) == REG)
5151 regnum = REGNO (op);
5153 else if (GET_CODE (op) == PLUS
5154 && GET_CODE (XEXP (op, 0)) == REG
5155 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5156 regnum = REGNO (XEXP (op, 0));
5158 else
5159 return false;
5161 return (regnum >= FIRST_VIRTUAL_REGISTER
5162 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5165 /* Return true if memory accesses to OP are known to never straddle
5166 a 32k boundary. */
5168 static bool
5169 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5170 enum machine_mode mode)
5172 tree decl, type;
5173 unsigned HOST_WIDE_INT dsize, dalign;
5175 if (GET_CODE (op) != SYMBOL_REF)
5176 return false;
5178 decl = SYMBOL_REF_DECL (op);
5179 if (!decl)
5181 if (GET_MODE_SIZE (mode) == 0)
5182 return false;
5184 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5185 replacing memory addresses with an anchor plus offset. We
5186 could find the decl by rummaging around in the block->objects
5187 VEC for the given offset but that seems like too much work. */
5188 dalign = 1;
5189 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5190 && SYMBOL_REF_ANCHOR_P (op)
5191 && SYMBOL_REF_BLOCK (op) != NULL)
5193 struct object_block *block = SYMBOL_REF_BLOCK (op);
5194 HOST_WIDE_INT lsb, mask;
5196 /* Given the alignment of the block.. */
5197 dalign = block->alignment;
5198 mask = dalign / BITS_PER_UNIT - 1;
5200 /* ..and the combined offset of the anchor and any offset
5201 to this block object.. */
5202 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5203 lsb = offset & -offset;
5205 /* ..find how many bits of the alignment we know for the
5206 object. */
5207 mask &= lsb - 1;
5208 dalign = mask + 1;
5210 return dalign >= GET_MODE_SIZE (mode);
5213 if (DECL_P (decl))
5215 if (TREE_CODE (decl) == FUNCTION_DECL)
5216 return true;
5218 if (!DECL_SIZE_UNIT (decl))
5219 return false;
5221 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5222 return false;
5224 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5225 if (dsize > 32768)
5226 return false;
5228 dalign = DECL_ALIGN_UNIT (decl);
5229 return dalign >= dsize;
5232 type = TREE_TYPE (decl);
5234 if (TREE_CODE (decl) == STRING_CST)
5235 dsize = TREE_STRING_LENGTH (decl);
5236 else if (TYPE_SIZE_UNIT (type)
5237 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5238 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5239 else
5240 return false;
5241 if (dsize > 32768)
5242 return false;
5244 dalign = TYPE_ALIGN (type);
5245 if (CONSTANT_CLASS_P (decl))
5246 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5247 else
5248 dalign = DATA_ALIGNMENT (decl, dalign);
5249 dalign /= BITS_PER_UNIT;
5250 return dalign >= dsize;
5253 static bool
5254 constant_pool_expr_p (rtx op)
5256 rtx base, offset;
5258 split_const (op, &base, &offset);
5259 return (GET_CODE (base) == SYMBOL_REF
5260 && CONSTANT_POOL_ADDRESS_P (base)
5261 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5264 static const_rtx tocrel_base, tocrel_offset;
5266 /* Return true if OP is a toc pointer relative address (the output
5267 of create_TOC_reference). If STRICT, do not match high part or
5268 non-split -mcmodel=large/medium toc pointer relative addresses. */
5270 bool
5271 toc_relative_expr_p (const_rtx op, bool strict)
5273 if (!TARGET_TOC)
5274 return false;
5276 if (TARGET_CMODEL != CMODEL_SMALL)
5278 /* Only match the low part. */
5279 if (GET_CODE (op) == LO_SUM
5280 && REG_P (XEXP (op, 0))
5281 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5282 op = XEXP (op, 1);
5283 else if (strict)
5284 return false;
5287 tocrel_base = op;
5288 tocrel_offset = const0_rtx;
5289 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5291 tocrel_base = XEXP (op, 0);
5292 tocrel_offset = XEXP (op, 1);
5295 return (GET_CODE (tocrel_base) == UNSPEC
5296 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5299 /* Return true if X is a constant pool address, and also for cmodel=medium
5300 if X is a toc-relative address known to be offsettable within MODE. */
5302 bool
5303 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5304 bool strict)
5306 return (toc_relative_expr_p (x, strict)
5307 && (TARGET_CMODEL != CMODEL_MEDIUM
5308 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5309 || mode == QImode
5310 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5311 INTVAL (tocrel_offset), mode)));
5314 static bool
5315 legitimate_small_data_p (enum machine_mode mode, rtx x)
5317 return (DEFAULT_ABI == ABI_V4
5318 && !flag_pic && !TARGET_TOC
5319 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5320 && small_data_operand (x, mode));
5323 /* SPE offset addressing is limited to 5-bits worth of double words. */
5324 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
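/* ((x) & ~0xf8) == 0 accepts exactly the doubleword-aligned offsets
   0, 8, 16, ..., 248, i.e. a 5-bit field scaled by 8.  */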
5326 bool
5327 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5328 bool strict, bool worst_case)
5330 unsigned HOST_WIDE_INT offset;
5331 unsigned int extra;
5333 if (GET_CODE (x) != PLUS)
5334 return false;
5335 if (!REG_P (XEXP (x, 0)))
5336 return false;
5337 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5338 return false;
5339 if (!reg_offset_addressing_ok_p (mode))
5340 return virtual_stack_registers_memory_p (x);
5341 if (legitimate_constant_pool_address_p (x, mode, strict))
5342 return true;
5343 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5344 return false;
5346 offset = INTVAL (XEXP (x, 1));
5347 extra = 0;
5348 switch (mode)
5350 case V4HImode:
5351 case V2SImode:
5352 case V1DImode:
5353 case V2SFmode:
5354 /* SPE vector modes. */
5355 return SPE_CONST_OFFSET_OK (offset);
5357 case DFmode:
5358 case DDmode:
5359 case DImode:
5360 /* On e500v2, we may have:
5362 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
5364 Which gets addressed with evldd instructions. */
5365 if (TARGET_E500_DOUBLE)
5366 return SPE_CONST_OFFSET_OK (offset);
5368 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5369 addressing. */
5370 if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
5371 return false;
5373 if (!worst_case)
5374 break;
5375 if (!TARGET_POWERPC64)
5376 extra = 4;
5377 else if (offset & 3)
5378 return false;
5379 break;
5381 case TFmode:
5382 case TDmode:
5383 case TImode:
5384 if (TARGET_E500_DOUBLE)
5385 return (SPE_CONST_OFFSET_OK (offset)
5386 && SPE_CONST_OFFSET_OK (offset + 8));
5388 extra = 8;
5389 if (!worst_case)
5390 break;
5391 if (!TARGET_POWERPC64)
5392 extra = 12;
5393 else if (offset & 3)
5394 return false;
5395 break;
5397 default:
5398 break;
5401 offset += 0x8000;
5402 return offset < 0x10000 - extra;
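/* Worked example: a worst-case TImode access on 32-bit needs three
   further words after the first, so extra == 12 and offsets pass only
   when -0x8000 <= offset < 0x8000 - 12.  */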
5405 bool
5406 legitimate_indexed_address_p (rtx x, int strict)
5408 rtx op0, op1;
5410 if (GET_CODE (x) != PLUS)
5411 return false;
5413 op0 = XEXP (x, 0);
5414 op1 = XEXP (x, 1);
5416 /* Recognize the rtl generated by reload which we know will later be
5417 replaced with proper base and index regs. */
5418 if (!strict
5419 && reload_in_progress
5420 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5421 && REG_P (op1))
5422 return true;
5424 return (REG_P (op0) && REG_P (op1)
5425 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5426 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5427 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5428 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5431 bool
5432 avoiding_indexed_address_p (enum machine_mode mode)
5434 /* Avoid indexed addressing for modes that have non-indexed
5435 load/store instruction forms. */
5436 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5439 inline bool
5440 legitimate_indirect_address_p (rtx x, int strict)
5442 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5445 bool
5446 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5448 if (!TARGET_MACHO || !flag_pic
5449 || mode != SImode || GET_CODE (x) != MEM)
5450 return false;
5451 x = XEXP (x, 0);
5453 if (GET_CODE (x) != LO_SUM)
5454 return false;
5455 if (GET_CODE (XEXP (x, 0)) != REG)
5456 return false;
5457 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5458 return false;
5459 x = XEXP (x, 1);
5461 return CONSTANT_P (x);
5464 static bool
5465 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5467 if (GET_CODE (x) != LO_SUM)
5468 return false;
5469 if (GET_CODE (XEXP (x, 0)) != REG)
5470 return false;
5471 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5472 return false;
5473 /* Restrict addressing for DI because of our SUBREG hackery. */
5474 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
5475 return false;
5476 x = XEXP (x, 1);
5478 if (TARGET_ELF || TARGET_MACHO)
5480 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5481 return false;
5482 if (TARGET_TOC)
5483 return false;
5484 if (GET_MODE_NUNITS (mode) != 1)
5485 return false;
5486 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5487 && !(/* ??? Assume floating point reg based on mode? */
5488 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5489 && (mode == DFmode || mode == DDmode)))
5490 return false;
5492 return CONSTANT_P (x);
5495 return false;
5499 /* Try machine-dependent ways of modifying an illegitimate address
5500 to be legitimate. If we find one, return the new, valid address.
5501 This is used from only one place: `memory_address' in explow.c.
5503 OLDX is the address as it was before break_out_memory_refs was
5504 called. In some cases it is useful to look at this to decide what
5505 needs to be done.
5507 It is always safe for this function to do nothing. It exists to
5508 recognize opportunities to optimize the output.
5510 On RS/6000, first check for the sum of a register with a constant
5511 integer that is out of range. If so, generate code to add the
5512 constant with the low-order 16 bits masked to the register and force
5513 this result into another register (this can be done with `cau').
5514 Then generate an address of REG+(CONST&0xffff), allowing for the
5515 possibility of bit 16 being a one.
5517 Then check for the sum of a register and something not constant, try to
5518 load the other things into a register and return the sum. */
5520 static rtx
5521 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5522 enum machine_mode mode)
5524 unsigned int extra;
5526 if (!reg_offset_addressing_ok_p (mode))
5528 if (virtual_stack_registers_memory_p (x))
5529 return x;
5531 /* In theory we should not be seeing addresses of the form reg+0,
5532 but just in case it is generated, optimize it away. */
5533 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5534 return force_reg (Pmode, XEXP (x, 0));
5536 /* Make sure both operands are registers. */
5537 else if (GET_CODE (x) == PLUS)
5538 return gen_rtx_PLUS (Pmode,
5539 force_reg (Pmode, XEXP (x, 0)),
5540 force_reg (Pmode, XEXP (x, 1)));
5541 else
5542 return force_reg (Pmode, x);
5544 if (GET_CODE (x) == SYMBOL_REF)
5546 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5547 if (model != 0)
5548 return rs6000_legitimize_tls_address (x, model);
5551 extra = 0;
5552 switch (mode)
5554 case TFmode:
5555 case TDmode:
5556 case TImode:
5557 /* As in legitimate_offset_address_p we do not assume
5558 worst-case. The mode here is just a hint as to the registers
5559 used. A TImode is usually in gprs, but may actually be in
5560 fprs. Leave worst-case scenario for reload to handle via
5561 insn constraints. */
5562 extra = 8;
5563 break;
5564 default:
5565 break;
5568 if (GET_CODE (x) == PLUS
5569 && GET_CODE (XEXP (x, 0)) == REG
5570 && GET_CODE (XEXP (x, 1)) == CONST_INT
5571 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5572 >= 0x10000 - extra)
5573 && !(SPE_VECTOR_MODE (mode)
5574 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
5576 HOST_WIDE_INT high_int, low_int;
5577 rtx sum;
5578 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5579 if (low_int >= 0x8000 - extra)
5580 low_int = 0;
5581 high_int = INTVAL (XEXP (x, 1)) - low_int;
5582 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5583 GEN_INT (high_int)), 0);
5584 return plus_constant (Pmode, sum, low_int);
5586 else if (GET_CODE (x) == PLUS
5587 && GET_CODE (XEXP (x, 0)) == REG
5588 && GET_CODE (XEXP (x, 1)) != CONST_INT
5589 && GET_MODE_NUNITS (mode) == 1
5590 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5591 || (/* ??? Assume floating point reg based on mode? */
5592 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5593 && (mode == DFmode || mode == DDmode)))
5594 && !avoiding_indexed_address_p (mode))
5596 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5597 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5599 else if (SPE_VECTOR_MODE (mode)
5600 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
5602 if (mode == DImode)
5603 return x;
5604 /* We accept [reg + reg] and [reg + OFFSET]. */
5606 if (GET_CODE (x) == PLUS)
5608 rtx op1 = XEXP (x, 0);
5609 rtx op2 = XEXP (x, 1);
5610 rtx y;
5612 op1 = force_reg (Pmode, op1);
5614 if (GET_CODE (op2) != REG
5615 && (GET_CODE (op2) != CONST_INT
5616 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5617 || (GET_MODE_SIZE (mode) > 8
5618 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5619 op2 = force_reg (Pmode, op2);
5621 /* We can't always do [reg + reg] for these, because [reg +
5622 reg + offset] is not a legitimate addressing mode. */
5623 y = gen_rtx_PLUS (Pmode, op1, op2);
5625 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5626 return force_reg (Pmode, y);
5627 else
5628 return y;
5631 return force_reg (Pmode, x);
5633 else if ((TARGET_ELF
5634 #if TARGET_MACHO
5635 || !MACHO_DYNAMIC_NO_PIC_P
5636 #endif
5638 && TARGET_32BIT
5639 && TARGET_NO_TOC
5640 && ! flag_pic
5641 && GET_CODE (x) != CONST_INT
5642 && GET_CODE (x) != CONST_DOUBLE
5643 && CONSTANT_P (x)
5644 && GET_MODE_NUNITS (mode) == 1
5645 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5646 || (/* ??? Assume floating point reg based on mode? */
5647 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5648 && (mode == DFmode || mode == DDmode))))
5650 rtx reg = gen_reg_rtx (Pmode);
5651 if (TARGET_ELF)
5652 emit_insn (gen_elf_high (reg, x));
5653 else
5654 emit_insn (gen_macho_high (reg, x));
5655 return gen_rtx_LO_SUM (Pmode, reg, x);
5657 else if (TARGET_TOC
5658 && GET_CODE (x) == SYMBOL_REF
5659 && constant_pool_expr_p (x)
5660 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5661 return create_TOC_reference (x, NULL_RTX);
5662 else
5663 return x;
5666 /* Debug version of rs6000_legitimize_address. */
5667 static rtx
5668 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5670 rtx ret;
5671 rtx insns;
5673 start_sequence ();
5674 ret = rs6000_legitimize_address (x, oldx, mode);
5675 insns = get_insns ();
5676 end_sequence ();
5678 if (ret != x)
5680 fprintf (stderr,
5681 "\nrs6000_legitimize_address: mode %s, old code %s, "
5682 "new code %s, modified\n",
5683 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5684 GET_RTX_NAME (GET_CODE (ret)));
5686 fprintf (stderr, "Original address:\n");
5687 debug_rtx (x);
5689 fprintf (stderr, "oldx:\n");
5690 debug_rtx (oldx);
5692 fprintf (stderr, "New address:\n");
5693 debug_rtx (ret);
5695 if (insns)
5697 fprintf (stderr, "Insns added:\n");
5698 debug_rtx_list (insns, 20);
5701 else
5703 fprintf (stderr,
5704 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5705 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5707 debug_rtx (x);
5710 if (insns)
5711 emit_insn (insns);
5713 return ret;
5716 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5717 We need to emit DTP-relative relocations. */
5719 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5720 static void
5721 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5723 switch (size)
5725 case 4:
5726 fputs ("\t.long\t", file);
5727 break;
5728 case 8:
5729 fputs (DOUBLE_INT_ASM_OP, file);
5730 break;
5731 default:
5732 gcc_unreachable ();
5734 output_addr_const (file, x);
5735 fputs ("@dtprel+0x8000", file);
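/* For example (editor's note), with SIZE == 4 and X a SYMBOL_REF for
   `foo', the function above emits "\t.long\tfoo@dtprel+0x8000".  */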
5738 /* In the name of slightly smaller debug output, and to cater to
5739 general assembler lossage, recognize various UNSPEC sequences
5740 and turn them back into a direct symbol reference. */
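/* Editor's sketch of a typical small-model case: an address such as
     (mem (plus (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)
                (const_int 4)))
   is rewritten below into (mem (plus (symbol_ref "x") (const_int 4)))
   so the debug info refers to the symbol directly.  */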
5742 static rtx
5743 rs6000_delegitimize_address (rtx orig_x)
5745 rtx x, y, offset;
5747 orig_x = delegitimize_mem_from_attrs (orig_x);
5748 x = orig_x;
5749 if (MEM_P (x))
5750 x = XEXP (x, 0);
5752 y = x;
5753 if (TARGET_CMODEL != CMODEL_SMALL
5754 && GET_CODE (y) == LO_SUM)
5755 y = XEXP (y, 1);
5757 offset = NULL_RTX;
5758 if (GET_CODE (y) == PLUS
5759 && GET_MODE (y) == Pmode
5760 && CONST_INT_P (XEXP (y, 1)))
5762 offset = XEXP (y, 1);
5763 y = XEXP (y, 0);
5766 if (GET_CODE (y) == UNSPEC
5767 && XINT (y, 1) == UNSPEC_TOCREL)
5769 #ifdef ENABLE_CHECKING
5770 if (REG_P (XVECEXP (y, 0, 1))
5771 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
5773 /* All good. */
5775 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
5777 /* Weirdness alert. df_note_compute can replace r2 with a
5778 debug_expr when this unspec is in a debug_insn.
5779 Seen in gcc.dg/pr51957-1.c */
5781 else
5783 debug_rtx (orig_x);
5784 abort ();
5786 #endif
5787 y = XVECEXP (y, 0, 0);
5788 if (offset != NULL_RTX)
5789 y = gen_rtx_PLUS (Pmode, y, offset);
5790 if (!MEM_P (orig_x))
5791 return y;
5792 else
5793 return replace_equiv_address_nv (orig_x, y);
5796 if (TARGET_MACHO
5797 && GET_CODE (orig_x) == LO_SUM
5798 && GET_CODE (XEXP (orig_x, 1)) == CONST)
5800 y = XEXP (XEXP (orig_x, 1), 0);
5801 if (GET_CODE (y) == UNSPEC
5802 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5803 return XVECEXP (y, 0, 0);
5806 return orig_x;
5809 /* Return true if X shouldn't be emitted into the debug info.
5810 The linker doesn't like .toc section references from
5811 .debug_* sections, so reject .toc section symbols. */
5813 static bool
5814 rs6000_const_not_ok_for_debug_p (rtx x)
5816 if (GET_CODE (x) == SYMBOL_REF
5817 && CONSTANT_POOL_ADDRESS_P (x))
5819 rtx c = get_pool_constant (x);
5820 enum machine_mode cmode = get_pool_mode (x);
5821 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
5822 return true;
5825 return false;
5828 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5830 static GTY(()) rtx rs6000_tls_symbol;
5831 static rtx
5832 rs6000_tls_get_addr (void)
5834 if (!rs6000_tls_symbol)
5835 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5837 return rs6000_tls_symbol;
5840 /* Construct the SYMBOL_REF for TLS GOT references. */
5842 static GTY(()) rtx rs6000_got_symbol;
5843 static rtx
5844 rs6000_got_sym (void)
5846 if (!rs6000_got_symbol)
5848 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5849 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5850 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5853 return rs6000_got_symbol;
5856 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5857 this (thread-local) address. */
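/* Rough shape of the result (editor's sketch; the exact mnemonics come
   from the tls_* patterns in rs6000.md): local-exec with -mtls-size=16
   on 64-bit reduces to a single
     addi dest,r13,sym@tprel
   from the thread pointer, while the 32-bit-offset variant uses an
   addis/addi pair with @tprel@ha and @tprel@l.  The dynamic models
   instead call __tls_get_addr through the tls_gd_*/tls_ld_*
   patterns.  */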
5859 static rtx
5860 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
5862 rtx dest, insn;
5864 dest = gen_reg_rtx (Pmode);
5865 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
5867 rtx tlsreg;
5869 if (TARGET_64BIT)
5871 tlsreg = gen_rtx_REG (Pmode, 13);
5872 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
5874 else
5876 tlsreg = gen_rtx_REG (Pmode, 2);
5877 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
5879 emit_insn (insn);
5881 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
5883 rtx tlsreg, tmp;
5885 tmp = gen_reg_rtx (Pmode);
5886 if (TARGET_64BIT)
5888 tlsreg = gen_rtx_REG (Pmode, 13);
5889 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
5891 else
5893 tlsreg = gen_rtx_REG (Pmode, 2);
5894 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
5896 emit_insn (insn);
5897 if (TARGET_64BIT)
5898 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
5899 else
5900 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
5901 emit_insn (insn);
5903 else
5905 rtx r3, got, tga, tmp1, tmp2, call_insn;
5907 /* We currently use relocations like @got@tlsgd for tls, which
5908 means the linker will handle allocation of tls entries, placing
5909 them in the .got section. So use a pointer to the .got section,
5910 not one to secondary TOC sections used by 64-bit -mminimal-toc,
5911 or to secondary GOT sections used by 32-bit -fPIC. */
5912 if (TARGET_64BIT)
5913 got = gen_rtx_REG (Pmode, 2);
5914 else
5916 if (flag_pic == 1)
5917 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
5918 else
5920 rtx gsym = rs6000_got_sym ();
5921 got = gen_reg_rtx (Pmode);
5922 if (flag_pic == 0)
5923 rs6000_emit_move (got, gsym, Pmode);
5924 else
5926 rtx mem, lab, last;
5928 tmp1 = gen_reg_rtx (Pmode);
5929 tmp2 = gen_reg_rtx (Pmode);
5930 mem = gen_const_mem (Pmode, tmp1);
5931 lab = gen_label_rtx ();
5932 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
5933 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
5934 if (TARGET_LINK_STACK)
5935 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
5936 emit_move_insn (tmp2, mem);
5937 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
5938 set_unique_reg_note (last, REG_EQUAL, gsym);
5943 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
5945 tga = rs6000_tls_get_addr ();
5946 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
5947 1, const0_rtx, Pmode);
5949 r3 = gen_rtx_REG (Pmode, 3);
5950 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5951 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
5952 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5953 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
5954 else if (DEFAULT_ABI == ABI_V4)
5955 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
5956 else
5957 gcc_unreachable ();
5958 call_insn = last_call_insn ();
5959 PATTERN (call_insn) = insn;
5960 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5961 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5962 pic_offset_table_rtx);
5964 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
5966 tga = rs6000_tls_get_addr ();
5967 tmp1 = gen_reg_rtx (Pmode);
5968 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
5969 1, const0_rtx, Pmode);
5971 r3 = gen_rtx_REG (Pmode, 3);
5972 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5973 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
5974 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5975 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
5976 else if (DEFAULT_ABI == ABI_V4)
5977 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
5978 else
5979 gcc_unreachable ();
5980 call_insn = last_call_insn ();
5981 PATTERN (call_insn) = insn;
5982 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5983 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5984 pic_offset_table_rtx);
5986 if (rs6000_tls_size == 16)
5988 if (TARGET_64BIT)
5989 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
5990 else
5991 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
5993 else if (rs6000_tls_size == 32)
5995 tmp2 = gen_reg_rtx (Pmode);
5996 if (TARGET_64BIT)
5997 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
5998 else
5999 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6000 emit_insn (insn);
6001 if (TARGET_64BIT)
6002 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6003 else
6004 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6006 else
6008 tmp2 = gen_reg_rtx (Pmode);
6009 if (TARGET_64BIT)
6010 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6011 else
6012 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6013 emit_insn (insn);
6014 insn = gen_rtx_SET (Pmode, dest,
6015 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6017 emit_insn (insn);
6019 else
6021 /* IE, or 64-bit offset LE. */
6022 tmp2 = gen_reg_rtx (Pmode);
6023 if (TARGET_64BIT)
6024 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6025 else
6026 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6027 emit_insn (insn);
6028 if (TARGET_64BIT)
6029 insn = gen_tls_tls_64 (dest, tmp2, addr);
6030 else
6031 insn = gen_tls_tls_32 (dest, tmp2, addr);
6032 emit_insn (insn);
6036 return dest;
6039 /* Return 1 if X contains a thread-local symbol. */
6041 static bool
6042 rs6000_tls_referenced_p (rtx x)
6044 if (! TARGET_HAVE_TLS)
6045 return false;
6047 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6050 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6052 static bool
6053 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6055 if (GET_CODE (x) == HIGH
6056 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6057 return true;
6059 return rs6000_tls_referenced_p (x);
6062 /* Return 1 if *X is a thread-local symbol. This is the same as
6063 rs6000_tls_symbol_ref except for the type of the unused argument. */
6065 static int
6066 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6068 return RS6000_SYMBOL_REF_TLS_P (*x);
6071 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6072 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6073 can be addressed relative to the toc pointer. */
6075 static bool
6076 use_toc_relative_ref (rtx sym)
6078 return ((constant_pool_expr_p (sym)
6079 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6080 get_pool_mode (sym)))
6081 || (TARGET_CMODEL == CMODEL_MEDIUM
6082 && !CONSTANT_POOL_ADDRESS_P (sym)
6083 && SYMBOL_REF_LOCAL_P (sym)));
6086 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6087 replace the input X, or the original X if no replacement is called for.
6088 The output parameter *WIN is 1 if the calling macro should goto WIN,
6089 0 if it should not.
6091 For RS/6000, we wish to handle large displacements off a base
6092 register by splitting the addend across an addis and the mem insn.
6093 This cuts the number of extra insns needed from 3 to 1.
6095 On Darwin, we use this to generate code for floating point constants.
6096 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6097 The Darwin code is inside #if TARGET_MACHO because only then are the
6098 machopic_* functions defined. */
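/* Worked example (editor's illustration): given
     (plus (reg) (const_int 0x12344))
   the large-displacement case below rewrites the address as
     (plus (plus (reg) (const_int 0x10000)) (const_int 0x2344))
   and pushes a reload of the inner PLUS, so the high part lands in the
   reload insn while the mem insn keeps a 16-bit displacement.  */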
6099 static rtx
6100 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6101 int opnum, int type,
6102 int ind_levels ATTRIBUTE_UNUSED, int *win)
6104 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6106 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6107 DFmode/DImode MEM. */
6108 if (reg_offset_p
6109 && opnum == 1
6110 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6111 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6112 reg_offset_p = false;
6114 /* We must recognize output that we have already generated ourselves. */
6115 if (GET_CODE (x) == PLUS
6116 && GET_CODE (XEXP (x, 0)) == PLUS
6117 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6118 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6119 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6121 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6122 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6123 opnum, (enum reload_type) type);
6124 *win = 1;
6125 return x;
6128 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6129 if (GET_CODE (x) == LO_SUM
6130 && GET_CODE (XEXP (x, 0)) == HIGH)
6132 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6133 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6134 opnum, (enum reload_type) type);
6135 *win = 1;
6136 return x;
6139 #if TARGET_MACHO
6140 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6141 && GET_CODE (x) == LO_SUM
6142 && GET_CODE (XEXP (x, 0)) == PLUS
6143 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6144 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6145 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6146 && machopic_operand_p (XEXP (x, 1)))
6148 /* Result of previous invocation of this function on Darwin
6149 floating point constant. */
6150 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6151 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6152 opnum, (enum reload_type) type);
6153 *win = 1;
6154 return x;
6156 #endif
6158 if (TARGET_CMODEL != CMODEL_SMALL
6159 && reg_offset_p
6160 && small_toc_ref (x, VOIDmode))
6162 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6163 x = gen_rtx_LO_SUM (Pmode, hi, x);
6164 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6165 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6166 opnum, (enum reload_type) type);
6167 *win = 1;
6168 return x;
6171 /* Force ld/std non-word aligned offset into base register by wrapping
6172 in offset 0. */
6173 if (GET_CODE (x) == PLUS
6174 && GET_CODE (XEXP (x, 0)) == REG
6175 && REGNO (XEXP (x, 0)) < 32
6176 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6177 && GET_CODE (XEXP (x, 1)) == CONST_INT
6178 && reg_offset_p
6179 && (INTVAL (XEXP (x, 1)) & 3) != 0
6180 && VECTOR_MEM_NONE_P (mode)
6181 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
6182 && TARGET_POWERPC64)
6184 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
6185 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6186 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6187 opnum, (enum reload_type) type);
6188 *win = 1;
6189 return x;
6192 if (GET_CODE (x) == PLUS
6193 && GET_CODE (XEXP (x, 0)) == REG
6194 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6195 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6196 && GET_CODE (XEXP (x, 1)) == CONST_INT
6197 && reg_offset_p
6198 && !SPE_VECTOR_MODE (mode)
6199 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6200 || mode == DDmode || mode == TDmode
6201 || mode == DImode))
6202 && VECTOR_MEM_NONE_P (mode))
6204 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6205 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6206 HOST_WIDE_INT high
6207 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6209 /* Check for 32-bit overflow. */
6210 if (high + low != val)
6212 *win = 0;
6213 return x;
6216 /* Reload the high part into a base reg; leave the low part
6217 in the mem directly. */
6219 x = gen_rtx_PLUS (GET_MODE (x),
6220 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6221 GEN_INT (high)),
6222 GEN_INT (low));
6224 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6225 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6226 opnum, (enum reload_type) type);
6227 *win = 1;
6228 return x;
6231 if (GET_CODE (x) == SYMBOL_REF
6232 && reg_offset_p
6233 && VECTOR_MEM_NONE_P (mode)
6234 && !SPE_VECTOR_MODE (mode)
6235 #if TARGET_MACHO
6236 && DEFAULT_ABI == ABI_DARWIN
6237 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6238 && machopic_symbol_defined_p (x)
6239 #else
6240 && DEFAULT_ABI == ABI_V4
6241 && !flag_pic
6242 #endif
6243 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6244 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6245 without fprs.
6246 ??? Assume floating point reg based on mode? This assumption is
6247 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6248 where reload ends up doing a DFmode load of a constant from
6249 mem using two gprs. Unfortunately, at this point reload
6250 hasn't yet selected regs so poking around in reload data
6251 won't help and even if we could figure out the regs reliably,
6252 we'd still want to allow this transformation when the mem is
6253 naturally aligned. Since we say the address is good here, we
6254 can't disable offsets from LO_SUMs in mem_operand_gpr.
6255 FIXME: Allow offset from lo_sum for other modes too, when
6256 mem is sufficiently aligned. */
6257 && mode != TFmode
6258 && mode != TDmode
6259 && (mode != DImode || TARGET_POWERPC64)
6260 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6261 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6263 #if TARGET_MACHO
6264 if (flag_pic)
6266 rtx offset = machopic_gen_offset (x);
6267 x = gen_rtx_LO_SUM (GET_MODE (x),
6268 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6269 gen_rtx_HIGH (Pmode, offset)), offset);
6271 else
6272 #endif
6273 x = gen_rtx_LO_SUM (GET_MODE (x),
6274 gen_rtx_HIGH (Pmode, x), x);
6276 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6277 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6278 opnum, (enum reload_type) type);
6279 *win = 1;
6280 return x;
6283 /* Reload an offset address wrapped by an AND that represents the
6284 masking of the lower bits. Strip the outer AND and let reload
6285 convert the offset address into an indirect address. For VSX,
6286 force reload to create the address with an AND in a separate
6287 register, because we can't guarantee an altivec register will
6288 be used. */
6289 if (VECTOR_MEM_ALTIVEC_P (mode)
6290 && GET_CODE (x) == AND
6291 && GET_CODE (XEXP (x, 0)) == PLUS
6292 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6293 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6294 && GET_CODE (XEXP (x, 1)) == CONST_INT
6295 && INTVAL (XEXP (x, 1)) == -16)
6297 x = XEXP (x, 0);
6298 *win = 1;
6299 return x;
6302 if (TARGET_TOC
6303 && reg_offset_p
6304 && GET_CODE (x) == SYMBOL_REF
6305 && use_toc_relative_ref (x))
6307 x = create_TOC_reference (x, NULL_RTX);
6308 if (TARGET_CMODEL != CMODEL_SMALL)
6309 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6310 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6311 opnum, (enum reload_type) type);
6312 *win = 1;
6313 return x;
6315 *win = 0;
6316 return x;
6319 /* Debug version of rs6000_legitimize_reload_address. */
6320 static rtx
6321 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6322 int opnum, int type,
6323 int ind_levels, int *win)
6325 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6326 ind_levels, win);
6327 fprintf (stderr,
6328 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6329 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6330 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6331 debug_rtx (x);
6333 if (x == ret)
6334 fprintf (stderr, "Same address returned\n");
6335 else if (!ret)
6336 fprintf (stderr, "NULL returned\n");
6337 else
6339 fprintf (stderr, "New address:\n");
6340 debug_rtx (ret);
6343 return ret;
6346 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6347 that is a valid memory address for an instruction.
6348 The MODE argument is the machine mode for the MEM expression
6349 that wants to use this address.
6351 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
6352 refers to a constant pool entry of an address (or the sum of it
6353 plus a constant), a short (16-bit signed) constant plus a register,
6354 the sum of two registers, or a register indirect, possibly with an
6355 auto-increment. For DFmode, DDmode and DImode with a constant plus
6356 register, we must ensure that both words are addressable, or on
6357 PowerPC64 that the offset is word aligned.
6359 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6360 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6361 because adjacent memory cells are accessed by adding word-sized offsets
6362 during assembly output. */
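/* Concrete case (editor's note): (plus (reg) (const_int 0x7ffc)) is a
   valid SImode address, but for DFmode kept in 32-bit GPRs the second
   word would sit at offset 0x8000, which no longer fits the signed
   16-bit displacement, so rs6000_legitimate_offset_address_p rejects
   it.  */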
6363 static bool
6364 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6366 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6368 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6369 if (VECTOR_MEM_ALTIVEC_P (mode)
6370 && GET_CODE (x) == AND
6371 && GET_CODE (XEXP (x, 1)) == CONST_INT
6372 && INTVAL (XEXP (x, 1)) == -16)
6373 x = XEXP (x, 0);
6375 if (RS6000_SYMBOL_REF_TLS_P (x))
6376 return 0;
6377 if (legitimate_indirect_address_p (x, reg_ok_strict))
6378 return 1;
6379 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6380 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6381 && !SPE_VECTOR_MODE (mode)
6382 && mode != TFmode
6383 && mode != TDmode
6384 /* Restrict addressing for DI because of our SUBREG hackery. */
6385 && !(TARGET_E500_DOUBLE
6386 && (mode == DFmode || mode == DDmode || mode == DImode))
6387 && TARGET_UPDATE
6388 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6389 return 1;
6390 if (virtual_stack_registers_memory_p (x))
6391 return 1;
6392 if (reg_offset_p && legitimate_small_data_p (mode, x))
6393 return 1;
6394 if (reg_offset_p
6395 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6396 return 1;
6397 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
6398 if (! reg_ok_strict
6399 && reg_offset_p
6400 && GET_CODE (x) == PLUS
6401 && GET_CODE (XEXP (x, 0)) == REG
6402 && (XEXP (x, 0) == virtual_stack_vars_rtx
6403 || XEXP (x, 0) == arg_pointer_rtx)
6404 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6405 return 1;
6406 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
6407 return 1;
6408 if (mode != TImode
6409 && mode != TFmode
6410 && mode != TDmode
6411 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6412 || TARGET_POWERPC64
6413 || (mode != DFmode && mode != DDmode)
6414 || (TARGET_E500_DOUBLE && mode != DDmode))
6415 && (TARGET_POWERPC64 || mode != DImode)
6416 && !avoiding_indexed_address_p (mode)
6417 && legitimate_indexed_address_p (x, reg_ok_strict))
6418 return 1;
6419 if (GET_CODE (x) == PRE_MODIFY
6420 && mode != TImode
6421 && mode != TFmode
6422 && mode != TDmode
6423 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6424 || TARGET_POWERPC64
6425 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6426 && (TARGET_POWERPC64 || mode != DImode)
6427 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6428 && !SPE_VECTOR_MODE (mode)
6429 /* Restrict addressing for DI because of our SUBREG hackery. */
6430 && !(TARGET_E500_DOUBLE
6431 && (mode == DFmode || mode == DDmode || mode == DImode))
6432 && TARGET_UPDATE
6433 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6434 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
6435 reg_ok_strict, false)
6436 || (!avoiding_indexed_address_p (mode)
6437 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6438 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6439 return 1;
6440 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6441 return 1;
6442 return 0;
6445 /* Debug version of rs6000_legitimate_address_p. */
6446 static bool
6447 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6448 bool reg_ok_strict)
6450 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6451 fprintf (stderr,
6452 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6453 "strict = %d, code = %s\n",
6454 ret ? "true" : "false",
6455 GET_MODE_NAME (mode),
6456 reg_ok_strict,
6457 GET_RTX_NAME (GET_CODE (x)));
6458 debug_rtx (x);
6460 return ret;
6463 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6465 static bool
6466 rs6000_mode_dependent_address_p (const_rtx addr,
6467 addr_space_t as ATTRIBUTE_UNUSED)
6469 return rs6000_mode_dependent_address_ptr (addr);
6472 /* Go to LABEL if ADDR (a legitimate address expression)
6473 has an effect that depends on the machine mode it is used for.
6475 On the RS/6000 this is true of all integral offsets (since AltiVec
6476 and VSX modes don't allow them) and of pre-increment or decrement.
6478 ??? Except that due to conceptual problems in offsettable_address_p
6479 we can't really report the problems of integral offsets. So leave
6480 this assuming that the adjustable offset must be valid for the
6481 sub-words of a TFmode operand, which is what we had before. */
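/* Example (editor's note): on 32-bit, (plus (reg) (const_int 32760))
   is reported as mode dependent because 32760 + 0x8000 = 0xfff8 is
   >= 0x10000 - 12 = 0xfff4; a TFmode access would need offset
   32760 + 12, which overflows the 16-bit displacement.  */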
6483 static bool
6484 rs6000_mode_dependent_address (const_rtx addr)
6486 switch (GET_CODE (addr))
6488 case PLUS:
6489 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6490 is considered a legitimate address before reload, so there
6491 are no offset restrictions in that case. Note that this
6492 condition is safe in strict mode because any address involving
6493 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6494 been rejected as illegitimate. */
6495 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6496 && XEXP (addr, 0) != arg_pointer_rtx
6497 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6499 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
6500 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
6502 break;
6504 case LO_SUM:
6505 /* Anything in the constant pool is sufficiently aligned that
6506 all bytes have the same high part address. */
6507 return !legitimate_constant_pool_address_p (addr, QImode, false);
6509 /* Auto-increment cases are now treated generically in recog.c. */
6510 case PRE_MODIFY:
6511 return TARGET_UPDATE;
6513 /* AND is only allowed in Altivec loads. */
6514 case AND:
6515 return true;
6517 default:
6518 break;
6521 return false;
6524 /* Debug version of rs6000_mode_dependent_address. */
6525 static bool
6526 rs6000_debug_mode_dependent_address (const_rtx addr)
6528 bool ret = rs6000_mode_dependent_address (addr);
6530 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6531 ret ? "true" : "false");
6532 debug_rtx (addr);
6534 return ret;
6537 /* Implement FIND_BASE_TERM. */
6539 rtx
6540 rs6000_find_base_term (rtx op)
6542 rtx base;
6544 base = op;
6545 if (GET_CODE (base) == CONST)
6546 base = XEXP (base, 0);
6547 if (GET_CODE (base) == PLUS)
6548 base = XEXP (base, 0);
6549 if (GET_CODE (base) == UNSPEC)
6550 switch (XINT (base, 1))
6552 case UNSPEC_TOCREL:
6553 case UNSPEC_MACHOPIC_OFFSET:
6554 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6555 for aliasing purposes. */
6556 return XVECEXP (base, 0, 0);
6559 return op;
6562 /* More elaborate version of recog's offsettable_memref_p predicate
6563 that works around the ??? note of rs6000_mode_dependent_address.
6564 In particular it accepts
6566 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6568 in 32-bit mode, that the recog predicate rejects. */
6570 static bool
6571 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
6573 bool worst_case;
6575 if (!MEM_P (op))
6576 return false;
6578 /* First mimic offsettable_memref_p. */
6579 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
6580 return true;
6582 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6583 the latter predicate knows nothing about the mode of the memory
6584 reference and, therefore, assumes that it is the largest supported
6585 mode (TFmode). As a consequence, legitimate offsettable memory
6586 references are rejected. rs6000_legitimate_offset_address_p contains
6587 the correct logic for the PLUS case of rs6000_mode_dependent_address,
6588 at least with a little bit of help here given that we know the
6589 actual registers used. */
6590 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
6591 || GET_MODE_SIZE (reg_mode) == 4);
6592 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
6593 true, worst_case);
6596 /* Change register usage conditional on target flags. */
6597 static void
6598 rs6000_conditional_register_usage (void)
6600 int i;
6602 if (TARGET_DEBUG_TARGET)
6603 fprintf (stderr, "rs6000_conditional_register_usage called\n");
6605 /* Set MQ register fixed (already call_used) so that it will not be
6606 allocated. */
6607 fixed_regs[64] = 1;
6609 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6610 if (TARGET_64BIT)
6611 fixed_regs[13] = call_used_regs[13]
6612 = call_really_used_regs[13] = 1;
6614 /* Conditionally disable FPRs. */
6615 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6616 for (i = 32; i < 64; i++)
6617 fixed_regs[i] = call_used_regs[i]
6618 = call_really_used_regs[i] = 1;
6620 /* The TOC register is not killed across calls in a way that is
6621 visible to the compiler. */
6622 if (DEFAULT_ABI == ABI_AIX)
6623 call_really_used_regs[2] = 0;
6625 if (DEFAULT_ABI == ABI_V4
6626 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6627 && flag_pic == 2)
6628 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6630 if (DEFAULT_ABI == ABI_V4
6631 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6632 && flag_pic == 1)
6633 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6634 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6635 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6637 if (DEFAULT_ABI == ABI_DARWIN
6638 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6639 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6640 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6641 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6643 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6644 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6645 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6647 if (TARGET_SPE)
6649 global_regs[SPEFSCR_REGNO] = 1;
6650 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6651 registers in prologues and epilogues. We no longer use r14
6652 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6653 pool for link-compatibility with older versions of GCC. Once
6654 "old" code has died out, we can return r14 to the allocation
6655 pool. */
6656 fixed_regs[14]
6657 = call_used_regs[14]
6658 = call_really_used_regs[14] = 1;
6661 if (!TARGET_ALTIVEC && !TARGET_VSX)
6663 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6664 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6665 call_really_used_regs[VRSAVE_REGNO] = 1;
6668 if (TARGET_ALTIVEC || TARGET_VSX)
6669 global_regs[VSCR_REGNO] = 1;
6671 if (TARGET_ALTIVEC_ABI)
6673 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6674 call_used_regs[i] = call_really_used_regs[i] = 1;
6676 /* AIX reserves VR20:31 in non-extended ABI mode. */
6677 if (TARGET_XCOFF)
6678 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6679 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6683 /* Try to output insns to set DEST equal to the constant SOURCE if it can
6684 be done in less than N insns. Do all computations in MODE.
6685 Returns the place where the output has been placed if it can be
6686 done and the insns have been emitted. If it would take more than N
6687 insns, zero is returned and no insns are emitted. */
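/* SImode example (editor's illustration): for SOURCE == 0x12345678
   the code below first sets a temporary to 0x12340000 and then IORs
   in 0x5678, i.e. the classic lis/ori pair.  */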
6689 rtx
6690 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6691 rtx source, int n ATTRIBUTE_UNUSED)
6693 rtx result, insn, set;
6694 HOST_WIDE_INT c0, c1;
6696 switch (mode)
6698 case QImode:
6699 case HImode:
6700 if (dest == NULL)
6701 dest = gen_reg_rtx (mode);
6702 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6703 return dest;
6705 case SImode:
6706 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
6708 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6709 GEN_INT (INTVAL (source)
6710 & (~ (HOST_WIDE_INT) 0xffff))));
6711 emit_insn (gen_rtx_SET (VOIDmode, dest,
6712 gen_rtx_IOR (SImode, copy_rtx (result),
6713 GEN_INT (INTVAL (source) & 0xffff))));
6714 result = dest;
6715 break;
6717 case DImode:
6718 switch (GET_CODE (source))
6720 case CONST_INT:
6721 c0 = INTVAL (source);
6722 c1 = -(c0 < 0);
6723 break;
6725 case CONST_DOUBLE:
6726 #if HOST_BITS_PER_WIDE_INT >= 64
6727 c0 = CONST_DOUBLE_LOW (source);
6728 c1 = -(c0 < 0);
6729 #else
6730 c0 = CONST_DOUBLE_LOW (source);
6731 c1 = CONST_DOUBLE_HIGH (source);
6732 #endif
6733 break;
6735 default:
6736 gcc_unreachable ();
6739 result = rs6000_emit_set_long_const (dest, c0, c1);
6740 break;
6742 default:
6743 gcc_unreachable ();
6746 insn = get_last_insn ();
6747 set = single_set (insn);
6748 if (! CONSTANT_P (SET_SRC (set)))
6749 set_unique_reg_note (insn, REG_EQUAL, source);
6751 return result;
6754 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
6755 fall back to a straightforward decomposition. We do this to avoid
6756 exponential run times encountered when looking for longer sequences
6757 with rs6000_emit_set_const. */
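/* 64-bit example (editor's illustration): for c = 0x123456789abcdef0
   the chunks are ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x9abc and
   ud1 = 0xdef0, and the general case below emits the equivalent of
     lis  dest,0x1234
     ori  dest,dest,0x5678
     sldi dest,dest,32
     oris dest,dest,0x9abc
     ori  dest,dest,0xdef0  */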
6758 static rtx
6759 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6761 if (!TARGET_POWERPC64)
6763 rtx operand1, operand2;
6765 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6766 DImode);
6767 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6768 DImode);
6769 emit_move_insn (operand1, GEN_INT (c1));
6770 emit_move_insn (operand2, GEN_INT (c2));
6772 else
6774 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6776 ud1 = c1 & 0xffff;
6777 ud2 = (c1 & 0xffff0000) >> 16;
6778 #if HOST_BITS_PER_WIDE_INT >= 64
6779 c2 = c1 >> 32;
6780 #endif
6781 ud3 = c2 & 0xffff;
6782 ud4 = (c2 & 0xffff0000) >> 16;
6784 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6785 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6787 if (ud1 & 0x8000)
6788 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6789 else
6790 emit_move_insn (dest, GEN_INT (ud1));
6793 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6794 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6796 if (ud2 & 0x8000)
6797 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6798 - 0x80000000));
6799 else
6800 emit_move_insn (dest, GEN_INT (ud2 << 16));
6801 if (ud1 != 0)
6802 emit_move_insn (copy_rtx (dest),
6803 gen_rtx_IOR (DImode, copy_rtx (dest),
6804 GEN_INT (ud1)));
6806 else if (ud3 == 0 && ud4 == 0)
6808 gcc_assert (ud2 & 0x8000);
6809 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6810 - 0x80000000));
6811 if (ud1 != 0)
6812 emit_move_insn (copy_rtx (dest),
6813 gen_rtx_IOR (DImode, copy_rtx (dest),
6814 GEN_INT (ud1)));
6815 emit_move_insn (copy_rtx (dest),
6816 gen_rtx_ZERO_EXTEND (DImode,
6817 gen_lowpart (SImode,
6818 copy_rtx (dest))));
6820 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6821 || (ud4 == 0 && ! (ud3 & 0x8000)))
6823 if (ud3 & 0x8000)
6824 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6825 - 0x80000000));
6826 else
6827 emit_move_insn (dest, GEN_INT (ud3 << 16));
6829 if (ud2 != 0)
6830 emit_move_insn (copy_rtx (dest),
6831 gen_rtx_IOR (DImode, copy_rtx (dest),
6832 GEN_INT (ud2)));
6833 emit_move_insn (copy_rtx (dest),
6834 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6835 GEN_INT (16)));
6836 if (ud1 != 0)
6837 emit_move_insn (copy_rtx (dest),
6838 gen_rtx_IOR (DImode, copy_rtx (dest),
6839 GEN_INT (ud1)));
6841 else
6843 if (ud4 & 0x8000)
6844 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6845 - 0x80000000));
6846 else
6847 emit_move_insn (dest, GEN_INT (ud4 << 16));
6849 if (ud3 != 0)
6850 emit_move_insn (copy_rtx (dest),
6851 gen_rtx_IOR (DImode, copy_rtx (dest),
6852 GEN_INT (ud3)));
6854 emit_move_insn (copy_rtx (dest),
6855 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6856 GEN_INT (32)));
6857 if (ud2 != 0)
6858 emit_move_insn (copy_rtx (dest),
6859 gen_rtx_IOR (DImode, copy_rtx (dest),
6860 GEN_INT (ud2 << 16)));
6861 if (ud1 != 0)
6862 emit_move_insn (copy_rtx (dest),
6863 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
6866 return dest;
6869 /* Helper for the following. Get rid of [r+r] memory refs
6870 in cases where they won't work (TImode, TFmode, TDmode). */
6872 static void
6873 rs6000_eliminate_indexed_memrefs (rtx operands[2])
6875 if (reload_in_progress)
6876 return;
6878 if (GET_CODE (operands[0]) == MEM
6879 && GET_CODE (XEXP (operands[0], 0)) != REG
6880 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
6881 GET_MODE (operands[0]), false))
6882 operands[0]
6883 = replace_equiv_address (operands[0],
6884 copy_addr_to_reg (XEXP (operands[0], 0)));
6886 if (GET_CODE (operands[1]) == MEM
6887 && GET_CODE (XEXP (operands[1], 0)) != REG
6888 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
6889 GET_MODE (operands[1]), false))
6890 operands[1]
6891 = replace_equiv_address (operands[1],
6892 copy_addr_to_reg (XEXP (operands[1], 0)));
6895 /* Emit a move from SOURCE to DEST in mode MODE. */
6896 void
6897 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
6899 rtx operands[2];
6900 operands[0] = dest;
6901 operands[1] = source;
6903 if (TARGET_DEBUG_ADDR)
6905 fprintf (stderr,
6906 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
6907 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
6908 GET_MODE_NAME (mode),
6909 reload_in_progress,
6910 reload_completed,
6911 can_create_pseudo_p ());
6912 debug_rtx (dest);
6913 fprintf (stderr, "source:\n");
6914 debug_rtx (source);
6917 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
6918 if (GET_CODE (operands[1]) == CONST_DOUBLE
6919 && ! FLOAT_MODE_P (mode)
6920 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6922 /* FIXME. This should never happen. */
6923 /* Since it seems that it does, do the safe thing and convert
6924 to a CONST_INT. */
6925 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
6927 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
6928 || FLOAT_MODE_P (mode)
6929 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
6930 || CONST_DOUBLE_LOW (operands[1]) < 0)
6931 && (CONST_DOUBLE_HIGH (operands[1]) != -1
6932 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
6934 /* Check if GCC is setting up a block move that will end up using FP
6935 registers as temporaries. We must make sure this is acceptable. */
6936 if (GET_CODE (operands[0]) == MEM
6937 && GET_CODE (operands[1]) == MEM
6938 && mode == DImode
6939 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
6940 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
6941 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
6942 ? 32 : MEM_ALIGN (operands[0])))
6943 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
6944 ? 32
6945 : MEM_ALIGN (operands[1]))))
6946 && ! MEM_VOLATILE_P (operands [0])
6947 && ! MEM_VOLATILE_P (operands [1]))
6949 emit_move_insn (adjust_address (operands[0], SImode, 0),
6950 adjust_address (operands[1], SImode, 0));
6951 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
6952 adjust_address (copy_rtx (operands[1]), SImode, 4));
6953 return;
6956 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
6957 && !gpc_reg_operand (operands[1], mode))
6958 operands[1] = force_reg (mode, operands[1]);
6960 /* Recognize the case where operand[1] is a reference to thread-local
6961 data and load its address to a register. */
6962 if (rs6000_tls_referenced_p (operands[1]))
6964 enum tls_model model;
6965 rtx tmp = operands[1];
6966 rtx addend = NULL;
6968 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
6970 addend = XEXP (XEXP (tmp, 0), 1);
6971 tmp = XEXP (XEXP (tmp, 0), 0);
6974 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
6975 model = SYMBOL_REF_TLS_MODEL (tmp);
6976 gcc_assert (model != 0);
6978 tmp = rs6000_legitimize_tls_address (tmp, model);
6979 if (addend)
6981 tmp = gen_rtx_PLUS (mode, tmp, addend);
6982 tmp = force_operand (tmp, operands[0]);
6984 operands[1] = tmp;
6987 /* Handle the case where reload calls us with an invalid address. */
6988 if (reload_in_progress && mode == Pmode
6989 && (! general_operand (operands[1], mode)
6990 || ! nonimmediate_operand (operands[0], mode)))
6991 goto emit_set;
6993 /* 128-bit constant floating-point values in IBM long double format
6994 (e.g. on Darwin) should really be loaded as two parts. */
6995 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
6996 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
6998 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
6999 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7000 DFmode);
7001 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7002 GET_MODE_SIZE (DFmode)),
7003 simplify_gen_subreg (DFmode, operands[1], mode,
7004 GET_MODE_SIZE (DFmode)),
7005 DFmode);
7006 return;
7009 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7010 cfun->machine->sdmode_stack_slot =
7011 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7013 if (reload_in_progress
7014 && mode == SDmode
7015 && MEM_P (operands[0])
7016 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7017 && REG_P (operands[1]))
7019 if (FP_REGNO_P (REGNO (operands[1])))
7021 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7022 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7023 emit_insn (gen_movsd_store (mem, operands[1]));
7025 else if (INT_REGNO_P (REGNO (operands[1])))
7027 rtx mem = adjust_address_nv (operands[0], mode, 4);
7028 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7029 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7031 else
7032 gcc_unreachable();
7033 return;
7035 if (reload_in_progress
7036 && mode == SDmode
7037 && REG_P (operands[0])
7038 && MEM_P (operands[1])
7039 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7041 if (FP_REGNO_P (REGNO (operands[0])))
7043 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7044 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7045 emit_insn (gen_movsd_load (operands[0], mem));
7047 else if (INT_REGNO_P (REGNO (operands[0])))
7049 rtx mem = adjust_address_nv (operands[1], mode, 4);
7050 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7051 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7053 else
7054 gcc_unreachable();
7055 return;
7058 /* FIXME: In the long term, this switch statement should go away
7059 and be replaced by a sequence of tests based on things like
7060 mode == Pmode. */
7061 switch (mode)
7063 case HImode:
7064 case QImode:
7065 if (CONSTANT_P (operands[1])
7066 && GET_CODE (operands[1]) != CONST_INT)
7067 operands[1] = force_const_mem (mode, operands[1]);
7068 break;
7070 case TFmode:
7071 case TDmode:
7072 rs6000_eliminate_indexed_memrefs (operands);
7073 /* fall through */
7075 case DFmode:
7076 case DDmode:
7077 case SFmode:
7078 case SDmode:
7079 if (CONSTANT_P (operands[1])
7080 && ! easy_fp_constant (operands[1], mode))
7081 operands[1] = force_const_mem (mode, operands[1]);
7082 break;
7084 case V16QImode:
7085 case V8HImode:
7086 case V4SFmode:
7087 case V4SImode:
7088 case V4HImode:
7089 case V2SFmode:
7090 case V2SImode:
7091 case V1DImode:
7092 case V2DFmode:
7093 case V2DImode:
7094 if (CONSTANT_P (operands[1])
7095 && !easy_vector_constant (operands[1], mode))
7096 operands[1] = force_const_mem (mode, operands[1]);
7097 break;
7099 case SImode:
7100 case DImode:
7101 /* Use the default pattern for the address of ELF small data. */
7102 if (TARGET_ELF
7103 && mode == Pmode
7104 && DEFAULT_ABI == ABI_V4
7105 && (GET_CODE (operands[1]) == SYMBOL_REF
7106 || GET_CODE (operands[1]) == CONST)
7107 && small_data_operand (operands[1], mode))
7109 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7110 return;
7113 if (DEFAULT_ABI == ABI_V4
7114 && mode == Pmode && mode == SImode
7115 && flag_pic == 1 && got_operand (operands[1], mode))
7117 emit_insn (gen_movsi_got (operands[0], operands[1]));
7118 return;
7121 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7122 && TARGET_NO_TOC
7123 && ! flag_pic
7124 && mode == Pmode
7125 && CONSTANT_P (operands[1])
7126 && GET_CODE (operands[1]) != HIGH
7127 && GET_CODE (operands[1]) != CONST_INT)
7129 rtx target = (!can_create_pseudo_p ()
7130 ? operands[0]
7131 : gen_reg_rtx (mode));
7133 /* If this is a function address on -mcall-aixdesc,
7134 convert it to the address of the descriptor. */
7135 if (DEFAULT_ABI == ABI_AIX
7136 && GET_CODE (operands[1]) == SYMBOL_REF
7137 && XSTR (operands[1], 0)[0] == '.')
7139 const char *name = XSTR (operands[1], 0);
7140 rtx new_ref;
7141 while (*name == '.')
7142 name++;
7143 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7144 CONSTANT_POOL_ADDRESS_P (new_ref)
7145 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7146 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7147 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7148 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7149 operands[1] = new_ref;
7152 if (DEFAULT_ABI == ABI_DARWIN)
7154 #if TARGET_MACHO
7155 if (MACHO_DYNAMIC_NO_PIC_P)
7157 /* Take care of any required data indirection. */
7158 operands[1] = rs6000_machopic_legitimize_pic_address (
7159 operands[1], mode, operands[0]);
7160 if (operands[0] != operands[1])
7161 emit_insn (gen_rtx_SET (VOIDmode,
7162 operands[0], operands[1]));
7163 return;
7165 #endif
7166 emit_insn (gen_macho_high (target, operands[1]));
7167 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7168 return;
7171 emit_insn (gen_elf_high (target, operands[1]));
7172 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7173 return;
7176 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7177 and we have put it in the TOC, we just need to make a TOC-relative
7178 reference to it. */
7179 if (TARGET_TOC
7180 && GET_CODE (operands[1]) == SYMBOL_REF
7181 && use_toc_relative_ref (operands[1]))
7182 operands[1] = create_TOC_reference (operands[1], operands[0]);
7183 else if (mode == Pmode
7184 && CONSTANT_P (operands[1])
7185 && GET_CODE (operands[1]) != HIGH
7186 && ((GET_CODE (operands[1]) != CONST_INT
7187 && ! easy_fp_constant (operands[1], mode))
7188 || (GET_CODE (operands[1]) == CONST_INT
7189 && (num_insns_constant (operands[1], mode)
7190 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7191 || (GET_CODE (operands[0]) == REG
7192 && FP_REGNO_P (REGNO (operands[0]))))
7193 && !toc_relative_expr_p (operands[1], false)
7194 && (TARGET_CMODEL == CMODEL_SMALL
7195 || can_create_pseudo_p ()
7196 || (REG_P (operands[0])
7197 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7200 #if TARGET_MACHO
7201 /* Darwin uses a special PIC legitimizer. */
7202 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7204 operands[1] =
7205 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7206 operands[0]);
7207 if (operands[0] != operands[1])
7208 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7209 return;
7211 #endif
7213 /* If we are to limit the number of things we put in the TOC and
7214 this is a symbol plus a constant we can add in one insn,
7215 just put the symbol in the TOC and add the constant. Don't do
7216 this if reload is in progress. */
7217 if (GET_CODE (operands[1]) == CONST
7218 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7219 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7220 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7221 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7222 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7223 && ! side_effects_p (operands[0]))
7225 rtx sym =
7226 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7227 rtx other = XEXP (XEXP (operands[1], 0), 1);
7229 sym = force_reg (mode, sym);
7230 emit_insn (gen_add3_insn (operands[0], sym, other));
7231 return;
7234 operands[1] = force_const_mem (mode, operands[1]);
7236 if (TARGET_TOC
7237 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7238 && constant_pool_expr_p (XEXP (operands[1], 0))
7239 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7240 get_pool_constant (XEXP (operands[1], 0)),
7241 get_pool_mode (XEXP (operands[1], 0))))
7243 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7244 operands[0]);
7245 operands[1] = gen_const_mem (mode, tocref);
7246 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7249 break;
7251 case TImode:
7252 rs6000_eliminate_indexed_memrefs (operands);
7253 break;
7255 default:
7256 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7259 /* Above, we may have called force_const_mem which may have returned
7260 an invalid address. If we can, fix this up; otherwise, reload will
7261 have to deal with it. */
7262 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7263 operands[1] = validize_mem (operands[1]);
7265 emit_set:
7266 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7269 /* Return true if a structure, union or array containing FIELD should be
7270 accessed using `BLKmode'.
7272 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7273 entire thing in a DI and use subregs to access the internals.
7274 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7275 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7276 best thing to do is set structs to BLKmode and avoid Severe Tire
7277 Damage.
7279 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
7280 fit into 1, whereas DI still needs two. */
7282 static bool
7283 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7285 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7286 || (TARGET_E500_DOUBLE && mode == DFmode));
7289 /* Nonzero if we can use a floating-point register to pass this arg. */
7290 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7291 (SCALAR_FLOAT_MODE_P (MODE) \
7292 && (CUM)->fregno <= FP_ARG_MAX_REG \
7293 && TARGET_HARD_FLOAT && TARGET_FPRS)
7295 /* Nonzero if we can use an AltiVec register to pass this arg. */
7296 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7297 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7298 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7299 && TARGET_ALTIVEC_ABI \
7300 && (NAMED))
7302 /* Return a nonzero value to indicate that the function value should be
7303 returned in memory, just as large structures always are. TYPE will be
7304 the data type of the value, and FNTYPE will be the type of the
7305 function doing the returning, or @code{NULL} for libcalls.
7307 The AIX ABI for the RS/6000 specifies that all structures are
7308 returned in memory. The Darwin ABI does the same.
7310 For the Darwin 64 Bit ABI, a function result can be returned in
7311 registers or in memory, depending on the size of the return data
7312 type. If it is returned in registers, the value occupies the same
7313 registers as it would if it were the first and only function
7314 argument. Otherwise, the function places its result in memory at
7315 the location pointed to by GPR3.
7317 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7318 but a draft put them in memory, and GCC used to implement the draft
7319 instead of the final standard. Therefore, aix_struct_return
7320 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7321 compatibility can change DRAFT_V4_STRUCT_RET to override the
7322 default, and -m switches get the final word. See
7323 rs6000_option_override_internal for more details.
7325 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
7326 long double support is enabled. These values are returned in memory.
7328 int_size_in_bytes returns -1 for variable size objects, which go in
7329 memory always. The cast to unsigned makes -1 > 8. */
7331 static bool
7332 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7334 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7335 if (TARGET_MACHO
7336 && rs6000_darwin64_abi
7337 && TREE_CODE (type) == RECORD_TYPE
7338 && int_size_in_bytes (type) > 0)
7340 CUMULATIVE_ARGS valcum;
7341 rtx valret;
7343 valcum.words = 0;
7344 valcum.fregno = FP_ARG_MIN_REG;
7345 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7346 /* Do a trial code generation as if this were going to be passed
7347 as an argument; if any part goes in memory, we return NULL. */
7348 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7349 if (valret)
7350 return false;
7351 /* Otherwise fall through to more conventional ABI rules. */
7354 if (AGGREGATE_TYPE_P (type)
7355 && (aix_struct_return
7356 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7357 return true;
7359 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7360 modes only exist for GCC vector types if -maltivec. */
7361 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7362 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7363 return false;
7365 /* Return synthetic vectors in memory. */
7366 if (TREE_CODE (type) == VECTOR_TYPE
7367 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7369 static bool warned_for_return_big_vectors = false;
7370 if (!warned_for_return_big_vectors)
7372 warning (0, "GCC vector returned by reference: "
7373 "non-standard ABI extension with no compatibility guarantee");
7374 warned_for_return_big_vectors = true;
7376 return true;
7379 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7380 return true;
7382 return false;
7385 #ifdef HAVE_AS_GNU_ATTRIBUTE
7386 /* Return TRUE if a call to function FNDECL may be one that
7387 potentially affects the function calling ABI of the object file. */
7389 static bool
7390 call_ABI_of_interest (tree fndecl)
7392 if (cgraph_state == CGRAPH_STATE_EXPANSION)
7394 struct cgraph_node *c_node;
7396 /* Libcalls are always interesting. */
7397 if (fndecl == NULL_TREE)
7398 return true;
7400 /* Any call to an external function is interesting. */
7401 if (DECL_EXTERNAL (fndecl))
7402 return true;
7404 /* Interesting functions that we are emitting in this object file. */
7405 c_node = cgraph_get_node (fndecl);
7406 c_node = cgraph_function_or_thunk_node (c_node, NULL);
7407 return !cgraph_only_called_directly_p (c_node);
7409 return false;
7411 #endif
7413 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7414 for a call to a function whose data type is FNTYPE.
7415 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
7417 For incoming args we set the number of arguments in the prototype large
7418 so we never return a PARALLEL. */
7420 void
7421 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7422 rtx libname ATTRIBUTE_UNUSED, int incoming,
7423 int libcall, int n_named_args,
7424 tree fndecl ATTRIBUTE_UNUSED,
7425 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7427 static CUMULATIVE_ARGS zero_cumulative;
7429 *cum = zero_cumulative;
7430 cum->words = 0;
7431 cum->fregno = FP_ARG_MIN_REG;
7432 cum->vregno = ALTIVEC_ARG_MIN_REG;
7433 cum->prototype = (fntype && prototype_p (fntype));
7434 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7435 ? CALL_LIBCALL : CALL_NORMAL);
7436 cum->sysv_gregno = GP_ARG_MIN_REG;
7437 cum->stdarg = stdarg_p (fntype);
7439 cum->nargs_prototype = 0;
7440 if (incoming || cum->prototype)
7441 cum->nargs_prototype = n_named_args;
7443 /* Check for a longcall attribute. */
7444 if ((!fntype && rs6000_default_long_calls)
7445 || (fntype
7446 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7447 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7448 cum->call_cookie |= CALL_LONG;
7450 if (TARGET_DEBUG_ARG)
7452 fprintf (stderr, "\ninit_cumulative_args:");
7453 if (fntype)
7455 tree ret_type = TREE_TYPE (fntype);
7456 fprintf (stderr, " ret code = %s,",
7457 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7460 if (cum->call_cookie & CALL_LONG)
7461 fprintf (stderr, " longcall,");
7463 fprintf (stderr, " proto = %d, nargs = %d\n",
7464 cum->prototype, cum->nargs_prototype);
7467 #ifdef HAVE_AS_GNU_ATTRIBUTE
7468 if (DEFAULT_ABI == ABI_V4)
7470 cum->escapes = call_ABI_of_interest (fndecl);
7471 if (cum->escapes)
7473 tree return_type;
7475 if (fntype)
7477 return_type = TREE_TYPE (fntype);
7478 return_mode = TYPE_MODE (return_type);
7480 else
7481 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7483 if (return_type != NULL)
7485 if (TREE_CODE (return_type) == RECORD_TYPE
7486 && TYPE_TRANSPARENT_AGGR (return_type))
7488 return_type = TREE_TYPE (first_field (return_type));
7489 return_mode = TYPE_MODE (return_type);
7491 if (AGGREGATE_TYPE_P (return_type)
7492 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7493 <= 8))
7494 rs6000_returns_struct = true;
7496 if (SCALAR_FLOAT_MODE_P (return_mode))
7497 rs6000_passes_float = true;
7498 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7499 || SPE_VECTOR_MODE (return_mode))
7500 rs6000_passes_vector = true;
7503 #endif
7505 if (fntype
7506 && !TARGET_ALTIVEC
7507 && TARGET_ALTIVEC_ABI
7508 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7510 error ("cannot return value in vector register because"
7511 " altivec instructions are disabled, use -maltivec"
7512 " to enable them");
7516 /* Return true if TYPE must be passed on the stack and not in registers. */
7518 static bool
7519 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
7521 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
7522 return must_pass_in_stack_var_size (mode, type);
7523 else
7524 return must_pass_in_stack_var_size_or_pad (mode, type);
7527 /* If defined, a C expression which determines whether, and in which
7528 direction, to pad out an argument with extra space. The value
7529 should be of type `enum direction': either `upward' to pad above
7530 the argument, `downward' to pad below, or `none' to inhibit
7531 padding.
7533 For the AIX ABI structs are always stored left shifted in their
7534 argument slot. */
7536 enum direction
7537 function_arg_padding (enum machine_mode mode, const_tree type)
7539 #ifndef AGGREGATE_PADDING_FIXED
7540 #define AGGREGATE_PADDING_FIXED 0
7541 #endif
7542 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
7543 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
7544 #endif
7546 if (!AGGREGATE_PADDING_FIXED)
7548 /* GCC used to pass structures of the same size as integer types as
7549 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
7550 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
7551 passed padded downward, except that -mstrict-align further
7552 muddied the water in that multi-component structures of 2 and 4
7553 bytes in size were passed padded upward.
7555 The following arranges for best compatibility with previous
7556 versions of gcc, but removes the -mstrict-align dependency. */
7557 if (BYTES_BIG_ENDIAN)
7559 HOST_WIDE_INT size = 0;
7561 if (mode == BLKmode)
7563 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
7564 size = int_size_in_bytes (type);
7566 else
7567 size = GET_MODE_SIZE (mode);
7569 if (size == 1 || size == 2 || size == 4)
7570 return downward;
7572 return upward;
7575 if (AGGREGATES_PAD_UPWARD_ALWAYS)
7577 if (type != 0 && AGGREGATE_TYPE_P (type))
7578 return upward;
7581 /* Fall back to the default. */
7582 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
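/* Worked examples of the big-endian rule above (field layouts are
   illustrative):

     struct { char c; };           size 1: padded downward
     struct { short s; };          size 2: padded downward
     struct { char c[3]; };        size 3: padded upward
     struct { int a; int b; };     size 8: padded upward  */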
7585 /* If defined, a C expression that gives the alignment boundary, in bits,
7586 of an argument with the specified mode and type. If it is not defined,
7587 PARM_BOUNDARY is used for all arguments.
7589 V.4 wants long longs and doubles to be double word aligned. Just
7590 testing the mode size is a boneheaded way to do this as it means
7591 that other types such as complex int are also double word aligned.
7592 However, we're stuck with this because changing the ABI might break
7593 existing library interfaces.
7595 Doubleword align SPE vectors.
7596 Quadword align Altivec/VSX vectors.
7597 Quadword align large synthetic vector types. */
7599 static unsigned int
7600 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7602 if (DEFAULT_ABI == ABI_V4
7603 && (GET_MODE_SIZE (mode) == 8
7604 || (TARGET_HARD_FLOAT
7605 && TARGET_FPRS
7606 && (mode == TFmode || mode == TDmode))))
7607 return 64;
7608 else if (SPE_VECTOR_MODE (mode)
7609 || (type && TREE_CODE (type) == VECTOR_TYPE
7610 && int_size_in_bytes (type) >= 8
7611 && int_size_in_bytes (type) < 16))
7612 return 64;
7613 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7614 || (type && TREE_CODE (type) == VECTOR_TYPE
7615 && int_size_in_bytes (type) >= 16))
7616 return 128;
7617 else if (TARGET_MACHO
7618 && rs6000_darwin64_abi
7619 && mode == BLKmode
7620 && type && TYPE_ALIGN (type) > 64)
7621 return 128;
7622 else
7623 return PARM_BOUNDARY;
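/* Worked examples for the boundary rules above, assuming a 32-bit
   V.4 target with hard float (where PARM_BOUNDARY is 32):

     long long                        8-byte mode: 64-bit boundary
     double                           64
     vector int (AltiVec, 16 bytes)   128
     int                              PARM_BOUNDARY (32)  */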
7626 /* For a function parm of MODE and TYPE, return the starting word in
7627 the parameter area. NWORDS of the parameter area are already used. */
7629 static unsigned int
7630 rs6000_parm_start (enum machine_mode mode, const_tree type,
7631 unsigned int nwords)
7633 unsigned int align;
7634 unsigned int parm_offset;
7636 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
7637 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
7638 return nwords + (-(parm_offset + nwords) & align);
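/* A worked example of the computation above: 32-bit AIX
   (parm_offset == 6, since the save area starts 24 bytes from the SP)
   with a 16-byte-aligned argument and NWORDS == 3.  ALIGN is
   128/32 - 1 == 3, and -(6 + 3) & 3 == 3, so the argument starts at
   word 6 -- byte 24 + 6*4 == 48 from the SP, which is 16-byte
   aligned as required.  */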
7641 /* Compute the size (in words) of a function argument. */
7643 static unsigned long
7644 rs6000_arg_size (enum machine_mode mode, const_tree type)
7646 unsigned long size;
7648 if (mode != BLKmode)
7649 size = GET_MODE_SIZE (mode);
7650 else
7651 size = int_size_in_bytes (type);
7653 if (TARGET_32BIT)
7654 return (size + 3) >> 2;
7655 else
7656 return (size + 7) >> 3;
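/* For example, a 10-byte argument occupies (10 + 3) >> 2 == 3 words
   on a 32-bit target and (10 + 7) >> 3 == 2 doublewords on a 64-bit
   target; BLKmode aggregates are measured via int_size_in_bytes the
   same way.  */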
7659 /* Use this to flush pending int fields. */
7661 static void
7662 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7663 HOST_WIDE_INT bitpos, int final)
7665 unsigned int startbit, endbit;
7666 int intregs, intoffset;
7667 enum machine_mode mode;
7669 /* Handle the situations where a float is taking up the first half
7670 of the GPR, and the other half is empty (typically due to
7671 alignment restrictions). We can detect this by an 8-byte-aligned
7672 int field, or by seeing that this is the final flush for this
7673 argument. Count the word and continue on. */
7674 if (cum->floats_in_gpr == 1
7675 && (cum->intoffset % 64 == 0
7676 || (cum->intoffset == -1 && final)))
7678 cum->words++;
7679 cum->floats_in_gpr = 0;
7682 if (cum->intoffset == -1)
7683 return;
7685 intoffset = cum->intoffset;
7686 cum->intoffset = -1;
7687 cum->floats_in_gpr = 0;
7689 if (intoffset % BITS_PER_WORD != 0)
7691 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7692 MODE_INT, 0);
7693 if (mode == BLKmode)
7695 /* We couldn't find an appropriate mode, which happens,
7696 e.g., in packed structs when there are 3 bytes to load.
7697 Force intoffset back to the beginning of the word in this
7698 case. */
7699 intoffset = intoffset & -BITS_PER_WORD;
7703 startbit = intoffset & -BITS_PER_WORD;
7704 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7705 intregs = (endbit - startbit) / BITS_PER_WORD;
7706 cum->words += intregs;
7707 /* words should be unsigned. */
7708 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
7710 int pad = (endbit/BITS_PER_WORD) - cum->words;
7711 cum->words += pad;
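/* A sketch of the packing this flush logic supports on darwin64
   (field layouts are illustrative):

     struct { float f; float g; };   f and g share one 8-byte GPR slot
     struct { float f; int i; };     likewise packed into one slot,
                                     with the flush counting the word  */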
7715 /* The darwin64 ABI calls for us to recurse down through structs,
7716 looking for elements passed in registers. Unfortunately, we have
7717 to track int register count here also because of misalignments
7718 in powerpc alignment mode. */
7720 static void
7721 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7722 const_tree type,
7723 HOST_WIDE_INT startbitpos)
7725 tree f;
7727 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
7728 if (TREE_CODE (f) == FIELD_DECL)
7730 HOST_WIDE_INT bitpos = startbitpos;
7731 tree ftype = TREE_TYPE (f);
7732 enum machine_mode mode;
7733 if (ftype == error_mark_node)
7734 continue;
7735 mode = TYPE_MODE (ftype);
7737 if (DECL_SIZE (f) != 0
7738 && host_integerp (bit_position (f), 1))
7739 bitpos += int_bit_position (f);
7741 /* ??? FIXME: else assume zero offset. */
7743 if (TREE_CODE (ftype) == RECORD_TYPE)
7744 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7745 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7747 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
7748 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7749 cum->fregno += n_fpregs;
7750 /* Single-precision floats present a special problem for
7751 us, because they are smaller than an 8-byte GPR, and so
7752 the structure-packing rules combined with the standard
7753 varargs behavior mean that we want to pack float/float
7754 and float/int combinations into a single register's
7755 space. This is complicated by the arg advance flushing,
7756 which works on arbitrarily large groups of int-type
7757 fields. */
7758 if (mode == SFmode)
7760 if (cum->floats_in_gpr == 1)
7762 /* Two floats in a word; count the word and reset
7763 the float count. */
7764 cum->words++;
7765 cum->floats_in_gpr = 0;
7767 else if (bitpos % 64 == 0)
7769 /* A float at the beginning of an 8-byte word;
7770 count it and put off adjusting cum->words until
7771 we see if an arg advance flush is going to do it
7772 for us. */
7773 cum->floats_in_gpr++;
7775 else
7777 /* The float is at the end of a word, preceded
7778 by integer fields, so the arg advance flush
7779 just above has already set cum->words and
7780 everything is taken care of. */
7783 else
7784 cum->words += n_fpregs;
7786 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7788 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7789 cum->vregno++;
7790 cum->words += 2;
7792 else if (cum->intoffset == -1)
7793 cum->intoffset = bitpos;
7797 /* Check for an item that needs to be considered specially under the darwin 64
7798 bit ABI. These are record types where the mode is BLK or the structure is
7799 8 bytes in size. */
7800 static int
7801 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
7803 return rs6000_darwin64_abi
7804 && ((mode == BLKmode
7805 && TREE_CODE (type) == RECORD_TYPE
7806 && int_size_in_bytes (type) > 0)
7807 || (type && TREE_CODE (type) == RECORD_TYPE
7808 && int_size_in_bytes (type) == 8)) ? 1 : 0;
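/* Examples of records the predicate above accepts (darwin64 only;
   the types are illustrative):

     struct { char c[9]; };    BLKmode with positive size
     struct { double d; };     non-BLKmode but exactly 8 bytes wide  */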
7811 /* Update the data in CUM to advance over an argument
7812 of mode MODE and data type TYPE.
7813 (TYPE is null for libcalls where that information may not be available.)
7815 Note that for args passed by reference, function_arg will be called
7816 with MODE and TYPE set to that of the pointer to the arg, not the arg
7817 itself. */
7819 static void
7820 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7821 const_tree type, bool named, int depth)
7823 /* Only tick off an argument if we're not recursing. */
7824 if (depth == 0)
7825 cum->nargs_prototype--;
7827 #ifdef HAVE_AS_GNU_ATTRIBUTE
7828 if (DEFAULT_ABI == ABI_V4
7829 && cum->escapes)
7831 if (SCALAR_FLOAT_MODE_P (mode))
7832 rs6000_passes_float = true;
7833 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
7834 rs6000_passes_vector = true;
7835 else if (SPE_VECTOR_MODE (mode)
7836 && !cum->stdarg
7837 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7838 rs6000_passes_vector = true;
7840 #endif
7842 if (TARGET_ALTIVEC_ABI
7843 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7844 || (type && TREE_CODE (type) == VECTOR_TYPE
7845 && int_size_in_bytes (type) == 16)))
7847 bool stack = false;
7849 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7851 cum->vregno++;
7852 if (!TARGET_ALTIVEC)
7853 error ("cannot pass argument in vector register because"
7854 " altivec instructions are disabled, use -maltivec"
7855 " to enable them");
7857 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7858 even if it is going to be passed in a vector register.
7859 Darwin does the same for variable-argument functions. */
7860 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
7861 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
7862 stack = true;
7864 else
7865 stack = true;
7867 if (stack)
7869 int align;
7871 /* Vector parameters must be 16-byte aligned. This places
7872 them at 2 mod 4 in terms of words in 32-bit mode, since
7873 the parameter save area starts at offset 24 from the
7874 stack. In 64-bit mode, they just have to start on an
7875 even word, since the parameter save area is 16-byte
7876 aligned. Space for GPRs is reserved even if the argument
7877 will be passed in memory. */
7878 if (TARGET_32BIT)
7879 align = (2 - cum->words) & 3;
7880 else
7881 align = cum->words & 1;
7882 cum->words += align + rs6000_arg_size (mode, type);
7884 if (TARGET_DEBUG_ARG)
7886 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
7887 cum->words, align);
7888 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
7889 cum->nargs_prototype, cum->prototype,
7890 GET_MODE_NAME (mode));
7894 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
7895 && !cum->stdarg
7896 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7897 cum->sysv_gregno++;
7899 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
7901 int size = int_size_in_bytes (type);
7902 /* Variable sized types have size == -1 and are
7903 treated as if consisting entirely of ints.
7904 Pad to 16 byte boundary if needed. */
7905 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7906 && (cum->words % 2) != 0)
7907 cum->words++;
7908 /* For varargs, we can just go up by the size of the struct. */
7909 if (!named)
7910 cum->words += (size + 7) / 8;
7911 else
7913 /* It is tempting to say int register count just goes up by
7914 sizeof(type)/8, but this is wrong in a case such as
7915 { int; double; int; } [powerpc alignment]. We have to
7916 grovel through the fields for these too. */
7917 cum->intoffset = 0;
7918 cum->floats_in_gpr = 0;
7919 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
7920 rs6000_darwin64_record_arg_advance_flush (cum,
7921 size * BITS_PER_UNIT, 1);
7923 if (TARGET_DEBUG_ARG)
7925 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
7926 cum->words, TYPE_ALIGN (type), size);
7927 fprintf (stderr,
7928 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
7929 cum->nargs_prototype, cum->prototype,
7930 GET_MODE_NAME (mode));
7933 else if (DEFAULT_ABI == ABI_V4)
7935 if (TARGET_HARD_FLOAT && TARGET_FPRS
7936 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7937 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7938 || (mode == TFmode && !TARGET_IEEEQUAD)
7939 || mode == SDmode || mode == DDmode || mode == TDmode))
7941 /* _Decimal128 must use an even/odd register pair. This assumes
7942 that the register number is odd when fregno is odd. */
7943 if (mode == TDmode && (cum->fregno % 2) == 1)
7944 cum->fregno++;
7946 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7947 <= FP_ARG_V4_MAX_REG)
7948 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7949 else
7951 cum->fregno = FP_ARG_V4_MAX_REG + 1;
7952 if (mode == DFmode || mode == TFmode
7953 || mode == DDmode || mode == TDmode)
7954 cum->words += cum->words & 1;
7955 cum->words += rs6000_arg_size (mode, type);
7958 else
7960 int n_words = rs6000_arg_size (mode, type);
7961 int gregno = cum->sysv_gregno;
7963 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
7964 (r7,r8) or (r9,r10). So are other 2-word items, such
7965 as complex int, due to a historical mistake. */
7966 if (n_words == 2)
7967 gregno += (1 - gregno) & 1;
7969 /* Multi-reg args are not split between registers and stack. */
7970 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7972 /* Long long and SPE vectors are aligned on the stack.
7973 So are other 2 word items such as complex int due to
7974 a historical mistake. */
7975 if (n_words == 2)
7976 cum->words += cum->words & 1;
7977 cum->words += n_words;
7980 /* Note: we continue to accumulate gregno even after we have started
7981 spilling to the stack; this is how expand_builtin_saveregs can
7982 tell that spilling has started. */
7983 cum->sysv_gregno = gregno + n_words;
7986 if (TARGET_DEBUG_ARG)
7988 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7989 cum->words, cum->fregno);
7990 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
7991 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
7992 fprintf (stderr, "mode = %4s, named = %d\n",
7993 GET_MODE_NAME (mode), named);
7996 else
7998 int n_words = rs6000_arg_size (mode, type);
7999 int start_words = cum->words;
8000 int align_words = rs6000_parm_start (mode, type, start_words);
8002 cum->words = align_words + n_words;
8004 if (SCALAR_FLOAT_MODE_P (mode)
8005 && TARGET_HARD_FLOAT && TARGET_FPRS)
8007 /* _Decimal128 must be passed in an even/odd float register pair.
8008 This assumes that the register number is odd when fregno is
8009 odd. */
8010 if (mode == TDmode && (cum->fregno % 2) == 1)
8011 cum->fregno++;
8012 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8015 if (TARGET_DEBUG_ARG)
8017 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8018 cum->words, cum->fregno);
8019 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8020 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8021 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8022 named, align_words - start_words, depth);
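/* A worked V.4 example for the register bookkeeping above: with
   sysv_gregno at r5, a long long (n_words == 2) is placed in (r5,r6);
   had sysv_gregno been r6, the (1 - gregno) & 1 adjustment would skip
   r6 and place it in (r7,r8).  Once r10 would be exceeded, the whole
   item goes to the stack rather than being split.  */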
8027 static void
8028 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8029 const_tree type, bool named)
8031 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8035 static rtx
8036 spe_build_register_parallel (enum machine_mode mode, int gregno)
8038 rtx r1, r3, r5, r7;
8040 switch (mode)
8042 case DFmode:
8043 r1 = gen_rtx_REG (DImode, gregno);
8044 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8045 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8047 case DCmode:
8048 case TFmode:
8049 r1 = gen_rtx_REG (DImode, gregno);
8050 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8051 r3 = gen_rtx_REG (DImode, gregno + 2);
8052 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8053 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8055 case TCmode:
8056 r1 = gen_rtx_REG (DImode, gregno);
8057 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8058 r3 = gen_rtx_REG (DImode, gregno + 2);
8059 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8060 r5 = gen_rtx_REG (DImode, gregno + 4);
8061 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8062 r7 = gen_rtx_REG (DImode, gregno + 6);
8063 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8064 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8066 default:
8067 gcc_unreachable ();
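/* For reference, the DFmode case above builds RTL of the shape

     (parallel:DF [(expr_list (reg:DI gregno) (const_int 0))])

   while TCmode spreads four DImode pieces at byte offsets 0, 8, 16
   and 24 over every other register pair.  */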
8071 /* Determine where to put a SIMD argument on the SPE. */
8072 static rtx
8073 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8074 const_tree type)
8076 int gregno = cum->sysv_gregno;
8078 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8079 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8080 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8081 || mode == DCmode || mode == TCmode))
8083 int n_words = rs6000_arg_size (mode, type);
8085 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8086 if (mode == DFmode)
8087 gregno += (1 - gregno) & 1;
8089 /* Multi-reg args are not split between registers and stack. */
8090 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8091 return NULL_RTX;
8093 return spe_build_register_parallel (mode, gregno);
8095 if (cum->stdarg)
8097 int n_words = rs6000_arg_size (mode, type);
8099 /* SPE vectors are put in odd registers. */
8100 if (n_words == 2 && (gregno & 1) == 0)
8101 gregno += 1;
8103 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8105 rtx r1, r2;
8106 enum machine_mode m = SImode;
8108 r1 = gen_rtx_REG (m, gregno);
8109 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8110 r2 = gen_rtx_REG (m, gregno + 1);
8111 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8112 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8114 else
8115 return NULL_RTX;
8117 else
8119 if (gregno <= GP_ARG_MAX_REG)
8120 return gen_rtx_REG (mode, gregno);
8121 else
8122 return NULL_RTX;
8126 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8127 structure between cum->intoffset and bitpos to integer registers. */
8129 static void
8130 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8131 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8133 enum machine_mode mode;
8134 unsigned int regno;
8135 unsigned int startbit, endbit;
8136 int this_regno, intregs, intoffset;
8137 rtx reg;
8139 if (cum->intoffset == -1)
8140 return;
8142 intoffset = cum->intoffset;
8143 cum->intoffset = -1;
8145 /* If this is the trailing part of a word, try to only load that
8146 much into the register. Otherwise load the whole register. Note
8147 that in the latter case we may pick up unwanted bits. It's not a
8148 problem at the moment, but we may wish to revisit this. */
8150 if (intoffset % BITS_PER_WORD != 0)
8152 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8153 MODE_INT, 0);
8154 if (mode == BLKmode)
8156 /* We couldn't find an appropriate mode, which happens,
8157 e.g., in packed structs when there are 3 bytes to load.
8158 Force intoffset back to the beginning of the word in this
8159 case. */
8160 intoffset = intoffset & -BITS_PER_WORD;
8161 mode = word_mode;
8164 else
8165 mode = word_mode;
8167 startbit = intoffset & -BITS_PER_WORD;
8168 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8169 intregs = (endbit - startbit) / BITS_PER_WORD;
8170 this_regno = cum->words + intoffset / BITS_PER_WORD;
8172 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8173 cum->use_stack = 1;
8175 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8176 if (intregs <= 0)
8177 return;
8179 intoffset /= BITS_PER_UNIT;
8182 regno = GP_ARG_MIN_REG + this_regno;
8183 reg = gen_rtx_REG (mode, regno);
8184 rvec[(*k)++] =
8185 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8187 this_regno += 1;
8188 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8189 mode = word_mode;
8190 intregs -= 1;
8192 while (intregs > 0);
8195 /* Recursive workhorse for the following. */
8197 static void
8198 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8199 HOST_WIDE_INT startbitpos, rtx rvec[],
8200 int *k)
8202 tree f;
8204 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8205 if (TREE_CODE (f) == FIELD_DECL)
8207 HOST_WIDE_INT bitpos = startbitpos;
8208 tree ftype = TREE_TYPE (f);
8209 enum machine_mode mode;
8210 if (ftype == error_mark_node)
8211 continue;
8212 mode = TYPE_MODE (ftype);
8214 if (DECL_SIZE (f) != 0
8215 && host_integerp (bit_position (f), 1))
8216 bitpos += int_bit_position (f);
8218 /* ??? FIXME: else assume zero offset. */
8220 if (TREE_CODE (ftype) == RECORD_TYPE)
8221 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8222 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8224 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8225 #if 0
8226 switch (mode)
8228 case SCmode: mode = SFmode; break;
8229 case DCmode: mode = DFmode; break;
8230 case TCmode: mode = TFmode; break;
8231 default: break;
8233 #endif
8234 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8235 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8237 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8238 && (mode == TFmode || mode == TDmode));
8239 /* Long double or _Decimal128 split over regs and memory. */
8240 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8241 cum->use_stack = 1;
8243 rvec[(*k)++]
8244 = gen_rtx_EXPR_LIST (VOIDmode,
8245 gen_rtx_REG (mode, cum->fregno++),
8246 GEN_INT (bitpos / BITS_PER_UNIT));
8247 if (mode == TFmode || mode == TDmode)
8248 cum->fregno++;
8250 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8252 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8253 rvec[(*k)++]
8254 = gen_rtx_EXPR_LIST (VOIDmode,
8255 gen_rtx_REG (mode, cum->vregno++),
8256 GEN_INT (bitpos / BITS_PER_UNIT));
8258 else if (cum->intoffset == -1)
8259 cum->intoffset = bitpos;
8263 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8264 the register(s) to be used for each field and subfield of a struct
8265 being passed by value, along with the offset of where the
8266 register's value may be found in the block. FP fields go in FP
8267 register, vector fields go in vector registers, and everything
8268 else goes in int registers, packed as in memory.
8270 This code is also used for function return values. RETVAL indicates
8271 whether this is the case.
8273 Much of this is taken from the SPARC V9 port, which has a similar
8274 calling convention. */
8276 static rtx
8277 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8278 bool named, bool retval)
8280 rtx rvec[FIRST_PSEUDO_REGISTER];
8281 int k = 1, kbase = 1;
8282 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8283 /* This is a copy; modifications are not visible to our caller. */
8284 CUMULATIVE_ARGS copy_cum = *orig_cum;
8285 CUMULATIVE_ARGS *cum = &copy_cum;
8287 /* Pad to 16 byte boundary if needed. */
8288 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8289 && (cum->words % 2) != 0)
8290 cum->words++;
8292 cum->intoffset = 0;
8293 cum->use_stack = 0;
8294 cum->named = named;
8296 /* Put entries into rvec[] for individual FP and vector fields, and
8297 for the chunks of memory that go in int regs. Note we start at
8298 element 1; 0 is reserved for an indication of using memory, and
8299 may or may not be filled in below. */
8300 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8301 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8303 /* If any part of the struct went on the stack put all of it there.
8304 This hack is because the generic code for
8305 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8306 parts of the struct are not at the beginning. */
8307 if (cum->use_stack)
8309 if (retval)
8310 return NULL_RTX; /* doesn't go in registers at all */
8311 kbase = 0;
8312 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8314 if (k > 1 || cum->use_stack)
8315 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8316 else
8317 return NULL_RTX;
8320 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8322 static rtx
8323 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8324 int align_words)
8326 int n_units;
8327 int i, k;
8328 rtx rvec[GP_ARG_NUM_REG + 1];
8330 if (align_words >= GP_ARG_NUM_REG)
8331 return NULL_RTX;
8333 n_units = rs6000_arg_size (mode, type);
8335 /* Optimize the simple case where the arg fits in one gpr, except in
8336 the case of BLKmode due to assign_parms assuming that registers are
8337 BITS_PER_WORD wide. */
8338 if (n_units == 0
8339 || (n_units == 1 && mode != BLKmode))
8340 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8342 k = 0;
8343 if (align_words + n_units > GP_ARG_NUM_REG)
8344 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8345 using a magic NULL_RTX component.
8346 This is not strictly correct. Only some of the arg belongs in
8347 memory, not all of it. However, the normal scheme using
8348 function_arg_partial_nregs can result in unusual subregs, eg.
8349 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8350 store the whole arg to memory is often more efficient than code
8351 to store pieces, and we know that space is available in the right
8352 place for the whole arg. */
8353 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8355 i = 0;
8358 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
8359 rtx off = GEN_INT (i++ * 4);
8360 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8362 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8364 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
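/* Example of the PARALLEL built above: a DFmode argument with
   align_words == 7 (only r10 free) yields roughly

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI 10) (const_int 0))])

   meaning the first half lives in r10 and, per the NULL_RTX element,
   the argument also lives in memory.  */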
8367 /* Determine where to put an argument to a function.
8368 Value is zero to push the argument on the stack,
8369 or a hard register in which to store the argument.
8371 MODE is the argument's machine mode.
8372 TYPE is the data type of the argument (as a tree).
8373 This is null for libcalls where that information may
8374 not be available.
8375 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8376 the preceding args and about the function being called. It is
8377 not modified in this routine.
8378 NAMED is nonzero if this argument is a named parameter
8379 (otherwise it is an extra parameter matching an ellipsis).
8381 On RS/6000 the first eight words of non-FP are normally in registers
8382 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8383 Under V.4, the first 8 FP args are in registers.
8385 If this is floating-point and no prototype is specified, we use
8386 both an FP and integer register (or possibly FP reg and stack). Library
8387 functions (when CALL_LIBCALL is set) always have the proper types for args,
8388 so we can pass the FP value just in one register. emit_library_function
8389 doesn't support PARALLEL anyway.
8391 Note that for args passed by reference, function_arg will be called
8392 with MODE and TYPE set to that of the pointer to the arg, not the arg
8393 itself. */
8395 static rtx
8396 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8397 const_tree type, bool named)
8399 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8400 enum rs6000_abi abi = DEFAULT_ABI;
8402 /* Return a marker to indicate whether the CR1 bit that V.4 uses to
8403 say fp args were passed in registers needs to be set or cleared.
8404 Assume that we don't need the marker for software floating point,
8405 or compiler generated library calls. */
8406 if (mode == VOIDmode)
8408 if (abi == ABI_V4
8409 && (cum->call_cookie & CALL_LIBCALL) == 0
8410 && (cum->stdarg
8411 || (cum->nargs_prototype < 0
8412 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8414 /* For the SPE, we need to crxor CR6 always. */
8415 if (TARGET_SPE_ABI)
8416 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8417 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8418 return GEN_INT (cum->call_cookie
8419 | ((cum->fregno == FP_ARG_MIN_REG)
8420 ? CALL_V4_SET_FP_ARGS
8421 : CALL_V4_CLEAR_FP_ARGS));
8424 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8427 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8429 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8430 if (rslt != NULL_RTX)
8431 return rslt;
8432 /* Else fall through to usual handling. */
8435 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8436 if (TARGET_64BIT && ! cum->prototype)
8438 /* Vector parameters get passed in a vector register
8439 and also in GPRs or memory, in the absence of a prototype. */
8440 int align_words;
8441 rtx slot;
8442 align_words = (cum->words + 1) & ~1;
8444 if (align_words >= GP_ARG_NUM_REG)
8446 slot = NULL_RTX;
8448 else
8450 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8452 return gen_rtx_PARALLEL (mode,
8453 gen_rtvec (2,
8454 gen_rtx_EXPR_LIST (VOIDmode,
8455 slot, const0_rtx),
8456 gen_rtx_EXPR_LIST (VOIDmode,
8457 gen_rtx_REG (mode, cum->vregno),
8458 const0_rtx)));
8460 else
8461 return gen_rtx_REG (mode, cum->vregno);
8462 else if (TARGET_ALTIVEC_ABI
8463 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8464 || (type && TREE_CODE (type) == VECTOR_TYPE
8465 && int_size_in_bytes (type) == 16)))
8467 if (named || abi == ABI_V4)
8468 return NULL_RTX;
8469 else
8471 /* Vector parameters to varargs functions under AIX or Darwin
8472 get passed in memory and possibly also in GPRs. */
8473 int align, align_words, n_words;
8474 enum machine_mode part_mode;
8476 /* Vector parameters must be 16-byte aligned. This places them at
8477 2 mod 4 in terms of words in 32-bit mode, since the parameter
8478 save area starts at offset 24 from the stack. In 64-bit mode,
8479 they just have to start on an even word, since the parameter
8480 save area is 16-byte aligned. */
8481 if (TARGET_32BIT)
8482 align = (2 - cum->words) & 3;
8483 else
8484 align = cum->words & 1;
8485 align_words = cum->words + align;
8487 /* Out of registers? Memory, then. */
8488 if (align_words >= GP_ARG_NUM_REG)
8489 return NULL_RTX;
8491 if (TARGET_32BIT && TARGET_POWERPC64)
8492 return rs6000_mixed_function_arg (mode, type, align_words);
8494 /* The vector value goes in GPRs. Only the part of the
8495 value in GPRs is reported here. */
8496 part_mode = mode;
8497 n_words = rs6000_arg_size (mode, type);
8498 if (align_words + n_words > GP_ARG_NUM_REG)
8499 /* Fortunately, there are only two possibilities: the value
8500 is either wholly in GPRs or half in GPRs and half not. */
8501 part_mode = DImode;
8503 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8506 else if (TARGET_SPE_ABI && TARGET_SPE
8507 && (SPE_VECTOR_MODE (mode)
8508 || (TARGET_E500_DOUBLE && (mode == DFmode
8509 || mode == DCmode
8510 || mode == TFmode
8511 || mode == TCmode))))
8512 return rs6000_spe_function_arg (cum, mode, type);
8514 else if (abi == ABI_V4)
8516 if (TARGET_HARD_FLOAT && TARGET_FPRS
8517 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8518 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8519 || (mode == TFmode && !TARGET_IEEEQUAD)
8520 || mode == SDmode || mode == DDmode || mode == TDmode))
8522 /* _Decimal128 must use an even/odd register pair. This assumes
8523 that the register number is odd when fregno is odd. */
8524 if (mode == TDmode && (cum->fregno % 2) == 1)
8525 cum->fregno++;
8527 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8528 <= FP_ARG_V4_MAX_REG)
8529 return gen_rtx_REG (mode, cum->fregno);
8530 else
8531 return NULL_RTX;
8533 else
8535 int n_words = rs6000_arg_size (mode, type);
8536 int gregno = cum->sysv_gregno;
8538 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8539 (r7,r8) or (r9,r10). So are other 2-word items, such
8540 as complex int, due to a historical mistake. */
8541 if (n_words == 2)
8542 gregno += (1 - gregno) & 1;
8544 /* Multi-reg args are not split between registers and stack. */
8545 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8546 return NULL_RTX;
8548 if (TARGET_32BIT && TARGET_POWERPC64)
8549 return rs6000_mixed_function_arg (mode, type,
8550 gregno - GP_ARG_MIN_REG);
8551 return gen_rtx_REG (mode, gregno);
8554 else
8556 int align_words = rs6000_parm_start (mode, type, cum->words);
8558 /* _Decimal128 must be passed in an even/odd float register pair.
8559 This assumes that the register number is odd when fregno is odd. */
8560 if (mode == TDmode && (cum->fregno % 2) == 1)
8561 cum->fregno++;
8563 if (USE_FP_FOR_ARG_P (cum, mode, type))
8565 rtx rvec[GP_ARG_NUM_REG + 1];
8566 rtx r;
8567 int k;
8568 bool needs_psave;
8569 enum machine_mode fmode = mode;
8570 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8572 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8574 /* Currently, we only ever need one reg here because complex
8575 doubles are split. */
8576 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8577 && (fmode == TFmode || fmode == TDmode));
8579 /* Long double or _Decimal128 split over regs and memory. */
8580 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8583 /* Do we also need to pass this arg in the parameter save
8584 area? */
8585 needs_psave = (type
8586 && (cum->nargs_prototype <= 0
8587 || (DEFAULT_ABI == ABI_AIX
8588 && TARGET_XL_COMPAT
8589 && align_words >= GP_ARG_NUM_REG)));
8591 if (!needs_psave && mode == fmode)
8592 return gen_rtx_REG (fmode, cum->fregno);
8594 k = 0;
8595 if (needs_psave)
8597 /* Describe the part that goes in gprs or the stack.
8598 This piece must come first, before the fprs. */
8599 if (align_words < GP_ARG_NUM_REG)
8601 unsigned long n_words = rs6000_arg_size (mode, type);
8603 if (align_words + n_words > GP_ARG_NUM_REG
8604 || (TARGET_32BIT && TARGET_POWERPC64))
8606 /* If this is partially on the stack, then we only
8607 include the portion actually in registers here. */
8608 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8609 rtx off;
8610 int i = 0;
8611 if (align_words + n_words > GP_ARG_NUM_REG)
8612 /* Not all of the arg fits in gprs. Say that it
8613 goes in memory too, using a magic NULL_RTX
8614 component. Also see comment in
8615 rs6000_mixed_function_arg for why the normal
8616 function_arg_partial_nregs scheme doesn't work
8617 in this case. */
8618 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
8619 const0_rtx);
8622 r = gen_rtx_REG (rmode,
8623 GP_ARG_MIN_REG + align_words);
8624 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
8625 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8627 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
8629 else
8631 /* The whole arg fits in gprs. */
8632 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8633 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8636 else
8637 /* It's entirely in memory. */
8638 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8641 /* Describe where this piece goes in the fprs. */
8642 r = gen_rtx_REG (fmode, cum->fregno);
8643 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8645 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8647 else if (align_words < GP_ARG_NUM_REG)
8649 if (TARGET_32BIT && TARGET_POWERPC64)
8650 return rs6000_mixed_function_arg (mode, type, align_words);
8652 if (mode == BLKmode)
8653 mode = Pmode;
8655 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8657 else
8658 return NULL_RTX;
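/* Tying the FP logic above together with a hedged example: given

     extern void unproto ();
     ... unproto (1.5); ...

   on 32-bit AIX the double is passed in f1, and because no prototype
   is visible the needs_psave path also describes it in the GPRs
   starting at r3, so a callee reading either location works.  */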
8662 /* For an arg passed partly in registers and partly in memory, this is
8663 the number of bytes passed in registers. For args passed entirely in
8664 registers or entirely in memory, zero. When an arg is described by a
8665 PARALLEL, perhaps using more than one register type, this function
8666 returns the number of bytes used by the first element of the PARALLEL. */
8668 static int
8669 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
8670 tree type, bool named)
8672 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8673 int ret = 0;
8674 int align_words;
8676 if (DEFAULT_ABI == ABI_V4)
8677 return 0;
8679 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
8680 && cum->nargs_prototype >= 0)
8681 return 0;
8683 /* In this complicated case we just disable the partial_nregs code. */
8684 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8685 return 0;
8687 align_words = rs6000_parm_start (mode, type, cum->words);
8689 if (USE_FP_FOR_ARG_P (cum, mode, type))
8691 /* If we are passing this arg in the fixed parameter save area
8692 (gprs or memory) as well as fprs, then this function should
8693 return the number of partial bytes passed in the parameter
8694 save area rather than partial bytes passed in fprs. */
8695 if (type
8696 && (cum->nargs_prototype <= 0
8697 || (DEFAULT_ABI == ABI_AIX
8698 && TARGET_XL_COMPAT
8699 && align_words >= GP_ARG_NUM_REG)))
8700 return 0;
8701 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
8702 > FP_ARG_MAX_REG + 1)
8703 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
8704 else if (cum->nargs_prototype >= 0)
8705 return 0;
8708 if (align_words < GP_ARG_NUM_REG
8709 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
8710 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
8712 if (ret != 0 && TARGET_DEBUG_ARG)
8713 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
8715 return ret;
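/* A worked example for the function above: on a 32-bit AIX target
   with seven parameter words already used (align_words == 7), a
   DImode argument needs two words but only r10 remains, so the
   function returns (8 - 7) * 4 == 4 partial bytes.  */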
8718 /* A C expression that indicates when an argument must be passed by
8719 reference. If nonzero for an argument, a copy of that argument is
8720 made in memory and a pointer to the argument is passed instead of
8721 the argument itself. The pointer is passed in whatever way is
8722 appropriate for passing a pointer to that type.
8724 Under V.4, aggregates and long double are passed by reference.
8726 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
8727 reference unless the AltiVec vector extension ABI is in force.
8729 As an extension to all ABIs, variable sized types are passed by
8730 reference. */
8732 static bool
8733 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
8734 enum machine_mode mode, const_tree type,
8735 bool named ATTRIBUTE_UNUSED)
8737 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
8739 if (TARGET_DEBUG_ARG)
8740 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
8741 return 1;
8744 if (!type)
8745 return 0;
8747 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
8749 if (TARGET_DEBUG_ARG)
8750 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
8751 return 1;
8754 if (int_size_in_bytes (type) < 0)
8756 if (TARGET_DEBUG_ARG)
8757 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8758 return 1;
8761 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8762 modes only exist for GCC vector types if -maltivec. */
8763 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8765 if (TARGET_DEBUG_ARG)
8766 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8767 return 1;
8770 /* Pass synthetic vectors in memory. */
8771 if (TREE_CODE (type) == VECTOR_TYPE
8772 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8774 static bool warned_for_pass_big_vectors = false;
8775 if (TARGET_DEBUG_ARG)
8776 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8777 if (!warned_for_pass_big_vectors)
8779 warning (0, "GCC vector passed by reference: "
8780 "non-standard ABI extension with no compatibility guarantee");
8781 warned_for_pass_big_vectors = true;
8783 return 1;
8786 return 0;
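/* Concrete cases for the rules above (the types are illustrative):

     struct pair { int a, b; };    V.4 aggregate: by reference
     long double                   V.4 with IEEE 128-bit long double
                                   (TARGET_IEEEQUAD): by reference
     double                        scalar: by value everywhere  */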
8789 static void
8790 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8792 int i;
8793 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8795 if (nregs == 0)
8796 return;
8798 for (i = 0; i < nregs; i++)
8800 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
8801 if (reload_completed)
8803 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8804 tem = NULL_RTX;
8805 else
8806 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8807 i * GET_MODE_SIZE (reg_mode));
8809 else
8810 tem = replace_equiv_address (tem, XEXP (tem, 0));
8812 gcc_assert (tem);
8814 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
8818 /* Perform any needed actions needed for a function that is receiving a
8819 variable number of arguments.
8821 CUM is as above.
8823 MODE and TYPE are the mode and type of the current parameter.
8825 PRETEND_SIZE is a variable that should be set to the amount of stack
8826 that must be pushed by the prolog to pretend that our caller pushed it.
8829 Normally, this macro will push all remaining incoming registers on the
8830 stack and set PRETEND_SIZE to the length of the registers pushed. */
8832 static void
8833 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
8834 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8835 int no_rtl)
8837 CUMULATIVE_ARGS next_cum;
8838 int reg_size = TARGET_32BIT ? 4 : 8;
8839 rtx save_area = NULL_RTX, mem;
8840 int first_reg_offset;
8841 alias_set_type set;
8843 /* Skip the last named argument. */
8844 next_cum = *get_cumulative_args (cum);
8845 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
8847 if (DEFAULT_ABI == ABI_V4)
8849 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8851 if (! no_rtl)
8853 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8854 HOST_WIDE_INT offset = 0;
8856 /* Try to optimize the size of the varargs save area.
8857 The ABI requires that ap.reg_save_area is doubleword
8858 aligned, but we don't need to allocate space for all
8859 the bytes, only for those in which we will actually save
8860 anything. */
8861 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
8862 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
8863 if (TARGET_HARD_FLOAT && TARGET_FPRS
8864 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8865 && cfun->va_list_fpr_size)
8867 if (gpr_reg_num)
8868 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
8869 * UNITS_PER_FP_WORD;
8870 if (cfun->va_list_fpr_size
8871 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8872 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
8873 else
8874 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8875 * UNITS_PER_FP_WORD;
8877 if (gpr_reg_num)
8879 offset = -((first_reg_offset * reg_size) & ~7);
8880 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
8882 gpr_reg_num = cfun->va_list_gpr_size;
8883 if (reg_size == 4 && (first_reg_offset & 1))
8884 gpr_reg_num++;
8886 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
8888 else if (fpr_size)
8889 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
8890 * UNITS_PER_FP_WORD
8891 - (int) (GP_ARG_NUM_REG * reg_size);
8893 if (gpr_size + fpr_size)
8895 rtx reg_save_area
8896 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
8897 gcc_assert (GET_CODE (reg_save_area) == MEM);
8898 reg_save_area = XEXP (reg_save_area, 0);
8899 if (GET_CODE (reg_save_area) == PLUS)
8901 gcc_assert (XEXP (reg_save_area, 0)
8902 == virtual_stack_vars_rtx);
8903 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
8904 offset += INTVAL (XEXP (reg_save_area, 1));
8906 else
8907 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
8910 cfun->machine->varargs_save_offset = offset;
8911 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
8914 else
8916 first_reg_offset = next_cum.words;
8917 save_area = virtual_incoming_args_rtx;
8919 if (targetm.calls.must_pass_in_stack (mode, type))
8920 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
8923 set = get_varargs_alias_set ();
8924 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
8925 && cfun->va_list_gpr_size)
8927 int nregs = GP_ARG_NUM_REG - first_reg_offset;
8929 if (va_list_gpr_counter_field)
8931 /* V4 va_list_gpr_size counts number of registers needed. */
8932 if (nregs > cfun->va_list_gpr_size)
8933 nregs = cfun->va_list_gpr_size;
8935 else
8937 /* char * va_list instead counts number of bytes needed. */
8938 if (nregs > cfun->va_list_gpr_size / reg_size)
8939 nregs = cfun->va_list_gpr_size / reg_size;
8942 mem = gen_rtx_MEM (BLKmode,
8943 plus_constant (Pmode, save_area,
8944 first_reg_offset * reg_size));
8945 MEM_NOTRAP_P (mem) = 1;
8946 set_mem_alias_set (mem, set);
8947 set_mem_align (mem, BITS_PER_WORD);
8949 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
8950 nregs);
8953 /* Save FP registers if needed. */
8954 if (DEFAULT_ABI == ABI_V4
8955 && TARGET_HARD_FLOAT && TARGET_FPRS
8956 && ! no_rtl
8957 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8958 && cfun->va_list_fpr_size)
8960 int fregno = next_cum.fregno, nregs;
8961 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
8962 rtx lab = gen_label_rtx ();
8963 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
8964 * UNITS_PER_FP_WORD);
8966 emit_jump_insn
8967 (gen_rtx_SET (VOIDmode,
8968 pc_rtx,
8969 gen_rtx_IF_THEN_ELSE (VOIDmode,
8970 gen_rtx_NE (VOIDmode, cr1,
8971 const0_rtx),
8972 gen_rtx_LABEL_REF (VOIDmode, lab),
8973 pc_rtx)));
8975 for (nregs = 0;
8976 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
8977 fregno++, off += UNITS_PER_FP_WORD, nregs++)
8979 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8980 ? DFmode : SFmode,
8981 plus_constant (Pmode, save_area, off));
8982 MEM_NOTRAP_P (mem) = 1;
8983 set_mem_alias_set (mem, set);
8984 set_mem_align (mem, GET_MODE_ALIGNMENT (
8985 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8986 ? DFmode : SFmode));
8987 emit_move_insn (mem, gen_rtx_REG (
8988 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8989 ? DFmode : SFmode, fregno));
8992 emit_label (lab);
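/* A layout sketch of the V.4 register save area established above
   (assuming 32-bit, hard float, all registers saved):

     bytes  0..31    r3..r10  (GP_ARG_NUM_REG * 4)
     bytes 32..95    f1..f8   (8 * UNITS_PER_FP_WORD)

   The gpr_size/fpr_size computation shrinks this block when
   va_list_gpr_size and va_list_fpr_size show that fewer registers
   are actually read through va_arg.  */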
8996 /* Create the va_list data type. */
8998 static tree
8999 rs6000_build_builtin_va_list (void)
9001 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9003 /* For AIX, prefer 'char *' because that's what the system
9004 header files like. */
9005 if (DEFAULT_ABI != ABI_V4)
9006 return build_pointer_type (char_type_node);
9008 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9009 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9010 get_identifier ("__va_list_tag"), record);
9012 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9013 unsigned_char_type_node);
9014 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9015 unsigned_char_type_node);
9016 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9017 every user file. */
9018 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9019 get_identifier ("reserved"), short_unsigned_type_node);
9020 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9021 get_identifier ("overflow_arg_area"),
9022 ptr_type_node);
9023 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9024 get_identifier ("reg_save_area"),
9025 ptr_type_node);
9027 va_list_gpr_counter_field = f_gpr;
9028 va_list_fpr_counter_field = f_fpr;
9030 DECL_FIELD_CONTEXT (f_gpr) = record;
9031 DECL_FIELD_CONTEXT (f_fpr) = record;
9032 DECL_FIELD_CONTEXT (f_res) = record;
9033 DECL_FIELD_CONTEXT (f_ovf) = record;
9034 DECL_FIELD_CONTEXT (f_sav) = record;
9036 TYPE_STUB_DECL (record) = type_decl;
9037 TYPE_NAME (record) = type_decl;
9038 TYPE_FIELDS (record) = f_gpr;
9039 DECL_CHAIN (f_gpr) = f_fpr;
9040 DECL_CHAIN (f_fpr) = f_res;
9041 DECL_CHAIN (f_res) = f_ovf;
9042 DECL_CHAIN (f_ovf) = f_sav;
9044 layout_type (record);
9046 /* The correct type is an array type of one element. */
9047 return build_array_type (record, build_index_type (size_zero_node));
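/* Field for field, the record built above matches the user-visible
   V.4 va_list (a sketch of the equivalent declaration):

     typedef struct __va_list_tag {
       unsigned char gpr;          counts GP argument registers used
       unsigned char fpr;          counts FP argument registers used
       unsigned short reserved;    the named padding
       void *overflow_arg_area;    arguments passed on the stack
       void *reg_save_area;        the block saved by va_start
     } __va_list_tag;              exposed as an array of one element  */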
9050 /* Implement va_start. */
9052 static void
9053 rs6000_va_start (tree valist, rtx nextarg)
9055 HOST_WIDE_INT words, n_gpr, n_fpr;
9056 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9057 tree gpr, fpr, ovf, sav, t;
9059 /* Only SVR4 needs something special. */
9060 if (DEFAULT_ABI != ABI_V4)
9062 std_expand_builtin_va_start (valist, nextarg);
9063 return;
9066 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9067 f_fpr = DECL_CHAIN (f_gpr);
9068 f_res = DECL_CHAIN (f_fpr);
9069 f_ovf = DECL_CHAIN (f_res);
9070 f_sav = DECL_CHAIN (f_ovf);
9072 valist = build_simple_mem_ref (valist);
9073 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9074 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9075 f_fpr, NULL_TREE);
9076 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9077 f_ovf, NULL_TREE);
9078 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9079 f_sav, NULL_TREE);
9081 /* Count number of gp and fp argument registers used. */
9082 words = crtl->args.info.words;
9083 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9084 GP_ARG_NUM_REG);
9085 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9086 FP_ARG_NUM_REG);
9088 if (TARGET_DEBUG_ARG)
9089 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9090 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9091 words, n_gpr, n_fpr);
9093 if (cfun->va_list_gpr_size)
9095 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9096 build_int_cst (NULL_TREE, n_gpr));
9097 TREE_SIDE_EFFECTS (t) = 1;
9098 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9101 if (cfun->va_list_fpr_size)
9103 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9104 build_int_cst (NULL_TREE, n_fpr));
9105 TREE_SIDE_EFFECTS (t) = 1;
9106 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9108 #ifdef HAVE_AS_GNU_ATTRIBUTE
9109 if (call_ABI_of_interest (cfun->decl))
9110 rs6000_passes_float = true;
9111 #endif
9114 /* Find the overflow area. */
9115 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9116 if (words != 0)
9117 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9118 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9119 TREE_SIDE_EFFECTS (t) = 1;
9120 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9122 /* If there were no va_arg invocations, don't set up the register
9123 save area. */
9124 if (!cfun->va_list_gpr_size
9125 && !cfun->va_list_fpr_size
9126 && n_gpr < GP_ARG_NUM_REG
9127 && n_fpr < FP_ARG_V4_MAX_REG)
9128 return;
9130 /* Find the register save area. */
9131 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9132 if (cfun->machine->varargs_save_offset)
9133 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9134 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9135 TREE_SIDE_EFFECTS (t) = 1;
9136 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9139 /* Implement va_arg. */
9141 static tree
9142 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9143 gimple_seq *post_p)
9145 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9146 tree gpr, fpr, ovf, sav, reg, t, u;
9147 int size, rsize, n_reg, sav_ofs, sav_scale;
9148 tree lab_false, lab_over, addr;
9149 int align;
9150 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9151 int regalign = 0;
9152 gimple stmt;
9154 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9156 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9157 return build_va_arg_indirect_ref (t);
9160 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9161 earlier version of gcc, with the property that it always applied alignment
9162 adjustments to the va-args (even for zero-sized types). The cheapest way
9163 to deal with this is to replicate the effect of the part of
9164 std_gimplify_va_arg_expr that carries out the align adjust, for the case
9165 of relevance.
9166 We don't need to check for pass-by-reference because of the test above.
9167 We can return a simplified answer, since we know there's no offset to add. */
9169 if (TARGET_MACHO
9170 && rs6000_darwin64_abi
9171 && integer_zerop (TYPE_SIZE (type)))
9173 unsigned HOST_WIDE_INT align, boundary;
9174 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9175 align = PARM_BOUNDARY / BITS_PER_UNIT;
9176 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9177 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9178 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9179 boundary /= BITS_PER_UNIT;
9180 if (boundary > align)
9182 tree t;
9183 /* This updates arg ptr by the amount that would be necessary
9184 to align the zero-sized (but not zero-alignment) item. */
9185 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9186 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9187 gimplify_and_add (t, pre_p);
9189 t = fold_convert (sizetype, valist_tmp);
9190 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9191 fold_convert (TREE_TYPE (valist),
9192 fold_build2 (BIT_AND_EXPR, sizetype, t,
9193 size_int (-boundary))));
9194 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9195 gimplify_and_add (t, pre_p);
9197 /* Since it is zero-sized there's no increment for the item itself. */
9198 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9199 return build_va_arg_indirect_ref (valist_tmp);
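/* Worked example of the round-up just emitted (illustrative numbers):
   with boundary == 16 and the pointer at 0x1008, adding boundary - 1
   gives 0x1017, and masking with -boundary (~0xf) yields 0x1010, the
   next 16-byte boundary.  */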
9202 if (DEFAULT_ABI != ABI_V4)
9204 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9206 tree elem_type = TREE_TYPE (type);
9207 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9208 int elem_size = GET_MODE_SIZE (elem_mode);
9210 if (elem_size < UNITS_PER_WORD)
9212 tree real_part, imag_part;
9213 gimple_seq post = NULL;
9215 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9216 &post);
9217 /* Copy the value into a temporary, lest the formal temporary
9218 be reused out from under us. */
9219 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9220 gimple_seq_add_seq (pre_p, post);
9222 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9223 post_p);
9225 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9229 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9232 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9233 f_fpr = DECL_CHAIN (f_gpr);
9234 f_res = DECL_CHAIN (f_fpr);
9235 f_ovf = DECL_CHAIN (f_res);
9236 f_sav = DECL_CHAIN (f_ovf);
9238 valist = build_va_arg_indirect_ref (valist);
9239 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9240 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9241 f_fpr, NULL_TREE);
9242 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9243 f_ovf, NULL_TREE);
9244 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9245 f_sav, NULL_TREE);
9247 size = int_size_in_bytes (type);
9248 rsize = (size + 3) / 4;
9249 align = 1;
9251 if (TARGET_HARD_FLOAT && TARGET_FPRS
9252 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9253 || (TARGET_DOUBLE_FLOAT
9254 && (TYPE_MODE (type) == DFmode
9255 || TYPE_MODE (type) == TFmode
9256 || TYPE_MODE (type) == SDmode
9257 || TYPE_MODE (type) == DDmode
9258 || TYPE_MODE (type) == TDmode))))
9260 /* FP args go in FP registers, if present. */
9261 reg = fpr;
9262 n_reg = (size + 7) / 8;
9263 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9264 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9265 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9266 align = 8;
9268 else
9270 /* Otherwise into GP registers. */
9271 reg = gpr;
9272 n_reg = rsize;
9273 sav_ofs = 0;
9274 sav_scale = 4;
9275 if (n_reg == 2)
9276 align = 8;
9279 /* Pull the value out of the saved registers.... */
9281 lab_over = NULL;
9282 addr = create_tmp_var (ptr_type_node, "addr");
9284 /* AltiVec vectors never go in registers when -mabi=altivec. */
9285 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9286 align = 16;
9287 else
9289 lab_false = create_artificial_label (input_location);
9290 lab_over = create_artificial_label (input_location);
9292 /* Long long and SPE vectors are aligned in the registers.
9293 As are any other 2 gpr item such as complex int due to a
9294 historical mistake. */
9295 u = reg;
9296 if (n_reg == 2 && reg == gpr)
9298 regalign = 1;
9299 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9300 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9301 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9302 unshare_expr (reg), u);
9304 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9305 reg number is 0 for f1, so we want to make it odd. */
9306 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9308 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9309 build_int_cst (TREE_TYPE (reg), 1));
9310 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
9313 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9314 t = build2 (GE_EXPR, boolean_type_node, u, t);
9315 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9316 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9317 gimplify_and_add (t, pre_p);
9319 t = sav;
9320 if (sav_ofs)
9321 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9323 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9324 build_int_cst (TREE_TYPE (reg), n_reg));
9325 u = fold_convert (sizetype, u);
9326 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9327 t = fold_build_pointer_plus (t, u);
9329 /* _Decimal32 varargs are located in the second word of the 64-bit
9330 FP register for 32-bit binaries. */
9331 if (!TARGET_POWERPC64
9332 && TARGET_HARD_FLOAT && TARGET_FPRS
9333 && TYPE_MODE (type) == SDmode)
9334 t = fold_build_pointer_plus_hwi (t, size);
9336 gimplify_assign (addr, t, pre_p);
9338 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9340 stmt = gimple_build_label (lab_false);
9341 gimple_seq_add_stmt (pre_p, stmt);
9343 if ((n_reg == 2 && !regalign) || n_reg > 2)
9345 /* Ensure that we don't find any more args in regs.
9346 Alignment has taken care of the special cases. */
9347 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9351 /* ... otherwise out of the overflow area. */
9353 /* Care for on-stack alignment if needed. */
9354 t = ovf;
9355 if (align != 1)
9357 t = fold_build_pointer_plus_hwi (t, align - 1);
9358 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9359 build_int_cst (TREE_TYPE (t), -align));
9361 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9363 gimplify_assign (unshare_expr (addr), t, pre_p);
9365 t = fold_build_pointer_plus_hwi (t, size);
9366 gimplify_assign (unshare_expr (ovf), t, pre_p);
9368 if (lab_over)
9370 stmt = gimple_build_label (lab_over);
9371 gimple_seq_add_stmt (pre_p, stmt);
9374 if (STRICT_ALIGNMENT
9375 && (TYPE_ALIGN (type)
9376 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9378 /* The value (of type complex double, for example) may not be
9379 aligned in memory in the saved registers, so copy via a
9380 temporary. (This is the same code as used for SPARC.) */
9381 tree tmp = create_tmp_var (type, "va_arg_tmp");
9382 tree dest_addr = build_fold_addr_expr (tmp);
9384 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9385 3, dest_addr, addr, size_int (rsize * 4));
9387 gimplify_and_add (copy, pre_p);
9388 addr = dest_addr;
9391 addr = fold_convert (ptrtype, addr);
9392 return build_va_arg_indirect_ref (addr);
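/* In outline, the gimple built above behaves like the following pseudo-C
   (a sketch, not the literal emitted sequence):

       if (reg + n_reg <= 8)
         { addr = sav + sav_ofs + reg * sav_scale; reg += n_reg; }
       else
         { reg = 8;                        (no more register arguments)
           addr = (ovf + align - 1) & -align;
           ovf = addr + size; }
       return *(type *) addr;  */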
9395 /* Builtins. */
9397 static void
9398 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9400 tree t;
9401 unsigned classify = rs6000_builtin_info[(int)code].attr;
9402 const char *attr_string = "";
9404 gcc_assert (name != NULL);
9405 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
9407 if (rs6000_builtin_decls[(int)code])
9408 fatal_error ("internal error: builtin function %s already processed", name);
9410 rs6000_builtin_decls[(int)code] = t =
9411 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9413 /* Set any special attributes. */
9414 if ((classify & RS6000_BTC_CONST) != 0)
9416 /* const function, function only depends on the inputs. */
9417 TREE_READONLY (t) = 1;
9418 TREE_NOTHROW (t) = 1;
9419 attr_string = ", const";
9421 else if ((classify & RS6000_BTC_PURE) != 0)
9423 /* pure function, function can read global memory, but does not set any
9424 external state. */
9425 DECL_PURE_P (t) = 1;
9426 TREE_NOTHROW (t) = 1;
9427 attr_string = ", pure";
9429 else if ((classify & RS6000_BTC_FP) != 0)
9431 /* Function is a math function. If rounding mode is on, then treat the
9432 function as not reading global memory, but it can have arbitrary side
9433 effects. If it is off, then assume the function is a const function.
9434 This mimics the ATTR_MATHFN_FPROUNDING attribute in
9435 builtin-attribute.def that is used for the math functions. */
9436 TREE_NOTHROW (t) = 1;
9437 if (flag_rounding_math)
9439 DECL_PURE_P (t) = 1;
9440 DECL_IS_NOVOPS (t) = 1;
9441 attr_string = ", fp, pure";
9443 else
9445 TREE_READONLY (t) = 1;
9446 attr_string = ", fp, const";
9449 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9450 gcc_unreachable ();
9452 if (TARGET_DEBUG_BUILTIN)
9453 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9454 (int)code, name, attr_string);
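/* Illustrative use (hypothetical type variable): a two-operand AltiVec
   builtin would be registered as

       def_builtin ("__builtin_altivec_vaddubm",
                    v16qi_ftype_v16qi_v16qi, ALTIVEC_BUILTIN_VADDUBM);

   though in practice rs6000_common_init_builtins walks the bdesc_*
   tables below and makes these calls mechanically.  */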
9457 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9459 #undef RS6000_BUILTIN_1
9460 #undef RS6000_BUILTIN_2
9461 #undef RS6000_BUILTIN_3
9462 #undef RS6000_BUILTIN_A
9463 #undef RS6000_BUILTIN_D
9464 #undef RS6000_BUILTIN_E
9465 #undef RS6000_BUILTIN_P
9466 #undef RS6000_BUILTIN_Q
9467 #undef RS6000_BUILTIN_S
9468 #undef RS6000_BUILTIN_X
9470 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9471 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9472 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9473 { MASK, ICODE, NAME, ENUM },
9475 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9476 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9477 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9478 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9479 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9480 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9481 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9483 static const struct builtin_description bdesc_3arg[] =
9485 #include "rs6000-builtin.def"
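/* The #undef/#define block above is the "X macro" idiom used for every
   bdesc_* table in this file: rs6000-builtin.def expands one
   RS6000_BUILTIN_* macro per builtin, and before each #include exactly
   one of those macros is defined to emit a table entry while the others
   expand to nothing, so the same .def file populates each table with
   just its own kind of builtin.  Schematically, a .def line such as

       RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                         MASK, ATTR, CODE_FOR_altivec_vmaddfp)

   (argument details elided) contributes an entry only to bdesc_3arg.  */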
9488 /* DST operations: void foo (void *, const int, const char). */
9490 #undef RS6000_BUILTIN_1
9491 #undef RS6000_BUILTIN_2
9492 #undef RS6000_BUILTIN_3
9493 #undef RS6000_BUILTIN_A
9494 #undef RS6000_BUILTIN_D
9495 #undef RS6000_BUILTIN_E
9496 #undef RS6000_BUILTIN_P
9497 #undef RS6000_BUILTIN_Q
9498 #undef RS6000_BUILTIN_S
9499 #undef RS6000_BUILTIN_X
9501 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9502 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9503 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9504 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9505 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9506 { MASK, ICODE, NAME, ENUM },
9508 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9509 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9510 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9511 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9512 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9514 static const struct builtin_description bdesc_dst[] =
9516 #include "rs6000-builtin.def"
9519 /* Simple binary operations: VECc = foo (VECa, VECb). */
9521 #undef RS6000_BUILTIN_1
9522 #undef RS6000_BUILTIN_2
9523 #undef RS6000_BUILTIN_3
9524 #undef RS6000_BUILTIN_A
9525 #undef RS6000_BUILTIN_D
9526 #undef RS6000_BUILTIN_E
9527 #undef RS6000_BUILTIN_P
9528 #undef RS6000_BUILTIN_Q
9529 #undef RS6000_BUILTIN_S
9530 #undef RS6000_BUILTIN_X
9532 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9533 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9534 { MASK, ICODE, NAME, ENUM },
9536 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9537 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9538 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9539 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9540 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9541 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9542 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9543 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9545 static const struct builtin_description bdesc_2arg[] =
9547 #include "rs6000-builtin.def"
9550 #undef RS6000_BUILTIN_1
9551 #undef RS6000_BUILTIN_2
9552 #undef RS6000_BUILTIN_3
9553 #undef RS6000_BUILTIN_A
9554 #undef RS6000_BUILTIN_D
9555 #undef RS6000_BUILTIN_E
9556 #undef RS6000_BUILTIN_P
9557 #undef RS6000_BUILTIN_Q
9558 #undef RS6000_BUILTIN_S
9559 #undef RS6000_BUILTIN_X
9561 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9562 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9563 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9564 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9565 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9566 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9567 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9568 { MASK, ICODE, NAME, ENUM },
9570 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9571 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9572 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9574 /* AltiVec predicates. */
9576 static const struct builtin_description bdesc_altivec_preds[] =
9578 #include "rs6000-builtin.def"
9581 /* SPE predicates. */
9582 #undef RS6000_BUILTIN_1
9583 #undef RS6000_BUILTIN_2
9584 #undef RS6000_BUILTIN_3
9585 #undef RS6000_BUILTIN_A
9586 #undef RS6000_BUILTIN_D
9587 #undef RS6000_BUILTIN_E
9588 #undef RS6000_BUILTIN_P
9589 #undef RS6000_BUILTIN_Q
9590 #undef RS6000_BUILTIN_S
9591 #undef RS6000_BUILTIN_X
9593 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9594 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9595 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9596 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9597 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9598 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9599 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9600 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9601 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9602 { MASK, ICODE, NAME, ENUM },
9604 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9606 static const struct builtin_description bdesc_spe_predicates[] =
9608 #include "rs6000-builtin.def"
9611 /* SPE evsel predicates. */
9612 #undef RS6000_BUILTIN_1
9613 #undef RS6000_BUILTIN_2
9614 #undef RS6000_BUILTIN_3
9615 #undef RS6000_BUILTIN_A
9616 #undef RS6000_BUILTIN_D
9617 #undef RS6000_BUILTIN_E
9618 #undef RS6000_BUILTIN_P
9619 #undef RS6000_BUILTIN_Q
9620 #undef RS6000_BUILTIN_S
9621 #undef RS6000_BUILTIN_X
9623 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9624 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9625 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9626 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9627 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9628 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
9629 { MASK, ICODE, NAME, ENUM },
9631 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9632 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9633 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9634 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9636 static const struct builtin_description bdesc_spe_evsel[] =
9638 #include "rs6000-builtin.def"
9641 /* PAIRED predicates. */
9642 #undef RS6000_BUILTIN_1
9643 #undef RS6000_BUILTIN_2
9644 #undef RS6000_BUILTIN_3
9645 #undef RS6000_BUILTIN_A
9646 #undef RS6000_BUILTIN_D
9647 #undef RS6000_BUILTIN_E
9648 #undef RS6000_BUILTIN_P
9649 #undef RS6000_BUILTIN_Q
9650 #undef RS6000_BUILTIN_S
9651 #undef RS6000_BUILTIN_X
9653 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9654 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9655 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9656 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9657 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9658 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9659 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9660 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
9661 { MASK, ICODE, NAME, ENUM },
9663 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9664 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9666 static const struct builtin_description bdesc_paired_preds[] =
9668 #include "rs6000-builtin.def"
9671 /* ABS* operations. */
9673 #undef RS6000_BUILTIN_1
9674 #undef RS6000_BUILTIN_2
9675 #undef RS6000_BUILTIN_3
9676 #undef RS6000_BUILTIN_A
9677 #undef RS6000_BUILTIN_D
9678 #undef RS6000_BUILTIN_E
9679 #undef RS6000_BUILTIN_P
9680 #undef RS6000_BUILTIN_Q
9681 #undef RS6000_BUILTIN_S
9682 #undef RS6000_BUILTIN_X
9684 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9685 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9686 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9687 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
9688 { MASK, ICODE, NAME, ENUM },
9690 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9691 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9692 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9693 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9694 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9695 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9697 static const struct builtin_description bdesc_abs[] =
9699 #include "rs6000-builtin.def"
9702 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
9703 foo (VECa). */
9705 #undef RS6000_BUILTIN_1
9706 #undef RS6000_BUILTIN_2
9707 #undef RS6000_BUILTIN_3
9708 #undef RS6000_BUILTIN_A
9709 #undef RS6000_BUILTIN_E
9710 #undef RS6000_BUILTIN_D
9711 #undef RS6000_BUILTIN_P
9712 #undef RS6000_BUILTIN_Q
9713 #undef RS6000_BUILTIN_S
9714 #undef RS6000_BUILTIN_X
9716 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
9717 { MASK, ICODE, NAME, ENUM },
9719 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9720 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9721 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9722 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9723 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9724 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9725 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9726 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9727 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9729 static const struct builtin_description bdesc_1arg[] =
9731 #include "rs6000-builtin.def"
9734 #undef RS6000_BUILTIN_1
9735 #undef RS6000_BUILTIN_2
9736 #undef RS6000_BUILTIN_3
9737 #undef RS6000_BUILTIN_A
9738 #undef RS6000_BUILTIN_D
9739 #undef RS6000_BUILTIN_E
9740 #undef RS6000_BUILTIN_P
9741 #undef RS6000_BUILTIN_Q
9742 #undef RS6000_BUILTIN_S
9743 #undef RS6000_BUILTIN_X
9745 /* Return true if a builtin function is overloaded. */
9746 bool
9747 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
9749 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
9752 /* Expand an expression EXP that calls a builtin without arguments. */
9753 static rtx
9754 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
9756 rtx pat;
9757 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9759 if (icode == CODE_FOR_nothing)
9760 /* Builtin not supported on this processor. */
9761 return 0;
9763 if (target == 0
9764 || GET_MODE (target) != tmode
9765 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9766 target = gen_reg_rtx (tmode);
9768 pat = GEN_FCN (icode) (target);
9769 if (! pat)
9770 return 0;
9771 emit_insn (pat);
9773 return target;
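/* The shape of this expander -- reject CODE_FOR_nothing, validate TARGET
   against the insn's operand predicate and fall back to a fresh pseudo,
   build the insn with GEN_FCN, and return 0 when the generator refuses
   the operands -- recurs in every expander below; only the operand count
   and the literal-range checks vary.  */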
9777 static rtx
9778 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9780 rtx pat;
9781 tree arg0 = CALL_EXPR_ARG (exp, 0);
9782 rtx op0 = expand_normal (arg0);
9783 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9784 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9786 if (icode == CODE_FOR_nothing)
9787 /* Builtin not supported on this processor. */
9788 return 0;
9790 /* If we got invalid arguments bail out before generating bad rtl. */
9791 if (arg0 == error_mark_node)
9792 return const0_rtx;
9794 if (icode == CODE_FOR_altivec_vspltisb
9795 || icode == CODE_FOR_altivec_vspltish
9796 || icode == CODE_FOR_altivec_vspltisw
9797 || icode == CODE_FOR_spe_evsplatfi
9798 || icode == CODE_FOR_spe_evsplati)
9800 /* Only allow 5-bit *signed* literals. */
9801 if (GET_CODE (op0) != CONST_INT
9802 || INTVAL (op0) > 15
9803 || INTVAL (op0) < -16)
9805 error ("argument 1 must be a 5-bit signed literal");
9806 return const0_rtx;
9810 if (target == 0
9811 || GET_MODE (target) != tmode
9812 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9813 target = gen_reg_rtx (tmode);
9815 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9816 op0 = copy_to_mode_reg (mode0, op0);
9818 pat = GEN_FCN (icode) (target, op0);
9819 if (! pat)
9820 return 0;
9821 emit_insn (pat);
9823 return target;
9826 static rtx
9827 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9829 rtx pat, scratch1, scratch2;
9830 tree arg0 = CALL_EXPR_ARG (exp, 0);
9831 rtx op0 = expand_normal (arg0);
9832 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9833 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9835 /* If we have invalid arguments, bail out before generating bad rtl. */
9836 if (arg0 == error_mark_node)
9837 return const0_rtx;
9839 if (target == 0
9840 || GET_MODE (target) != tmode
9841 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9842 target = gen_reg_rtx (tmode);
9844 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9845 op0 = copy_to_mode_reg (mode0, op0);
9847 scratch1 = gen_reg_rtx (mode0);
9848 scratch2 = gen_reg_rtx (mode0);
9850 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9851 if (! pat)
9852 return 0;
9853 emit_insn (pat);
9855 return target;
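/* The two scratch registers reflect how AltiVec fakes absolute value:
   the machine has no abs instruction, so the patterns behind ICODE
   compute roughly max (x, 0 - x), needing temporaries for the zero splat
   and the negation (see the altivec_abs* expanders in altivec.md).
   Sketch only.  */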
9858 static rtx
9859 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
9861 rtx pat;
9862 tree arg0 = CALL_EXPR_ARG (exp, 0);
9863 tree arg1 = CALL_EXPR_ARG (exp, 1);
9864 rtx op0 = expand_normal (arg0);
9865 rtx op1 = expand_normal (arg1);
9866 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9867 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9868 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9870 if (icode == CODE_FOR_nothing)
9871 /* Builtin not supported on this processor. */
9872 return 0;
9874 /* If we got invalid arguments bail out before generating bad rtl. */
9875 if (arg0 == error_mark_node || arg1 == error_mark_node)
9876 return const0_rtx;
9878 if (icode == CODE_FOR_altivec_vcfux
9879 || icode == CODE_FOR_altivec_vcfsx
9880 || icode == CODE_FOR_altivec_vctsxs
9881 || icode == CODE_FOR_altivec_vctuxs
9882 || icode == CODE_FOR_altivec_vspltb
9883 || icode == CODE_FOR_altivec_vsplth
9884 || icode == CODE_FOR_altivec_vspltw
9885 || icode == CODE_FOR_spe_evaddiw
9886 || icode == CODE_FOR_spe_evldd
9887 || icode == CODE_FOR_spe_evldh
9888 || icode == CODE_FOR_spe_evldw
9889 || icode == CODE_FOR_spe_evlhhesplat
9890 || icode == CODE_FOR_spe_evlhhossplat
9891 || icode == CODE_FOR_spe_evlhhousplat
9892 || icode == CODE_FOR_spe_evlwhe
9893 || icode == CODE_FOR_spe_evlwhos
9894 || icode == CODE_FOR_spe_evlwhou
9895 || icode == CODE_FOR_spe_evlwhsplat
9896 || icode == CODE_FOR_spe_evlwwsplat
9897 || icode == CODE_FOR_spe_evrlwi
9898 || icode == CODE_FOR_spe_evslwi
9899 || icode == CODE_FOR_spe_evsrwis
9900 || icode == CODE_FOR_spe_evsubifw
9901 || icode == CODE_FOR_spe_evsrwiu)
9903 /* Only allow 5-bit unsigned literals. */
9904 STRIP_NOPS (arg1);
9905 if (TREE_CODE (arg1) != INTEGER_CST
9906 || TREE_INT_CST_LOW (arg1) & ~0x1f)
9908 error ("argument 2 must be a 5-bit unsigned literal");
9909 return const0_rtx;
9913 if (target == 0
9914 || GET_MODE (target) != tmode
9915 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9916 target = gen_reg_rtx (tmode);
9918 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9919 op0 = copy_to_mode_reg (mode0, op0);
9920 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9921 op1 = copy_to_mode_reg (mode1, op1);
9923 pat = GEN_FCN (icode) (target, op0, op1);
9924 if (! pat)
9925 return 0;
9926 emit_insn (pat);
9928 return target;
9931 static rtx
9932 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9934 rtx pat, scratch;
9935 tree cr6_form = CALL_EXPR_ARG (exp, 0);
9936 tree arg0 = CALL_EXPR_ARG (exp, 1);
9937 tree arg1 = CALL_EXPR_ARG (exp, 2);
9938 rtx op0 = expand_normal (arg0);
9939 rtx op1 = expand_normal (arg1);
9940 enum machine_mode tmode = SImode;
9941 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9942 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9943 int cr6_form_int;
9945 if (TREE_CODE (cr6_form) != INTEGER_CST)
9947 error ("argument 1 of __builtin_altivec_predicate must be a constant");
9948 return const0_rtx;
9950 else
9951 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
9953 gcc_assert (mode0 == mode1);
9955 /* If we have invalid arguments, bail out before generating bad rtl. */
9956 if (arg0 == error_mark_node || arg1 == error_mark_node)
9957 return const0_rtx;
9959 if (target == 0
9960 || GET_MODE (target) != tmode
9961 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9962 target = gen_reg_rtx (tmode);
9964 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9965 op0 = copy_to_mode_reg (mode0, op0);
9966 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9967 op1 = copy_to_mode_reg (mode1, op1);
9969 scratch = gen_reg_rtx (mode0);
9971 pat = GEN_FCN (icode) (scratch, op0, op1);
9972 if (! pat)
9973 return 0;
9974 emit_insn (pat);
9976 /* The vec_any* and vec_all* predicates use the same opcodes for two
9977 different operations, but the bits in CR6 will be different
9978 depending on what information we want. So we have to play tricks
9979 with CR6 to get the right bits out.
9981 If you think this is disgusting, look at the specs for the
9982 AltiVec predicates. */
9984 switch (cr6_form_int)
9986 case 0:
9987 emit_insn (gen_cr6_test_for_zero (target));
9988 break;
9989 case 1:
9990 emit_insn (gen_cr6_test_for_zero_reverse (target));
9991 break;
9992 case 2:
9993 emit_insn (gen_cr6_test_for_lt (target));
9994 break;
9995 case 3:
9996 emit_insn (gen_cr6_test_for_lt_reverse (target));
9997 break;
9998 default:
9999 error ("argument 1 of __builtin_altivec_predicate is out of range");
10000 break;
10003 return target;
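/* Example of the cr6_form mapping (the __CR6_* values from altivec.h):
   vec_all_eq reaches here with form 2 (__CR6_LT, "all elements compared
   true"), while vec_any_eq uses form 1 (__CR6_EQ_REV, "not all elements
   compared false").  */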
10006 static rtx
10007 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10009 rtx pat, addr;
10010 tree arg0 = CALL_EXPR_ARG (exp, 0);
10011 tree arg1 = CALL_EXPR_ARG (exp, 1);
10012 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10013 enum machine_mode mode0 = Pmode;
10014 enum machine_mode mode1 = Pmode;
10015 rtx op0 = expand_normal (arg0);
10016 rtx op1 = expand_normal (arg1);
10018 if (icode == CODE_FOR_nothing)
10019 /* Builtin not supported on this processor. */
10020 return 0;
10022 /* If we got invalid arguments bail out before generating bad rtl. */
10023 if (arg0 == error_mark_node || arg1 == error_mark_node)
10024 return const0_rtx;
10026 if (target == 0
10027 || GET_MODE (target) != tmode
10028 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10029 target = gen_reg_rtx (tmode);
10031 op1 = copy_to_mode_reg (mode1, op1);
10033 if (op0 == const0_rtx)
10035 addr = gen_rtx_MEM (tmode, op1);
10037 else
10039 op0 = copy_to_mode_reg (mode0, op0);
10040 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10043 pat = GEN_FCN (icode) (target, addr);
10045 if (! pat)
10046 return 0;
10047 emit_insn (pat);
10049 return target;
10052 static rtx
10053 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10055 rtx pat, addr;
10056 tree arg0 = CALL_EXPR_ARG (exp, 0);
10057 tree arg1 = CALL_EXPR_ARG (exp, 1);
10058 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10059 enum machine_mode mode0 = Pmode;
10060 enum machine_mode mode1 = Pmode;
10061 rtx op0 = expand_normal (arg0);
10062 rtx op1 = expand_normal (arg1);
10064 if (icode == CODE_FOR_nothing)
10065 /* Builtin not supported on this processor. */
10066 return 0;
10068 /* If we got invalid arguments bail out before generating bad rtl. */
10069 if (arg0 == error_mark_node || arg1 == error_mark_node)
10070 return const0_rtx;
10072 if (target == 0
10073 || GET_MODE (target) != tmode
10074 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10075 target = gen_reg_rtx (tmode);
10077 op1 = copy_to_mode_reg (mode1, op1);
10079 if (op0 == const0_rtx)
10081 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10083 else
10085 op0 = copy_to_mode_reg (mode0, op0);
10086 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10089 pat = GEN_FCN (icode) (target, addr);
10091 if (! pat)
10092 return 0;
10093 emit_insn (pat);
10095 return target;
10098 static rtx
10099 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10101 tree arg0 = CALL_EXPR_ARG (exp, 0);
10102 tree arg1 = CALL_EXPR_ARG (exp, 1);
10103 tree arg2 = CALL_EXPR_ARG (exp, 2);
10104 rtx op0 = expand_normal (arg0);
10105 rtx op1 = expand_normal (arg1);
10106 rtx op2 = expand_normal (arg2);
10107 rtx pat;
10108 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10109 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10110 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10112 /* Invalid arguments. Bail before doing anything stoopid! */
10113 if (arg0 == error_mark_node
10114 || arg1 == error_mark_node
10115 || arg2 == error_mark_node)
10116 return const0_rtx;
10118 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10119 op0 = copy_to_mode_reg (mode2, op0);
10120 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10121 op1 = copy_to_mode_reg (mode0, op1);
10122 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10123 op2 = copy_to_mode_reg (mode1, op2);
10125 pat = GEN_FCN (icode) (op1, op2, op0);
10126 if (pat)
10127 emit_insn (pat);
10128 return NULL_RTX;
10131 static rtx
10132 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10134 tree arg0 = CALL_EXPR_ARG (exp, 0);
10135 tree arg1 = CALL_EXPR_ARG (exp, 1);
10136 tree arg2 = CALL_EXPR_ARG (exp, 2);
10137 rtx op0 = expand_normal (arg0);
10138 rtx op1 = expand_normal (arg1);
10139 rtx op2 = expand_normal (arg2);
10140 rtx pat, addr;
10141 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10142 enum machine_mode mode1 = Pmode;
10143 enum machine_mode mode2 = Pmode;
10145 /* Invalid arguments. Bail before doing anything stoopid! */
10146 if (arg0 == error_mark_node
10147 || arg1 == error_mark_node
10148 || arg2 == error_mark_node)
10149 return const0_rtx;
10151 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10152 op0 = copy_to_mode_reg (tmode, op0);
10154 op2 = copy_to_mode_reg (mode2, op2);
10156 if (op1 == const0_rtx)
10158 addr = gen_rtx_MEM (tmode, op2);
10160 else
10162 op1 = copy_to_mode_reg (mode1, op1);
10163 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10166 pat = GEN_FCN (icode) (addr, op0);
10167 if (pat)
10168 emit_insn (pat);
10169 return NULL_RTX;
10172 static rtx
10173 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10175 tree arg0 = CALL_EXPR_ARG (exp, 0);
10176 tree arg1 = CALL_EXPR_ARG (exp, 1);
10177 tree arg2 = CALL_EXPR_ARG (exp, 2);
10178 rtx op0 = expand_normal (arg0);
10179 rtx op1 = expand_normal (arg1);
10180 rtx op2 = expand_normal (arg2);
10181 rtx pat, addr;
10182 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10183 enum machine_mode smode = insn_data[icode].operand[1].mode;
10184 enum machine_mode mode1 = Pmode;
10185 enum machine_mode mode2 = Pmode;
10187 /* Invalid arguments. Bail before doing anything stoopid! */
10188 if (arg0 == error_mark_node
10189 || arg1 == error_mark_node
10190 || arg2 == error_mark_node)
10191 return const0_rtx;
10193 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10194 op0 = copy_to_mode_reg (smode, op0);
10196 op2 = copy_to_mode_reg (mode2, op2);
10198 if (op1 == const0_rtx)
10200 addr = gen_rtx_MEM (tmode, op2);
10202 else
10204 op1 = copy_to_mode_reg (mode1, op1);
10205 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10208 pat = GEN_FCN (icode) (addr, op0);
10209 if (pat)
10210 emit_insn (pat);
10211 return NULL_RTX;
10214 static rtx
10215 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10217 rtx pat;
10218 tree arg0 = CALL_EXPR_ARG (exp, 0);
10219 tree arg1 = CALL_EXPR_ARG (exp, 1);
10220 tree arg2 = CALL_EXPR_ARG (exp, 2);
10221 rtx op0 = expand_normal (arg0);
10222 rtx op1 = expand_normal (arg1);
10223 rtx op2 = expand_normal (arg2);
10224 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10225 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10226 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10227 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10229 if (icode == CODE_FOR_nothing)
10230 /* Builtin not supported on this processor. */
10231 return 0;
10233 /* If we got invalid arguments bail out before generating bad rtl. */
10234 if (arg0 == error_mark_node
10235 || arg1 == error_mark_node
10236 || arg2 == error_mark_node)
10237 return const0_rtx;
10239 /* Check and prepare argument depending on the instruction code.
10241 Note that a switch statement instead of the sequence of tests
10242 would be incorrect as many of the CODE_FOR values could be
10243 CODE_FOR_nothing and that would yield multiple alternatives
10244 with identical values. We'd never reach here at runtime in
10245 this case. */
10246 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10247 || icode == CODE_FOR_altivec_vsldoi_v4si
10248 || icode == CODE_FOR_altivec_vsldoi_v8hi
10249 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10251 /* Only allow 4-bit unsigned literals. */
10252 STRIP_NOPS (arg2);
10253 if (TREE_CODE (arg2) != INTEGER_CST
10254 || TREE_INT_CST_LOW (arg2) & ~0xf)
10256 error ("argument 3 must be a 4-bit unsigned literal");
10257 return const0_rtx;
10260 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10261 || icode == CODE_FOR_vsx_xxpermdi_v2di
10262 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10263 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10264 || icode == CODE_FOR_vsx_xxsldwi_v4si
10265 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10266 || icode == CODE_FOR_vsx_xxsldwi_v2di
10267 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10269 /* Only allow 2-bit unsigned literals. */
10270 STRIP_NOPS (arg2);
10271 if (TREE_CODE (arg2) != INTEGER_CST
10272 || TREE_INT_CST_LOW (arg2) & ~0x3)
10274 error ("argument 3 must be a 2-bit unsigned literal");
10275 return const0_rtx;
10278 else if (icode == CODE_FOR_vsx_set_v2df
10279 || icode == CODE_FOR_vsx_set_v2di)
10281 /* Only allow 1-bit unsigned literals. */
10282 STRIP_NOPS (arg2);
10283 if (TREE_CODE (arg2) != INTEGER_CST
10284 || TREE_INT_CST_LOW (arg2) & ~0x1)
10286 error ("argument 3 must be a 1-bit unsigned literal");
10287 return const0_rtx;
10291 if (target == 0
10292 || GET_MODE (target) != tmode
10293 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10294 target = gen_reg_rtx (tmode);
10296 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10297 op0 = copy_to_mode_reg (mode0, op0);
10298 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10299 op1 = copy_to_mode_reg (mode1, op1);
10300 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10301 op2 = copy_to_mode_reg (mode2, op2);
10303 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10304 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10305 else
10306 pat = GEN_FCN (icode) (target, op0, op1, op2);
10307 if (! pat)
10308 return 0;
10309 emit_insn (pat);
10311 return target;
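/* These immediates are rejected at compile time because they are encoded
   in the instruction word itself; e.g. the shift count of vsldoi lives
   in a 4-bit field, so a call such as vec_sld (a, b, 3) must supply the
   3 as a literal -- there is no register form to fall back on.  */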
10314 /* Expand the lvx builtins. */
10315 static rtx
10316 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10318 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10319 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10320 tree arg0;
10321 enum machine_mode tmode, mode0;
10322 rtx pat, op0;
10323 enum insn_code icode;
10325 switch (fcode)
10327 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10328 icode = CODE_FOR_vector_altivec_load_v16qi;
10329 break;
10330 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10331 icode = CODE_FOR_vector_altivec_load_v8hi;
10332 break;
10333 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10334 icode = CODE_FOR_vector_altivec_load_v4si;
10335 break;
10336 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10337 icode = CODE_FOR_vector_altivec_load_v4sf;
10338 break;
10339 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10340 icode = CODE_FOR_vector_altivec_load_v2df;
10341 break;
10342 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10343 icode = CODE_FOR_vector_altivec_load_v2di;
10344 break;
10345 default:
10346 *expandedp = false;
10347 return NULL_RTX;
10350 *expandedp = true;
10352 arg0 = CALL_EXPR_ARG (exp, 0);
10353 op0 = expand_normal (arg0);
10354 tmode = insn_data[icode].operand[0].mode;
10355 mode0 = insn_data[icode].operand[1].mode;
10357 if (target == 0
10358 || GET_MODE (target) != tmode
10359 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10360 target = gen_reg_rtx (tmode);
10362 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10363 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10365 pat = GEN_FCN (icode) (target, op0);
10366 if (! pat)
10367 return 0;
10368 emit_insn (pat);
10369 return target;
10372 /* Expand the stvx builtins. */
10373 static rtx
10374 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10375 bool *expandedp)
10377 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10378 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10379 tree arg0, arg1;
10380 enum machine_mode mode0, mode1;
10381 rtx pat, op0, op1;
10382 enum insn_code icode;
10384 switch (fcode)
10386 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10387 icode = CODE_FOR_vector_altivec_store_v16qi;
10388 break;
10389 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10390 icode = CODE_FOR_vector_altivec_store_v8hi;
10391 break;
10392 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10393 icode = CODE_FOR_vector_altivec_store_v4si;
10394 break;
10395 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10396 icode = CODE_FOR_vector_altivec_store_v4sf;
10397 break;
10398 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10399 icode = CODE_FOR_vector_altivec_store_v2df;
10400 break;
10401 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10402 icode = CODE_FOR_vector_altivec_store_v2di;
10403 break;
10404 default:
10405 *expandedp = false;
10406 return NULL_RTX;
10409 arg0 = CALL_EXPR_ARG (exp, 0);
10410 arg1 = CALL_EXPR_ARG (exp, 1);
10411 op0 = expand_normal (arg0);
10412 op1 = expand_normal (arg1);
10413 mode0 = insn_data[icode].operand[0].mode;
10414 mode1 = insn_data[icode].operand[1].mode;
10416 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10417 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10418 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10419 op1 = copy_to_mode_reg (mode1, op1);
10421 pat = GEN_FCN (icode) (op0, op1);
10422 if (pat)
10423 emit_insn (pat);
10425 *expandedp = true;
10426 return NULL_RTX;
10429 /* Expand the dst builtins. */
10430 static rtx
10431 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10432 bool *expandedp)
10434 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10435 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10436 tree arg0, arg1, arg2;
10437 enum machine_mode mode0, mode1;
10438 rtx pat, op0, op1, op2;
10439 const struct builtin_description *d;
10440 size_t i;
10442 *expandedp = false;
10444 /* Handle DST variants. */
10445 d = bdesc_dst;
10446 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10447 if (d->code == fcode)
10449 arg0 = CALL_EXPR_ARG (exp, 0);
10450 arg1 = CALL_EXPR_ARG (exp, 1);
10451 arg2 = CALL_EXPR_ARG (exp, 2);
10452 op0 = expand_normal (arg0);
10453 op1 = expand_normal (arg1);
10454 op2 = expand_normal (arg2);
10455 mode0 = insn_data[d->icode].operand[0].mode;
10456 mode1 = insn_data[d->icode].operand[1].mode;
10458 /* Invalid arguments, bail out before generating bad rtl. */
10459 if (arg0 == error_mark_node
10460 || arg1 == error_mark_node
10461 || arg2 == error_mark_node)
10462 return const0_rtx;
10464 *expandedp = true;
10465 STRIP_NOPS (arg2);
10466 if (TREE_CODE (arg2) != INTEGER_CST
10467 || TREE_INT_CST_LOW (arg2) & ~0x3)
10469 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10470 return const0_rtx;
10473 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10474 op0 = copy_to_mode_reg (Pmode, op0);
10475 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10476 op1 = copy_to_mode_reg (mode1, op1);
10478 pat = GEN_FCN (d->icode) (op0, op1, op2);
10479 if (pat != 0)
10480 emit_insn (pat);
10482 return NULL_RTX;
10485 return NULL_RTX;
10488 /* Expand vec_init builtin. */
10489 static rtx
10490 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10492 enum machine_mode tmode = TYPE_MODE (type);
10493 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10494 int i, n_elt = GET_MODE_NUNITS (tmode);
10495 rtvec v = rtvec_alloc (n_elt);
10497 gcc_assert (VECTOR_MODE_P (tmode));
10498 gcc_assert (n_elt == call_expr_nargs (exp));
10500 for (i = 0; i < n_elt; ++i)
10502 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10503 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10506 if (!target || !register_operand (target, tmode))
10507 target = gen_reg_rtx (tmode);
10509 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10510 return target;
10513 /* Return the integer constant in ARG. Constrain it to be in the range
10514 of the subparts of VEC_TYPE; issue an error if not. */
10516 static int
10517 get_element_number (tree vec_type, tree arg)
10519 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10521 if (!host_integerp (arg, 1)
10522 || (elt = tree_low_cst (arg, 1), elt > max))
10524 error ("selector must be an integer constant in the range 0..%wi", max);
10525 return 0;
10528 return elt;
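/* For example, a vector of four ints has TYPE_VECTOR_SUBPARTS == 4, so
   selectors 0..3 are accepted, while vec_extract (v, 4) draws the error
   above and is quietly treated as element 0.  */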
10531 /* Expand vec_set builtin. */
10532 static rtx
10533 altivec_expand_vec_set_builtin (tree exp)
10535 enum machine_mode tmode, mode1;
10536 tree arg0, arg1, arg2;
10537 int elt;
10538 rtx op0, op1;
10540 arg0 = CALL_EXPR_ARG (exp, 0);
10541 arg1 = CALL_EXPR_ARG (exp, 1);
10542 arg2 = CALL_EXPR_ARG (exp, 2);
10544 tmode = TYPE_MODE (TREE_TYPE (arg0));
10545 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10546 gcc_assert (VECTOR_MODE_P (tmode));
10548 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10549 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10550 elt = get_element_number (TREE_TYPE (arg0), arg2);
10552 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10553 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10555 op0 = force_reg (tmode, op0);
10556 op1 = force_reg (mode1, op1);
10558 rs6000_expand_vector_set (op0, op1, elt);
10560 return op0;
10563 /* Expand vec_ext builtin. */
10564 static rtx
10565 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10567 enum machine_mode tmode, mode0;
10568 tree arg0, arg1;
10569 int elt;
10570 rtx op0;
10572 arg0 = CALL_EXPR_ARG (exp, 0);
10573 arg1 = CALL_EXPR_ARG (exp, 1);
10575 op0 = expand_normal (arg0);
10576 elt = get_element_number (TREE_TYPE (arg0), arg1);
10578 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10579 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10580 gcc_assert (VECTOR_MODE_P (mode0));
10582 op0 = force_reg (mode0, op0);
10584 if (optimize || !target || !register_operand (target, tmode))
10585 target = gen_reg_rtx (tmode);
10587 rs6000_expand_vector_extract (target, op0, elt);
10589 return target;
10592 /* Expand the builtin in EXP and store the result in TARGET. Store
10593 true in *EXPANDEDP if we found a builtin to expand. */
10594 static rtx
10595 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10597 const struct builtin_description *d;
10598 size_t i;
10599 enum insn_code icode;
10600 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10601 tree arg0;
10602 rtx op0, pat;
10603 enum machine_mode tmode, mode0;
10604 enum rs6000_builtins fcode
10605 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10607 if (rs6000_overloaded_builtin_p (fcode))
10609 *expandedp = true;
10610 error ("unresolved overload for Altivec builtin %qF", fndecl);
10612 /* Given it is invalid, just generate a normal call. */
10613 return expand_call (exp, target, false);
10616 target = altivec_expand_ld_builtin (exp, target, expandedp);
10617 if (*expandedp)
10618 return target;
10620 target = altivec_expand_st_builtin (exp, target, expandedp);
10621 if (*expandedp)
10622 return target;
10624 target = altivec_expand_dst_builtin (exp, target, expandedp);
10625 if (*expandedp)
10626 return target;
10628 *expandedp = true;
10630 switch (fcode)
10632 case ALTIVEC_BUILTIN_STVX:
10633 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
10634 case ALTIVEC_BUILTIN_STVEBX:
10635 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10636 case ALTIVEC_BUILTIN_STVEHX:
10637 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10638 case ALTIVEC_BUILTIN_STVEWX:
10639 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10640 case ALTIVEC_BUILTIN_STVXL:
10641 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10643 case ALTIVEC_BUILTIN_STVLX:
10644 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10645 case ALTIVEC_BUILTIN_STVLXL:
10646 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10647 case ALTIVEC_BUILTIN_STVRX:
10648 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10649 case ALTIVEC_BUILTIN_STVRXL:
10650 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10652 case VSX_BUILTIN_STXVD2X_V2DF:
10653 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
10654 case VSX_BUILTIN_STXVD2X_V2DI:
10655 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
10656 case VSX_BUILTIN_STXVW4X_V4SF:
10657 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
10658 case VSX_BUILTIN_STXVW4X_V4SI:
10659 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
10660 case VSX_BUILTIN_STXVW4X_V8HI:
10661 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
10662 case VSX_BUILTIN_STXVW4X_V16QI:
10663 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
10665 case ALTIVEC_BUILTIN_MFVSCR:
10666 icode = CODE_FOR_altivec_mfvscr;
10667 tmode = insn_data[icode].operand[0].mode;
10669 if (target == 0
10670 || GET_MODE (target) != tmode
10671 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10672 target = gen_reg_rtx (tmode);
10674 pat = GEN_FCN (icode) (target);
10675 if (! pat)
10676 return 0;
10677 emit_insn (pat);
10678 return target;
10680 case ALTIVEC_BUILTIN_MTVSCR:
10681 icode = CODE_FOR_altivec_mtvscr;
10682 arg0 = CALL_EXPR_ARG (exp, 0);
10683 op0 = expand_normal (arg0);
10684 mode0 = insn_data[icode].operand[0].mode;
10686 /* If we got invalid arguments bail out before generating bad rtl. */
10687 if (arg0 == error_mark_node)
10688 return const0_rtx;
10690 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10691 op0 = copy_to_mode_reg (mode0, op0);
10693 pat = GEN_FCN (icode) (op0);
10694 if (pat)
10695 emit_insn (pat);
10696 return NULL_RTX;
10698 case ALTIVEC_BUILTIN_DSSALL:
10699 emit_insn (gen_altivec_dssall ());
10700 return NULL_RTX;
10702 case ALTIVEC_BUILTIN_DSS:
10703 icode = CODE_FOR_altivec_dss;
10704 arg0 = CALL_EXPR_ARG (exp, 0);
10705 STRIP_NOPS (arg0);
10706 op0 = expand_normal (arg0);
10707 mode0 = insn_data[icode].operand[0].mode;
10709 /* If we got invalid arguments bail out before generating bad rtl. */
10710 if (arg0 == error_mark_node)
10711 return const0_rtx;
10713 if (TREE_CODE (arg0) != INTEGER_CST
10714 || TREE_INT_CST_LOW (arg0) & ~0x3)
10716 error ("argument to dss must be a 2-bit unsigned literal");
10717 return const0_rtx;
10720 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10721 op0 = copy_to_mode_reg (mode0, op0);
10723 emit_insn (gen_altivec_dss (op0));
10724 return NULL_RTX;
10726 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10727 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10728 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10729 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10730 case VSX_BUILTIN_VEC_INIT_V2DF:
10731 case VSX_BUILTIN_VEC_INIT_V2DI:
10732 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10734 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10735 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10736 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10737 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10738 case VSX_BUILTIN_VEC_SET_V2DF:
10739 case VSX_BUILTIN_VEC_SET_V2DI:
10740 return altivec_expand_vec_set_builtin (exp);
10742 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10743 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10744 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10745 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10746 case VSX_BUILTIN_VEC_EXT_V2DF:
10747 case VSX_BUILTIN_VEC_EXT_V2DI:
10748 return altivec_expand_vec_ext_builtin (exp, target);
10750 default:
10751 break;
10752 /* Fall through. */
10755 /* Expand abs* operations. */
10756 d = bdesc_abs;
10757 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10758 if (d->code == fcode)
10759 return altivec_expand_abs_builtin (d->icode, exp, target);
10761 /* Expand the AltiVec predicates. */
10762 d = bdesc_altivec_preds;
10763 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
10764 if (d->code == fcode)
10765 return altivec_expand_predicate_builtin (d->icode, exp, target);
10767 /* LV* are funky. We initialized them differently. */
10768 switch (fcode)
10770 case ALTIVEC_BUILTIN_LVSL:
10771 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10772 exp, target, false);
10773 case ALTIVEC_BUILTIN_LVSR:
10774 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10775 exp, target, false);
10776 case ALTIVEC_BUILTIN_LVEBX:
10777 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10778 exp, target, false);
10779 case ALTIVEC_BUILTIN_LVEHX:
10780 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10781 exp, target, false);
10782 case ALTIVEC_BUILTIN_LVEWX:
10783 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10784 exp, target, false);
10785 case ALTIVEC_BUILTIN_LVXL:
10786 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10787 exp, target, false);
10788 case ALTIVEC_BUILTIN_LVX:
10789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
10790 exp, target, false);
10791 case ALTIVEC_BUILTIN_LVLX:
10792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10793 exp, target, true);
10794 case ALTIVEC_BUILTIN_LVLXL:
10795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10796 exp, target, true);
10797 case ALTIVEC_BUILTIN_LVRX:
10798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10799 exp, target, true);
10800 case ALTIVEC_BUILTIN_LVRXL:
10801 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10802 exp, target, true);
10803 case VSX_BUILTIN_LXVD2X_V2DF:
10804 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
10805 exp, target, false);
10806 case VSX_BUILTIN_LXVD2X_V2DI:
10807 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
10808 exp, target, false);
10809 case VSX_BUILTIN_LXVW4X_V4SF:
10810 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
10811 exp, target, false);
10812 case VSX_BUILTIN_LXVW4X_V4SI:
10813 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
10814 exp, target, false);
10815 case VSX_BUILTIN_LXVW4X_V8HI:
10816 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
10817 exp, target, false);
10818 case VSX_BUILTIN_LXVW4X_V16QI:
10819 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
10820 exp, target, false);
10821 break;
10822 default:
10823 break;
10824 /* Fall through. */
10827 *expandedp = false;
10828 return NULL_RTX;
10831 /* Expand the builtin in EXP and store the result in TARGET. Store
10832 true in *EXPANDEDP if we found a builtin to expand. */
10833 static rtx
10834 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10836 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10837 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10838 const struct builtin_description *d;
10839 size_t i;
10841 *expandedp = true;
10843 switch (fcode)
10845 case PAIRED_BUILTIN_STX:
10846 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10847 case PAIRED_BUILTIN_LX:
10848 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10849 default:
10850 break;
10851 /* Fall through. */
10854 /* Expand the paired predicates. */
10855 d = bdesc_paired_preds;
10856 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10857 if (d->code == fcode)
10858 return paired_expand_predicate_builtin (d->icode, exp, target);
10860 *expandedp = false;
10861 return NULL_RTX;
10864 /* Binops that need to be initialized manually, but can be expanded
10865 automagically by rs6000_expand_binop_builtin. */
10866 static const struct builtin_description bdesc_2arg_spe[] =
10868 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
10869 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
10870 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
10871 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
10872 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
10873 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
10874 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
10875 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
10876 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
10877 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
10878 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
10879 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
10880 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
10881 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
10882 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
10883 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
10884 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
10885 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
10886 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
10887 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
10888 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
10889 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
10892 /* Expand the builtin in EXP and store the result in TARGET. Store
10893 true in *EXPANDEDP if we found a builtin to expand.
10895 This expands the SPE builtins that are not simple unary and binary
10896 operations. */
10897 static rtx
10898 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
10900 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10901 tree arg1, arg0;
10902 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10903 enum insn_code icode;
10904 enum machine_mode tmode, mode0;
10905 rtx pat, op0;
10906 const struct builtin_description *d;
10907 size_t i;
10909 *expandedp = true;
10911 /* Syntax check for a 5-bit unsigned immediate. */
10912 switch (fcode)
10914 case SPE_BUILTIN_EVSTDD:
10915 case SPE_BUILTIN_EVSTDH:
10916 case SPE_BUILTIN_EVSTDW:
10917 case SPE_BUILTIN_EVSTWHE:
10918 case SPE_BUILTIN_EVSTWHO:
10919 case SPE_BUILTIN_EVSTWWE:
10920 case SPE_BUILTIN_EVSTWWO:
10921 arg1 = CALL_EXPR_ARG (exp, 2);
10922 if (TREE_CODE (arg1) != INTEGER_CST
10923 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10925 error ("argument 2 must be a 5-bit unsigned literal");
10926 return const0_rtx;
10928 break;
10929 default:
10930 break;
10933 /* The evsplat*i instructions are not quite generic. */
10934 switch (fcode)
10936 case SPE_BUILTIN_EVSPLATFI:
10937 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
10938 exp, target);
10939 case SPE_BUILTIN_EVSPLATI:
10940 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
10941 exp, target);
10942 default:
10943 break;
10946 d = bdesc_2arg_spe;
10947 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
10948 if (d->code == fcode)
10949 return rs6000_expand_binop_builtin (d->icode, exp, target);
10951 d = bdesc_spe_predicates;
10952 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
10953 if (d->code == fcode)
10954 return spe_expand_predicate_builtin (d->icode, exp, target);
10956 d = bdesc_spe_evsel;
10957 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
10958 if (d->code == fcode)
10959 return spe_expand_evsel_builtin (d->icode, exp, target);
10961 switch (fcode)
10963 case SPE_BUILTIN_EVSTDDX:
10964 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
10965 case SPE_BUILTIN_EVSTDHX:
10966 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
10967 case SPE_BUILTIN_EVSTDWX:
10968 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
10969 case SPE_BUILTIN_EVSTWHEX:
10970 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
10971 case SPE_BUILTIN_EVSTWHOX:
10972 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
10973 case SPE_BUILTIN_EVSTWWEX:
10974 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
10975 case SPE_BUILTIN_EVSTWWOX:
10976 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
10977 case SPE_BUILTIN_EVSTDD:
10978 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
10979 case SPE_BUILTIN_EVSTDH:
10980 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
10981 case SPE_BUILTIN_EVSTDW:
10982 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
10983 case SPE_BUILTIN_EVSTWHE:
10984 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
10985 case SPE_BUILTIN_EVSTWHO:
10986 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
10987 case SPE_BUILTIN_EVSTWWE:
10988 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
10989 case SPE_BUILTIN_EVSTWWO:
10990 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
10991 case SPE_BUILTIN_MFSPEFSCR:
10992 icode = CODE_FOR_spe_mfspefscr;
10993 tmode = insn_data[icode].operand[0].mode;
10995 if (target == 0
10996 || GET_MODE (target) != tmode
10997 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10998 target = gen_reg_rtx (tmode);
11000 pat = GEN_FCN (icode) (target);
11001 if (! pat)
11002 return 0;
11003 emit_insn (pat);
11004 return target;
11005 case SPE_BUILTIN_MTSPEFSCR:
11006 icode = CODE_FOR_spe_mtspefscr;
11007 arg0 = CALL_EXPR_ARG (exp, 0);
11008 op0 = expand_normal (arg0);
11009 mode0 = insn_data[icode].operand[0].mode;
11011 if (arg0 == error_mark_node)
11012 return const0_rtx;
11014 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11015 op0 = copy_to_mode_reg (mode0, op0);
11017 pat = GEN_FCN (icode) (op0);
11018 if (pat)
11019 emit_insn (pat);
11020 return NULL_RTX;
11021 default:
11022 break;
11023 }
11025 *expandedp = false;
11026 return NULL_RTX;
11027 }
11029 static rtx
11030 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11031 {
11032 rtx pat, scratch, tmp;
11033 tree form = CALL_EXPR_ARG (exp, 0);
11034 tree arg0 = CALL_EXPR_ARG (exp, 1);
11035 tree arg1 = CALL_EXPR_ARG (exp, 2);
11036 rtx op0 = expand_normal (arg0);
11037 rtx op1 = expand_normal (arg1);
11038 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11039 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11040 int form_int;
11041 enum rtx_code code;
11043 if (TREE_CODE (form) != INTEGER_CST)
11044 {
11045 error ("argument 1 of __builtin_paired_predicate must be a constant");
11046 return const0_rtx;
11047 }
11048 else
11049 form_int = TREE_INT_CST_LOW (form);
11051 gcc_assert (mode0 == mode1);
11053 if (arg0 == error_mark_node || arg1 == error_mark_node)
11054 return const0_rtx;
11056 if (target == 0
11057 || GET_MODE (target) != SImode
11058 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
11059 target = gen_reg_rtx (SImode);
11060 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
11061 op0 = copy_to_mode_reg (mode0, op0);
11062 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
11063 op1 = copy_to_mode_reg (mode1, op1);
11065 scratch = gen_reg_rtx (CCFPmode);
11067 pat = GEN_FCN (icode) (scratch, op0, op1);
11068 if (!pat)
11069 return const0_rtx;
11071 emit_insn (pat);
11073 switch (form_int)
11074 {
11075 /* LT bit. */
11076 case 0:
11077 code = LT;
11078 break;
11079 /* GT bit. */
11080 case 1:
11081 code = GT;
11082 break;
11083 /* EQ bit. */
11084 case 2:
11085 code = EQ;
11086 break;
11087 /* UN bit. */
11088 case 3:
11089 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11090 return target;
11091 default:
11092 error ("argument 1 of __builtin_paired_predicate is out of range");
11093 return const0_rtx;
11094 }
11096 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11097 emit_move_insn (target, tmp);
11098 return target;
11099 }
11101 static rtx
11102 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11103 {
11104 rtx pat, scratch, tmp;
11105 tree form = CALL_EXPR_ARG (exp, 0);
11106 tree arg0 = CALL_EXPR_ARG (exp, 1);
11107 tree arg1 = CALL_EXPR_ARG (exp, 2);
11108 rtx op0 = expand_normal (arg0);
11109 rtx op1 = expand_normal (arg1);
11110 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11111 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11112 int form_int;
11113 enum rtx_code code;
11115 if (TREE_CODE (form) != INTEGER_CST)
11116 {
11117 error ("argument 1 of __builtin_spe_predicate must be a constant");
11118 return const0_rtx;
11119 }
11120 else
11121 form_int = TREE_INT_CST_LOW (form);
11123 gcc_assert (mode0 == mode1);
11125 if (arg0 == error_mark_node || arg1 == error_mark_node)
11126 return const0_rtx;
11128 if (target == 0
11129 || GET_MODE (target) != SImode
11130 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
11131 target = gen_reg_rtx (SImode);
11133 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11134 op0 = copy_to_mode_reg (mode0, op0);
11135 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11136 op1 = copy_to_mode_reg (mode1, op1);
11138 scratch = gen_reg_rtx (CCmode);
11140 pat = GEN_FCN (icode) (scratch, op0, op1);
11141 if (! pat)
11142 return const0_rtx;
11143 emit_insn (pat);
11145 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
11146 _lower_. We use one compare, but look in different bits of the
11147 CR for each variant.
11149 There are 2 elements in each SPE simd type (upper/lower). The CR
11150 bits are set as follows:
11152 BIT 0 | BIT 1 | BIT 2   | BIT 3
11153 U     | L     | (U | L) | (U & L)
11155 So, for an "all" relationship, BIT 3 would be set.
11156 For an "any" relationship, BIT 2 would be set. Etc.
11158 Following traditional nomenclature, these bits map to:
11160 BIT 0 | BIT 1 | BIT 2 | BIT 3
11161 LT    | GT    | EQ    | OV
11163 Later, we will generate rtl to look in the OV/EQ/LT/GT bits.
11164 */
11166 switch (form_int)
11167 {
11168 /* All variant. OV bit. */
11169 case 0:
11170 /* We need to get to the OV bit, which is the ORDERED bit. We
11171 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11172 that's ugly and will make validate_condition_mode die.
11173 So let's just use another pattern. */
11174 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11175 return target;
11176 /* Any variant. EQ bit. */
11177 case 1:
11178 code = EQ;
11179 break;
11180 /* Upper variant. LT bit. */
11181 case 2:
11182 code = LT;
11183 break;
11184 /* Lower variant. GT bit. */
11185 case 3:
11186 code = GT;
11187 break;
11188 default:
11189 error ("argument 1 of __builtin_spe_predicate is out of range");
11190 return const0_rtx;
11191 }
11193 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11194 emit_move_insn (target, tmp);
11196 return target;
11197 }
11199 /* The evsel builtins look like this:
11201 e = __builtin_spe_evsel_OP (a, b, c, d);
11203 and work like this:
11205 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11206 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
11207 */
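/* Illustrative use, assuming -mspe (the builtin name shown is only an
   example; the exact set of OP variants comes from the bdesc_spe_evsel
   table):

     e = __builtin_spe_evsel_gts (a, b, c, d);

   yields c[i] where a[i] > b[i] (signed compare) and d[i] otherwise.  */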
11209 static rtx
11210 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11211 {
11212 rtx pat, scratch;
11213 tree arg0 = CALL_EXPR_ARG (exp, 0);
11214 tree arg1 = CALL_EXPR_ARG (exp, 1);
11215 tree arg2 = CALL_EXPR_ARG (exp, 2);
11216 tree arg3 = CALL_EXPR_ARG (exp, 3);
11217 rtx op0 = expand_normal (arg0);
11218 rtx op1 = expand_normal (arg1);
11219 rtx op2 = expand_normal (arg2);
11220 rtx op3 = expand_normal (arg3);
11221 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11222 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11224 gcc_assert (mode0 == mode1);
11226 if (arg0 == error_mark_node || arg1 == error_mark_node
11227 || arg2 == error_mark_node || arg3 == error_mark_node)
11228 return const0_rtx;
11230 if (target == 0
11231 || GET_MODE (target) != mode0
11232 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11233 target = gen_reg_rtx (mode0);
11235 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11236 op0 = copy_to_mode_reg (mode0, op0);
11237 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11238 op1 = copy_to_mode_reg (mode0, op1);
11239 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11240 op2 = copy_to_mode_reg (mode0, op2);
11241 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11242 op3 = copy_to_mode_reg (mode0, op3);
11244 /* Generate the compare. */
11245 scratch = gen_reg_rtx (CCmode);
11246 pat = GEN_FCN (icode) (scratch, op0, op1);
11247 if (! pat)
11248 return const0_rtx;
11249 emit_insn (pat);
11251 if (mode0 == V2SImode)
11252 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11253 else
11254 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11256 return target;
11257 }
11259 /* Raise an error message for a builtin function that is called without the
11260 appropriate target options being set. */
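/* For example (illustrative), calling a VSX builtin in a function compiled
   without -mvsx lands here and gets the -mvsx message below.  */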
11262 static void
11263 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11264 {
11265 size_t uns_fncode = (size_t)fncode;
11266 const char *name = rs6000_builtin_info[uns_fncode].name;
11267 unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;
11269 gcc_assert (name != NULL);
11270 if ((fnmask & RS6000_BTM_CELL) != 0)
11271 error ("builtin function %s is only valid for the cell processor", name);
11272 else if ((fnmask & RS6000_BTM_VSX) != 0)
11273 error ("builtin function %s requires the -mvsx option", name);
11274 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11275 error ("builtin function %s requires the -maltivec option", name);
11276 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11277 error ("builtin function %s requires the -mpaired option", name);
11278 else if ((fnmask & RS6000_BTM_SPE) != 0)
11279 error ("builtin function %s requires the -mspe option", name);
11280 else
11281 error ("builtin function %s is not supported with the current options",
11282 name);
11283 }
11285 /* Expand an expression EXP that calls a built-in function,
11286 with result going to TARGET if that's convenient
11287 (and in mode MODE if that's convenient).
11288 SUBTARGET may be used as the target for computing one of EXP's operands.
11289 IGNORE is nonzero if the value is to be ignored. */
11291 static rtx
11292 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11293 enum machine_mode mode ATTRIBUTE_UNUSED,
11294 int ignore ATTRIBUTE_UNUSED)
11295 {
11296 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11297 enum rs6000_builtins fcode
11298 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11299 size_t uns_fcode = (size_t)fcode;
11300 const struct builtin_description *d;
11301 size_t i;
11302 rtx ret;
11303 bool success;
11304 unsigned mask = rs6000_builtin_info[uns_fcode].mask;
11305 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
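  /* Worked example: a builtin whose mask is RS6000_BTM_ALTIVEC | RS6000_BTM_VSX
     is valid only when both feature bits are set in rs6000_builtin_mask.  */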
11307 if (TARGET_DEBUG_BUILTIN)
11308 {
11309 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11310 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11311 const char *name2 = ((icode != CODE_FOR_nothing)
11312 ? get_insn_name ((int)icode)
11313 : "nothing");
11314 const char *name3;
11316 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11317 {
11318 default: name3 = "unknown"; break;
11319 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11320 case RS6000_BTC_UNARY: name3 = "unary"; break;
11321 case RS6000_BTC_BINARY: name3 = "binary"; break;
11322 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11323 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11324 case RS6000_BTC_ABS: name3 = "abs"; break;
11325 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11326 case RS6000_BTC_DST: name3 = "dst"; break;
11327 }
11330 fprintf (stderr,
11331 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11332 (name1) ? name1 : "---", fcode,
11333 (name2) ? name2 : "---", (int)icode,
11334 name3,
11335 func_valid_p ? "" : ", not valid");
11336 }
11338 if (!func_valid_p)
11339 {
11340 rs6000_invalid_builtin (fcode);
11342 /* Given it is invalid, just generate a normal call. */
11343 return expand_call (exp, target, ignore);
11344 }
11346 switch (fcode)
11347 {
11348 case RS6000_BUILTIN_RECIP:
11349 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11351 case RS6000_BUILTIN_RECIPF:
11352 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11354 case RS6000_BUILTIN_RSQRTF:
11355 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11357 case RS6000_BUILTIN_RSQRT:
11358 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11360 case POWER7_BUILTIN_BPERMD:
11361 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11362 ? CODE_FOR_bpermd_di
11363 : CODE_FOR_bpermd_si), exp, target);
11365 case RS6000_BUILTIN_GET_TB:
11366 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
11367 target);
11369 case RS6000_BUILTIN_MFTB:
11370 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
11371 ? CODE_FOR_rs6000_mftb_di
11372 : CODE_FOR_rs6000_mftb_si),
11373 target);
11375 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11376 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11377 {
11378 int icode = (int) CODE_FOR_altivec_lvsr;
11379 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11380 enum machine_mode mode = insn_data[icode].operand[1].mode;
11381 tree arg;
11382 rtx op, addr, pat;
11384 gcc_assert (TARGET_ALTIVEC);
11386 arg = CALL_EXPR_ARG (exp, 0);
11387 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11388 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11389 addr = memory_address (mode, op);
11390 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11391 op = addr;
11392 else
11393 {
11394 /* For the load case we need to negate the address. */
11395 op = gen_reg_rtx (GET_MODE (addr));
11396 emit_insn (gen_rtx_SET (VOIDmode, op,
11397 gen_rtx_NEG (GET_MODE (addr), addr)));
11398 }
11399 op = gen_rtx_MEM (mode, op);
11401 if (target == 0
11402 || GET_MODE (target) != tmode
11403 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11404 target = gen_reg_rtx (tmode);
11406 /*pat = gen_altivec_lvsr (target, op);*/
11407 pat = GEN_FCN (icode) (target, op);
11408 if (!pat)
11409 return 0;
11410 emit_insn (pat);
11412 return target;
11413 }
11415 case ALTIVEC_BUILTIN_VCFUX:
11416 case ALTIVEC_BUILTIN_VCFSX:
11417 case ALTIVEC_BUILTIN_VCTUXS:
11418 case ALTIVEC_BUILTIN_VCTSXS:
11419 /* FIXME: There's got to be a nicer way to handle this case than
11420 constructing a new CALL_EXPR. */
11421 if (call_expr_nargs (exp) == 1)
11422 {
11423 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11424 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11425 }
11426 break;
11428 default:
11429 break;
11430 }
11432 if (TARGET_ALTIVEC)
11433 {
11434 ret = altivec_expand_builtin (exp, target, &success);
11436 if (success)
11437 return ret;
11438 }
11439 if (TARGET_SPE)
11440 {
11441 ret = spe_expand_builtin (exp, target, &success);
11443 if (success)
11444 return ret;
11445 }
11446 if (TARGET_PAIRED_FLOAT)
11447 {
11448 ret = paired_expand_builtin (exp, target, &success);
11450 if (success)
11451 return ret;
11452 }
11454 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11456 /* Handle simple unary operations. */
11457 d = bdesc_1arg;
11458 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11459 if (d->code == fcode)
11460 return rs6000_expand_unop_builtin (d->icode, exp, target);
11462 /* Handle simple binary operations. */
11463 d = bdesc_2arg;
11464 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11465 if (d->code == fcode)
11466 return rs6000_expand_binop_builtin (d->icode, exp, target);
11468 /* Handle simple ternary operations. */
11469 d = bdesc_3arg;
11470 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11471 if (d->code == fcode)
11472 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11474 gcc_unreachable ();
11475 }
11477 static void
11478 rs6000_init_builtins (void)
11479 {
11480 tree tdecl;
11481 tree ftype;
11482 enum machine_mode mode;
11484 if (TARGET_DEBUG_BUILTIN)
11485 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11486 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11487 (TARGET_SPE) ? ", spe" : "",
11488 (TARGET_ALTIVEC) ? ", altivec" : "",
11489 (TARGET_VSX) ? ", vsx" : "");
11491 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11492 V2SF_type_node = build_vector_type (float_type_node, 2);
11493 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11494 V2DF_type_node = build_vector_type (double_type_node, 2);
11495 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11496 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11497 V4SF_type_node = build_vector_type (float_type_node, 4);
11498 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11499 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11501 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11502 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11503 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11504 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11506 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11507 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11508 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11509 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11511 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11512 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11513 'vector unsigned short'. */
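/* E.g. (illustrative) overload resolution must be able to tell
   vector bool char from vector unsigned char, which is why distinct
   type copies are built below rather than aliases.  */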
11515 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11516 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11517 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11518 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11519 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11521 long_integer_type_internal_node = long_integer_type_node;
11522 long_unsigned_type_internal_node = long_unsigned_type_node;
11523 long_long_integer_type_internal_node = long_long_integer_type_node;
11524 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11525 intQI_type_internal_node = intQI_type_node;
11526 uintQI_type_internal_node = unsigned_intQI_type_node;
11527 intHI_type_internal_node = intHI_type_node;
11528 uintHI_type_internal_node = unsigned_intHI_type_node;
11529 intSI_type_internal_node = intSI_type_node;
11530 uintSI_type_internal_node = unsigned_intSI_type_node;
11531 intDI_type_internal_node = intDI_type_node;
11532 uintDI_type_internal_node = unsigned_intDI_type_node;
11533 float_type_internal_node = float_type_node;
11534 double_type_internal_node = double_type_node;
11535 void_type_internal_node = void_type_node;
11537 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11538 tree type node. */
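  /* The second index selects signedness: [mode][0] is the signed (or only)
     type for a mode, [mode][1] the unsigned variant; builtin_function_type
     consults this table.  */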
11539 builtin_mode_to_type[QImode][0] = integer_type_node;
11540 builtin_mode_to_type[HImode][0] = integer_type_node;
11541 builtin_mode_to_type[SImode][0] = intSI_type_node;
11542 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11543 builtin_mode_to_type[DImode][0] = intDI_type_node;
11544 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11545 builtin_mode_to_type[SFmode][0] = float_type_node;
11546 builtin_mode_to_type[DFmode][0] = double_type_node;
11547 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11548 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11549 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11550 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11551 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11552 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11553 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11554 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11555 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11556 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11557 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11558 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11559 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11561 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11562 TYPE_NAME (bool_char_type_node) = tdecl;
11564 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11565 TYPE_NAME (bool_short_type_node) = tdecl;
11567 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11568 TYPE_NAME (bool_int_type_node) = tdecl;
11570 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11571 TYPE_NAME (pixel_type_node) = tdecl;
11573 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11574 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11575 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11576 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11577 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11579 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11580 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11582 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11583 TYPE_NAME (V16QI_type_node) = tdecl;
11585 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11586 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
11588 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
11589 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11591 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
11592 TYPE_NAME (V8HI_type_node) = tdecl;
11594 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
11595 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11597 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
11598 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11600 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
11601 TYPE_NAME (V4SI_type_node) = tdecl;
11603 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
11604 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11606 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
11607 TYPE_NAME (V4SF_type_node) = tdecl;
11609 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
11610 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11612 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
11613 TYPE_NAME (V2DF_type_node) = tdecl;
11615 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
11616 TYPE_NAME (V2DI_type_node) = tdecl;
11618 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
11619 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11621 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
11622 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11624 /* Paired and SPE builtins are only available if you build a compiler with
11625 the appropriate options, so only create those builtins with the
11626 appropriate compiler option. Create AltiVec and VSX builtins on machines
11627 with at least the general purpose extensions (970 and newer) to allow the
11628 use of the target attribute. */
11629 if (TARGET_PAIRED_FLOAT)
11630 paired_init_builtins ();
11631 if (TARGET_SPE)
11632 spe_init_builtins ();
11633 if (TARGET_EXTRA_BUILTINS)
11634 altivec_init_builtins ();
11635 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
11636 rs6000_common_init_builtins ();
11638 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11639 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
11640 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
11642 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11643 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
11644 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
11646 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
11647 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
11648 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
11650 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11651 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
11652 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
11654 mode = (TARGET_64BIT) ? DImode : SImode;
11655 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11656 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
11657 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
11659 ftype = build_function_type_list (unsigned_intDI_type_node,
11660 NULL_TREE);
11661 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
11663 if (TARGET_64BIT)
11664 ftype = build_function_type_list (unsigned_intDI_type_node,
11665 NULL_TREE);
11666 else
11667 ftype = build_function_type_list (unsigned_intSI_type_node,
11668 NULL_TREE);
11669 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
11671 #if TARGET_XCOFF
11672 /* AIX libm provides clog as __clog. */
11673 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
11674 set_user_assembler_name (tdecl, "__clog");
11675 #endif
11677 #ifdef SUBTARGET_INIT_BUILTINS
11678 SUBTARGET_INIT_BUILTINS;
11679 #endif
11680 }
11682 /* Returns the rs6000 builtin decl for CODE. */
11684 static tree
11685 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11686 {
11687 unsigned fnmask;
11689 if (code >= RS6000_BUILTIN_COUNT)
11690 return error_mark_node;
11692 fnmask = rs6000_builtin_info[code].mask;
11693 if ((fnmask & rs6000_builtin_mask) != fnmask)
11694 {
11695 rs6000_invalid_builtin ((enum rs6000_builtins)code);
11696 return error_mark_node;
11697 }
11699 return rs6000_builtin_decls[code];
11700 }
11702 static void
11703 spe_init_builtins (void)
11704 {
11705 tree puint_type_node = build_pointer_type (unsigned_type_node);
11706 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11707 const struct builtin_description *d;
11708 size_t i;
11710 tree v2si_ftype_4_v2si
11711 = build_function_type_list (opaque_V2SI_type_node,
11712 opaque_V2SI_type_node,
11713 opaque_V2SI_type_node,
11714 opaque_V2SI_type_node,
11715 opaque_V2SI_type_node,
11716 NULL_TREE);
11718 tree v2sf_ftype_4_v2sf
11719 = build_function_type_list (opaque_V2SF_type_node,
11720 opaque_V2SF_type_node,
11721 opaque_V2SF_type_node,
11722 opaque_V2SF_type_node,
11723 opaque_V2SF_type_node,
11724 NULL_TREE);
11726 tree int_ftype_int_v2si_v2si
11727 = build_function_type_list (integer_type_node,
11728 integer_type_node,
11729 opaque_V2SI_type_node,
11730 opaque_V2SI_type_node,
11731 NULL_TREE);
11733 tree int_ftype_int_v2sf_v2sf
11734 = build_function_type_list (integer_type_node,
11735 integer_type_node,
11736 opaque_V2SF_type_node,
11737 opaque_V2SF_type_node,
11738 NULL_TREE);
11740 tree void_ftype_v2si_puint_int
11741 = build_function_type_list (void_type_node,
11742 opaque_V2SI_type_node,
11743 puint_type_node,
11744 integer_type_node,
11745 NULL_TREE);
11747 tree void_ftype_v2si_puint_char
11748 = build_function_type_list (void_type_node,
11749 opaque_V2SI_type_node,
11750 puint_type_node,
11751 char_type_node,
11752 NULL_TREE);
11754 tree void_ftype_v2si_pv2si_int
11755 = build_function_type_list (void_type_node,
11756 opaque_V2SI_type_node,
11757 opaque_p_V2SI_type_node,
11758 integer_type_node,
11759 NULL_TREE);
11761 tree void_ftype_v2si_pv2si_char
11762 = build_function_type_list (void_type_node,
11763 opaque_V2SI_type_node,
11764 opaque_p_V2SI_type_node,
11765 char_type_node,
11766 NULL_TREE);
11768 tree void_ftype_int
11769 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11771 tree int_ftype_void
11772 = build_function_type_list (integer_type_node, NULL_TREE);
11774 tree v2si_ftype_pv2si_int
11775 = build_function_type_list (opaque_V2SI_type_node,
11776 opaque_p_V2SI_type_node,
11777 integer_type_node,
11778 NULL_TREE);
11780 tree v2si_ftype_puint_int
11781 = build_function_type_list (opaque_V2SI_type_node,
11782 puint_type_node,
11783 integer_type_node,
11784 NULL_TREE);
11786 tree v2si_ftype_pushort_int
11787 = build_function_type_list (opaque_V2SI_type_node,
11788 pushort_type_node,
11789 integer_type_node,
11790 NULL_TREE);
11792 tree v2si_ftype_signed_char
11793 = build_function_type_list (opaque_V2SI_type_node,
11794 signed_char_type_node,
11795 NULL_TREE);
11797 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
11799 /* Initialize irregular SPE builtins. */
11801 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11802 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11803 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11804 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11805 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11806 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11807 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11808 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11809 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11810 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11811 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11812 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11813 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11814 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11815 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11816 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11817 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11818 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11820 /* Loads. */
11821 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11822 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11823 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11824 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11825 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11826 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11827 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11828 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11829 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11830 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11831 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11832 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11833 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11834 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11835 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11836 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11837 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11838 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11839 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11840 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11841 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11842 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11844 /* Predicates. */
11845 d = bdesc_spe_predicates;
11846 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11847 {
11848 tree type;
11850 switch (insn_data[d->icode].operand[1].mode)
11851 {
11852 case V2SImode:
11853 type = int_ftype_int_v2si_v2si;
11854 break;
11855 case V2SFmode:
11856 type = int_ftype_int_v2sf_v2sf;
11857 break;
11858 default:
11859 gcc_unreachable ();
11860 }
11862 def_builtin (d->name, type, d->code);
11863 }
11865 /* Evsel predicates. */
11866 d = bdesc_spe_evsel;
11867 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
11868 {
11869 tree type;
11871 switch (insn_data[d->icode].operand[1].mode)
11872 {
11873 case V2SImode:
11874 type = v2si_ftype_4_v2si;
11875 break;
11876 case V2SFmode:
11877 type = v2sf_ftype_4_v2sf;
11878 break;
11879 default:
11880 gcc_unreachable ();
11881 }
11883 def_builtin (d->name, type, d->code);
11884 }
11885 }
11887 static void
11888 paired_init_builtins (void)
11889 {
11890 const struct builtin_description *d;
11891 size_t i;
11893 tree int_ftype_int_v2sf_v2sf
11894 = build_function_type_list (integer_type_node,
11895 integer_type_node,
11896 V2SF_type_node,
11897 V2SF_type_node,
11898 NULL_TREE);
11899 tree pcfloat_type_node =
11900 build_pointer_type (build_qualified_type
11901 (float_type_node, TYPE_QUAL_CONST));
11903 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
11904 long_integer_type_node,
11905 pcfloat_type_node,
11906 NULL_TREE);
11907 tree void_ftype_v2sf_long_pcfloat =
11908 build_function_type_list (void_type_node,
11909 V2SF_type_node,
11910 long_integer_type_node,
11911 pcfloat_type_node,
11912 NULL_TREE);
11915 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
11916 PAIRED_BUILTIN_LX);
11919 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
11920 PAIRED_BUILTIN_STX);
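  /* Illustrative use, assuming -mpaired (v has the two-float V2SF type):

       v = __builtin_paired_lx (offset, fptr);
       __builtin_paired_stx (v, offset, fptr);  */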
11922 /* Predicates. */
11923 d = bdesc_paired_preds;
11924 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
11925 {
11926 tree type;
11928 if (TARGET_DEBUG_BUILTIN)
11929 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
11930 (int)i, get_insn_name (d->icode), (int)d->icode,
11931 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
11933 switch (insn_data[d->icode].operand[1].mode)
11934 {
11935 case V2SFmode:
11936 type = int_ftype_int_v2sf_v2sf;
11937 break;
11938 default:
11939 gcc_unreachable ();
11940 }
11942 def_builtin (d->name, type, d->code);
11943 }
11944 }
11946 static void
11947 altivec_init_builtins (void)
11948 {
11949 const struct builtin_description *d;
11950 size_t i;
11951 tree ftype;
11952 tree decl;
11954 tree pvoid_type_node = build_pointer_type (void_type_node);
11956 tree pcvoid_type_node
11957 = build_pointer_type (build_qualified_type (void_type_node,
11958 TYPE_QUAL_CONST));
11960 tree int_ftype_opaque
11961 = build_function_type_list (integer_type_node,
11962 opaque_V4SI_type_node, NULL_TREE);
11963 tree opaque_ftype_opaque
11964 = build_function_type_list (integer_type_node, NULL_TREE);
11965 tree opaque_ftype_opaque_int
11966 = build_function_type_list (opaque_V4SI_type_node,
11967 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
11968 tree opaque_ftype_opaque_opaque_int
11969 = build_function_type_list (opaque_V4SI_type_node,
11970 opaque_V4SI_type_node, opaque_V4SI_type_node,
11971 integer_type_node, NULL_TREE);
11972 tree int_ftype_int_opaque_opaque
11973 = build_function_type_list (integer_type_node,
11974 integer_type_node, opaque_V4SI_type_node,
11975 opaque_V4SI_type_node, NULL_TREE);
11976 tree int_ftype_int_v4si_v4si
11977 = build_function_type_list (integer_type_node,
11978 integer_type_node, V4SI_type_node,
11979 V4SI_type_node, NULL_TREE);
11980 tree void_ftype_v4si
11981 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
11982 tree v8hi_ftype_void
11983 = build_function_type_list (V8HI_type_node, NULL_TREE);
11984 tree void_ftype_void
11985 = build_function_type_list (void_type_node, NULL_TREE);
11986 tree void_ftype_int
11987 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11989 tree opaque_ftype_long_pcvoid
11990 = build_function_type_list (opaque_V4SI_type_node,
11991 long_integer_type_node, pcvoid_type_node,
11992 NULL_TREE);
11993 tree v16qi_ftype_long_pcvoid
11994 = build_function_type_list (V16QI_type_node,
11995 long_integer_type_node, pcvoid_type_node,
11996 NULL_TREE);
11997 tree v8hi_ftype_long_pcvoid
11998 = build_function_type_list (V8HI_type_node,
11999 long_integer_type_node, pcvoid_type_node,
12000 NULL_TREE);
12001 tree v4si_ftype_long_pcvoid
12002 = build_function_type_list (V4SI_type_node,
12003 long_integer_type_node, pcvoid_type_node,
12004 NULL_TREE);
12005 tree v4sf_ftype_long_pcvoid
12006 = build_function_type_list (V4SF_type_node,
12007 long_integer_type_node, pcvoid_type_node,
12008 NULL_TREE);
12009 tree v2df_ftype_long_pcvoid
12010 = build_function_type_list (V2DF_type_node,
12011 long_integer_type_node, pcvoid_type_node,
12012 NULL_TREE);
12013 tree v2di_ftype_long_pcvoid
12014 = build_function_type_list (V2DI_type_node,
12015 long_integer_type_node, pcvoid_type_node,
12016 NULL_TREE);
12018 tree void_ftype_opaque_long_pvoid
12019 = build_function_type_list (void_type_node,
12020 opaque_V4SI_type_node, long_integer_type_node,
12021 pvoid_type_node, NULL_TREE);
12022 tree void_ftype_v4si_long_pvoid
12023 = build_function_type_list (void_type_node,
12024 V4SI_type_node, long_integer_type_node,
12025 pvoid_type_node, NULL_TREE);
12026 tree void_ftype_v16qi_long_pvoid
12027 = build_function_type_list (void_type_node,
12028 V16QI_type_node, long_integer_type_node,
12029 pvoid_type_node, NULL_TREE);
12030 tree void_ftype_v8hi_long_pvoid
12031 = build_function_type_list (void_type_node,
12032 V8HI_type_node, long_integer_type_node,
12033 pvoid_type_node, NULL_TREE);
12034 tree void_ftype_v4sf_long_pvoid
12035 = build_function_type_list (void_type_node,
12036 V4SF_type_node, long_integer_type_node,
12037 pvoid_type_node, NULL_TREE);
12038 tree void_ftype_v2df_long_pvoid
12039 = build_function_type_list (void_type_node,
12040 V2DF_type_node, long_integer_type_node,
12041 pvoid_type_node, NULL_TREE);
12042 tree void_ftype_v2di_long_pvoid
12043 = build_function_type_list (void_type_node,
12044 V2DI_type_node, long_integer_type_node,
12045 pvoid_type_node, NULL_TREE);
12046 tree int_ftype_int_v8hi_v8hi
12047 = build_function_type_list (integer_type_node,
12048 integer_type_node, V8HI_type_node,
12049 V8HI_type_node, NULL_TREE);
12050 tree int_ftype_int_v16qi_v16qi
12051 = build_function_type_list (integer_type_node,
12052 integer_type_node, V16QI_type_node,
12053 V16QI_type_node, NULL_TREE);
12054 tree int_ftype_int_v4sf_v4sf
12055 = build_function_type_list (integer_type_node,
12056 integer_type_node, V4SF_type_node,
12057 V4SF_type_node, NULL_TREE);
12058 tree int_ftype_int_v2df_v2df
12059 = build_function_type_list (integer_type_node,
12060 integer_type_node, V2DF_type_node,
12061 V2DF_type_node, NULL_TREE);
12062 tree v4si_ftype_v4si
12063 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12064 tree v8hi_ftype_v8hi
12065 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12066 tree v16qi_ftype_v16qi
12067 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12068 tree v4sf_ftype_v4sf
12069 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12070 tree v2df_ftype_v2df
12071 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12072 tree void_ftype_pcvoid_int_int
12073 = build_function_type_list (void_type_node,
12074 pcvoid_type_node, integer_type_node,
12075 integer_type_node, NULL_TREE);
12077 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12078 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12079 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12080 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12081 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12082 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12083 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12084 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12085 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12086 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12087 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12088 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12089 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12090 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12091 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12092 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12093 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12094 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12095 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12096 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12097 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12098 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12099 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12100 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12101 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12102 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12103 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12104 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12105 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12106 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12108 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12109 VSX_BUILTIN_LXVD2X_V2DF);
12110 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12111 VSX_BUILTIN_LXVD2X_V2DI);
12112 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12113 VSX_BUILTIN_LXVW4X_V4SF);
12114 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12115 VSX_BUILTIN_LXVW4X_V4SI);
12116 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12117 VSX_BUILTIN_LXVW4X_V8HI);
12118 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12119 VSX_BUILTIN_LXVW4X_V16QI);
12120 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12121 VSX_BUILTIN_STXVD2X_V2DF);
12122 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12123 VSX_BUILTIN_STXVD2X_V2DI);
12124 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12125 VSX_BUILTIN_STXVW4X_V4SF);
12126 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12127 VSX_BUILTIN_STXVW4X_V4SI);
12128 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12129 VSX_BUILTIN_STXVW4X_V8HI);
12130 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12131 VSX_BUILTIN_STXVW4X_V16QI);
12132 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12133 VSX_BUILTIN_VEC_LD);
12134 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12135 VSX_BUILTIN_VEC_ST);
12137 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12138 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12139 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12141 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12142 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12143 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12144 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12145 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12146 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12147 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12148 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12149 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12150 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12151 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12152 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
12154 /* Cell builtins. */
12155 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12156 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12157 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12158 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12160 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12161 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12162 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12163 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12165 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12166 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12167 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12168 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12170 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12171 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12172 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12173 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12175 /* Add the DST variants. */
12176 d = bdesc_dst;
12177 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12178 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12180 /* Initialize the predicates. */
12181 d = bdesc_altivec_preds;
12182 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12183 {
12184 enum machine_mode mode1;
12185 tree type;
12187 if (rs6000_overloaded_builtin_p (d->code))
12188 mode1 = VOIDmode;
12189 else
12190 mode1 = insn_data[d->icode].operand[1].mode;
12192 switch (mode1)
12193 {
12194 case VOIDmode:
12195 type = int_ftype_int_opaque_opaque;
12196 break;
12197 case V4SImode:
12198 type = int_ftype_int_v4si_v4si;
12199 break;
12200 case V8HImode:
12201 type = int_ftype_int_v8hi_v8hi;
12202 break;
12203 case V16QImode:
12204 type = int_ftype_int_v16qi_v16qi;
12205 break;
12206 case V4SFmode:
12207 type = int_ftype_int_v4sf_v4sf;
12208 break;
12209 case V2DFmode:
12210 type = int_ftype_int_v2df_v2df;
12211 break;
12212 default:
12213 gcc_unreachable ();
12214 }
12216 def_builtin (d->name, type, d->code);
12217 }
12219 /* Initialize the abs* operators. */
12220 d = bdesc_abs;
12221 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12222 {
12223 enum machine_mode mode0;
12224 tree type;
12226 mode0 = insn_data[d->icode].operand[0].mode;
12228 switch (mode0)
12229 {
12230 case V4SImode:
12231 type = v4si_ftype_v4si;
12232 break;
12233 case V8HImode:
12234 type = v8hi_ftype_v8hi;
12235 break;
12236 case V16QImode:
12237 type = v16qi_ftype_v16qi;
12238 break;
12239 case V4SFmode:
12240 type = v4sf_ftype_v4sf;
12241 break;
12242 case V2DFmode:
12243 type = v2df_ftype_v2df;
12244 break;
12245 default:
12246 gcc_unreachable ();
12247 }
12249 def_builtin (d->name, type, d->code);
12250 }
12252 /* Initialize the target builtin that implements
12253 targetm.vectorize.builtin_mask_for_load. */
12255 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12256 v16qi_ftype_long_pcvoid,
12257 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12258 BUILT_IN_MD, NULL, NULL_TREE);
12259 TREE_READONLY (decl) = 1;
12260 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12261 altivec_builtin_mask_for_load = decl;
12263 /* Access to the vec_init patterns. */
12264 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12265 integer_type_node, integer_type_node,
12266 integer_type_node, NULL_TREE);
12267 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12269 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12270 short_integer_type_node,
12271 short_integer_type_node,
12272 short_integer_type_node,
12273 short_integer_type_node,
12274 short_integer_type_node,
12275 short_integer_type_node,
12276 short_integer_type_node, NULL_TREE);
12277 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12279 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12280 char_type_node, char_type_node,
12281 char_type_node, char_type_node,
12282 char_type_node, char_type_node,
12283 char_type_node, char_type_node,
12284 char_type_node, char_type_node,
12285 char_type_node, char_type_node,
12286 char_type_node, char_type_node,
12287 char_type_node, NULL_TREE);
12288 def_builtin ("__builtin_vec_init_v16qi", ftype,
12289 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12291 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12292 float_type_node, float_type_node,
12293 float_type_node, NULL_TREE);
12294 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12296 /* VSX builtins. */
12297 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12298 double_type_node, NULL_TREE);
12299 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12301 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12302 intDI_type_node, NULL_TREE);
12303 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12305 /* Access to the vec_set patterns. */
12306 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12307 intSI_type_node,
12308 integer_type_node, NULL_TREE);
12309 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12311 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12312 intHI_type_node,
12313 integer_type_node, NULL_TREE);
12314 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12316 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12317 intQI_type_node,
12318 integer_type_node, NULL_TREE);
12319 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12321 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12322 float_type_node,
12323 integer_type_node, NULL_TREE);
12324 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12326 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12327 double_type_node,
12328 integer_type_node, NULL_TREE);
12329 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12331 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12332 intDI_type_node,
12333 integer_type_node, NULL_TREE);
12334 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12336 /* Access to the vec_extract patterns. */
12337 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12338 integer_type_node, NULL_TREE);
12339 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12341 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12342 integer_type_node, NULL_TREE);
12343 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12345 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12346 integer_type_node, NULL_TREE);
12347 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12349 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12350 integer_type_node, NULL_TREE);
12351 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12353 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12354 integer_type_node, NULL_TREE);
12355 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12357 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12358 integer_type_node, NULL_TREE);
12359 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12362 /* Hash function for builtin functions with up to 3 arguments and a return
12363 type. */
12364 static unsigned
12365 builtin_hash_function (const void *hash_entry)
12366 {
12367 unsigned ret = 0;
12368 int i;
12369 const struct builtin_hash_struct *bh =
12370 (const struct builtin_hash_struct *) hash_entry;
12372 for (i = 0; i < 4; i++)
12373 {
12374 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12375 ret = (ret * 2) + bh->uns_p[i];
12376 }
12378 return ret;
12379 }
12381 /* Compare builtin hash entries H1 and H2 for equivalence. */
12382 static int
12383 builtin_hash_eq (const void *h1, const void *h2)
12384 {
12385 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12386 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12388 return ((p1->mode[0] == p2->mode[0])
12389 && (p1->mode[1] == p2->mode[1])
12390 && (p1->mode[2] == p2->mode[2])
12391 && (p1->mode[3] == p2->mode[3])
12392 && (p1->uns_p[0] == p2->uns_p[0])
12393 && (p1->uns_p[1] == p2->uns_p[1])
12394 && (p1->uns_p[2] == p2->uns_p[2])
12395 && (p1->uns_p[3] == p2->uns_p[3]));
12396 }
12398 /* Map types for builtin functions with an explicit return type and up to 3
12399 arguments. Functions with fewer than 3 arguments pass VOIDmode as the mode
12400 of each unused argument. */
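/* Illustrative call, mirroring the use in rs6000_init_builtins:

     ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                    RS6000_BUILTIN_RECIP, "__builtin_recipdiv");

   which returns (and caches in builtin_hash_table) the tree for
   double (*) (double, double).  */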
12401 static tree
12402 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12403 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12404 enum rs6000_builtins builtin, const char *name)
12405 {
12406 struct builtin_hash_struct h;
12407 struct builtin_hash_struct *h2;
12408 void **found;
12409 int num_args = 3;
12410 int i;
12411 tree ret_type = NULL_TREE;
12412 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12414 /* Create builtin_hash_table. */
12415 if (builtin_hash_table == NULL)
12416 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12417 builtin_hash_eq, NULL);
12419 h.type = NULL_TREE;
12420 h.mode[0] = mode_ret;
12421 h.mode[1] = mode_arg0;
12422 h.mode[2] = mode_arg1;
12423 h.mode[3] = mode_arg2;
12424 h.uns_p[0] = 0;
12425 h.uns_p[1] = 0;
12426 h.uns_p[2] = 0;
12427 h.uns_p[3] = 0;
12429 /* If the builtin produces unsigned results or takes unsigned arguments,
12430 and it is returned as a decl for the vectorizer (such as widening
12431 multiplies, permute), make sure the arguments and return value
12432 are type correct. */
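/* E.g. ALTIVEC_BUILTIN_VMULEUB_UNS below marks its result and both inputs
   as unsigned, so the unsigned vector type nodes are chosen instead of the
   default signed ones.  */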
12433 switch (builtin)
12434 {
12435 /* unsigned 2 argument functions. */
12436 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12437 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12438 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12439 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12440 h.uns_p[0] = 1;
12441 h.uns_p[1] = 1;
12442 h.uns_p[2] = 1;
12443 break;
12445 /* unsigned 3 argument functions. */
12446 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12447 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12448 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12449 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12450 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12451 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12452 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12453 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12454 case VSX_BUILTIN_VPERM_16QI_UNS:
12455 case VSX_BUILTIN_VPERM_8HI_UNS:
12456 case VSX_BUILTIN_VPERM_4SI_UNS:
12457 case VSX_BUILTIN_VPERM_2DI_UNS:
12458 case VSX_BUILTIN_XXSEL_16QI_UNS:
12459 case VSX_BUILTIN_XXSEL_8HI_UNS:
12460 case VSX_BUILTIN_XXSEL_4SI_UNS:
12461 case VSX_BUILTIN_XXSEL_2DI_UNS:
12462 h.uns_p[0] = 1;
12463 h.uns_p[1] = 1;
12464 h.uns_p[2] = 1;
12465 h.uns_p[3] = 1;
12466 break;
12468 /* signed permute functions with unsigned char mask. */
12469 case ALTIVEC_BUILTIN_VPERM_16QI:
12470 case ALTIVEC_BUILTIN_VPERM_8HI:
12471 case ALTIVEC_BUILTIN_VPERM_4SI:
12472 case ALTIVEC_BUILTIN_VPERM_4SF:
12473 case ALTIVEC_BUILTIN_VPERM_2DI:
12474 case ALTIVEC_BUILTIN_VPERM_2DF:
12475 case VSX_BUILTIN_VPERM_16QI:
12476 case VSX_BUILTIN_VPERM_8HI:
12477 case VSX_BUILTIN_VPERM_4SI:
12478 case VSX_BUILTIN_VPERM_4SF:
12479 case VSX_BUILTIN_VPERM_2DI:
12480 case VSX_BUILTIN_VPERM_2DF:
12481 h.uns_p[3] = 1;
12482 break;
12484 /* unsigned args, signed return. */
12485 case VSX_BUILTIN_XVCVUXDDP_UNS:
12486 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12487 h.uns_p[1] = 1;
12488 break;
12490 /* signed args, unsigned return. */
12491 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12492 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12493 h.uns_p[0] = 1;
12494 break;
12496 default:
12497 break;
12500 /* Figure out how many args are present. */
12501 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12502 num_args--;
12504 if (num_args == 0)
12505 fatal_error ("internal error: builtin function %s had no type", name);
12507 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12508 if (!ret_type && h.uns_p[0])
12509 ret_type = builtin_mode_to_type[h.mode[0]][0];
12511 if (!ret_type)
12512 fatal_error ("internal error: builtin function %s had an unexpected "
12513 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12515 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12516 arg_type[i] = NULL_TREE;
12518 for (i = 0; i < num_args; i++)
12520 int m = (int) h.mode[i+1];
12521 int uns_p = h.uns_p[i+1];
12523 arg_type[i] = builtin_mode_to_type[m][uns_p];
12524 if (!arg_type[i] && uns_p)
12525 arg_type[i] = builtin_mode_to_type[m][0];
12527 if (!arg_type[i])
12528 fatal_error ("internal error: builtin function %s, argument %d "
12529 "had unexpected argument type %s", name, i,
12530 GET_MODE_NAME (m));
12533 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12534 if (*found == NULL)
12536 h2 = ggc_alloc_builtin_hash_struct ();
12537 *h2 = h;
12538 *found = (void *)h2;
12540 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12541 arg_type[2], NULL_TREE);
12544 return ((struct builtin_hash_struct *)(*found))->type;
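/* Usage sketch (mirrors the callers in rs6000_common_init_builtins
   below): a ternary pattern whose insn operands have modes
   (V4SI, V4SI, V4SI, V16QI) is registered as

     type = builtin_function_type (V4SImode, V4SImode, V4SImode,
                                   V16QImode, d->code, d->name);
     def_builtin (d->name, type, d->code);

   and a later request for the same signature returns the cached tree
   from builtin_hash_table instead of building a new FUNCTION_TYPE.  */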
12547 static void
12548 rs6000_common_init_builtins (void)
12550 const struct builtin_description *d;
12551 size_t i;
12553 tree opaque_ftype_opaque = NULL_TREE;
12554 tree opaque_ftype_opaque_opaque = NULL_TREE;
12555 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12556 tree v2si_ftype_qi = NULL_TREE;
12557 tree v2si_ftype_v2si_qi = NULL_TREE;
12558 tree v2si_ftype_int_qi = NULL_TREE;
12559 unsigned builtin_mask = rs6000_builtin_mask;
12561 if (!TARGET_PAIRED_FLOAT)
12563 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12564 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12567 /* Paired and SPE builtins are only available if you build a compiler with
12568 the appropriate options, so only create those builtins when the
12569 matching compiler option is given. Create Altivec and VSX builtins on
12570 machines with at least the general purpose extensions (970 and newer) to
12571 allow the use of the target attribute. */
12573 if (TARGET_EXTRA_BUILTINS)
12574 builtin_mask |= RS6000_BTM_COMMON;
12576 /* Add the ternary operators. */
12577 d = bdesc_3arg;
12578 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12580 tree type;
12581 unsigned mask = d->mask;
12583 if ((mask & builtin_mask) != mask)
12585 if (TARGET_DEBUG_BUILTIN)
12586 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
12587 continue;
12590 if (rs6000_overloaded_builtin_p (d->code))
12592 if (! (type = opaque_ftype_opaque_opaque_opaque))
12593 type = opaque_ftype_opaque_opaque_opaque
12594 = build_function_type_list (opaque_V4SI_type_node,
12595 opaque_V4SI_type_node,
12596 opaque_V4SI_type_node,
12597 opaque_V4SI_type_node,
12598 NULL_TREE);
12600 else
12602 enum insn_code icode = d->icode;
12603 if (d->name == 0 || icode == CODE_FOR_nothing)
12604 continue;
12606 type = builtin_function_type (insn_data[icode].operand[0].mode,
12607 insn_data[icode].operand[1].mode,
12608 insn_data[icode].operand[2].mode,
12609 insn_data[icode].operand[3].mode,
12610 d->code, d->name);
12613 def_builtin (d->name, type, d->code);
12616 /* Add the binary operators. */
12617 d = bdesc_2arg;
12618 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12620 enum machine_mode mode0, mode1, mode2;
12621 tree type;
12622 unsigned mask = d->mask;
12624 if ((mask & builtin_mask) != mask)
12626 if (TARGET_DEBUG_BUILTIN)
12627 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
12628 continue;
12631 if (rs6000_overloaded_builtin_p (d->code))
12633 if (! (type = opaque_ftype_opaque_opaque))
12634 type = opaque_ftype_opaque_opaque
12635 = build_function_type_list (opaque_V4SI_type_node,
12636 opaque_V4SI_type_node,
12637 opaque_V4SI_type_node,
12638 NULL_TREE);
12640 else
12642 enum insn_code icode = d->icode;
12643 if (d->name == 0 || icode == CODE_FOR_nothing)
12644 continue;
12646 mode0 = insn_data[icode].operand[0].mode;
12647 mode1 = insn_data[icode].operand[1].mode;
12648 mode2 = insn_data[icode].operand[2].mode;
12650 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12652 if (! (type = v2si_ftype_v2si_qi))
12653 type = v2si_ftype_v2si_qi
12654 = build_function_type_list (opaque_V2SI_type_node,
12655 opaque_V2SI_type_node,
12656 char_type_node,
12657 NULL_TREE);
12660 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12661 && mode2 == QImode)
12663 if (! (type = v2si_ftype_int_qi))
12664 type = v2si_ftype_int_qi
12665 = build_function_type_list (opaque_V2SI_type_node,
12666 integer_type_node,
12667 char_type_node,
12668 NULL_TREE);
12671 else
12672 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12673 d->code, d->name);
12676 def_builtin (d->name, type, d->code);
12679 /* Add the simple unary operators. */
12680 d = bdesc_1arg;
12681 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12683 enum machine_mode mode0, mode1;
12684 tree type;
12685 unsigned mask = d->mask;
12687 if ((mask & builtin_mask) != mask)
12689 if (TARGET_DEBUG_BUILTIN)
12690 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
12691 continue;
12694 if (rs6000_overloaded_builtin_p (d->code))
12696 if (! (type = opaque_ftype_opaque))
12697 type = opaque_ftype_opaque
12698 = build_function_type_list (opaque_V4SI_type_node,
12699 opaque_V4SI_type_node,
12700 NULL_TREE);
12702 else
12704 enum insn_code icode = d->icode;
12705 if (d->name == 0 || icode == CODE_FOR_nothing)
12706 continue;
12708 mode0 = insn_data[icode].operand[0].mode;
12709 mode1 = insn_data[icode].operand[1].mode;
12711 if (mode0 == V2SImode && mode1 == QImode)
12713 if (! (type = v2si_ftype_qi))
12714 type = v2si_ftype_qi
12715 = build_function_type_list (opaque_V2SI_type_node,
12716 char_type_node,
12717 NULL_TREE);
12720 else
12721 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12722 d->code, d->name);
12725 def_builtin (d->name, type, d->code);
12729 static void
12730 rs6000_init_libfuncs (void)
12732 if (!TARGET_IEEEQUAD)
12733 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12734 if (!TARGET_XL_COMPAT)
12736 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12737 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12738 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12739 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12741 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12743 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12744 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12745 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12746 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12747 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12748 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12749 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12751 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12752 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12753 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12754 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12755 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12756 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12757 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12758 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12761 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12762 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12764 else
12766 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12767 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12768 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12769 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12771 else
12773 /* 32-bit SVR4 quad floating point routines. */
12775 set_optab_libfunc (add_optab, TFmode, "_q_add");
12776 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12777 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12778 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12779 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12780 if (TARGET_PPC_GPOPT)
12781 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12783 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12784 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12785 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12786 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12787 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12788 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12790 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12791 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12792 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12793 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12794 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12795 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12796 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12797 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
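/* Example (illustrative, not from the original source): with the
   default IBM long-double ABI, a TFmode operation that is not open
   coded becomes one of the libcalls registered above, e.g.

     long double f (long double a, long double b) { return a + b; }

   calls __gcc_qadd on AIX/Darwin/64-bit Linux (_xlqadd instead under
   -mxl-compat), and _q_add on 32-bit SVR4.  */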
12802 /* Expand a block clear operation, and return 1 if successful. Return 0
12803 if we should let the compiler generate normal code.
12805 operands[0] is the destination
12806 operands[1] is the length
12807 operands[3] is the alignment */
12810 expand_block_clear (rtx operands[])
12812 rtx orig_dest = operands[0];
12813 rtx bytes_rtx = operands[1];
12814 rtx align_rtx = operands[3];
12815 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12816 HOST_WIDE_INT align;
12817 HOST_WIDE_INT bytes;
12818 int offset;
12819 int clear_bytes;
12820 int clear_step;
12822 /* If this is not a fixed size clear, just call memset */
12823 if (! constp)
12824 return 0;
12826 /* This must be a fixed size alignment */
12827 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12828 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12830 /* Anything to clear? */
12831 bytes = INTVAL (bytes_rtx);
12832 if (bytes <= 0)
12833 return 1;
12835 /* Use the builtin memset after a point, to avoid huge code bloat.
12836 When optimize_size, avoid any significant code bloat; calling
12837 memset is about 4 instructions, so allow for one instruction to
12838 load zero and three to do clearing. */
12839 if (TARGET_ALTIVEC && align >= 128)
12840 clear_step = 16;
12841 else if (TARGET_POWERPC64 && align >= 32)
12842 clear_step = 8;
12843 else if (TARGET_SPE && align >= 64)
12844 clear_step = 8;
12845 else
12846 clear_step = 4;
12848 if (optimize_size && bytes > 3 * clear_step)
12849 return 0;
12850 if (! optimize_size && bytes > 8 * clear_step)
12851 return 0;
12853 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12855 enum machine_mode mode = BLKmode;
12856 rtx dest;
12858 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
12860 clear_bytes = 16;
12861 mode = V4SImode;
12863 else if (bytes >= 8 && TARGET_SPE && align >= 64)
12865 clear_bytes = 8;
12866 mode = V2SImode;
12868 else if (bytes >= 8 && TARGET_POWERPC64
12869 /* 64-bit loads and stores require word-aligned
12870 displacements. */
12871 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12873 clear_bytes = 8;
12874 mode = DImode;
12876 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12877 { /* move 4 bytes */
12878 clear_bytes = 4;
12879 mode = SImode;
12881 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12882 { /* move 2 bytes */
12883 clear_bytes = 2;
12884 mode = HImode;
12886 else /* move 1 byte at a time */
12888 clear_bytes = 1;
12889 mode = QImode;
12892 dest = adjust_address (orig_dest, mode, offset);
12894 emit_move_insn (dest, CONST0_RTX (mode));
12897 return 1;
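/* Worked example (illustrative): clearing 22 bytes with 32-bit
   alignment on a 32-bit target uses clear_step = 4, passes the size
   check (22 <= 8 * 4 when not optimizing for size), and emits

     five SImode zero stores at offsets 0, 4, 8, 12, 16
     one HImode zero store at offset 20

   With -Os the same clear would instead return 0 (22 > 3 * 4) and
   fall back to a memset call.  */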
12901 /* Expand a block move operation, and return 1 if successful. Return 0
12902 if we should let the compiler generate normal code.
12904 operands[0] is the destination
12905 operands[1] is the source
12906 operands[2] is the length
12907 operands[3] is the alignment */
12909 #define MAX_MOVE_REG 4
12912 expand_block_move (rtx operands[])
12914 rtx orig_dest = operands[0];
12915 rtx orig_src = operands[1];
12916 rtx bytes_rtx = operands[2];
12917 rtx align_rtx = operands[3];
12918 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
12919 int align;
12920 int bytes;
12921 int offset;
12922 int move_bytes;
12923 rtx stores[MAX_MOVE_REG];
12924 int num_reg = 0;
12926 /* If this is not a fixed size move, just call memcpy */
12927 if (! constp)
12928 return 0;
12930 /* This must be a fixed size alignment */
12931 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12932 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12934 /* Anything to move? */
12935 bytes = INTVAL (bytes_rtx);
12936 if (bytes <= 0)
12937 return 1;
12939 if (bytes > rs6000_block_move_inline_limit)
12940 return 0;
12942 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
12944 union {
12945 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
12946 rtx (*mov) (rtx, rtx);
12947 } gen_func;
12948 enum machine_mode mode = BLKmode;
12949 rtx src, dest;
12951 /* Altivec first, since it will be faster than a string move
12952 when it applies, and usually not significantly larger. */
12953 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
12955 move_bytes = 16;
12956 mode = V4SImode;
12957 gen_func.mov = gen_movv4si;
12959 else if (TARGET_SPE && bytes >= 8 && align >= 64)
12961 move_bytes = 8;
12962 mode = V2SImode;
12963 gen_func.mov = gen_movv2si;
12965 else if (TARGET_STRING
12966 && bytes > 24 /* move up to 32 bytes at a time */
12967 && ! fixed_regs[5]
12968 && ! fixed_regs[6]
12969 && ! fixed_regs[7]
12970 && ! fixed_regs[8]
12971 && ! fixed_regs[9]
12972 && ! fixed_regs[10]
12973 && ! fixed_regs[11]
12974 && ! fixed_regs[12])
12976 move_bytes = (bytes > 32) ? 32 : bytes;
12977 gen_func.movmemsi = gen_movmemsi_8reg;
12979 else if (TARGET_STRING
12980 && bytes > 16 /* move up to 24 bytes at a time */
12981 && ! fixed_regs[5]
12982 && ! fixed_regs[6]
12983 && ! fixed_regs[7]
12984 && ! fixed_regs[8]
12985 && ! fixed_regs[9]
12986 && ! fixed_regs[10])
12988 move_bytes = (bytes > 24) ? 24 : bytes;
12989 gen_func.movmemsi = gen_movmemsi_6reg;
12991 else if (TARGET_STRING
12992 && bytes > 8 /* move up to 16 bytes at a time */
12993 && ! fixed_regs[5]
12994 && ! fixed_regs[6]
12995 && ! fixed_regs[7]
12996 && ! fixed_regs[8])
12998 move_bytes = (bytes > 16) ? 16 : bytes;
12999 gen_func.movmemsi = gen_movmemsi_4reg;
13001 else if (bytes >= 8 && TARGET_POWERPC64
13002 /* 64-bit loads and stores require word-aligned
13003 displacements. */
13004 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13006 move_bytes = 8;
13007 mode = DImode;
13008 gen_func.mov = gen_movdi;
13010 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13011 { /* move up to 8 bytes at a time */
13012 move_bytes = (bytes > 8) ? 8 : bytes;
13013 gen_func.movmemsi = gen_movmemsi_2reg;
13015 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13016 { /* move 4 bytes */
13017 move_bytes = 4;
13018 mode = SImode;
13019 gen_func.mov = gen_movsi;
13021 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13022 { /* move 2 bytes */
13023 move_bytes = 2;
13024 mode = HImode;
13025 gen_func.mov = gen_movhi;
13027 else if (TARGET_STRING && bytes > 1)
13028 { /* move up to 4 bytes at a time */
13029 move_bytes = (bytes > 4) ? 4 : bytes;
13030 gen_func.movmemsi = gen_movmemsi_1reg;
13032 else /* move 1 byte at a time */
13034 move_bytes = 1;
13035 mode = QImode;
13036 gen_func.mov = gen_movqi;
13039 src = adjust_address (orig_src, mode, offset);
13040 dest = adjust_address (orig_dest, mode, offset);
13042 if (mode != BLKmode)
13044 rtx tmp_reg = gen_reg_rtx (mode);
13046 emit_insn ((*gen_func.mov) (tmp_reg, src));
13047 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13050 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13052 int i;
13053 for (i = 0; i < num_reg; i++)
13054 emit_insn (stores[i]);
13055 num_reg = 0;
13058 if (mode == BLKmode)
13060 /* Move the address into scratch registers. The movmemsi
13061 patterns require zero offset. */
13062 if (!REG_P (XEXP (src, 0)))
13064 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13065 src = replace_equiv_address (src, src_reg);
13067 set_mem_size (src, move_bytes);
13069 if (!REG_P (XEXP (dest, 0)))
13071 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13072 dest = replace_equiv_address (dest, dest_reg);
13074 set_mem_size (dest, move_bytes);
13076 emit_insn ((*gen_func.movmemsi) (dest, src,
13077 GEN_INT (move_bytes & 31),
13078 align_rtx));
13082 return 1;
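/* Worked example (illustrative): a 13-byte copy with 32-bit alignment
   on a 32-bit target without string instructions becomes

     SImode load/store pairs at offsets 0, 4, 8
     a QImode load/store pair at offset 12

   The loads go through fresh pseudos and the stores are buffered in
   STORES[], flushed up to MAX_MOVE_REG at a time, so the scheduler
   sees grouped loads followed by grouped stores.  */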
13086 /* Return a string to perform a load_multiple operation.
13087 operands[0] is the vector.
13088 operands[1] is the source address.
13089 operands[2] is the first destination register. */
13091 const char *
13092 rs6000_output_load_multiple (rtx operands[3])
13094 /* We have to handle the case where the pseudo used to contain the address
13095 is assigned to one of the output registers. */
13096 int i, j;
13097 int words = XVECLEN (operands[0], 0);
13098 rtx xop[10];
13100 if (XVECLEN (operands[0], 0) == 1)
13101 return "lwz %2,0(%1)";
13103 for (i = 0; i < words; i++)
13104 if (refers_to_regno_p (REGNO (operands[2]) + i,
13105 REGNO (operands[2]) + i + 1, operands[1], 0))
13107 if (i == words-1)
13109 xop[0] = GEN_INT (4 * (words-1));
13110 xop[1] = operands[1];
13111 xop[2] = operands[2];
13112 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13113 return "";
13115 else if (i == 0)
13117 xop[0] = GEN_INT (4 * (words-1));
13118 xop[1] = operands[1];
13119 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13120 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13121 return "";
13123 else
13125 for (j = 0; j < words; j++)
13126 if (j != i)
13128 xop[0] = GEN_INT (j * 4);
13129 xop[1] = operands[1];
13130 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13131 output_asm_insn ("lwz %2,%0(%1)", xop);
13133 xop[0] = GEN_INT (i * 4);
13134 xop[1] = operands[1];
13135 output_asm_insn ("lwz %1,%0(%1)", xop);
13136 return "";
13140 return "lswi %2,%1,%N0";
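/* Example (illustrative): for a four-word load where the base register
   is also the last destination, say r5..r8 loaded from (r8), the
   i == words-1 arm above emits

     lswi r5,r8,12
     lwz  r8,12(r8)

   so the word that overwrites the base register is loaded last.  */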
13144 /* A validation routine: say whether CODE, a condition code, and MODE
13145 match. The other alternatives either don't make sense or should
13146 never be generated. */
13148 void
13149 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13151 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13152 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13153 && GET_MODE_CLASS (mode) == MODE_CC);
13155 /* These don't make sense. */
13156 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13157 || mode != CCUNSmode);
13159 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13160 || mode == CCUNSmode);
13162 gcc_assert (mode == CCFPmode
13163 || (code != ORDERED && code != UNORDERED
13164 && code != UNEQ && code != LTGT
13165 && code != UNGT && code != UNLT
13166 && code != UNGE && code != UNLE));
13168 /* These should never be generated except for
13169 flag_finite_math_only. */
13170 gcc_assert (mode != CCFPmode
13171 || flag_finite_math_only
13172 || (code != LE && code != GE
13173 && code != UNEQ && code != LTGT
13174 && code != UNGT && code != UNLT));
13176 /* These are invalid; the information is not there. */
13177 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
13181 /* Return 1 if ANDOP is a mask with no bits set outside of the mask
13182 required to convert the result of a rotate insn into a shift
13183 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
13186 includes_lshift_p (rtx shiftop, rtx andop)
13188 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13190 shift_mask <<= INTVAL (shiftop);
13192 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13195 /* Similar, but for right shift. */
13198 includes_rshift_p (rtx shiftop, rtx andop)
13200 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13202 shift_mask >>= INTVAL (shiftop);
13204 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13207 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13208 to perform a left shift. It must have exactly SHIFTOP least
13209 significant 0's, then one or more 1's, then zero or more 0's. */
13212 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13214 if (GET_CODE (andop) == CONST_INT)
13216 HOST_WIDE_INT c, lsb, shift_mask;
13218 c = INTVAL (andop);
13219 if (c == 0 || c == ~0)
13220 return 0;
13222 shift_mask = ~0;
13223 shift_mask <<= INTVAL (shiftop);
13225 /* Find the least significant one bit. */
13226 lsb = c & -c;
13228 /* It must coincide with the LSB of the shift mask. */
13229 if (-lsb != shift_mask)
13230 return 0;
13232 /* Invert to look for the next transition (if any). */
13233 c = ~c;
13235 /* Remove the low group of ones (originally low group of zeros). */
13236 c &= -lsb;
13238 /* Again find the lsb, and check we have all 1's above. */
13239 lsb = c & -c;
13240 return c == -lsb;
13242 else if (GET_CODE (andop) == CONST_DOUBLE
13243 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13245 HOST_WIDE_INT low, high, lsb;
13246 HOST_WIDE_INT shift_mask_low, shift_mask_high;
13248 low = CONST_DOUBLE_LOW (andop);
13249 if (HOST_BITS_PER_WIDE_INT < 64)
13250 high = CONST_DOUBLE_HIGH (andop);
13252 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
13253 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
13254 return 0;
13256 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13258 shift_mask_high = ~0;
13259 if (INTVAL (shiftop) > 32)
13260 shift_mask_high <<= INTVAL (shiftop) - 32;
13262 lsb = high & -high;
13264 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
13265 return 0;
13267 high = ~high;
13268 high &= -lsb;
13270 lsb = high & -high;
13271 return high == -lsb;
13274 shift_mask_low = ~0;
13275 shift_mask_low <<= INTVAL (shiftop);
13277 lsb = low & -low;
13279 if (-lsb != shift_mask_low)
13280 return 0;
13282 if (HOST_BITS_PER_WIDE_INT < 64)
13283 high = ~high;
13284 low = ~low;
13285 low &= -lsb;
13287 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13289 lsb = high & -high;
13290 return high == -lsb;
13293 lsb = low & -low;
13294 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
13296 else
13297 return 0;
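/* Worked example (illustrative) of the CONST_INT test above, with
   SHIFTOP = 4 and ANDOP = 0x0ff0:

     shift_mask = ~0 << 4        = ...fff0
     lsb        = c & -c         = 0x10,   and -lsb == shift_mask
     c          = ~c, c &= -lsb  = ...f000
     lsb        = c & -c         = 0x1000, and c == -lsb

   so 0x0ff0 is accepted: exactly four low zeros, one run of ones, and
   only zeros above it.  */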
13300 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13301 to perform a left shift. It must have SHIFTOP or more least
13302 significant 0's, with the remainder of the word 1's. */
13305 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13307 if (GET_CODE (andop) == CONST_INT)
13309 HOST_WIDE_INT c, lsb, shift_mask;
13311 shift_mask = ~0;
13312 shift_mask <<= INTVAL (shiftop);
13313 c = INTVAL (andop);
13315 /* Find the least significant one bit. */
13316 lsb = c & -c;
13318 /* It must be covered by the shift mask.
13319 This test also rejects c == 0. */
13320 if ((lsb & shift_mask) == 0)
13321 return 0;
13323 /* Check we have all 1's above the transition, and reject all 1's. */
13324 return c == -lsb && lsb != 1;
13326 else if (GET_CODE (andop) == CONST_DOUBLE
13327 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13329 HOST_WIDE_INT low, lsb, shift_mask_low;
13331 low = CONST_DOUBLE_LOW (andop);
13333 if (HOST_BITS_PER_WIDE_INT < 64)
13335 HOST_WIDE_INT high, shift_mask_high;
13337 high = CONST_DOUBLE_HIGH (andop);
13339 if (low == 0)
13341 shift_mask_high = ~0;
13342 if (INTVAL (shiftop) > 32)
13343 shift_mask_high <<= INTVAL (shiftop) - 32;
13345 lsb = high & -high;
13347 if ((lsb & shift_mask_high) == 0)
13348 return 0;
13350 return high == -lsb;
13352 if (high != ~0)
13353 return 0;
13356 shift_mask_low = ~0;
13357 shift_mask_low <<= INTVAL (shiftop);
13359 lsb = low & -low;
13361 if ((lsb & shift_mask_low) == 0)
13362 return 0;
13364 return low == -lsb && lsb != 1;
13366 else
13367 return 0;
13370 /* Return 1 if the operands will generate valid arguments to an rlwimi
13371 instruction for an insert with right shift in 64-bit mode. The mask may
13372 not start on the first bit or stop on the last bit because the wrap-around
13373 effects of the instruction do not match the semantics of the RTL insn. */
13376 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13378 if (INTVAL (startop) > 32
13379 && INTVAL (startop) < 64
13380 && INTVAL (sizeop) > 1
13381 && INTVAL (sizeop) + INTVAL (startop) < 64
13382 && INTVAL (shiftop) > 0
13383 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13384 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13385 return 1;
13387 return 0;
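/* Example (illustrative): sizeop = 8, startop = 40, shiftop = 12
   passes every test above (40 + 8 < 64, 12 + 8 < 32, 64 - 12 >= 8),
   so inserting an 8-bit field at bit 40 from a value shifted right by
   12 can use rlwimi even though the container is DImode.  */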
13390 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
13391 for lfq and stfq insns iff the registers are hard registers. */
13394 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13396 /* We might have been passed a SUBREG. */
13397 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13398 return 0;
13400 /* We might have been passed non-floating-point registers. */
13401 if (!FP_REGNO_P (REGNO (reg1))
13402 || !FP_REGNO_P (REGNO (reg2)))
13403 return 0;
13405 return (REGNO (reg1) == REGNO (reg2) - 1);
13408 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13409 addr1 and addr2 must be in consecutive memory locations
13410 (addr2 == addr1 + 8). */
13413 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13415 rtx addr1, addr2;
13416 unsigned int reg1, reg2;
13417 int offset1, offset2;
13419 /* The mems cannot be volatile. */
13420 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13421 return 0;
13423 addr1 = XEXP (mem1, 0);
13424 addr2 = XEXP (mem2, 0);
13426 /* Extract an offset (if used) from the first addr. */
13427 if (GET_CODE (addr1) == PLUS)
13429 /* If not a REG, return zero. */
13430 if (GET_CODE (XEXP (addr1, 0)) != REG)
13431 return 0;
13432 else
13434 reg1 = REGNO (XEXP (addr1, 0));
13435 /* The offset must be constant! */
13436 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13437 return 0;
13438 offset1 = INTVAL (XEXP (addr1, 1));
13441 else if (GET_CODE (addr1) != REG)
13442 return 0;
13443 else
13445 reg1 = REGNO (addr1);
13446 /* This was a simple (mem (reg)) expression. Offset is 0. */
13447 offset1 = 0;
13450 /* And now for the second addr. */
13451 if (GET_CODE (addr2) == PLUS)
13453 /* If not a REG, return zero. */
13454 if (GET_CODE (XEXP (addr2, 0)) != REG)
13455 return 0;
13456 else
13458 reg2 = REGNO (XEXP (addr2, 0));
13459 /* The offset must be constant. */
13460 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13461 return 0;
13462 offset2 = INTVAL (XEXP (addr2, 1));
13465 else if (GET_CODE (addr2) != REG)
13466 return 0;
13467 else
13469 reg2 = REGNO (addr2);
13470 /* This was a simple (mem (reg)) expression. Offset is 0. */
13471 offset2 = 0;
13474 /* Both of these must have the same base register. */
13475 if (reg1 != reg2)
13476 return 0;
13478 /* The offset for the second addr must be 8 more than the first addr. */
13479 if (offset2 != offset1 + 8)
13480 return 0;
13482 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13483 instructions. */
13484 return 1;
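/* Example (illustrative): (mem (plus r9 16)) and (mem (plus r9 24))
   pass all of the checks above -- same base register, second offset
   exactly 8 larger -- while (mem (plus r9 16)) and (mem (plus r10 24))
   fail the base-register comparison and are not fused.  */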
13489 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13491 static bool eliminated = false;
13492 rtx ret;
13494 if (mode != SDmode)
13495 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13496 else
13498 rtx mem = cfun->machine->sdmode_stack_slot;
13499 gcc_assert (mem != NULL_RTX);
13501 if (!eliminated)
13503 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13504 cfun->machine->sdmode_stack_slot = mem;
13505 eliminated = true;
13507 ret = mem;
13510 if (TARGET_DEBUG_ADDR)
13512 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13513 GET_MODE_NAME (mode));
13514 if (!ret)
13515 fprintf (stderr, "\tNULL_RTX\n");
13516 else
13517 debug_rtx (ret);
13520 return ret;
13523 static tree
13524 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13526 /* Don't walk into types. */
13527 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13529 *walk_subtrees = 0;
13530 return NULL_TREE;
13533 switch (TREE_CODE (*tp))
13535 case VAR_DECL:
13536 case PARM_DECL:
13537 case FIELD_DECL:
13538 case RESULT_DECL:
13539 case SSA_NAME:
13540 case REAL_CST:
13541 case MEM_REF:
13542 case VIEW_CONVERT_EXPR:
13543 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13544 return *tp;
13545 break;
13546 default:
13547 break;
13550 return NULL_TREE;
13553 enum reload_reg_type {
13554 GPR_REGISTER_TYPE,
13555 VECTOR_REGISTER_TYPE,
13556 OTHER_REGISTER_TYPE
13559 static enum reload_reg_type
13560 rs6000_reload_register_type (enum reg_class rclass)
13562 switch (rclass)
13564 case GENERAL_REGS:
13565 case BASE_REGS:
13566 return GPR_REGISTER_TYPE;
13568 case FLOAT_REGS:
13569 case ALTIVEC_REGS:
13570 case VSX_REGS:
13571 return VECTOR_REGISTER_TYPE;
13573 default:
13574 return OTHER_REGISTER_TYPE;
13578 /* Inform reload about cases where moving X with a mode MODE to a register in
13579 RCLASS requires an extra scratch or immediate register. Return the class
13580 needed for the immediate register.
13582 For VSX and Altivec, we may need a register to convert sp+offset into
13583 reg+reg (reg+sp) addressing.
13585 For misaligned 64-bit gpr loads and stores we need a register to
13586 convert an offset address to indirect. */
13588 static reg_class_t
13589 rs6000_secondary_reload (bool in_p,
13590 rtx x,
13591 reg_class_t rclass_i,
13592 enum machine_mode mode,
13593 secondary_reload_info *sri)
13595 enum reg_class rclass = (enum reg_class) rclass_i;
13596 reg_class_t ret = ALL_REGS;
13597 enum insn_code icode;
13598 bool default_p = false;
13600 sri->icode = CODE_FOR_nothing;
13602 /* Convert vector loads and stores into gprs to use an additional base
13603 register. */
13604 icode = rs6000_vector_reload[mode][in_p != false];
13605 if (icode != CODE_FOR_nothing)
13607 ret = NO_REGS;
13608 sri->icode = CODE_FOR_nothing;
13609 sri->extra_cost = 0;
13611 if (GET_CODE (x) == MEM)
13613 rtx addr = XEXP (x, 0);
13615 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
13616 an extra register in that case, but it would need an extra
13617 register if the addressing is reg+reg or (reg+reg)&(-16). */
13618 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13620 if (!legitimate_indirect_address_p (addr, false)
13621 && !rs6000_legitimate_offset_address_p (TImode, addr,
13622 false, true))
13624 sri->icode = icode;
13625 /* account for splitting the loads, and converting the
13626 address from reg+reg to reg. */
13627 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13628 + ((GET_CODE (addr) == AND) ? 1 : 0));
13631 /* Loads to and stores from vector registers can only do reg+reg
13632 addressing. Altivec registers can also do (reg+reg)&(-16). */
13633 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13634 || rclass == FLOAT_REGS || rclass == NO_REGS)
13636 if (!VECTOR_MEM_ALTIVEC_P (mode)
13637 && GET_CODE (addr) == AND
13638 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13639 && INTVAL (XEXP (addr, 1)) == -16
13640 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13641 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13643 sri->icode = icode;
13644 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13645 ? 2 : 1);
13647 else if (!legitimate_indirect_address_p (addr, false)
13648 && (rclass == NO_REGS
13649 || !legitimate_indexed_address_p (addr, false)))
13651 sri->icode = icode;
13652 sri->extra_cost = 1;
13654 else
13655 icode = CODE_FOR_nothing;
13657 /* Any other loads, including to pseudo registers which haven't been
13658 assigned to a register yet, default to require a scratch
13659 register. */
13660 else
13662 sri->icode = icode;
13663 sri->extra_cost = 2;
13666 else if (REG_P (x))
13668 int regno = true_regnum (x);
13670 icode = CODE_FOR_nothing;
13671 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13672 default_p = true;
13673 else
13675 enum reg_class xclass = REGNO_REG_CLASS (regno);
13676 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13677 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13679 /* If memory is needed, use default_secondary_reload to create the
13680 stack slot. */
13681 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13682 default_p = true;
13683 else
13684 ret = NO_REGS;
13687 else
13688 default_p = true;
13690 else if (TARGET_POWERPC64
13691 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13692 && MEM_P (x)
13693 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
13695 rtx off = address_offset (XEXP (x, 0));
13696 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13698 if (off != NULL_RTX
13699 && (INTVAL (off) & 3) != 0
13700 && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
13702 if (in_p)
13703 sri->icode = CODE_FOR_reload_di_load;
13704 else
13705 sri->icode = CODE_FOR_reload_di_store;
13706 sri->extra_cost = 2;
13707 ret = NO_REGS;
13709 else
13710 default_p = true;
13712 else if (!TARGET_POWERPC64
13713 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13714 && MEM_P (x)
13715 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
13717 rtx off = address_offset (XEXP (x, 0));
13718 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13720 /* We need a secondary reload only when our legitimate_address_p
13721 says the address is good (as otherwise the entire address
13722 will be reloaded). So for mode sizes of 8 and 16 this will
13723 be when the offset is in the ranges [0x7ffc,0x7fff] and
13724 [0x7ff4,0x7ff7] respectively. Note that the address we see
13725 here may have been manipulated by legitimize_reload_address. */
13726 if (off != NULL_RTX
13727 && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
13728 < UNITS_PER_WORD))
13730 if (in_p)
13731 sri->icode = CODE_FOR_reload_si_load;
13732 else
13733 sri->icode = CODE_FOR_reload_si_store;
13734 sri->extra_cost = 2;
13735 ret = NO_REGS;
13737 else
13738 default_p = true;
13740 else
13741 default_p = true;
13743 if (default_p)
13744 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13746 gcc_assert (ret != ALL_REGS);
13748 if (TARGET_DEBUG_ADDR)
13750 fprintf (stderr,
13751 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13752 "mode = %s",
13753 reg_class_names[ret],
13754 in_p ? "true" : "false",
13755 reg_class_names[rclass],
13756 GET_MODE_NAME (mode));
13758 if (default_p)
13759 fprintf (stderr, ", default secondary reload");
13761 if (sri->icode != CODE_FOR_nothing)
13762 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13763 insn_data[sri->icode].name, sri->extra_cost);
13764 else
13765 fprintf (stderr, "\n");
13767 debug_rtx (x);
13770 return ret;
13773 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13774 to SP+reg addressing. */
13776 void
13777 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13779 int regno = true_regnum (reg);
13780 enum machine_mode mode = GET_MODE (reg);
13781 enum reg_class rclass;
13782 rtx addr;
13783 rtx and_op2 = NULL_RTX;
13784 rtx addr_op1;
13785 rtx addr_op2;
13786 rtx scratch_or_premodify = scratch;
13787 rtx and_rtx;
13788 rtx cc_clobber;
13790 if (TARGET_DEBUG_ADDR)
13792 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13793 store_p ? "store" : "load");
13794 fprintf (stderr, "reg:\n");
13795 debug_rtx (reg);
13796 fprintf (stderr, "mem:\n");
13797 debug_rtx (mem);
13798 fprintf (stderr, "scratch:\n");
13799 debug_rtx (scratch);
13802 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13803 gcc_assert (GET_CODE (mem) == MEM);
13804 rclass = REGNO_REG_CLASS (regno);
13805 addr = XEXP (mem, 0);
13807 switch (rclass)
13809 /* GPRs can handle reg + small constant; all other addresses need to use
13810 the scratch register. */
13811 case GENERAL_REGS:
13812 case BASE_REGS:
13813 if (GET_CODE (addr) == AND)
13815 and_op2 = XEXP (addr, 1);
13816 addr = XEXP (addr, 0);
13819 if (GET_CODE (addr) == PRE_MODIFY)
13821 scratch_or_premodify = XEXP (addr, 0);
13822 gcc_assert (REG_P (scratch_or_premodify));
13823 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13824 addr = XEXP (addr, 1);
13827 if (GET_CODE (addr) == PLUS
13828 && (and_op2 != NULL_RTX
13829 || !rs6000_legitimate_offset_address_p (TImode, addr,
13830 false, true)))
13832 addr_op1 = XEXP (addr, 0);
13833 addr_op2 = XEXP (addr, 1);
13834 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
13836 if (!REG_P (addr_op2)
13837 && (GET_CODE (addr_op2) != CONST_INT
13838 || !satisfies_constraint_I (addr_op2)))
13840 if (TARGET_DEBUG_ADDR)
13842 fprintf (stderr,
13843 "\nMove plus addr to register %s, mode = %s: ",
13844 rs6000_reg_names[REGNO (scratch)],
13845 GET_MODE_NAME (mode));
13846 debug_rtx (addr_op2);
13848 rs6000_emit_move (scratch, addr_op2, Pmode);
13849 addr_op2 = scratch;
13852 emit_insn (gen_rtx_SET (VOIDmode,
13853 scratch_or_premodify,
13854 gen_rtx_PLUS (Pmode,
13855 addr_op1,
13856 addr_op2)));
13858 addr = scratch_or_premodify;
13859 scratch_or_premodify = scratch;
13861 else if (!legitimate_indirect_address_p (addr, false)
13862 && !rs6000_legitimate_offset_address_p (TImode, addr,
13863 false, true))
13865 if (TARGET_DEBUG_ADDR)
13867 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13868 rs6000_reg_names[REGNO (scratch_or_premodify)],
13869 GET_MODE_NAME (mode));
13870 debug_rtx (addr);
13872 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13873 addr = scratch_or_premodify;
13874 scratch_or_premodify = scratch;
13876 break;
13878 /* Float/Altivec registers can only handle reg+reg addressing. Move
13879 other addresses into a scratch register. */
13880 case FLOAT_REGS:
13881 case VSX_REGS:
13882 case ALTIVEC_REGS:
13884 /* With float regs, we need to handle the AND ourselves, since we can't
13885 use the Altivec instruction with an implicit AND -16. Allow scalar
13886 loads to float registers to use reg+offset even if VSX. */
13887 if (GET_CODE (addr) == AND
13888 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
13889 || GET_CODE (XEXP (addr, 1)) != CONST_INT
13890 || INTVAL (XEXP (addr, 1)) != -16
13891 || !VECTOR_MEM_ALTIVEC_P (mode)))
13893 and_op2 = XEXP (addr, 1);
13894 addr = XEXP (addr, 0);
13897 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13898 as the address later. */
13899 if (GET_CODE (addr) == PRE_MODIFY
13900 && (!VECTOR_MEM_VSX_P (mode)
13901 || and_op2 != NULL_RTX
13902 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
13904 scratch_or_premodify = XEXP (addr, 0);
13905 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
13906 false));
13907 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13908 addr = XEXP (addr, 1);
13911 if (legitimate_indirect_address_p (addr, false) /* reg */
13912 || legitimate_indexed_address_p (addr, false) /* reg+reg */
13913 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
13914 || (GET_CODE (addr) == AND /* Altivec memory */
13915 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13916 && INTVAL (XEXP (addr, 1)) == -16
13917 && VECTOR_MEM_ALTIVEC_P (mode))
13918 || (rclass == FLOAT_REGS /* legacy float mem */
13919 && GET_MODE_SIZE (mode) == 8
13920 && and_op2 == NULL_RTX
13921 && scratch_or_premodify == scratch
13922 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
13925 else if (GET_CODE (addr) == PLUS)
13927 addr_op1 = XEXP (addr, 0);
13928 addr_op2 = XEXP (addr, 1);
13929 gcc_assert (REG_P (addr_op1));
13931 if (TARGET_DEBUG_ADDR)
13933 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
13934 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13935 debug_rtx (addr_op2);
13937 rs6000_emit_move (scratch, addr_op2, Pmode);
13938 emit_insn (gen_rtx_SET (VOIDmode,
13939 scratch_or_premodify,
13940 gen_rtx_PLUS (Pmode,
13941 addr_op1,
13942 scratch)));
13943 addr = scratch_or_premodify;
13944 scratch_or_premodify = scratch;
13947 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
13948 || GET_CODE (addr) == CONST_INT || REG_P (addr))
13950 if (TARGET_DEBUG_ADDR)
13952 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13953 rs6000_reg_names[REGNO (scratch_or_premodify)],
13954 GET_MODE_NAME (mode));
13955 debug_rtx (addr);
13958 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13959 addr = scratch_or_premodify;
13960 scratch_or_premodify = scratch;
13963 else
13964 gcc_unreachable ();
13966 break;
13968 default:
13969 gcc_unreachable ();
13972 /* If the original address involved a pre-modify for which we couldn't use
13973 the VSX memory instruction with update, and we haven't already taken
13974 care of it, store the address in the pre-modify register and use that
13975 as the address. */
13976 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
13978 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
13979 addr = scratch_or_premodify;
13982 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13983 memory instruction, recreate the AND now, including the clobber which is
13984 generated by the general ANDSI3/ANDDI3 patterns for the
13985 andi. instruction. */
13986 if (and_op2 != NULL_RTX)
13988 if (! legitimate_indirect_address_p (addr, false))
13990 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
13991 addr = scratch;
13994 if (TARGET_DEBUG_ADDR)
13996 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
13997 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13998 debug_rtx (and_op2);
14001 and_rtx = gen_rtx_SET (VOIDmode,
14002 scratch,
14003 gen_rtx_AND (Pmode,
14004 addr,
14005 and_op2));
14007 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
14008 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14009 gen_rtvec (2, and_rtx, cc_clobber)));
14010 addr = scratch;
14013 /* Adjust the address if it changed. */
14014 if (addr != XEXP (mem, 0))
14016 mem = change_address (mem, mode, addr);
14017 if (TARGET_DEBUG_ADDR)
14018 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14021 /* Now create the move. */
14022 if (store_p)
14023 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14024 else
14025 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14027 return;
14030 /* Convert reloads involving 64-bit gprs and misaligned offset
14031 addressing, or multiple 32-bit gprs and offsets that are too large,
14032 to use indirect addressing. */
14034 void
14035 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
14037 int regno = true_regnum (reg);
14038 enum reg_class rclass;
14039 rtx addr;
14040 rtx scratch_or_premodify = scratch;
14042 if (TARGET_DEBUG_ADDR)
14044 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
14045 store_p ? "store" : "load");
14046 fprintf (stderr, "reg:\n");
14047 debug_rtx (reg);
14048 fprintf (stderr, "mem:\n");
14049 debug_rtx (mem);
14050 fprintf (stderr, "scratch:\n");
14051 debug_rtx (scratch);
14054 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
14055 gcc_assert (GET_CODE (mem) == MEM);
14056 rclass = REGNO_REG_CLASS (regno);
14057 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
14058 addr = XEXP (mem, 0);
14060 if (GET_CODE (addr) == PRE_MODIFY)
14062 scratch_or_premodify = XEXP (addr, 0);
14063 gcc_assert (REG_P (scratch_or_premodify));
14064 addr = XEXP (addr, 1);
14066 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
14068 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14070 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
14072 /* Now create the move. */
14073 if (store_p)
14074 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14075 else
14076 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14078 return;
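/* Sketch (illustrative; register names chosen arbitrarily): for a
   misaligned 64-bit load from (mem:DI (plus r3 6)), whose offset no
   DS-form ld can encode, the code above roughly produces

     addi r11,r3,6        -- scratch <- the full address
     ld   rN,0(r11)       -- the access becomes register-indirect

   with the same scheme applying to stores and to the 32-bit
   multi-register case.  */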
14081 /* Allocate a 64-bit stack slot to be used for copying SDmode
14082 values through if this function has any SDmode references. */
14084 static void
14085 rs6000_alloc_sdmode_stack_slot (void)
14087 tree t;
14088 basic_block bb;
14089 gimple_stmt_iterator gsi;
14091 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
14093 FOR_EACH_BB (bb)
14094 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
14096 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
14097 if (ret)
14099 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14100 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14101 SDmode, 0);
14102 return;
14106 /* Check for any SDmode parameters of the function. */
14107 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
14109 if (TREE_TYPE (t) == error_mark_node)
14110 continue;
14112 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
14113 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
14115 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14116 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14117 SDmode, 0);
14118 return;
14123 static void
14124 rs6000_instantiate_decls (void)
14126 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
14127 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
14130 /* Given an rtx X being reloaded into a reg required to be
14131 in class CLASS, return the class of reg to actually use.
14132 In general this is just CLASS; but on some machines
14133 in some cases it is preferable to use a more restrictive class.
14135 On the RS/6000, we have to return NO_REGS when we want to reload a
14136 floating-point CONST_DOUBLE to force it to be copied to memory.
14138 We also don't want to reload integer values into floating-point
14139 registers if we can at all help it. In fact, this can
14140 cause reload to die, if it tries to generate a reload of CTR
14141 into a FP register and discovers it doesn't have the memory location
14142 required.
14144 ??? Would it be a good idea to have reload do the converse, that is
14145 try to reload floating modes into FP registers if possible? */
14148 static enum reg_class
14149 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
14151 enum machine_mode mode = GET_MODE (x);
14153 if (VECTOR_UNIT_VSX_P (mode)
14154 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
14155 return rclass;
14157 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
14158 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
14159 && easy_vector_constant (x, mode))
14160 return ALTIVEC_REGS;
14162 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
14163 return NO_REGS;
14165 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
14166 return GENERAL_REGS;
14168 /* For VSX, prefer the traditional registers for 64-bit values because we can
14169 use the non-VSX loads. Prefer the Altivec registers if Altivec is
14170 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
14171 prefer Altivec loads. */
14172 if (rclass == VSX_REGS)
14174 if (GET_MODE_SIZE (mode) <= 8)
14175 return FLOAT_REGS;
14177 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
14178 return ALTIVEC_REGS;
14180 return rclass;
14183 return rclass;
14186 /* Debug version of rs6000_preferred_reload_class. */
14187 static enum reg_class
14188 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
14190 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
14192 fprintf (stderr,
14193 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
14194 "mode = %s, x:\n",
14195 reg_class_names[ret], reg_class_names[rclass],
14196 GET_MODE_NAME (GET_MODE (x)));
14197 debug_rtx (x);
14199 return ret;
14202 /* If we are copying between FP or AltiVec registers and anything else, we need
14203 a memory location. The exception is when we are targeting ppc64 and the
14204 direct fpr-to-gpr/gpr-to-fpr move instructions are available. Also, under VSX, you
14205 can copy vector registers from the FP register set to the Altivec register
14206 set and vice versa. */
14208 static bool
14209 rs6000_secondary_memory_needed (enum reg_class class1,
14210 enum reg_class class2,
14211 enum machine_mode mode)
14213 if (class1 == class2)
14214 return false;
14216 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14217 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14218 between these classes. But we need memory for other things that can go in
14219 FLOAT_REGS like SFmode. */
14220 if (TARGET_VSX
14221 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14222 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14223 || class1 == FLOAT_REGS))
14224 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14225 && class2 != FLOAT_REGS);
14227 if (class1 == VSX_REGS || class2 == VSX_REGS)
14228 return true;
14230 if (class1 == FLOAT_REGS
14231 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14232 || ((mode != DFmode)
14233 && (mode != DDmode)
14234 && (mode != DImode))))
14235 return true;
14237 if (class2 == FLOAT_REGS
14238 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14239 || ((mode != DFmode)
14240 && (mode != DDmode)
14241 && (mode != DImode))))
14242 return true;
14244 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14245 return true;
14247 return false;
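/* Examples (illustrative): under VSX, copying a V2DF value between
   FLOAT_REGS and ALTIVEC_REGS needs no memory, since both classes lie
   within the VSX register set; copying an SFmode value between
   FLOAT_REGS and GENERAL_REGS does need a stack slot, as there is no
   direct fpr<->gpr move for it.  */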
14250 /* Debug version of rs6000_secondary_memory_needed. */
14251 static bool
14252 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14253 enum reg_class class2,
14254 enum machine_mode mode)
14256 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14258 fprintf (stderr,
14259 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14260 "class2 = %s, mode = %s\n",
14261 ret ? "true" : "false", reg_class_names[class1],
14262 reg_class_names[class2], GET_MODE_NAME (mode));
14264 return ret;
14267 /* Return the register class of a scratch register needed to copy IN into
14268 or out of a register in RCLASS in MODE. If it can be done directly,
14269 NO_REGS is returned. */
14271 static enum reg_class
14272 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14273 rtx in)
14275 int regno;
14277 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14278 #if TARGET_MACHO
14279 && MACHOPIC_INDIRECT
14280 #endif
14283 /* We cannot copy a symbolic operand directly into anything
14284 other than BASE_REGS for TARGET_ELF. So indicate that a
14285 register from BASE_REGS is needed as an intermediate
14286 register.
14288 On Darwin, pic addresses require a load from memory, which
14289 needs a base register. */
14290 if (rclass != BASE_REGS
14291 && (GET_CODE (in) == SYMBOL_REF
14292 || GET_CODE (in) == HIGH
14293 || GET_CODE (in) == LABEL_REF
14294 || GET_CODE (in) == CONST))
14295 return BASE_REGS;
14298 if (GET_CODE (in) == REG)
14300 regno = REGNO (in);
14301 if (regno >= FIRST_PSEUDO_REGISTER)
14303 regno = true_regnum (in);
14304 if (regno >= FIRST_PSEUDO_REGISTER)
14305 regno = -1;
14308 else if (GET_CODE (in) == SUBREG)
14310 regno = true_regnum (in);
14311 if (regno >= FIRST_PSEUDO_REGISTER)
14312 regno = -1;
14314 else
14315 regno = -1;
14317 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14318 into anything. */
14319 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14320 || (regno >= 0 && INT_REGNO_P (regno)))
14321 return NO_REGS;
14323 /* Constants, memory, and FP registers can go into FP registers. */
14324 if ((regno == -1 || FP_REGNO_P (regno))
14325 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14326 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14328 /* Memory, and FP/altivec registers can go into fp/altivec registers under
14329 VSX. */
14330 if (TARGET_VSX
14331 && (regno == -1 || VSX_REGNO_P (regno))
14332 && VSX_REG_CLASS_P (rclass))
14333 return NO_REGS;
14335 /* Memory, and AltiVec registers can go into AltiVec registers. */
14336 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14337 && rclass == ALTIVEC_REGS)
14338 return NO_REGS;
14340 /* We can copy among the CR registers. */
14341 if ((rclass == CR_REGS || rclass == CR0_REGS)
14342 && regno >= 0 && CR_REGNO_P (regno))
14343 return NO_REGS;
14345 /* Otherwise, we need GENERAL_REGS. */
14346 return GENERAL_REGS;
14349 /* Debug version of rs6000_secondary_reload_class. */
14350 static enum reg_class
14351 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14352 enum machine_mode mode, rtx in)
14354 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14355 fprintf (stderr,
14356 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14357 "mode = %s, input rtx:\n",
14358 reg_class_names[ret], reg_class_names[rclass],
14359 GET_MODE_NAME (mode));
14360 debug_rtx (in);
14362 return ret;
14365 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14367 static bool
14368 rs6000_cannot_change_mode_class (enum machine_mode from,
14369 enum machine_mode to,
14370 enum reg_class rclass)
14372 unsigned from_size = GET_MODE_SIZE (from);
14373 unsigned to_size = GET_MODE_SIZE (to);
14375 if (from_size != to_size)
14377 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14378 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14379 && reg_classes_intersect_p (xclass, rclass));
14382 if (TARGET_E500_DOUBLE
14383 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14384 || (((to) == TFmode) + ((from) == TFmode)) == 1
14385 || (((to) == DDmode) + ((from) == DDmode)) == 1
14386 || (((to) == TDmode) + ((from) == TDmode)) == 1
14387 || (((to) == DImode) + ((from) == DImode)) == 1))
14388 return true;
14390 /* Since the VSX register set includes traditional floating point registers
14391 and altivec registers, just check for the size being different instead of
14392 trying to check whether the modes are vector modes. Otherwise it won't
14393 allow, say, DF and DI to change classes. */
14394 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14395 return (from_size != 8 && from_size != 16);
14397 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14398 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14399 return true;
14401 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14402 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14403 return true;
14405 return false;
14408 /* Debug version of rs6000_cannot_change_mode_class. */
14409 static bool
14410 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14411 enum machine_mode to,
14412 enum reg_class rclass)
14414 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14416 fprintf (stderr,
14417 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14418 "to = %s, rclass = %s\n",
14419 ret ? "true" : "false",
14420 GET_MODE_NAME (from), GET_MODE_NAME (to),
14421 reg_class_names[rclass]);
14423 return ret;
14426 /* Given a comparison operation, return the bit number in CCR to test. We
14427 know this is a valid comparison.
14429 SCC_P is 1 if this is for an scc. That means that %D will have been
14430 used instead of %C, so the bits will be in different places.
14432 Return -1 if OP isn't a valid comparison for some reason. */
14435 ccr_bit (rtx op, int scc_p)
14437 enum rtx_code code = GET_CODE (op);
14438 enum machine_mode cc_mode;
14439 int cc_regnum;
14440 int base_bit;
14441 rtx reg;
14443 if (!COMPARISON_P (op))
14444 return -1;
14446 reg = XEXP (op, 0);
14448 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14450 cc_mode = GET_MODE (reg);
14451 cc_regnum = REGNO (reg);
14452 base_bit = 4 * (cc_regnum - CR0_REGNO);
14454 validate_condition_mode (code, cc_mode);
14456 /* When generating a sCOND operation, only positive conditions are
14457 allowed. */
14458 gcc_assert (!scc_p
14459 || code == EQ || code == GT || code == LT || code == UNORDERED
14460 || code == GTU || code == LTU);
14462 switch (code)
14464 case NE:
14465 return scc_p ? base_bit + 3 : base_bit + 2;
14466 case EQ:
14467 return base_bit + 2;
14468 case GT: case GTU: case UNLE:
14469 return base_bit + 1;
14470 case LT: case LTU: case UNGE:
14471 return base_bit;
14472 case ORDERED: case UNORDERED:
14473 return base_bit + 3;
14475 case GE: case GEU:
14476 /* If scc, we will have done a cror to put the bit in the
14477 unordered position. So test that bit. For integer, this is ! LT
14478 unless this is an scc insn. */
14479 return scc_p ? base_bit + 3 : base_bit;
14481 case LE: case LEU:
14482 return scc_p ? base_bit + 3 : base_bit + 1;
14484 default:
14485 gcc_unreachable ();
14489 /* Return the GOT register. */
14492 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14494 /* The second flow pass currently (June 1999) can't update
14495 regs_ever_live without disturbing other parts of the compiler, so
14496 update it here to make the prolog/epilogue code happy. */
14497 if (!can_create_pseudo_p ()
14498 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14499 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14501 crtl->uses_pic_offset_table = 1;
14503 return pic_offset_table_rtx;
14506 static rs6000_stack_t stack_info;
14508 /* Function to init struct machine_function.
14509 This will be called, via a pointer variable,
14510 from push_function_context. */
14512 static struct machine_function *
14513 rs6000_init_machine_status (void)
14515 stack_info.reload_completed = 0;
14516 return ggc_alloc_cleared_machine_function ();
14519 /* These macros test for integers and extract the low-order bits. */
14520 #define INT_P(X) \
14521 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
14522 && GET_MODE (X) == VOIDmode)
14524 #define INT_LOWPART(X) \
14525 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
14528 extract_MB (rtx op)
14530 int i;
14531 unsigned long val = INT_LOWPART (op);
14533 /* If the high bit is zero, the value is the first 1 bit we find
14534 from the left. */
14535 if ((val & 0x80000000) == 0)
14537 gcc_assert (val & 0xffffffff);
14539 i = 1;
14540 while (((val <<= 1) & 0x80000000) == 0)
14541 ++i;
14542 return i;
14545 /* If the high bit is set and the low bit is not, or the mask is all
14546 1's, the value is zero. */
14547 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14548 return 0;
14550 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14551 from the right. */
14552 i = 31;
14553 while (((val >>= 1) & 1) != 0)
14554 --i;
14556 return i;
14560 extract_ME (rtx op)
14562 int i;
14563 unsigned long val = INT_LOWPART (op);
14565 /* If the low bit is zero, the value is the first 1 bit we find from
14566 the right. */
14567 if ((val & 1) == 0)
14569 gcc_assert (val & 0xffffffff);
14571 i = 30;
14572 while (((val >>= 1) & 1) == 0)
14573 --i;
14575 return i;
14578 /* If the low bit is set and the high bit is not, or the mask is all
14579 1's, the value is 31. */
14580 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14581 return 31;
14583 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14584 from the left. */
14585 i = 0;
14586 while (((val <<= 1) & 0x80000000) != 0)
14587 ++i;
14589 return i;
14592 /* Locate some local-dynamic symbol still in use by this function
14593 so that we can print its name in some tls_ld pattern. */
14595 static const char *
14596 rs6000_get_some_local_dynamic_name (void)
14598 rtx insn;
14600 if (cfun->machine->some_ld_name)
14601 return cfun->machine->some_ld_name;
14603 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
14604 if (INSN_P (insn)
14605 && for_each_rtx (&PATTERN (insn),
14606 rs6000_get_some_local_dynamic_name_1, 0))
14607 return cfun->machine->some_ld_name;
14609 gcc_unreachable ();
14612 /* Helper function for rs6000_get_some_local_dynamic_name. */
14614 static int
14615 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14617 rtx x = *px;
14619 if (GET_CODE (x) == SYMBOL_REF)
14621 const char *str = XSTR (x, 0);
14622 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14624 cfun->machine->some_ld_name = str;
14625 return 1;
14629 return 0;
14632 /* Write out a function code label. */
14634 void
14635 rs6000_output_function_entry (FILE *file, const char *fname)
14637 if (fname[0] != '.')
14639 switch (DEFAULT_ABI)
14641 default:
14642 gcc_unreachable ();
14644 case ABI_AIX:
14645 if (DOT_SYMBOLS)
14646 putc ('.', file);
14647 else
14648 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14649 break;
14651 case ABI_V4:
14652 case ABI_DARWIN:
14653 break;
14657 RS6000_OUTPUT_BASENAME (file, fname);
14660 /* Print an operand. Recognize special options, documented below. */
14662 #if TARGET_ELF
14663 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14664 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14665 #else
14666 #define SMALL_DATA_RELOC "sda21"
14667 #define SMALL_DATA_REG 0
14668 #endif
14670 void
14671 print_operand (FILE *file, rtx x, int code)
14673 int i;
14674 unsigned HOST_WIDE_INT uval;
14676 switch (code)
14678 /* %a is output_address. */
14680 case 'A':
14681 /* If X is a constant integer whose low-order 5 bits are zero,
14682 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
14683 in the AIX assembler where "sri" with a zero shift count
14684 writes a trash instruction. */
14685 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
14686 putc ('l', file);
14687 else
14688 putc ('r', file);
14689 return;
14691 case 'b':
14692 /* If constant, low-order 16 bits of constant, unsigned.
14693 Otherwise, write normally. */
14694 if (INT_P (x))
14695 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14696 else
14697 print_operand (file, x, 0);
14698 return;
14700 case 'B':
14701 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14702 for 64-bit mask direction. */
14703 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14704 return;
14706 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14707 output_operand. */
14709 case 'D':
14710 /* Like 'J' but get to the GT bit only. */
14711 gcc_assert (REG_P (x));
14713 /* Bit 1 is GT bit. */
14714 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14716 /* Add one for shift count in rlinm for scc. */
14717 fprintf (file, "%d", i + 1);
14718 return;
14720 case 'E':
14721 /* X is a CR register. Print the number of the EQ bit of the CR */
14722 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14723 output_operand_lossage ("invalid %%E value");
14724 else
14725 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14726 return;
14728 case 'f':
14729 /* X is a CR register. Print the shift count needed to move it
14730 to the high-order four bits. */
14731 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14732 output_operand_lossage ("invalid %%f value");
14733 else
14734 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14735 return;
14737 case 'F':
14738 /* Similar, but print the count for the rotate in the opposite
14739 direction. */
14740 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14741 output_operand_lossage ("invalid %%F value");
14742 else
14743 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14744 return;
14746 case 'G':
14747 /* X is a constant integer. If it is negative, print "m",
14748 otherwise print "z". This is to make an aze or ame insn. */
14749 if (GET_CODE (x) != CONST_INT)
14750 output_operand_lossage ("invalid %%G value");
14751 else if (INTVAL (x) >= 0)
14752 putc ('z', file);
14753 else
14754 putc ('m', file);
14755 return;
14757 case 'h':
14758 /* If constant, output low-order five bits. Otherwise, write
14759 normally. */
14760 if (INT_P (x))
14761 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14762 else
14763 print_operand (file, x, 0);
14764 return;
14766 case 'H':
14767 /* If constant, output low-order six bits. Otherwise, write
14768 normally. */
14769 if (INT_P (x))
14770 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14771 else
14772 print_operand (file, x, 0);
14773 return;
14775 case 'I':
14776 /* Print `i' if this is a constant, else nothing. */
14777 if (INT_P (x))
14778 putc ('i', file);
14779 return;
14781 case 'j':
14782 /* Write the bit number in CCR for jump. */
14783 i = ccr_bit (x, 0);
14784 if (i == -1)
14785 output_operand_lossage ("invalid %%j code");
14786 else
14787 fprintf (file, "%d", i);
14788 return;
14790 case 'J':
14791 /* Similar, but add one for shift count in rlinm for scc and pass
14792 scc flag to `ccr_bit'. */
14793 i = ccr_bit (x, 1);
14794 if (i == -1)
14795 output_operand_lossage ("invalid %%J code");
14796 else
14797 /* If we want bit 31, write a shift count of zero, not 32. */
14798 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14799 return;
14801 case 'k':
14802 /* X must be a constant. Write the 1's complement of the
14803 constant. */
14804 if (! INT_P (x))
14805 output_operand_lossage ("invalid %%k value");
14806 else
14807 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14808 return;
14810 case 'K':
14811 /* X must be a symbolic constant on ELF. Write an
14812 expression suitable for an 'addi' that adds in the low 16
14813 bits of the MEM. */
14814 if (GET_CODE (x) == CONST)
14816 if (GET_CODE (XEXP (x, 0)) != PLUS
14817 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14818 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14819 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14820 output_operand_lossage ("invalid %%K value");
14822 print_operand_address (file, x);
14823 fputs ("@l", file);
14824 return;
14826 /* %l is output_asm_label. */
14828 case 'L':
14829 /* Write second word of DImode or DFmode reference. Works on register
14830 or non-indexed memory only. */
14831 if (REG_P (x))
14832 fputs (reg_names[REGNO (x) + 1], file);
14833 else if (MEM_P (x))
14835 /* Handle possible auto-increment. Since it is pre-increment and
14836 we have already done it, we can just use an offset of word. */
14837 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14838 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14839 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14840 UNITS_PER_WORD));
14841 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14842 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14843 UNITS_PER_WORD));
14844 else
14845 output_address (XEXP (adjust_address_nv (x, SImode,
14846 UNITS_PER_WORD),
14847 0));
14849 if (small_data_operand (x, GET_MODE (x)))
14850 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14851 reg_names[SMALL_DATA_REG]);
14853 return;
14855 case 'm':
14856 /* MB value for a mask operand. */
14857 if (! mask_operand (x, SImode))
14858 output_operand_lossage ("invalid %%m value");
14860 fprintf (file, "%d", extract_MB (x));
14861 return;
14863 case 'M':
14864 /* ME value for a mask operand. */
14865 if (! mask_operand (x, SImode))
14866 output_operand_lossage ("invalid %%M value");
14868 fprintf (file, "%d", extract_ME (x));
14869 return;
14871 /* %n outputs the negative of its operand. */
14873 case 'N':
14874 /* Write the number of elements in the vector times 4. */
14875 if (GET_CODE (x) != PARALLEL)
14876 output_operand_lossage ("invalid %%N value");
14877 else
14878 fprintf (file, "%d", XVECLEN (x, 0) * 4);
14879 return;
14881 case 'O':
14882 /* Similar, but subtract 1 first. */
14883 if (GET_CODE (x) != PARALLEL)
14884 output_operand_lossage ("invalid %%O value");
14885 else
14886 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
14887 return;
14889 case 'p':
14890 /* X is a CONST_INT that is a power of two. Output the logarithm. */
14891 if (! INT_P (x)
14892 || INT_LOWPART (x) < 0
14893 || (i = exact_log2 (INT_LOWPART (x))) < 0)
14894 output_operand_lossage ("invalid %%p value");
14895 else
14896 fprintf (file, "%d", i);
14897 return;
14899 case 'P':
14900 /* The operand must be an indirect memory reference. The result
14901 is the register name. */
14902 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
14903 || REGNO (XEXP (x, 0)) >= 32)
14904 output_operand_lossage ("invalid %%P value");
14905 else
14906 fputs (reg_names[REGNO (XEXP (x, 0))], file);
14907 return;
14909 case 'q':
14910 /* This outputs the logical code corresponding to a boolean
14911 expression. The expression may have one or both operands
14912 negated (if one, only the first one). For condition register
14913 logical operations, it will also treat the negated
14914 CR codes as NOTs, but not handle NOTs of them. */
14916 const char *const *t = 0;
14917 const char *s;
14918 enum rtx_code code = GET_CODE (x);
14919 static const char * const tbl[3][3] = {
14920 { "and", "andc", "nor" },
14921 { "or", "orc", "nand" },
14922 { "xor", "eqv", "xor" } };
14924 if (code == AND)
14925 t = tbl[0];
14926 else if (code == IOR)
14927 t = tbl[1];
14928 else if (code == XOR)
14929 t = tbl[2];
14930 else
14931 output_operand_lossage ("invalid %%q value");
14933 if (GET_CODE (XEXP (x, 0)) != NOT)
14934 s = t[0];
14935 else
14937 if (GET_CODE (XEXP (x, 1)) == NOT)
14938 s = t[2];
14939 else
14940 s = t[1];
14943 fputs (s, file);
14945 return;
14947 case 'Q':
14948 if (TARGET_MFCRF)
14949 fputc (',', file);
14950 /* FALLTHRU */
14951 else
14952 return;
14954 case 'R':
14955 /* X is a CR register. Print the mask for `mtcrf'. */
14956 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14957 output_operand_lossage ("invalid %%R value");
14958 else
14959 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
14960 return;
14962 case 's':
14963 /* Low 5 bits of 32 - value */
14964 if (! INT_P (x))
14965 output_operand_lossage ("invalid %%s value");
14966 else
14967 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
14968 return;
14970 case 'S':
14971 /* PowerPC64 mask position. All 0's is excluded.
14972 CONST_INT 32-bit mask is considered sign-extended so any
14973 transition must occur within the CONST_INT, not on the boundary. */
14974 if (! mask64_operand (x, DImode))
14975 output_operand_lossage ("invalid %%S value");
14977 uval = INT_LOWPART (x);
14979 if (uval & 1) /* Clear Left */
14981 #if HOST_BITS_PER_WIDE_INT > 64
14982 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14983 #endif
14984 i = 64;
14986 else /* Clear Right */
14988 uval = ~uval;
14989 #if HOST_BITS_PER_WIDE_INT > 64
14990 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14991 #endif
14992 i = 63;
14994 while (uval != 0)
14995 --i, uval >>= 1;
14996 gcc_assert (i >= 0);
14997 fprintf (file, "%d", i);
14998 return;
15000 case 't':
15001 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
15002 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
15004 /* Bit 3 is OV bit. */
15005 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
15007 /* If we want bit 31, write a shift count of zero, not 32. */
15008 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15009 return;
15011 case 'T':
15012 /* Print the symbolic name of a branch target register. */
15013 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
15014 && REGNO (x) != CTR_REGNO))
15015 output_operand_lossage ("invalid %%T value");
15016 else if (REGNO (x) == LR_REGNO)
15017 fputs ("lr", file);
15018 else
15019 fputs ("ctr", file);
15020 return;
15022 case 'u':
15023 /* High-order 16 bits of constant for use in unsigned operand. */
15024 if (! INT_P (x))
15025 output_operand_lossage ("invalid %%u value");
15026 else
15027 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15028 (INT_LOWPART (x) >> 16) & 0xffff);
15029 return;
15031 case 'v':
15032 /* High-order 16 bits of constant for use in signed operand. */
15033 if (! INT_P (x))
15034 output_operand_lossage ("invalid %%v value");
15035 else
15036 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15037 (INT_LOWPART (x) >> 16) & 0xffff);
15038 return;
15040 case 'U':
15041 /* Print `u' if this has an auto-increment or auto-decrement. */
15042 if (MEM_P (x)
15043 && (GET_CODE (XEXP (x, 0)) == PRE_INC
15044 || GET_CODE (XEXP (x, 0)) == PRE_DEC
15045 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
15046 putc ('u', file);
15047 return;
15049 case 'V':
15050 /* Print the trap code for this operand. */
15051 switch (GET_CODE (x))
15053 case EQ:
15054 fputs ("eq", file); /* 4 */
15055 break;
15056 case NE:
15057 fputs ("ne", file); /* 24 */
15058 break;
15059 case LT:
15060 fputs ("lt", file); /* 16 */
15061 break;
15062 case LE:
15063 fputs ("le", file); /* 20 */
15064 break;
15065 case GT:
15066 fputs ("gt", file); /* 8 */
15067 break;
15068 case GE:
15069 fputs ("ge", file); /* 12 */
15070 break;
15071 case LTU:
15072 fputs ("llt", file); /* 2 */
15073 break;
15074 case LEU:
15075 fputs ("lle", file); /* 6 */
15076 break;
15077 case GTU:
15078 fputs ("lgt", file); /* 1 */
15079 break;
15080 case GEU:
15081 fputs ("lge", file); /* 5 */
15082 break;
15083 default:
15084 gcc_unreachable ();
15086 break;
15088 case 'w':
15089 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
15090 normally. */
15091 if (INT_P (x))
15092 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
15093 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
15094 else
15095 print_operand (file, x, 0);
15096 return;
15098 case 'W':
15099 /* MB value for a PowerPC64 rldic operand. */
15100 i = clz_hwi (GET_CODE (x) == CONST_INT
15101 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
15103 #if HOST_BITS_PER_WIDE_INT == 32
15104 if (GET_CODE (x) == CONST_INT && i > 0)
15105 i += 32; /* zero-extend high-part was all 0's */
15106 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
15107 i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
15108 #endif
15110 fprintf (file, "%d", i);
15111 return;
15113 case 'x':
15114 /* X is a FPR or Altivec register used in a VSX context. */
15115 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
15116 output_operand_lossage ("invalid %%x value");
15117 else
15119 int reg = REGNO (x);
15120 int vsx_reg = (FP_REGNO_P (reg)
15121 ? reg - 32
15122 : reg - FIRST_ALTIVEC_REGNO + 32);
15124 #ifdef TARGET_REGNAMES
15125 if (TARGET_REGNAMES)
15126 fprintf (file, "%%vs%d", vsx_reg);
15127 else
15128 #endif
15129 fprintf (file, "%d", vsx_reg);
15131 return;
15133 case 'X':
15134 if (MEM_P (x)
15135 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
15136 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
15137 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
15138 putc ('x', file);
15139 return;
15141 case 'Y':
15142 /* Like 'L', for third word of TImode */
15143 if (REG_P (x))
15144 fputs (reg_names[REGNO (x) + 2], file);
15145 else if (MEM_P (x))
15147 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15148 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15149 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15150 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15151 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15152 else
15153 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
15154 if (small_data_operand (x, GET_MODE (x)))
15155 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15156 reg_names[SMALL_DATA_REG]);
15158 return;
15160 case 'z':
15161 /* X is a SYMBOL_REF. Write out the name preceded by a
15162 period and without any trailing data in brackets. Used for function
15163 names. If we are configured for System V (or the embedded ABI) on
15164 the PowerPC, do not emit the period, since those systems do not use
15165 TOCs and the like. */
15166 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15168 /* Mark the decl as referenced so that cgraph will output the
15169 function. */
15170 if (SYMBOL_REF_DECL (x))
15171 mark_decl_referenced (SYMBOL_REF_DECL (x));
15173 /* For macho, check to see if we need a stub. */
15174 if (TARGET_MACHO)
15176 const char *name = XSTR (x, 0);
15177 #if TARGET_MACHO
15178 if (darwin_emit_branch_islands
15179 && MACHOPIC_INDIRECT
15180 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
15181 name = machopic_indirection_name (x, /*stub_p=*/true);
15182 #endif
15183 assemble_name (file, name);
15185 else if (!DOT_SYMBOLS)
15186 assemble_name (file, XSTR (x, 0));
15187 else
15188 rs6000_output_function_entry (file, XSTR (x, 0));
15189 return;
15191 case 'Z':
15192 /* Like 'L', for last word of TImode. */
15193 if (REG_P (x))
15194 fputs (reg_names[REGNO (x) + 3], file);
15195 else if (MEM_P (x))
15197 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15198 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15199 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15200 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15201 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15202 else
15203 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15204 if (small_data_operand (x, GET_MODE (x)))
15205 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15206 reg_names[SMALL_DATA_REG]);
15208 return;
15210 /* Print AltiVec or SPE memory operand. */
15211 case 'y':
15213 rtx tmp;
15215 gcc_assert (MEM_P (x));
15217 tmp = XEXP (x, 0);
15219 /* Ugly hack because %y is overloaded. */
15220 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15221 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15222 || GET_MODE (x) == TFmode
15223 || GET_MODE (x) == TImode))
15225 /* Handle [reg]. */
15226 if (REG_P (tmp))
15228 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15229 break;
15231 /* Handle [reg+UIMM]. */
15232 else if (GET_CODE (tmp) == PLUS &&
15233 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15235 int x;
15237 gcc_assert (REG_P (XEXP (tmp, 0)));
15239 x = INTVAL (XEXP (tmp, 1));
15240 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
15241 break;
15244 /* Fall through. Must be [reg+reg]. */
15246 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15247 && GET_CODE (tmp) == AND
15248 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15249 && INTVAL (XEXP (tmp, 1)) == -16)
15250 tmp = XEXP (tmp, 0);
15251 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15252 && GET_CODE (tmp) == PRE_MODIFY)
15253 tmp = XEXP (tmp, 1);
15254 if (REG_P (tmp))
15255 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15256 else
15258 if (!GET_CODE (tmp) == PLUS
15259 || !REG_P (XEXP (tmp, 0))
15260 || !REG_P (XEXP (tmp, 1)))
15262 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15263 break;
15266 if (REGNO (XEXP (tmp, 0)) == 0)
15267 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15268 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15269 else
15270 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15271 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15273 break;
15276 case 0:
15277 if (REG_P (x))
15278 fprintf (file, "%s", reg_names[REGNO (x)]);
15279 else if (MEM_P (x))
15281 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15282 know the width from the mode. */
15283 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15284 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15285 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15286 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15287 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15288 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15289 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15290 output_address (XEXP (XEXP (x, 0), 1));
15291 else
15292 output_address (XEXP (x, 0));
15294 else
15296 if (toc_relative_expr_p (x, false))
15297 /* This hack along with a corresponding hack in
15298 rs6000_output_addr_const_extra arranges to output addends
15299 where the assembler expects to find them. eg.
15300 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15301 without this hack would be output as "x@toc+4". We
15302 want "x+4@toc". */
15303 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15304 else
15305 output_addr_const (file, x);
15307 return;
15309 case '&':
15310 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15311 return;
15313 default:
15314 output_operand_lossage ("invalid %%xn code");
15318 /* Print the address of an operand. */
15320 void
15321 print_operand_address (FILE *file, rtx x)
15323 if (REG_P (x))
15324 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15325 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15326 || GET_CODE (x) == LABEL_REF)
15328 output_addr_const (file, x);
15329 if (small_data_operand (x, GET_MODE (x)))
15330 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15331 reg_names[SMALL_DATA_REG]);
15332 else
15333 gcc_assert (!TARGET_TOC);
15335 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15336 && REG_P (XEXP (x, 1)))
15338 if (REGNO (XEXP (x, 0)) == 0)
15339 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15340 reg_names[ REGNO (XEXP (x, 0)) ]);
15341 else
15342 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15343 reg_names[ REGNO (XEXP (x, 1)) ]);
15345 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15346 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15347 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15348 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15349 #if TARGET_MACHO
15350 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15351 && CONSTANT_P (XEXP (x, 1)))
15353 fprintf (file, "lo16(");
15354 output_addr_const (file, XEXP (x, 1));
15355 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15357 #endif
15358 #if TARGET_ELF
15359 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15360 && CONSTANT_P (XEXP (x, 1)))
15362 output_addr_const (file, XEXP (x, 1));
15363 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15365 #endif
15366 else if (toc_relative_expr_p (x, false))
15368 /* This hack along with a corresponding hack in
15369 rs6000_output_addr_const_extra arranges to output addends
15370 where the assembler expects to find them. eg.
15371 (lo_sum (reg 9)
15372 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15373 without this hack would be output as "x@toc+8@l(9)". We
15374 want "x+8@toc@l(9)". */
15375 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15376 if (GET_CODE (x) == LO_SUM)
15377 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15378 else
15379 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15381 else
15382 gcc_unreachable ();
15385 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15387 static bool
15388 rs6000_output_addr_const_extra (FILE *file, rtx x)
15390 if (GET_CODE (x) == UNSPEC)
15391 switch (XINT (x, 1))
15393 case UNSPEC_TOCREL:
15394 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15395 && REG_P (XVECEXP (x, 0, 1))
15396 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15397 output_addr_const (file, XVECEXP (x, 0, 0));
15398 if (x == tocrel_base && tocrel_offset != const0_rtx)
15400 if (INTVAL (tocrel_offset) >= 0)
15401 fprintf (file, "+");
15402 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15404 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15406 putc ('-', file);
15407 assemble_name (file, toc_label_name);
15409 else if (TARGET_ELF)
15410 fputs ("@toc", file);
15411 return true;
15413 #if TARGET_MACHO
15414 case UNSPEC_MACHOPIC_OFFSET:
15415 output_addr_const (file, XVECEXP (x, 0, 0));
15416 putc ('-', file);
15417 machopic_output_function_base_name (file);
15418 return true;
15419 #endif
15421 return false;
15424 /* Target hook for assembling integer objects. The PowerPC version has
15425 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15426 is defined. It also needs to handle DI-mode objects on 64-bit
15427 targets. */
15429 static bool
15430 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15432 #ifdef RELOCATABLE_NEEDS_FIXUP
15433 /* Special handling for SI values. */
15434 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15436 static int recurse = 0;
15438 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15439 the .fixup section. Since the TOC section is already relocated, we
15440 don't need to mark it here. We used to skip the text section, but it
15441 should never be valid for relocated addresses to be placed in the text
15442 section. */
15443 if (TARGET_RELOCATABLE
15444 && in_section != toc_section
15445 && !recurse
15446 && GET_CODE (x) != CONST_INT
15447 && GET_CODE (x) != CONST_DOUBLE
15448 && CONSTANT_P (x))
15450 char buf[256];
15452 recurse = 1;
15453 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15454 fixuplabelno++;
15455 ASM_OUTPUT_LABEL (asm_out_file, buf);
15456 fprintf (asm_out_file, "\t.long\t(");
15457 output_addr_const (asm_out_file, x);
15458 fprintf (asm_out_file, ")@fixup\n");
15459 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15460 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15461 fprintf (asm_out_file, "\t.long\t");
15462 assemble_name (asm_out_file, buf);
15463 fprintf (asm_out_file, "\n\t.previous\n");
15464 recurse = 0;
15465 return true;
15467 /* Remove initial .'s to turn a -mcall-aixdesc function
15468 address into the address of the descriptor, not the function
15469 itself. */
15470 else if (GET_CODE (x) == SYMBOL_REF
15471 && XSTR (x, 0)[0] == '.'
15472 && DEFAULT_ABI == ABI_AIX)
15474 const char *name = XSTR (x, 0);
15475 while (*name == '.')
15476 name++;
15478 fprintf (asm_out_file, "\t.long\t%s\n", name);
15479 return true;
15482 #endif /* RELOCATABLE_NEEDS_FIXUP */
15483 return default_assemble_integer (x, size, aligned_p);
15486 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15487 /* Emit an assembler directive to set symbol visibility for DECL to
15488 VISIBILITY_TYPE. */
15490 static void
15491 rs6000_assemble_visibility (tree decl, int vis)
15493 /* Functions need to have their entry point symbol visibility set as
15494 well as their descriptor symbol visibility. */
15495 if (DEFAULT_ABI == ABI_AIX
15496 && DOT_SYMBOLS
15497 && TREE_CODE (decl) == FUNCTION_DECL)
15499 static const char * const visibility_types[] = {
15500 NULL, "internal", "hidden", "protected"
15503 const char *name, *type;
15505 name = ((* targetm.strip_name_encoding)
15506 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15507 type = visibility_types[vis];
15509 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15510 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15512 else
15513 default_assemble_visibility (decl, vis);
15515 #endif
15517 enum rtx_code
15518 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15520 /* Reversal of FP compares takes care -- an ordered compare
15521 becomes an unordered compare and vice versa. */
15522 if (mode == CCFPmode
15523 && (!flag_finite_math_only
15524 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15525 || code == UNEQ || code == LTGT))
15526 return reverse_condition_maybe_unordered (code);
15527 else
15528 return reverse_condition (code);
15531 /* Generate a compare for CODE. Return a brand-new rtx that
15532 represents the result of the compare. */
15534 static rtx
15535 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15537 enum machine_mode comp_mode;
15538 rtx compare_result;
15539 enum rtx_code code = GET_CODE (cmp);
15540 rtx op0 = XEXP (cmp, 0);
15541 rtx op1 = XEXP (cmp, 1);
15543 if (FLOAT_MODE_P (mode))
15544 comp_mode = CCFPmode;
15545 else if (code == GTU || code == LTU
15546 || code == GEU || code == LEU)
15547 comp_mode = CCUNSmode;
15548 else if ((code == EQ || code == NE)
15549 && unsigned_reg_p (op0)
15550 && (unsigned_reg_p (op1)
15551 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
15552 /* These are unsigned values, perhaps there will be a later
15553 ordering compare that can be shared with this one. */
15554 comp_mode = CCUNSmode;
15555 else
15556 comp_mode = CCmode;
15558 /* If we have an unsigned compare, make sure we don't have a signed value as
15559 an immediate. */
15560 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
15561 && INTVAL (op1) < 0)
15563 op0 = copy_rtx_if_shared (op0);
15564 op1 = force_reg (GET_MODE (op0), op1);
15565 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
15568 /* First, the compare. */
15569 compare_result = gen_reg_rtx (comp_mode);
15571 /* E500 FP compare instructions on the GPRs. Yuck! */
15572 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15573 && FLOAT_MODE_P (mode))
15575 rtx cmp, or_result, compare_result2;
15576 enum machine_mode op_mode = GET_MODE (op0);
15578 if (op_mode == VOIDmode)
15579 op_mode = GET_MODE (op1);
15581 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15582 This explains the following mess. */
15584 switch (code)
15586 case EQ: case UNEQ: case NE: case LTGT:
15587 switch (op_mode)
15589 case SFmode:
15590 cmp = (flag_finite_math_only && !flag_trapping_math)
15591 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15592 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15593 break;
15595 case DFmode:
15596 cmp = (flag_finite_math_only && !flag_trapping_math)
15597 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15598 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15599 break;
15601 case TFmode:
15602 cmp = (flag_finite_math_only && !flag_trapping_math)
15603 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15604 : gen_cmptfeq_gpr (compare_result, op0, op1);
15605 break;
15607 default:
15608 gcc_unreachable ();
15610 break;
15612 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15613 switch (op_mode)
15615 case SFmode:
15616 cmp = (flag_finite_math_only && !flag_trapping_math)
15617 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15618 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15619 break;
15621 case DFmode:
15622 cmp = (flag_finite_math_only && !flag_trapping_math)
15623 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15624 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15625 break;
15627 case TFmode:
15628 cmp = (flag_finite_math_only && !flag_trapping_math)
15629 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15630 : gen_cmptfgt_gpr (compare_result, op0, op1);
15631 break;
15633 default:
15634 gcc_unreachable ();
15636 break;
15638 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15639 switch (op_mode)
15641 case SFmode:
15642 cmp = (flag_finite_math_only && !flag_trapping_math)
15643 ? gen_tstsflt_gpr (compare_result, op0, op1)
15644 : gen_cmpsflt_gpr (compare_result, op0, op1);
15645 break;
15647 case DFmode:
15648 cmp = (flag_finite_math_only && !flag_trapping_math)
15649 ? gen_tstdflt_gpr (compare_result, op0, op1)
15650 : gen_cmpdflt_gpr (compare_result, op0, op1);
15651 break;
15653 case TFmode:
15654 cmp = (flag_finite_math_only && !flag_trapping_math)
15655 ? gen_tsttflt_gpr (compare_result, op0, op1)
15656 : gen_cmptflt_gpr (compare_result, op0, op1);
15657 break;
15659 default:
15660 gcc_unreachable ();
15662 break;
15663 default:
15664 gcc_unreachable ();
15667 /* Synthesize LE and GE from LT/GT || EQ. */
15668 if (code == LE || code == GE || code == LEU || code == GEU)
15670 emit_insn (cmp);
15672 switch (code)
15674 case LE: code = LT; break;
15675 case GE: code = GT; break;
15676 case LEU: code = LT; break;
15677 case GEU: code = GT; break;
15678 default: gcc_unreachable ();
15681 compare_result2 = gen_reg_rtx (CCFPmode);
15683 /* Do the EQ. */
15684 switch (op_mode)
15686 case SFmode:
15687 cmp = (flag_finite_math_only && !flag_trapping_math)
15688 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15689 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15690 break;
15692 case DFmode:
15693 cmp = (flag_finite_math_only && !flag_trapping_math)
15694 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15695 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15696 break;
15698 case TFmode:
15699 cmp = (flag_finite_math_only && !flag_trapping_math)
15700 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15701 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15702 break;
15704 default:
15705 gcc_unreachable ();
15707 emit_insn (cmp);
15709 /* OR them together. */
15710 or_result = gen_reg_rtx (CCFPmode);
15711 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15712 compare_result2);
15713 compare_result = or_result;
15714 code = EQ;
15716 else
15718 if (code == NE || code == LTGT)
15719 code = NE;
15720 else
15721 code = EQ;
15724 emit_insn (cmp);
15726 else
15728 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15729 CLOBBERs to match cmptf_internal2 pattern. */
15730 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15731 && GET_MODE (op0) == TFmode
15732 && !TARGET_IEEEQUAD
15733 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15734 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15735 gen_rtvec (10,
15736 gen_rtx_SET (VOIDmode,
15737 compare_result,
15738 gen_rtx_COMPARE (comp_mode, op0, op1)),
15739 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15740 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15741 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15742 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15743 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15744 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15745 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15746 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15747 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
15748 else if (GET_CODE (op1) == UNSPEC
15749 && XINT (op1, 1) == UNSPEC_SP_TEST)
15751 rtx op1b = XVECEXP (op1, 0, 0);
15752 comp_mode = CCEQmode;
15753 compare_result = gen_reg_rtx (CCEQmode);
15754 if (TARGET_64BIT)
15755 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15756 else
15757 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15759 else
15760 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15761 gen_rtx_COMPARE (comp_mode, op0, op1)));
15764 /* Some kinds of FP comparisons need an OR operation;
15765 under flag_finite_math_only we don't bother. */
15766 if (FLOAT_MODE_P (mode)
15767 && !flag_finite_math_only
15768 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15769 && (code == LE || code == GE
15770 || code == UNEQ || code == LTGT
15771 || code == UNGT || code == UNLT))
15773 enum rtx_code or1, or2;
15774 rtx or1_rtx, or2_rtx, compare2_rtx;
15775 rtx or_result = gen_reg_rtx (CCEQmode);
15777 switch (code)
15779 case LE: or1 = LT; or2 = EQ; break;
15780 case GE: or1 = GT; or2 = EQ; break;
15781 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15782 case LTGT: or1 = LT; or2 = GT; break;
15783 case UNGT: or1 = UNORDERED; or2 = GT; break;
15784 case UNLT: or1 = UNORDERED; or2 = LT; break;
15785 default: gcc_unreachable ();
15787 validate_condition_mode (or1, comp_mode);
15788 validate_condition_mode (or2, comp_mode);
15789 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15790 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15791 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15792 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15793 const_true_rtx);
15794 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15796 compare_result = or_result;
15797 code = EQ;
15800 validate_condition_mode (code, GET_MODE (compare_result));
15802 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
15806 /* Emit the RTL for an sISEL pattern. */
15808 void
15809 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
15811 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
15814 void
15815 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15817 rtx condition_rtx;
15818 enum machine_mode op_mode;
15819 enum rtx_code cond_code;
15820 rtx result = operands[0];
15822 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15824 rs6000_emit_sISEL (mode, operands);
15825 return;
15828 condition_rtx = rs6000_generate_compare (operands[1], mode);
15829 cond_code = GET_CODE (condition_rtx);
15831 if (FLOAT_MODE_P (mode)
15832 && !TARGET_FPRS && TARGET_HARD_FLOAT)
15834 rtx t;
15836 PUT_MODE (condition_rtx, SImode);
15837 t = XEXP (condition_rtx, 0);
15839 gcc_assert (cond_code == NE || cond_code == EQ);
15841 if (cond_code == NE)
15842 emit_insn (gen_e500_flip_gt_bit (t, t));
15844 emit_insn (gen_move_from_CR_gt_bit (result, t));
15845 return;
15848 if (cond_code == NE
15849 || cond_code == GE || cond_code == LE
15850 || cond_code == GEU || cond_code == LEU
15851 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
15853 rtx not_result = gen_reg_rtx (CCEQmode);
15854 rtx not_op, rev_cond_rtx;
15855 enum machine_mode cc_mode;
15857 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
15859 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
15860 SImode, XEXP (condition_rtx, 0), const0_rtx);
15861 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
15862 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
15863 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
15866 op_mode = GET_MODE (XEXP (operands[1], 0));
15867 if (op_mode == VOIDmode)
15868 op_mode = GET_MODE (XEXP (operands[1], 1));
15870 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
15872 PUT_MODE (condition_rtx, DImode);
15873 convert_move (result, condition_rtx, 0);
15875 else
15877 PUT_MODE (condition_rtx, SImode);
15878 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
15882 /* Emit a branch of kind CODE to location LOC. */
15884 void
15885 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
15887 rtx condition_rtx, loc_ref;
15889 condition_rtx = rs6000_generate_compare (operands[0], mode);
15890 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
15891 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
15892 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
15893 loc_ref, pc_rtx)));
15896 /* Return the string to output a conditional branch to LABEL, which is
15897 the operand number of the label, or -1 if the branch is really a
15898 conditional return.
15900 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
15901 condition code register and its mode specifies what kind of
15902 comparison we made.
15904 REVERSED is nonzero if we should reverse the sense of the comparison.
15906 INSN is the insn. */
15908 char *
15909 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
15911 static char string[64];
15912 enum rtx_code code = GET_CODE (op);
15913 rtx cc_reg = XEXP (op, 0);
15914 enum machine_mode mode = GET_MODE (cc_reg);
15915 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
15916 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
15917 int really_reversed = reversed ^ need_longbranch;
15918 char *s = string;
15919 const char *ccode;
15920 const char *pred;
15921 rtx note;
15923 validate_condition_mode (code, mode);
15925 /* Work out which way this really branches. We could use
15926 reverse_condition_maybe_unordered here always but this
15927 makes the resulting assembler clearer. */
15928 if (really_reversed)
15930 /* Reversal of FP compares takes care -- an ordered compare
15931 becomes an unordered compare and vice versa. */
15932 if (mode == CCFPmode)
15933 code = reverse_condition_maybe_unordered (code);
15934 else
15935 code = reverse_condition (code);
15938 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
15940 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
15941 to the GT bit. */
15942 switch (code)
15944 case EQ:
15945 /* Opposite of GT. */
15946 code = GT;
15947 break;
15949 case NE:
15950 code = UNLE;
15951 break;
15953 default:
15954 gcc_unreachable ();
15958 switch (code)
15960 /* Not all of these are actually distinct opcodes, but
15961 we distinguish them for clarity of the resulting assembler. */
15962 case NE: case LTGT:
15963 ccode = "ne"; break;
15964 case EQ: case UNEQ:
15965 ccode = "eq"; break;
15966 case GE: case GEU:
15967 ccode = "ge"; break;
15968 case GT: case GTU: case UNGT:
15969 ccode = "gt"; break;
15970 case LE: case LEU:
15971 ccode = "le"; break;
15972 case LT: case LTU: case UNLT:
15973 ccode = "lt"; break;
15974 case UNORDERED: ccode = "un"; break;
15975 case ORDERED: ccode = "nu"; break;
15976 case UNGE: ccode = "nl"; break;
15977 case UNLE: ccode = "ng"; break;
15978 default:
15979 gcc_unreachable ();
15982 /* Maybe we have a guess as to how likely the branch is. */
15983 pred = "";
15984 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
15985 if (note != NULL_RTX)
15987 /* PROB is the difference from 50%. */
15988 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
15990 /* Only hint for highly probable/improbable branches on newer
15991 cpus as static prediction overrides processor dynamic
15992 prediction. For older cpus we may as well always hint, but
15993 assume not taken for branches that are very close to 50% as a
15994 mispredicted taken branch is more expensive than a
15995 mispredicted not-taken branch. */
15996 if (rs6000_always_hint
15997 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
15998 && br_prob_note_reliable_p (note)))
16000 if (abs (prob) > REG_BR_PROB_BASE / 20
16001 && ((prob > 0) ^ need_longbranch))
16002 pred = "+";
16003 else
16004 pred = "-";
16008 if (label == NULL)
16009 s += sprintf (s, "b%slr%s ", ccode, pred);
16010 else
16011 s += sprintf (s, "b%s%s ", ccode, pred);
16013 /* We need to escape any '%' characters in the reg_names string.
16014 Assume they'd only be the first character.... */
16015 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
16016 *s++ = '%';
16017 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
16019 if (label != NULL)
16021 /* If the branch distance was too far, we may have to use an
16022 unconditional branch to go the distance. */
16023 if (need_longbranch)
16024 s += sprintf (s, ",$+8\n\tb %s", label);
16025 else
16026 s += sprintf (s, ",%s", label);
16029 return string;
16032 /* Return the string to flip the GT bit on a CR. */
16033 char *
16034 output_e500_flip_gt_bit (rtx dst, rtx src)
16036 static char string[64];
16037 int a, b;
16039 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
16040 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
16042 /* GT bit. */
16043 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
16044 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
16046 sprintf (string, "crnot %d,%d", a, b);
16047 return string;
16050 /* Return insn for VSX or Altivec comparisons. */
16052 static rtx
16053 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
16055 rtx mask;
16056 enum machine_mode mode = GET_MODE (op0);
16058 switch (code)
16060 default:
16061 break;
16063 case GE:
16064 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16065 return NULL_RTX;
16067 case EQ:
16068 case GT:
16069 case GTU:
16070 case ORDERED:
16071 case UNORDERED:
16072 case UNEQ:
16073 case LTGT:
16074 mask = gen_reg_rtx (mode);
16075 emit_insn (gen_rtx_SET (VOIDmode,
16076 mask,
16077 gen_rtx_fmt_ee (code, mode, op0, op1)));
16078 return mask;
16081 return NULL_RTX;
16084 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
16085 DMODE is expected destination mode. This is a recursive function. */
16087 static rtx
16088 rs6000_emit_vector_compare (enum rtx_code rcode,
16089 rtx op0, rtx op1,
16090 enum machine_mode dmode)
16092 rtx mask;
16093 bool swap_operands = false;
16094 bool try_again = false;
16096 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
16097 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
16099 /* See if the comparison works as is. */
16100 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16101 if (mask)
16102 return mask;
16104 switch (rcode)
16106 case LT:
16107 rcode = GT;
16108 swap_operands = true;
16109 try_again = true;
16110 break;
16111 case LTU:
16112 rcode = GTU;
16113 swap_operands = true;
16114 try_again = true;
16115 break;
16116 case NE:
16117 case UNLE:
16118 case UNLT:
16119 case UNGE:
16120 case UNGT:
16121 /* Invert condition and try again.
16122 e.g., A != B becomes ~(A==B). */
16124 enum rtx_code rev_code;
16125 enum insn_code nor_code;
16126 rtx mask2;
16128 rev_code = reverse_condition_maybe_unordered (rcode);
16129 if (rev_code == UNKNOWN)
16130 return NULL_RTX;
16132 nor_code = optab_handler (one_cmpl_optab, dmode);
16133 if (nor_code == CODE_FOR_nothing)
16134 return NULL_RTX;
16136 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
16137 if (!mask2)
16138 return NULL_RTX;
16140 mask = gen_reg_rtx (dmode);
16141 emit_insn (GEN_FCN (nor_code) (mask, mask2));
16142 return mask;
16144 break;
16145 case GE:
16146 case GEU:
16147 case LE:
16148 case LEU:
16149 /* Try GT/GTU/LT/LTU OR EQ */
16151 rtx c_rtx, eq_rtx;
16152 enum insn_code ior_code;
16153 enum rtx_code new_code;
16155 switch (rcode)
16157 case GE:
16158 new_code = GT;
16159 break;
16161 case GEU:
16162 new_code = GTU;
16163 break;
16165 case LE:
16166 new_code = LT;
16167 break;
16169 case LEU:
16170 new_code = LTU;
16171 break;
16173 default:
16174 gcc_unreachable ();
16177 ior_code = optab_handler (ior_optab, dmode);
16178 if (ior_code == CODE_FOR_nothing)
16179 return NULL_RTX;
16181 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
16182 if (!c_rtx)
16183 return NULL_RTX;
16185 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
16186 if (!eq_rtx)
16187 return NULL_RTX;
16189 mask = gen_reg_rtx (dmode);
16190 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
16191 return mask;
16193 break;
16194 default:
16195 return NULL_RTX;
16198 if (try_again)
16200 if (swap_operands)
16202 rtx tmp;
16203 tmp = op0;
16204 op0 = op1;
16205 op1 = tmp;
16208 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16209 if (mask)
16210 return mask;
16213 /* You only get two chances. */
16214 return NULL_RTX;
16217 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16218 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16219 operands for the relation operation COND. */
16222 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16223 rtx cond, rtx cc_op0, rtx cc_op1)
16225 enum machine_mode dest_mode = GET_MODE (dest);
16226 enum machine_mode mask_mode = GET_MODE (cc_op0);
16227 enum rtx_code rcode = GET_CODE (cond);
16228 enum machine_mode cc_mode = CCmode;
16229 rtx mask;
16230 rtx cond2;
16231 rtx tmp;
16232 bool invert_move = false;
16234 if (VECTOR_UNIT_NONE_P (dest_mode))
16235 return 0;
16237 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16238 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16240 switch (rcode)
16242 /* Swap operands if we can, and fall back to doing the operation as
16243 specified, and doing a NOR to invert the test. */
16244 case NE:
16245 case UNLE:
16246 case UNLT:
16247 case UNGE:
16248 case UNGT:
16249 /* Invert condition and try again.
16250 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16251 invert_move = true;
16252 rcode = reverse_condition_maybe_unordered (rcode);
16253 if (rcode == UNKNOWN)
16254 return 0;
16255 break;
16257 /* Mark unsigned tests with CCUNSmode. */
16258 case GTU:
16259 case GEU:
16260 case LTU:
16261 case LEU:
16262 cc_mode = CCUNSmode;
16263 break;
16265 default:
16266 break;
16269 /* Get the vector mask for the given relational operations. */
16270 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16272 if (!mask)
16273 return 0;
16275 if (invert_move)
16277 tmp = op_true;
16278 op_true = op_false;
16279 op_false = tmp;
16282 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16283 CONST0_RTX (dest_mode));
16284 emit_insn (gen_rtx_SET (VOIDmode,
16285 dest,
16286 gen_rtx_IF_THEN_ELSE (dest_mode,
16287 cond2,
16288 op_true,
16289 op_false)));
16290 return 1;
16293 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
16294 operands of the last comparison is nonzero/true, FALSE_COND if it
16295 is zero/false. Return 0 if the hardware has no such operation. */
16298 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16300 enum rtx_code code = GET_CODE (op);
16301 rtx op0 = XEXP (op, 0);
16302 rtx op1 = XEXP (op, 1);
16303 REAL_VALUE_TYPE c1;
16304 enum machine_mode compare_mode = GET_MODE (op0);
16305 enum machine_mode result_mode = GET_MODE (dest);
16306 rtx temp;
16307 bool is_against_zero;
16309 /* These modes should always match. */
16310 if (GET_MODE (op1) != compare_mode
16311 /* In the isel case however, we can use a compare immediate, so
16312 op1 may be a small constant. */
16313 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16314 return 0;
16315 if (GET_MODE (true_cond) != result_mode)
16316 return 0;
16317 if (GET_MODE (false_cond) != result_mode)
16318 return 0;
16320 /* Don't allow using floating point comparisons for integer results for
16321 now. */
16322 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
16323 return 0;
16325 /* First, work out if the hardware can do this at all, or
16326 if it's too slow.... */
16327 if (!FLOAT_MODE_P (compare_mode))
16329 if (TARGET_ISEL)
16330 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16331 return 0;
16333 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16334 && SCALAR_FLOAT_MODE_P (compare_mode))
16335 return 0;
16337 is_against_zero = op1 == CONST0_RTX (compare_mode);
16339 /* A floating-point subtract might overflow, underflow, or produce
16340 an inexact result, thus changing the floating-point flags, so it
16341 can't be generated if we care about that. It's safe if one side
16342 of the construct is zero, since then no subtract will be
16343 generated. */
16344 if (SCALAR_FLOAT_MODE_P (compare_mode)
16345 && flag_trapping_math && ! is_against_zero)
16346 return 0;
16348 /* Eliminate half of the comparisons by switching operands, this
16349 makes the remaining code simpler. */
16350 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16351 || code == LTGT || code == LT || code == UNLE)
16353 code = reverse_condition_maybe_unordered (code);
16354 temp = true_cond;
16355 true_cond = false_cond;
16356 false_cond = temp;
16359 /* UNEQ and LTGT take four instructions for a comparison with zero,
16360 it'll probably be faster to use a branch here too. */
16361 if (code == UNEQ && HONOR_NANS (compare_mode))
16362 return 0;
16364 if (GET_CODE (op1) == CONST_DOUBLE)
16365 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16367 /* We're going to try to implement comparisons by performing
16368 a subtract, then comparing against zero. Unfortunately,
16369 Inf - Inf is NaN which is not zero, and so if we don't
16370 know that the operand is finite and the comparison
16371 would treat EQ different to UNORDERED, we can't do it. */
16372 if (HONOR_INFINITIES (compare_mode)
16373 && code != GT && code != UNGE
16374 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16375 /* Constructs of the form (a OP b ? a : b) are safe. */
16376 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16377 || (! rtx_equal_p (op0, true_cond)
16378 && ! rtx_equal_p (op1, true_cond))))
16379 return 0;
16381 /* At this point we know we can use fsel. */
16383 /* Reduce the comparison to a comparison against zero. */
16384 if (! is_against_zero)
16386 temp = gen_reg_rtx (compare_mode);
16387 emit_insn (gen_rtx_SET (VOIDmode, temp,
16388 gen_rtx_MINUS (compare_mode, op0, op1)));
16389 op0 = temp;
16390 op1 = CONST0_RTX (compare_mode);
16393 /* If we don't care about NaNs we can reduce some of the comparisons
16394 down to faster ones. */
16395 if (! HONOR_NANS (compare_mode))
16396 switch (code)
16398 case GT:
16399 code = LE;
16400 temp = true_cond;
16401 true_cond = false_cond;
16402 false_cond = temp;
16403 break;
16404 case UNGE:
16405 code = GE;
16406 break;
16407 case UNEQ:
16408 code = EQ;
16409 break;
16410 default:
16411 break;
16414 /* Now, reduce everything down to a GE. */
16415 switch (code)
16417 case GE:
16418 break;
16420 case LE:
16421 temp = gen_reg_rtx (compare_mode);
16422 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16423 op0 = temp;
16424 break;
16426 case ORDERED:
16427 temp = gen_reg_rtx (compare_mode);
16428 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16429 op0 = temp;
16430 break;
16432 case EQ:
16433 temp = gen_reg_rtx (compare_mode);
16434 emit_insn (gen_rtx_SET (VOIDmode, temp,
16435 gen_rtx_NEG (compare_mode,
16436 gen_rtx_ABS (compare_mode, op0))));
16437 op0 = temp;
16438 break;
16440 case UNGE:
16441 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16442 temp = gen_reg_rtx (result_mode);
16443 emit_insn (gen_rtx_SET (VOIDmode, temp,
16444 gen_rtx_IF_THEN_ELSE (result_mode,
16445 gen_rtx_GE (VOIDmode,
16446 op0, op1),
16447 true_cond, false_cond)));
16448 false_cond = true_cond;
16449 true_cond = temp;
16451 temp = gen_reg_rtx (compare_mode);
16452 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16453 op0 = temp;
16454 break;
16456 case GT:
16457 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16458 temp = gen_reg_rtx (result_mode);
16459 emit_insn (gen_rtx_SET (VOIDmode, temp,
16460 gen_rtx_IF_THEN_ELSE (result_mode,
16461 gen_rtx_GE (VOIDmode,
16462 op0, op1),
16463 true_cond, false_cond)));
16464 true_cond = false_cond;
16465 false_cond = temp;
16467 temp = gen_reg_rtx (compare_mode);
16468 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16469 op0 = temp;
16470 break;
16472 default:
16473 gcc_unreachable ();
16476 emit_insn (gen_rtx_SET (VOIDmode, dest,
16477 gen_rtx_IF_THEN_ELSE (result_mode,
16478 gen_rtx_GE (VOIDmode,
16479 op0, op1),
16480 true_cond, false_cond)));
16481 return 1;
16484 /* Same as above, but for ints (isel). */
16486 static int
16487 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16489 rtx condition_rtx, cr;
16490 enum machine_mode mode = GET_MODE (dest);
16491 enum rtx_code cond_code;
16492 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
16493 bool signedp;
16495 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16496 return 0;
16498 /* We still have to do the compare, because isel doesn't do a
16499 compare; it just looks at the CRx bits set by a previous compare
16500 instruction. */
16501 condition_rtx = rs6000_generate_compare (op, mode);
16502 cond_code = GET_CODE (condition_rtx);
16503 cr = XEXP (condition_rtx, 0);
16504 signedp = GET_MODE (cr) == CCmode;
16506 isel_func = (mode == SImode
16507 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
16508 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
16510 switch (cond_code)
16512 case LT: case GT: case LTU: case GTU: case EQ:
16513 /* isel handles these directly. */
16514 break;
16516 default:
16517 /* We need to swap the sense of the comparison. */
16519 rtx t = true_cond;
16520 true_cond = false_cond;
16521 false_cond = t;
16522 PUT_CODE (condition_rtx, reverse_condition (cond_code));
16524 break;
16527 false_cond = force_reg (mode, false_cond);
16528 if (true_cond != const0_rtx)
16529 true_cond = force_reg (mode, true_cond);
16531 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
16533 return 1;
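/* Illustration (not part of the original file): for word-size operands
   the expansion above turns a conditional move into one compare plus a
   single branch-free isel, e.g. (hypothetical registers):

       cmpw  cr0,r3,r4      # set CR0 from a <op> b
       isel  r3,r5,r6,0     # r3 = CR0.LT ? r5 : r6

   A C source candidate when isel is available: */

int
select_lt (int a, int b, int x, int y)
{
  return a < b ? x : y;         /* compare + isel, no branch */
}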
16536 const char *
16537 output_isel (rtx *operands)
16539 enum rtx_code code;
16541 code = GET_CODE (operands[1]);
16543 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16545 gcc_assert (GET_CODE (operands[2]) == REG
16546 && GET_CODE (operands[3]) == REG);
16547 PUT_CODE (operands[1], reverse_condition (code));
16548 return "isel %0,%3,%2,%j1";
16551 return "isel %0,%2,%3,%j1";
16554 void
16555 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16557 enum machine_mode mode = GET_MODE (op0);
16558 enum rtx_code c;
16559 rtx target;
16561 /* VSX/altivec have direct min/max insns. */
16562 if ((code == SMAX || code == SMIN)
16563 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16564 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
16566 emit_insn (gen_rtx_SET (VOIDmode,
16567 dest,
16568 gen_rtx_fmt_ee (code, mode, op0, op1)));
16569 return;
16572 if (code == SMAX || code == SMIN)
16573 c = GE;
16574 else
16575 c = GEU;
16577 if (code == SMAX || code == UMAX)
16578 target = emit_conditional_move (dest, c, op0, op1, mode,
16579 op0, op1, mode, 0);
16580 else
16581 target = emit_conditional_move (dest, c, op0, op1, mode,
16582 op1, op0, mode, 0);
16583 gcc_assert (target);
16584 if (target != dest)
16585 emit_move_insn (dest, target);
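/* Illustration (not part of the original file): when no direct vector
   min/max instruction applies, rs6000_emit_minmax reuses the conditional
   move machinery via the identity smax (a, b) == (a >= b ? a : b), so an
   integer max becomes compare + isel and a floating-point max becomes
   the fsel sequence sketched earlier.  A minimal C sketch: */

int
int_max (int a, int b)
{
  return a >= b ? a : b;        /* what SMAX is reduced to above */
}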
16588 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16589 COND is true. Mark the jump as unlikely to be taken. */
16591 static void
16592 emit_unlikely_jump (rtx cond, rtx label)
16594 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16595 rtx x;
16597 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16598 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16599 add_reg_note (x, REG_BR_PROB, very_unlikely);
16602 /* A subroutine of the atomic operation splitters. Emit a load-locked
16603 instruction in MODE. */
16605 static void
16606 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16608 rtx (*fn) (rtx, rtx) = NULL;
16610 switch (mode)
16612 case SImode:
16613 fn = gen_load_lockedsi;
16614 break;
16615 case DImode:
16616 fn = gen_load_lockeddi;
16617 break;
16618 default:
16619 gcc_unreachable ();
16621 emit_insn (fn (reg, mem));
16624 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16625 instruction in MODE. */
16627 static void
16628 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16630 rtx (*fn) (rtx, rtx, rtx) = NULL;
16632 switch (mode)
16634 case SImode:
16635 fn = gen_store_conditionalsi;
16636 break;
16637 case DImode:
16638 fn = gen_store_conditionaldi;
16639 break;
16640 default:
16641 gcc_unreachable ();
16644 /* Emit sync before stwcx. to address PPC405 Erratum. */
16645 if (PPC405_ERRATUM77)
16646 emit_insn (gen_hwsync ());
16648 emit_insn (fn (res, mem, val));
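/* Illustration (not part of the original file): emit_load_locked and
   emit_store_conditional are the two halves of the classic reservation
   loop that the atomic splitters build around them.  For a full-word
   fetch-and-add the generated code is typically (hypothetical
   registers):

       loop: lwarx  r9,0,r3       # load-locked, takes the reservation
             addi   r10,r9,1
             stwcx. r10,0,r3      # store-conditional, sets CR0.EQ
             bne-   loop          # reservation lost: retry

   with an extra sync before the stwcx. on PPC405, as handled above.
   A C candidate: */

int
fetch_add_one (int *p)
{
  return __atomic_fetch_add (p, 1, __ATOMIC_RELAXED);
}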
16651 /* Expand barriers before and after a load_locked/store_cond sequence. */
16653 static rtx
16654 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
16656 rtx addr = XEXP (mem, 0);
16657 int strict_p = (reload_in_progress || reload_completed);
16659 if (!legitimate_indirect_address_p (addr, strict_p)
16660 && !legitimate_indexed_address_p (addr, strict_p))
16662 addr = force_reg (Pmode, addr);
16663 mem = replace_equiv_address_nv (mem, addr);
16666 switch (model)
16668 case MEMMODEL_RELAXED:
16669 case MEMMODEL_CONSUME:
16670 case MEMMODEL_ACQUIRE:
16671 break;
16672 case MEMMODEL_RELEASE:
16673 case MEMMODEL_ACQ_REL:
16674 emit_insn (gen_lwsync ());
16675 break;
16676 case MEMMODEL_SEQ_CST:
16677 emit_insn (gen_hwsync ());
16678 break;
16679 default:
16680 gcc_unreachable ();
16682 return mem;
16685 static void
16686 rs6000_post_atomic_barrier (enum memmodel model)
16688 switch (model)
16690 case MEMMODEL_RELAXED:
16691 case MEMMODEL_CONSUME:
16692 case MEMMODEL_RELEASE:
16693 break;
16694 case MEMMODEL_ACQUIRE:
16695 case MEMMODEL_ACQ_REL:
16696 case MEMMODEL_SEQ_CST:
16697 emit_insn (gen_isync ());
16698 break;
16699 default:
16700 gcc_unreachable ();
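/* Illustration (not part of the original file): taken together, the
   pre- and post-barrier routines implement the usual mapping from C11
   memory models to PowerPC fences around the larx/stcx. sequence:

       model     before loop     after loop
       relaxed   -               -
       consume   -               -
       acquire   -               isync
       release   lwsync          -
       acq_rel   lwsync          isync
       seq_cst   hwsync (sync)   isync

   So, for example, a seq_cst exchange is bracketed as
   sync; <loop>; isync: */

int
exchange_seq_cst (int *p, int v)
{
  return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
}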
16704 /* A subroutine of the various atomic expanders. For sub-word operations,
16705 we must adjust things to operate on SImode. Given the original MEM,
16706 return a new aligned memory. Also build and return the quantities by
16707 which to shift and mask. */
16709 static rtx
16710 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
16712 rtx addr, align, shift, mask, mem;
16713 HOST_WIDE_INT shift_mask;
16714 enum machine_mode mode = GET_MODE (orig_mem);
16716 /* For smaller modes, we have to implement this via SImode. */
16717 shift_mask = (mode == QImode ? 0x18 : 0x10);
16719 addr = XEXP (orig_mem, 0);
16720 addr = force_reg (GET_MODE (addr), addr);
16722 /* Aligned memory containing subword. Generate a new memory. We
16723 do not want any of the existing MEM_ATTR data, as we're now
16724 accessing memory outside the original object. */
16725 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
16726 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16727 mem = gen_rtx_MEM (SImode, align);
16728 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
16729 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
16730 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
16732 /* Shift amount for subword relative to aligned word. */
16733 shift = gen_reg_rtx (SImode);
16734 addr = gen_lowpart (SImode, addr);
16735 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
16736 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
16737 shift, 1, OPTAB_LIB_WIDEN);
16738 *pshift = shift;
16740 /* Mask for insertion. */
16741 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
16742 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
16743 *pmask = mask;
16745 return mem;
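/* Worked example (not part of the original file; assumes a big-endian
   32-bit target): for a QImode atomic at address 0x1001 the code above
   computes

       align = 0x1001 & -4                    = 0x1000  (containing word)
       shift = ((0x1001 << 3) & 0x18) ^ 0x18  = 8 ^ 0x18 = 16

   i.e. byte 1 of a big-endian word occupies bits 16..23 of the loaded
   register, and mask = 0xff << 16 selects exactly those bits.  The
   shift computation, as a hypothetical C helper: */

static unsigned int
subword_shift_be (unsigned long addr)
{
  return (((unsigned int) addr << 3) & 0x18) ^ 0x18;   /* QImode case */
}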
16748 /* A subroutine of the various atomic expanders. For sub-word operands,
16749 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
16751 static rtx
16752 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
16754 rtx x;
16756 x = gen_reg_rtx (SImode);
16757 emit_insn (gen_rtx_SET (VOIDmode, x,
16758 gen_rtx_AND (SImode,
16759 gen_rtx_NOT (SImode, mask),
16760 oldval)));
16762 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
16764 return x;
16767 /* A subroutine of the various atomic expanders. For sub-word operands,
16768 extract WIDE to NARROW via SHIFT. */
16770 static void
16771 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
16773 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
16774 wide, 1, OPTAB_LIB_WIDEN);
16775 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
16778 /* Expand an atomic compare and swap operation. */
16780 void
16781 rs6000_expand_atomic_compare_and_swap (rtx operands[])
16783 rtx boolval, retval, mem, oldval, newval, cond;
16784 rtx label1, label2, x, mask, shift;
16785 enum machine_mode mode;
16786 enum memmodel mod_s, mod_f;
16787 bool is_weak;
16789 boolval = operands[0];
16790 retval = operands[1];
16791 mem = operands[2];
16792 oldval = operands[3];
16793 newval = operands[4];
16794 is_weak = (INTVAL (operands[5]) != 0);
16795 mod_s = (enum memmodel) INTVAL (operands[6]);
16796 mod_f = (enum memmodel) INTVAL (operands[7]);
16797 mode = GET_MODE (mem);
16799 mask = shift = NULL_RTX;
16800 if (mode == QImode || mode == HImode)
16802 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16804 /* Shift and mask OLDVAL into position within the word. */
16805 oldval = convert_modes (SImode, mode, oldval, 1);
16806 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
16807 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16809 /* Shift and mask NEWVAL into position within the word. */
16810 newval = convert_modes (SImode, mode, newval, 1);
16811 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
16812 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16814 /* Prepare to adjust the return value. */
16815 retval = gen_reg_rtx (SImode);
16816 mode = SImode;
16818 else if (reg_overlap_mentioned_p (retval, oldval))
16819 oldval = copy_to_reg (oldval);
16821 mem = rs6000_pre_atomic_barrier (mem, mod_s);
16823 label1 = NULL_RTX;
16824 if (!is_weak)
16826 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16827 emit_label (XEXP (label1, 0));
16829 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16831 emit_load_locked (mode, retval, mem);
16833 x = retval;
16834 if (mask)
16836 x = expand_simple_binop (SImode, AND, retval, mask,
16837 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16840 cond = gen_reg_rtx (CCmode);
16841 x = gen_rtx_COMPARE (CCmode, x, oldval);
16842 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16844 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16845 emit_unlikely_jump (x, label2);
16847 x = newval;
16848 if (mask)
16849 x = rs6000_mask_atomic_subword (retval, newval, mask);
16851 emit_store_conditional (mode, cond, mem, x);
16853 if (!is_weak)
16855 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16856 emit_unlikely_jump (x, label1);
16859 if (mod_f != MEMMODEL_RELAXED)
16860 emit_label (XEXP (label2, 0));
16862 rs6000_post_atomic_barrier (mod_s);
16864 if (mod_f == MEMMODEL_RELAXED)
16865 emit_label (XEXP (label2, 0));
16867 if (shift)
16868 rs6000_finish_atomic_subword (operands[1], retval, shift);
16870 /* In all cases, CR0 contains EQ on success, and NE on failure. */
16871 x = gen_rtx_EQ (SImode, cond, const0_rtx);
16872 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
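/* Illustration (not part of the original file): for a word-size strong
   compare-and-swap with a seq_cst model, the expansion above produces,
   in outline (hypothetical registers):

             sync                  # pre-barrier
       loop: lwarx  r9,0,r3        # retval = *mem, locked
             cmpw   cr0,r9,r4      # retval == oldval ?
             bne-   fail
             stwcx. r5,0,r3        # try to store newval
             bne-   loop           # reservation lost: retry
       fail: isync                 # post-barrier; CR0.EQ = success

   which is what the builtin below compiles to: */

int
cas_int (int *p, int *expected, int desired)
{
  return __atomic_compare_exchange_n (p, expected, desired, 0,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}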
16875 /* Expand an atomic exchange operation. */
16877 void
16878 rs6000_expand_atomic_exchange (rtx operands[])
16880 rtx retval, mem, val, cond;
16881 enum machine_mode mode;
16882 enum memmodel model;
16883 rtx label, x, mask, shift;
16885 retval = operands[0];
16886 mem = operands[1];
16887 val = operands[2];
16888 model = (enum memmodel) INTVAL (operands[3]);
16889 mode = GET_MODE (mem);
16891 mask = shift = NULL_RTX;
16892 if (mode == QImode || mode == HImode)
16894 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16896 /* Shift and mask VAL into position within the word. */
16897 val = convert_modes (SImode, mode, val, 1);
16898 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16899 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16901 /* Prepare to adjust the return value. */
16902 retval = gen_reg_rtx (SImode);
16903 mode = SImode;
16906 mem = rs6000_pre_atomic_barrier (mem, model);
16908 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16909 emit_label (XEXP (label, 0));
16911 emit_load_locked (mode, retval, mem);
16913 x = val;
16914 if (mask)
16915 x = rs6000_mask_atomic_subword (retval, val, mask);
16917 cond = gen_reg_rtx (CCmode);
16918 emit_store_conditional (mode, cond, mem, x);
16920 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16921 emit_unlikely_jump (x, label);
16923 rs6000_post_atomic_barrier (model);
16925 if (shift)
16926 rs6000_finish_atomic_subword (operands[0], retval, shift);
16929 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
16930 to perform. MEM is the memory on which to operate. VAL is the second
16931 operand of the binary operator. BEFORE and AFTER are optional locations to
16932 return the value of MEM either before or after the operation. MODEL_RTX
16933 is a CONST_INT containing the memory model to use. */
16935 void
16936 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
16937 rtx orig_before, rtx orig_after, rtx model_rtx)
16939 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
16940 enum machine_mode mode = GET_MODE (mem);
16941 rtx label, x, cond, mask, shift;
16942 rtx before = orig_before, after = orig_after;
16944 mask = shift = NULL_RTX;
16945 if (mode == QImode || mode == HImode)
16947 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16949 /* Shift and mask VAL into position within the word. */
16950 val = convert_modes (SImode, mode, val, 1);
16951 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16952 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16954 switch (code)
16956 case IOR:
16957 case XOR:
16958 /* We've already zero-extended VAL. That is sufficient to
16959 make certain that it does not affect other bits. */
16960 mask = NULL;
16961 break;
16963 case AND:
16964 /* If we make certain that all of the other bits in VAL are
16965 set, that will be sufficient to not affect other bits. */
16966 x = gen_rtx_NOT (SImode, mask);
16967 x = gen_rtx_IOR (SImode, x, val);
16968 emit_insn (gen_rtx_SET (VOIDmode, val, x));
16969 mask = NULL;
16970 break;
16972 case NOT:
16973 case PLUS:
16974 case MINUS:
16975 /* These will all affect bits outside the field and need
16976 adjustment via MASK within the loop. */
16977 break;
16979 default:
16980 gcc_unreachable ();
16983 /* Prepare to adjust the return value. */
16984 before = gen_reg_rtx (SImode);
16985 if (after)
16986 after = gen_reg_rtx (SImode);
16987 mode = SImode;
16990 mem = rs6000_pre_atomic_barrier (mem, model);
16992 label = gen_label_rtx ();
16993 emit_label (label);
16994 label = gen_rtx_LABEL_REF (VOIDmode, label);
16996 if (before == NULL_RTX)
16997 before = gen_reg_rtx (mode);
16999 emit_load_locked (mode, before, mem);
17001 if (code == NOT)
17003 x = expand_simple_binop (mode, AND, before, val,
17004 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17005 after = expand_simple_unop (mode, NOT, x, after, 1);
17007 else
17009 after = expand_simple_binop (mode, code, before, val,
17010 after, 1, OPTAB_LIB_WIDEN);
17013 x = after;
17014 if (mask)
17016 x = expand_simple_binop (SImode, AND, after, mask,
17017 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17018 x = rs6000_mask_atomic_subword (before, x, mask);
17021 cond = gen_reg_rtx (CCmode);
17022 emit_store_conditional (mode, cond, mem, x);
17024 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17025 emit_unlikely_jump (x, label);
17027 rs6000_post_atomic_barrier (model);
17029 if (shift)
17031 if (orig_before)
17032 rs6000_finish_atomic_subword (orig_before, before, shift);
17033 if (orig_after)
17034 rs6000_finish_atomic_subword (orig_after, after, shift);
17036 else if (orig_after && after != orig_after)
17037 emit_move_insn (orig_after, after);
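/* Illustration (not part of the original file): the AND trick above
   works because, with all bits of VAL outside the field forced to 1,
   "w & val" leaves the neighbouring bytes of the word unchanged, so no
   extra masking is needed inside the loop; PLUS, MINUS and NOT can
   carry into or flip neighbouring bits and therefore need the
   rs6000_mask_atomic_subword fixup.  A full-word C candidate: */

unsigned int
fetch_and_and (unsigned int *p, unsigned int v)
{
  return __atomic_fetch_and (p, v, __ATOMIC_ACQ_REL);
}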
17040 /* Emit instructions to move SRC to DST. Called by splitters for
17041 multi-register moves. It will emit at most one instruction for
17042 each register that is accessed; that is, it won't emit li/lis pairs
17043 (or equivalent for 64-bit code). One of SRC or DST must be a hard
17044 register. */
17046 void
17047 rs6000_split_multireg_move (rtx dst, rtx src)
17049 /* The register number of the first register being moved. */
17050 int reg;
17051 /* The mode that is to be moved. */
17052 enum machine_mode mode;
17053 /* The mode that the move is being done in, and its size. */
17054 enum machine_mode reg_mode;
17055 int reg_mode_size;
17056 /* The number of registers that will be moved. */
17057 int nregs;
17059 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
17060 mode = GET_MODE (dst);
17061 nregs = hard_regno_nregs[reg][mode];
17062 if (FP_REGNO_P (reg))
17063 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
17064 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
17065 else if (ALTIVEC_REGNO_P (reg))
17066 reg_mode = V16QImode;
17067 else if (TARGET_E500_DOUBLE && mode == TFmode)
17068 reg_mode = DFmode;
17069 else
17070 reg_mode = word_mode;
17071 reg_mode_size = GET_MODE_SIZE (reg_mode);
17073 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
17075 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
17077 /* Move register range backwards, if we might have destructive
17078 overlap. */
17079 int i;
17080 for (i = nregs - 1; i >= 0; i--)
17081 emit_insn (gen_rtx_SET (VOIDmode,
17082 simplify_gen_subreg (reg_mode, dst, mode,
17083 i * reg_mode_size),
17084 simplify_gen_subreg (reg_mode, src, mode,
17085 i * reg_mode_size)));
17087 else
17089 int i;
17090 int j = -1;
17091 bool used_update = false;
17092 rtx restore_basereg = NULL_RTX;
17094 if (MEM_P (src) && INT_REGNO_P (reg))
17096 rtx breg;
17098 if (GET_CODE (XEXP (src, 0)) == PRE_INC
17099 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
17101 rtx delta_rtx;
17102 breg = XEXP (XEXP (src, 0), 0);
17103 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
17104 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
17105 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
17106 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17107 src = replace_equiv_address (src, breg);
17109 else if (! rs6000_offsettable_memref_p (src, reg_mode))
17111 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
17113 rtx basereg = XEXP (XEXP (src, 0), 0);
17114 if (TARGET_UPDATE)
17116 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
17117 emit_insn (gen_rtx_SET (VOIDmode, ndst,
17118 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
17119 used_update = true;
17121 else
17122 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17123 XEXP (XEXP (src, 0), 1)));
17124 src = replace_equiv_address (src, basereg);
17126 else
17128 rtx basereg = gen_rtx_REG (Pmode, reg);
17129 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
17130 src = replace_equiv_address (src, basereg);
17134 breg = XEXP (src, 0);
17135 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
17136 breg = XEXP (breg, 0);
17138 /* If the base register we are using to address memory is
17139 also a destination reg, then change that register last. */
17140 if (REG_P (breg)
17141 && REGNO (breg) >= REGNO (dst)
17142 && REGNO (breg) < REGNO (dst) + nregs)
17143 j = REGNO (breg) - REGNO (dst);
17145 else if (MEM_P (dst) && INT_REGNO_P (reg))
17147 rtx breg;
17149 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
17150 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
17152 rtx delta_rtx;
17153 breg = XEXP (XEXP (dst, 0), 0);
17154 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
17155 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
17156 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
17158 /* We have to update the breg before doing the store.
17159 Use store with update, if available. */
17161 if (TARGET_UPDATE)
17163 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17164 emit_insn (TARGET_32BIT
17165 ? (TARGET_POWERPC64
17166 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
17167 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
17168 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
17169 used_update = true;
17171 else
17172 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17173 dst = replace_equiv_address (dst, breg);
17175 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
17176 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
17178 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
17180 rtx basereg = XEXP (XEXP (dst, 0), 0);
17181 if (TARGET_UPDATE)
17183 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17184 emit_insn (gen_rtx_SET (VOIDmode,
17185 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
17186 used_update = true;
17188 else
17189 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17190 XEXP (XEXP (dst, 0), 1)));
17191 dst = replace_equiv_address (dst, basereg);
17193 else
17195 rtx basereg = XEXP (XEXP (dst, 0), 0);
17196 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
17197 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
17198 && REG_P (basereg)
17199 && REG_P (offsetreg)
17200 && REGNO (basereg) != REGNO (offsetreg));
17201 if (REGNO (basereg) == 0)
17203 rtx tmp = offsetreg;
17204 offsetreg = basereg;
17205 basereg = tmp;
17207 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
17208 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
17209 dst = replace_equiv_address (dst, basereg);
17212 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
17213 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
17216 for (i = 0; i < nregs; i++)
17218 /* Calculate index to next subword. */
17219 ++j;
17220 if (j == nregs)
17221 j = 0;
17223 /* If the compiler already emitted the move of the first word by
17224 store with update, there is no need to do anything more. */
17225 if (j == 0 && used_update)
17226 continue;
17228 emit_insn (gen_rtx_SET (VOIDmode,
17229 simplify_gen_subreg (reg_mode, dst, mode,
17230 j * reg_mode_size),
17231 simplify_gen_subreg (reg_mode, src, mode,
17232 j * reg_mode_size)));
17234 if (restore_basereg != NULL_RTX)
17235 emit_insn (restore_basereg);
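/* Illustration (not part of the original file): on a 32-bit target a
   DImode register-to-register move splits into two SImode moves, and
   the backwards walk above is what makes an overlapping copy such as
   dst = r4:r5, src = r3:r4 (REGNO (src) < REGNO (dst)) safe:

       mr r5,r4       # copy the higher-numbered subword first
       mr r4,r3       # r4 has already been read, so it may be clobbered

   The forward order would overwrite r4 before it was read.  */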
17240 /* This page contains routines that are used to determine what the
17241 function prologue and epilogue code will do and write them out. */
17243 static inline bool
17244 save_reg_p (int r)
17246 return !call_used_regs[r] && df_regs_ever_live_p (r);
17249 /* Return the first fixed-point register that is required to be
17250 saved. 32 if none. */
17252 int
17253 first_reg_to_save (void)
17255 int first_reg;
17257 /* Find lowest numbered live register. */
17258 for (first_reg = 13; first_reg <= 31; first_reg++)
17259 if (save_reg_p (first_reg))
17260 break;
17262 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17263 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17264 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17265 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17266 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17267 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17269 #if TARGET_MACHO
17270 if (flag_pic
17271 && crtl->uses_pic_offset_table
17272 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17273 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17274 #endif
17276 return first_reg;
17279 /* Similar, for FP regs. */
17281 int
17282 first_fp_reg_to_save (void)
17284 int first_reg;
17286 /* Find lowest numbered live register. */
17287 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17288 if (save_reg_p (first_reg))
17289 break;
17291 return first_reg;
17294 /* Similar, for AltiVec regs. */
17296 static int
17297 first_altivec_reg_to_save (void)
17299 int i;
17301 /* Stack frame remains as is unless we are in AltiVec ABI. */
17302 if (! TARGET_ALTIVEC_ABI)
17303 return LAST_ALTIVEC_REGNO + 1;
17305 /* On Darwin, the unwind routines are compiled without
17306 TARGET_ALTIVEC, and use save_world to save/restore the
17307 altivec registers when necessary. */
17308 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17309 && ! TARGET_ALTIVEC)
17310 return FIRST_ALTIVEC_REGNO + 20;
17312 /* Find lowest numbered live register. */
17313 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17314 if (save_reg_p (i))
17315 break;
17317 return i;
17320 /* Return a 32-bit mask of the AltiVec registers we need to set in
17321 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
17322 numbered with bit 0 as the MSB of the 32-bit word. */
17324 static unsigned int
17325 compute_vrsave_mask (void)
17327 unsigned int i, mask = 0;
17329 /* On Darwin, the unwind routines are compiled without
17330 TARGET_ALTIVEC, and use save_world to save/restore the
17331 call-saved altivec registers when necessary. */
17332 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17333 && ! TARGET_ALTIVEC)
17334 mask |= 0xFFF;
17336 /* First, find out if we use _any_ altivec registers. */
17337 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17338 if (df_regs_ever_live_p (i))
17339 mask |= ALTIVEC_REG_BIT (i);
17341 if (mask == 0)
17342 return mask;
17344 /* Next, remove the argument registers from the set. These must
17345 be in the VRSAVE mask set by the caller, so we don't need to add
17346 them in again. More importantly, the mask we compute here is
17347 used to generate CLOBBERs in the set_vrsave insn, and we do not
17348 wish the argument registers to die. */
17349 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17350 mask &= ~ALTIVEC_REG_BIT (i);
17352 /* Similarly, remove the return value from the set. */
17354 bool yes = false;
17355 diddle_return_value (is_altivec_return_reg, &yes);
17356 if (yes)
17357 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17360 return mask;
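/* Worked example (not part of the original file): ALTIVEC_REG_BIT uses
   the bit-0-is-MSB numbering described above, effectively
   0x80000000 >> (REGNO - FIRST_ALTIVEC_REGNO).  A function whose only
   live vector registers are v20 and v21 therefore gets

       mask = (0x80000000 >> 20) | (0x80000000 >> 21) = 0x00000c00

   and only those two bits are set in VRSAVE on entry.  */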
17363 /* For a very restricted set of circumstances, we can cut down the
17364 size of prologues/epilogues by calling our own save/restore-the-world
17365 routines. */
17367 static void
17368 compute_save_world_info (rs6000_stack_t *info_ptr)
17370 info_ptr->world_save_p = 1;
17371 info_ptr->world_save_p
17372 = (WORLD_SAVE_P (info_ptr)
17373 && DEFAULT_ABI == ABI_DARWIN
17374 && !cfun->has_nonlocal_label
17375 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17376 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17377 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17378 && info_ptr->cr_save_p);
17380 /* This will not work in conjunction with sibcalls. Make sure there
17381 are none. (This check is expensive, but seldom executed.) */
17382 if (WORLD_SAVE_P (info_ptr))
17384 rtx insn;
17385 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17386 if ( GET_CODE (insn) == CALL_INSN
17387 && SIBLING_CALL_P (insn))
17389 info_ptr->world_save_p = 0;
17390 break;
17394 if (WORLD_SAVE_P (info_ptr))
17396 /* Even if we're not touching VRsave, make sure there's room on the
17397 stack for it, if it looks like we're calling SAVE_WORLD, which
17398 will attempt to save it. */
17399 info_ptr->vrsave_size = 4;
17401 /* If we are going to save the world, we need to save the link register too. */
17402 info_ptr->lr_save_p = 1;
17404 /* "Save" the VRsave register too if we're saving the world. */
17405 if (info_ptr->vrsave_mask == 0)
17406 info_ptr->vrsave_mask = compute_vrsave_mask ();
17408 /* Because the Darwin register save/restore routines only handle
17409 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17410 check. */
17411 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17412 && (info_ptr->first_altivec_reg_save
17413 >= FIRST_SAVED_ALTIVEC_REGNO));
17415 return;
17419 static void
17420 is_altivec_return_reg (rtx reg, void *xyes)
17422 bool *yes = (bool *) xyes;
17423 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17424 *yes = true;
17428 /* Look for user-defined global regs in the range FIRST to LAST-1.
17429 We should not restore these, and so cannot use lmw or out-of-line
17430 restore functions if there are any. We also can't save them
17431 (well, emit frame notes for them), because frame unwinding during
17432 exception handling will restore saved registers. */
17434 static bool
17435 global_regs_p (unsigned first, unsigned last)
17437 while (first < last)
17438 if (global_regs[first++])
17439 return true;
17440 return false;
17443 /* Determine the strategy for savings/restoring registers. */
17445 enum {
17446 SAVRES_MULTIPLE = 0x1,
17447 SAVE_INLINE_FPRS = 0x2,
17448 SAVE_INLINE_GPRS = 0x4,
17449 REST_INLINE_FPRS = 0x8,
17450 REST_INLINE_GPRS = 0x10,
17451 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17452 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17453 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17454 SAVE_INLINE_VRS = 0x100,
17455 REST_INLINE_VRS = 0x200
17458 static int
17459 rs6000_savres_strategy (rs6000_stack_t *info,
17460 bool using_static_chain_p)
17462 int strategy = 0;
17463 bool lr_save_p;
17465 if (TARGET_MULTIPLE
17466 && !TARGET_POWERPC64
17467 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17468 && info->first_gp_reg_save < 31
17469 && !global_regs_p (info->first_gp_reg_save, 32))
17470 strategy |= SAVRES_MULTIPLE;
17472 if (crtl->calls_eh_return
17473 || cfun->machine->ra_need_lr)
17474 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17475 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17476 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17478 if (info->first_fp_reg_save == 64
17479 /* The out-of-line FP routines use double-precision stores;
17480 we can't use those routines if we don't have such stores. */
17481 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17482 || global_regs_p (info->first_fp_reg_save, 64))
17483 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17485 if (info->first_gp_reg_save == 32
17486 || (!(strategy & SAVRES_MULTIPLE)
17487 && global_regs_p (info->first_gp_reg_save, 32)))
17488 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17490 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
17491 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
17492 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17494 /* Define cutoff for using out-of-line functions to save registers. */
17495 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
17497 if (!optimize_size)
17499 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17500 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17501 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17503 else
17505 /* Prefer out-of-line restore if it will exit. */
17506 if (info->first_fp_reg_save > 61)
17507 strategy |= SAVE_INLINE_FPRS;
17508 if (info->first_gp_reg_save > 29)
17510 if (info->first_fp_reg_save == 64)
17511 strategy |= SAVE_INLINE_GPRS;
17512 else
17513 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17515 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
17516 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17519 else if (DEFAULT_ABI == ABI_DARWIN)
17521 if (info->first_fp_reg_save > 60)
17522 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17523 if (info->first_gp_reg_save > 29)
17524 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17525 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17527 else
17529 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
17530 if (info->first_fp_reg_save > 61)
17531 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17532 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17533 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17536 /* Don't bother to try to save things out-of-line if r11 is occupied
17537 by the static chain. It would require too much fiddling and the
17538 static chain is rarely used anyway. FPRs are saved w.r.t the stack
17539 pointer on Darwin, and AIX uses r1 or r12. */
17540 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
17541 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
17542 | SAVE_INLINE_GPRS
17543 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17545 /* We can only use the out-of-line routines to restore if we've
17546 saved all the registers from first_fp_reg_save in the prologue.
17547 Otherwise, we risk loading garbage. */
17548 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
17550 int i;
17552 for (i = info->first_fp_reg_save; i < 64; i++)
17553 if (!save_reg_p (i))
17555 strategy |= REST_INLINE_FPRS;
17556 break;
17560 /* If we are going to use store multiple, then don't even bother
17561 with the out-of-line routines, since the store-multiple
17562 instruction will always be smaller. */
17563 if ((strategy & SAVRES_MULTIPLE))
17564 strategy |= SAVE_INLINE_GPRS;
17566 /* info->lr_save_p isn't yet set if the only reason lr needs to be
17567 saved is an out-of-line save or restore. Set up the value for
17568 the next test (excluding out-of-line gpr restore). */
17569 lr_save_p = (info->lr_save_p
17570 || !(strategy & SAVE_INLINE_GPRS)
17571 || !(strategy & SAVE_INLINE_FPRS)
17572 || !(strategy & SAVE_INLINE_VRS)
17573 || !(strategy & REST_INLINE_FPRS)
17574 || !(strategy & REST_INLINE_VRS));
17576 /* The situation is more complicated with load multiple. We'd
17577 prefer to use the out-of-line routines for restores, since the
17578 "exit" out-of-line routines can handle the restore of LR and the
17579 frame teardown. However, it doesn't make sense to use the
17580 out-of-line routine if that is the only reason we'd need to save
17581 LR, and we can't use the "exit" out-of-line gpr restore if we
17582 have saved some fprs; in those cases it is advantageous to use
17583 load multiple when available. */
17584 if ((strategy & SAVRES_MULTIPLE)
17585 && (!lr_save_p
17586 || info->first_fp_reg_save != 64))
17587 strategy |= REST_INLINE_GPRS;
17589 /* Saving CR interferes with the exit routines used on the SPE, so
17590 just punt here. */
17591 if (TARGET_SPE_ABI
17592 && info->spe_64bit_regs_used
17593 && info->cr_save_p)
17594 strategy |= REST_INLINE_GPRS;
17596 /* We can only use load multiple or the out-of-line routines to
17597 restore if we've used store multiple or out-of-line routines
17598 in the prologue, i.e. if we've saved all the registers from
17599 first_gp_reg_save. Otherwise, we risk loading garbage. */
17600 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
17601 == SAVE_INLINE_GPRS)
17603 int i;
17605 for (i = info->first_gp_reg_save; i < 32; i++)
17606 if (!save_reg_p (i))
17608 strategy |= REST_INLINE_GPRS;
17609 break;
17613 if (TARGET_ELF && TARGET_64BIT)
17615 if (!(strategy & SAVE_INLINE_FPRS))
17616 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17617 else if (!(strategy & SAVE_INLINE_GPRS)
17618 && info->first_fp_reg_save == 64)
17619 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
17621 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
17622 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
17624 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
17625 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17627 return strategy;
17630 /* Calculate the stack information for the current function. This is
17631 complicated by having two separate calling sequences, the AIX calling
17632 sequence and the V.4 calling sequence.
17634 AIX (and Darwin/Mac OS X) stack frames look like:
17635 32-bit 64-bit
17636 SP----> +---------------------------------------+
17637 | back chain to caller | 0 0
17638 +---------------------------------------+
17639 | saved CR | 4 8 (8-11)
17640 +---------------------------------------+
17641 | saved LR | 8 16
17642 +---------------------------------------+
17643 | reserved for compilers | 12 24
17644 +---------------------------------------+
17645 | reserved for binders | 16 32
17646 +---------------------------------------+
17647 | saved TOC pointer | 20 40
17648 +---------------------------------------+
17649 | Parameter save area (P) | 24 48
17650 +---------------------------------------+
17651 | Alloca space (A) | 24+P etc.
17652 +---------------------------------------+
17653 | Local variable space (L) | 24+P+A
17654 +---------------------------------------+
17655 | Float/int conversion temporary (X) | 24+P+A+L
17656 +---------------------------------------+
17657 | Save area for AltiVec registers (W) | 24+P+A+L+X
17658 +---------------------------------------+
17659 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17660 +---------------------------------------+
17661 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17662 +---------------------------------------+
17663 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17664 +---------------------------------------+
17665 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17666 +---------------------------------------+
17667 old SP->| back chain to caller's caller |
17668 +---------------------------------------+
17670 The required alignment for AIX configurations is two words (i.e., 8
17671 or 16 bytes).
17674 V.4 stack frames look like:
17676 SP----> +---------------------------------------+
17677 | back chain to caller | 0
17678 +---------------------------------------+
17679 | caller's saved LR | 4
17680 +---------------------------------------+
17681 | Parameter save area (P) | 8
17682 +---------------------------------------+
17683 | Alloca space (A) | 8+P
17684 +---------------------------------------+
17685 | Varargs save area (V) | 8+P+A
17686 +---------------------------------------+
17687 | Local variable space (L) | 8+P+A+V
17688 +---------------------------------------+
17689 | Float/int conversion temporary (X) | 8+P+A+V+L
17690 +---------------------------------------+
17691 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17692 +---------------------------------------+
17693 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17694 +---------------------------------------+
17695 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17696 +---------------------------------------+
17697 | SPE: area for 64-bit GP registers |
17698 +---------------------------------------+
17699 | SPE alignment padding |
17700 +---------------------------------------+
17701 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17702 +---------------------------------------+
17703 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17704 +---------------------------------------+
17705 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17706 +---------------------------------------+
17707 old SP->| back chain to caller's caller |
17708 +---------------------------------------+
17710 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17711 given. (But note below and in sysv4.h that we require only 8 and
17712 may round up the size of our stack frame anyway. The historical
17713 reason is that early versions of powerpc-linux didn't properly
17714 align the stack at program startup. A happy side-effect is that
17715 -mno-eabi libraries can be used with -meabi programs.)
17717 The EABI configuration defaults to the V.4 layout. However,
17718 the stack alignment requirements may differ. If -mno-eabi is not
17719 given, the required stack alignment is 8 bytes; if -mno-eabi is
17720 given, the required alignment is 16 bytes. (But see V.4 comment
17721 above.) */
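/* Worked example (not part of the original file): with the AIX 32-bit
   layout above, a function that saves only f30-f31 and r28-r31 gets,
   from rs6000_stack_info below:

       fp_size = 8 * (64 - 62) = 16   fp_save_offset = -16
       gp_size = 4 * (32 - 28) = 16   gp_save_offset = -32
       cr_save_offset = 4             lr_save_offset = 8

   i.e. the register save areas lie at negative offsets below the
   incoming SP, while CR and LR are stored into the fixed header of the
   caller's frame at small positive offsets.  */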
17723 #ifndef ABI_STACK_BOUNDARY
17724 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17725 #endif
17727 static rs6000_stack_t *
17728 rs6000_stack_info (void)
17730 rs6000_stack_t *info_ptr = &stack_info;
17731 int reg_size = TARGET_32BIT ? 4 : 8;
17732 int ehrd_size;
17733 int save_align;
17734 int first_gp;
17735 HOST_WIDE_INT non_fixed_size;
17736 bool using_static_chain_p;
17738 if (reload_completed && info_ptr->reload_completed)
17739 return info_ptr;
17741 memset (info_ptr, 0, sizeof (*info_ptr));
17742 info_ptr->reload_completed = reload_completed;
17744 if (TARGET_SPE)
17746 /* Cache value so we don't rescan instruction chain over and over. */
17747 if (cfun->machine->insn_chain_scanned_p == 0)
17748 cfun->machine->insn_chain_scanned_p
17749 = spe_func_has_64bit_regs_p () + 1;
17750 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17753 /* Select which calling sequence. */
17754 info_ptr->abi = DEFAULT_ABI;
17756 /* Calculate which registers need to be saved & save area size. */
17757 info_ptr->first_gp_reg_save = first_reg_to_save ();
17758 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17759 even if it currently looks like we won't. Reload may need it to
17760 get at a constant; if so, it will have already created a constant
17761 pool entry for it. */
17762 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17763 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17764 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17765 && crtl->uses_const_pool
17766 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17767 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17768 else
17769 first_gp = info_ptr->first_gp_reg_save;
17771 info_ptr->gp_size = reg_size * (32 - first_gp);
17773 /* For the SPE, we have an additional upper 32-bits on each GPR.
17774 Ideally we should save the entire 64-bits only when the upper
17775 half is used in SIMD instructions. Since we only record
17776 which registers are live (not the size they are used in), this proves
17777 difficult because we'd have to traverse the instruction chain at
17778 the right time, taking reload into account. This is a real pain,
17779 so we opt to always save the GPRs in 64-bits if even one register
17780 is used in 64-bits. Otherwise, all the registers in the frame
17781 get saved in 32-bits.
17783 So, when we save all GPRs (except the SP) in 64-bits, the
17784 traditional GP save area will be empty. */
17785 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17786 info_ptr->gp_size = 0;
17788 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17789 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17791 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17792 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17793 - info_ptr->first_altivec_reg_save);
17795 /* Does this function call anything? */
17796 info_ptr->calls_p = (! crtl->is_leaf
17797 || cfun->machine->ra_needs_full_frame);
17799 /* Determine if we need to save the condition code registers. */
17800 if (df_regs_ever_live_p (CR2_REGNO)
17801 || df_regs_ever_live_p (CR3_REGNO)
17802 || df_regs_ever_live_p (CR4_REGNO))
17804 info_ptr->cr_save_p = 1;
17805 if (DEFAULT_ABI == ABI_V4)
17806 info_ptr->cr_size = reg_size;
17809 /* If the current function calls __builtin_eh_return, then we need
17810 to allocate stack space for registers that will hold data for
17811 the exception handler. */
17812 if (crtl->calls_eh_return)
17814 unsigned int i;
17815 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17816 continue;
17818 /* SPE saves EH registers in 64-bits. */
17819 ehrd_size = i * (TARGET_SPE_ABI
17820 && info_ptr->spe_64bit_regs_used != 0
17821 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17823 else
17824 ehrd_size = 0;
17826 /* Determine various sizes. */
17827 info_ptr->reg_size = reg_size;
17828 info_ptr->fixed_size = RS6000_SAVE_AREA;
17829 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17830 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17831 TARGET_ALTIVEC ? 16 : 8);
17832 if (FRAME_GROWS_DOWNWARD)
17833 info_ptr->vars_size
17834 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
17835 + info_ptr->parm_size,
17836 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
17837 - (info_ptr->fixed_size + info_ptr->vars_size
17838 + info_ptr->parm_size);
17840 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17841 info_ptr->spe_gp_size = 8 * (32 - first_gp);
17842 else
17843 info_ptr->spe_gp_size = 0;
17845 if (TARGET_ALTIVEC_ABI)
17846 info_ptr->vrsave_mask = compute_vrsave_mask ();
17847 else
17848 info_ptr->vrsave_mask = 0;
17850 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
17851 info_ptr->vrsave_size = 4;
17852 else
17853 info_ptr->vrsave_size = 0;
17855 compute_save_world_info (info_ptr);
17857 /* Calculate the offsets. */
17858 switch (DEFAULT_ABI)
17860 case ABI_NONE:
17861 default:
17862 gcc_unreachable ();
17864 case ABI_AIX:
17865 case ABI_DARWIN:
17866 info_ptr->fp_save_offset = - info_ptr->fp_size;
17867 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17869 if (TARGET_ALTIVEC_ABI)
17871 info_ptr->vrsave_save_offset
17872 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
17874 /* Align stack so vector save area is on a quadword boundary.
17875 The padding goes above the vectors. */
17876 if (info_ptr->altivec_size != 0)
17877 info_ptr->altivec_padding_size
17878 = info_ptr->vrsave_save_offset & 0xF;
17879 else
17880 info_ptr->altivec_padding_size = 0;
17882 info_ptr->altivec_save_offset
17883 = info_ptr->vrsave_save_offset
17884 - info_ptr->altivec_padding_size
17885 - info_ptr->altivec_size;
17886 gcc_assert (info_ptr->altivec_size == 0
17887 || info_ptr->altivec_save_offset % 16 == 0);
17889 /* Adjust for AltiVec case. */
17890 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
17892 else
17893 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
17894 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
17895 info_ptr->lr_save_offset = 2*reg_size;
17896 break;
17898 case ABI_V4:
17899 info_ptr->fp_save_offset = - info_ptr->fp_size;
17900 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17901 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
17903 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17905 /* Align stack so SPE GPR save area is aligned on a
17906 double-word boundary. */
17907 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
17908 info_ptr->spe_padding_size
17909 = 8 - (-info_ptr->cr_save_offset % 8);
17910 else
17911 info_ptr->spe_padding_size = 0;
17913 info_ptr->spe_gp_save_offset
17914 = info_ptr->cr_save_offset
17915 - info_ptr->spe_padding_size
17916 - info_ptr->spe_gp_size;
17918 /* Adjust for SPE case. */
17919 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
17921 else if (TARGET_ALTIVEC_ABI)
17923 info_ptr->vrsave_save_offset
17924 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
17926 /* Align stack so vector save area is on a quadword boundary. */
17927 if (info_ptr->altivec_size != 0)
17928 info_ptr->altivec_padding_size
17929 = 16 - (-info_ptr->vrsave_save_offset % 16);
17930 else
17931 info_ptr->altivec_padding_size = 0;
17933 info_ptr->altivec_save_offset
17934 = info_ptr->vrsave_save_offset
17935 - info_ptr->altivec_padding_size
17936 - info_ptr->altivec_size;
17938 /* Adjust for AltiVec case. */
17939 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
17941 else
17942 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
17943 info_ptr->ehrd_offset -= ehrd_size;
17944 info_ptr->lr_save_offset = reg_size;
17945 break;
17948 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
17949 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
17950 + info_ptr->gp_size
17951 + info_ptr->altivec_size
17952 + info_ptr->altivec_padding_size
17953 + info_ptr->spe_gp_size
17954 + info_ptr->spe_padding_size
17955 + ehrd_size
17956 + info_ptr->cr_size
17957 + info_ptr->vrsave_size,
17958 save_align);
17960 non_fixed_size = (info_ptr->vars_size
17961 + info_ptr->parm_size
17962 + info_ptr->save_size);
17964 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
17965 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
17967 /* Determine if we need to save the link register. */
17968 if (info_ptr->calls_p
17969 || (DEFAULT_ABI == ABI_AIX
17970 && crtl->profile
17971 && !TARGET_PROFILE_KERNEL)
17972 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
17973 #ifdef TARGET_RELOCATABLE
17974 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
17975 #endif
17976 || rs6000_ra_ever_killed ())
17977 info_ptr->lr_save_p = 1;
17979 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
17980 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
17981 && call_used_regs[STATIC_CHAIN_REGNUM]);
17982 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
17983 using_static_chain_p);
17985 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
17986 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
17987 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
17988 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
17989 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
17990 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
17991 info_ptr->lr_save_p = 1;
17993 if (info_ptr->lr_save_p)
17994 df_set_regs_ever_live (LR_REGNO, true);
17996 /* Determine if we need to allocate any stack frame:
17998 For AIX we need to push the stack if a frame pointer is needed
17999 (because the stack might be dynamically adjusted), if we are
18000 debugging, if we make calls, or if the sum of fp_save, gp_save,
18001 and local variables are more than the space needed to save all
18002 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
18003 + 18*8 = 288 (GPR13 reserved).
18005 For V.4 we don't have the stack cushion that AIX uses, but assume
18006 that the debugger can handle stackless frames. */
18008 if (info_ptr->calls_p)
18009 info_ptr->push_p = 1;
18011 else if (DEFAULT_ABI == ABI_V4)
18012 info_ptr->push_p = non_fixed_size != 0;
18014 else if (frame_pointer_needed)
18015 info_ptr->push_p = 1;
18017 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
18018 info_ptr->push_p = 1;
18020 else
18021 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
18023 /* Zero offsets if we're not saving those registers. */
18024 if (info_ptr->fp_size == 0)
18025 info_ptr->fp_save_offset = 0;
18027 if (info_ptr->gp_size == 0)
18028 info_ptr->gp_save_offset = 0;
18030 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
18031 info_ptr->altivec_save_offset = 0;
18033 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
18034 info_ptr->vrsave_save_offset = 0;
18036 if (! TARGET_SPE_ABI
18037 || info_ptr->spe_64bit_regs_used == 0
18038 || info_ptr->spe_gp_size == 0)
18039 info_ptr->spe_gp_save_offset = 0;
18041 if (! info_ptr->lr_save_p)
18042 info_ptr->lr_save_offset = 0;
18044 if (! info_ptr->cr_save_p)
18045 info_ptr->cr_save_offset = 0;
18047 return info_ptr;
18050 /* Return true if the current function uses any GPRs in 64-bit SIMD
18051 mode. */
18053 static bool
18054 spe_func_has_64bit_regs_p (void)
18056 rtx insns, insn;
18058 /* Functions that save and restore all the call-saved registers will
18059 need to save/restore the registers in 64-bits. */
18060 if (crtl->calls_eh_return
18061 || cfun->calls_setjmp
18062 || crtl->has_nonlocal_goto)
18063 return true;
18065 insns = get_insns ();
18067 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
18069 if (INSN_P (insn))
18071 rtx i;
18073 /* FIXME: This should be implemented with attributes...
18075 (set_attr "spe64" "true")....then,
18076 if (get_spe64(insn)) return true;
18078 It's the only reliable way to do the stuff below. */
18080 i = PATTERN (insn);
18081 if (GET_CODE (i) == SET)
18083 enum machine_mode mode = GET_MODE (SET_SRC (i));
18085 if (SPE_VECTOR_MODE (mode))
18086 return true;
18087 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
18088 return true;
18093 return false;
18096 static void
18097 debug_stack_info (rs6000_stack_t *info)
18099 const char *abi_string;
18101 if (! info)
18102 info = rs6000_stack_info ();
18104 fprintf (stderr, "\nStack information for function %s:\n",
18105 ((current_function_decl && DECL_NAME (current_function_decl))
18106 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
18107 : "<unknown>"));
18109 switch (info->abi)
18111 default: abi_string = "Unknown"; break;
18112 case ABI_NONE: abi_string = "NONE"; break;
18113 case ABI_AIX: abi_string = "AIX"; break;
18114 case ABI_DARWIN: abi_string = "Darwin"; break;
18115 case ABI_V4: abi_string = "V.4"; break;
18118 fprintf (stderr, "\tABI = %5s\n", abi_string);
18120 if (TARGET_ALTIVEC_ABI)
18121 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
18123 if (TARGET_SPE_ABI)
18124 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
18126 if (info->first_gp_reg_save != 32)
18127 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
18129 if (info->first_fp_reg_save != 64)
18130 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
18132 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
18133 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
18134 info->first_altivec_reg_save);
18136 if (info->lr_save_p)
18137 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
18139 if (info->cr_save_p)
18140 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
18142 if (info->vrsave_mask)
18143 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
18145 if (info->push_p)
18146 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
18148 if (info->calls_p)
18149 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
18151 if (info->gp_save_offset)
18152 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
18154 if (info->fp_save_offset)
18155 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
18157 if (info->altivec_save_offset)
18158 fprintf (stderr, "\taltivec_save_offset = %5d\n",
18159 info->altivec_save_offset);
18161 if (info->spe_gp_save_offset)
18162 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
18163 info->spe_gp_save_offset);
18165 if (info->vrsave_save_offset)
18166 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
18167 info->vrsave_save_offset);
18169 if (info->lr_save_offset)
18170 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
18172 if (info->cr_save_offset)
18173 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
18175 if (info->varargs_save_offset)
18176 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
18178 if (info->total_size)
18179 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18180 info->total_size);
18182 if (info->vars_size)
18183 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18184 info->vars_size);
18186 if (info->parm_size)
18187 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
18189 if (info->fixed_size)
18190 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
18192 if (info->gp_size)
18193 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
18195 if (info->spe_gp_size)
18196 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
18198 if (info->fp_size)
18199 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
18201 if (info->altivec_size)
18202 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
18204 if (info->vrsave_size)
18205 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
18207 if (info->altivec_padding_size)
18208 fprintf (stderr, "\taltivec_padding_size= %5d\n",
18209 info->altivec_padding_size);
18211 if (info->spe_padding_size)
18212 fprintf (stderr, "\tspe_padding_size = %5d\n",
18213 info->spe_padding_size);
18215 if (info->cr_size)
18216 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
18218 if (info->save_size)
18219 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18221 if (info->reg_size != 4)
18222 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18224 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18226 fprintf (stderr, "\n");
18229 rtx
18230 rs6000_return_addr (int count, rtx frame)
18232 /* Currently we don't optimize very well between prolog and body
18233 code, and for PIC code the result can actually be quite bad, so
18234 don't try to be too clever here. */
18235 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18237 cfun->machine->ra_needs_full_frame = 1;
18239 return
18240 gen_rtx_MEM
18241 (Pmode,
18242 memory_address
18243 (Pmode,
18244 plus_constant (Pmode,
18245 copy_to_reg
18246 (gen_rtx_MEM (Pmode,
18247 memory_address (Pmode, frame))),
18248 RETURN_ADDRESS_OFFSET)));
18251 cfun->machine->ra_need_lr = 1;
18252 return get_hard_reg_initial_val (Pmode, LR_REGNO);
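/* Illustration (not part of the original file): for COUNT != 0 (or for
   PIC code) the path above does not use the LR value at all; it chases
   the back chain on the stack and loads the word at
   RETURN_ADDRESS_OFFSET, where the ABI keeps the saved LR.  That is the
   machinery behind: */

void *
caller_return_address (void)
{
  return __builtin_return_address (1);   /* walks the back chain */
}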
18255 /* Say whether a function is a candidate for sibcall handling or not. */
18257 static bool
18258 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18260 tree fntype;
18262 if (decl)
18263 fntype = TREE_TYPE (decl);
18264 else
18265 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18267 /* We can't do it if the called function has more vector parameters
18268 than the current function; there's nowhere to put the VRsave code. */
18269 if (TARGET_ALTIVEC_ABI
18270 && TARGET_ALTIVEC_VRSAVE
18271 && !(decl && decl == current_function_decl))
18273 function_args_iterator args_iter;
18274 tree type;
18275 int nvreg = 0;
18277 /* Functions with vector parameters are required to have a
18278 prototype, so the argument type info must be available
18279 here. */
18280 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18281 if (TREE_CODE (type) == VECTOR_TYPE
18282 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18283 nvreg++;
18285 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18286 if (TREE_CODE (type) == VECTOR_TYPE
18287 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18288 nvreg--;
18290 if (nvreg > 0)
18291 return false;
18294 /* Under the AIX ABI we can't allow calls to non-local functions,
18295 because the callee may have a different TOC pointer from the
18296 caller and there's no way to ensure we restore the TOC when we
18297 return. With the secure-plt SYSV ABI we can't make non-local
18298 calls when -fpic/PIC because the plt call stubs use r30. */
18299 if (DEFAULT_ABI == ABI_DARWIN
18300 || (DEFAULT_ABI == ABI_AIX
18301 && decl
18302 && !DECL_EXTERNAL (decl)
18303 && (*targetm.binds_local_p) (decl))
18304 || (DEFAULT_ABI == ABI_V4
18305 && (!TARGET_SECURE_PLT
18306 || !flag_pic
18307 || (decl
18308 && (*targetm.binds_local_p) (decl)))))
18310 tree attr_list = TYPE_ATTRIBUTES (fntype);
18312 if (!lookup_attribute ("longcall", attr_list)
18313 || lookup_attribute ("shortcall", attr_list))
18314 return true;
18317 return false;
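/* Editorial sketch (not part of the build): the VRsave check above
   nets out to a single comparison -- reject the sibcall if the callee
   takes more AltiVec/VSX vector arguments than the caller, since the
   caller's VRsave prologue is all the callee gets.  Hypothetical
   restatement:  */
#if 0
static bool
sibcall_vrsave_ok_p (int callee_vec_args, int caller_vec_args)
{
  /* Mirrors the two counting loops above: nvreg = callee - caller.  */
  return callee_vec_args - caller_vec_args <= 0;
}
#endif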
18320 /* Return NULL if INSN is valid within a low-overhead loop.
18321 Otherwise return a string describing why doloop cannot be applied.
18322 PowerPC uses the COUNT register for branch-on-table instructions.  */
18324 static const char *
18325 rs6000_invalid_within_doloop (const_rtx insn)
18327 if (CALL_P (insn))
18328 return "Function call in the loop.";
18330 if (JUMP_P (insn)
18331 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
18332 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
18333 return "Computed branch in the loop.";
18335 return NULL;
18338 static int
18339 rs6000_ra_ever_killed (void)
18341 rtx top;
18342 rtx reg;
18343 rtx insn;
18345 if (cfun->is_thunk)
18346 return 0;
18348 if (cfun->machine->lr_save_state)
18349 return cfun->machine->lr_save_state - 1;
18351 /* regs_ever_live has LR marked as used if any sibcalls are present,
18352 but this should not force saving and restoring in the
18353 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18354 clobbers LR, so that is inappropriate. */
18356 /* Also, the prologue can generate a store into LR that
18357 doesn't really count, like this:
18359 move LR->R0
18360 bcl to set PIC register
18361 move LR->R31
18362 move R0->LR
18364 When we're called from the epilogue, we need to avoid counting
18365 this as a store. */
18367 push_topmost_sequence ();
18368 top = get_insns ();
18369 pop_topmost_sequence ();
18370 reg = gen_rtx_REG (Pmode, LR_REGNO);
18372 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18374 if (INSN_P (insn))
18376 if (CALL_P (insn))
18378 if (!SIBLING_CALL_P (insn))
18379 return 1;
18381 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18382 return 1;
18383 else if (set_of (reg, insn) != NULL_RTX
18384 && !prologue_epilogue_contains (insn))
18385 return 1;
18388 return 0;
18391 /* Emit instructions needed to load the TOC register.
18392 This is needed only when TARGET_TOC and TARGET_MINIMAL_TOC are set
18393 and there is a constant pool, or for SVR4 -fpic.  */
18395 void
18396 rs6000_emit_load_toc_table (int fromprolog)
18398 rtx dest;
18399 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18401 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18403 char buf[30];
18404 rtx lab, tmp1, tmp2, got;
18406 lab = gen_label_rtx ();
18407 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18408 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18409 if (flag_pic == 2)
18410 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18411 else
18412 got = rs6000_got_sym ();
18413 tmp1 = tmp2 = dest;
18414 if (!fromprolog)
18416 tmp1 = gen_reg_rtx (Pmode);
18417 tmp2 = gen_reg_rtx (Pmode);
18419 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18420 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18421 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18422 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18424 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18426 emit_insn (gen_load_toc_v4_pic_si ());
18427 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18429 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18431 char buf[30];
18432 rtx temp0 = (fromprolog
18433 ? gen_rtx_REG (Pmode, 0)
18434 : gen_reg_rtx (Pmode));
18436 if (fromprolog)
18438 rtx symF, symL;
18440 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18441 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18443 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18444 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18446 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18447 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18448 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18450 else
18452 rtx tocsym, lab;
18454 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18455 lab = gen_label_rtx ();
18456 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18457 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18458 if (TARGET_LINK_STACK)
18459 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18460 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18462 emit_insn (gen_addsi3 (dest, temp0, dest));
18464 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18466 /* This is for AIX code running in non-PIC ELF32. */
18467 char buf[30];
18468 rtx realsym;
18469 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18470 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18472 emit_insn (gen_elf_high (dest, realsym));
18473 emit_insn (gen_elf_low (dest, dest, realsym));
18475 else
18477 gcc_assert (DEFAULT_ABI == ABI_AIX);
18479 if (TARGET_32BIT)
18480 emit_insn (gen_load_toc_aix_si (dest));
18481 else
18482 emit_insn (gen_load_toc_aix_di (dest));
18486 /* Emit instructions to restore the link register after determining where
18487 its value has been stored. */
18489 void
18490 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18492 rs6000_stack_t *info = rs6000_stack_info ();
18493 rtx operands[2];
18495 operands[0] = source;
18496 operands[1] = scratch;
18498 if (info->lr_save_p)
18500 rtx frame_rtx = stack_pointer_rtx;
18501 HOST_WIDE_INT sp_offset = 0;
18502 rtx tmp;
18504 if (frame_pointer_needed
18505 || cfun->calls_alloca
18506 || info->total_size > 32767)
18508 tmp = gen_frame_mem (Pmode, frame_rtx);
18509 emit_move_insn (operands[1], tmp);
18510 frame_rtx = operands[1];
18512 else if (info->push_p)
18513 sp_offset = info->total_size;
18515 tmp = plus_constant (Pmode, frame_rtx,
18516 info->lr_save_offset + sp_offset);
18517 tmp = gen_frame_mem (Pmode, tmp);
18518 emit_move_insn (tmp, operands[0]);
18520 else
18521 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
18523 /* Freeze lr_save_p. We've just emitted rtl that depends on the
18524 state of lr_save_p so any change from here on would be a bug. In
18525 particular, stop rs6000_ra_ever_killed from considering the SET
18526 of lr we may have added just above. */
18527 cfun->machine->lr_save_state = info->lr_save_p + 1;
18530 static GTY(()) alias_set_type set = -1;
18532 alias_set_type
18533 get_TOC_alias_set (void)
18535 if (set == -1)
18536 set = new_alias_set ();
18537 return set;
18540 /* Return nonzero if the current function uses the TOC.  This is
18541 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
18542 is generated by the ABI_V4 load_toc_* patterns. */
18543 #if TARGET_ELF
18544 static int
18545 uses_TOC (void)
18547 rtx insn;
18549 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18550 if (INSN_P (insn))
18552 rtx pat = PATTERN (insn);
18553 int i;
18555 if (GET_CODE (pat) == PARALLEL)
18556 for (i = 0; i < XVECLEN (pat, 0); i++)
18558 rtx sub = XVECEXP (pat, 0, i);
18559 if (GET_CODE (sub) == USE)
18561 sub = XEXP (sub, 0);
18562 if (GET_CODE (sub) == UNSPEC
18563 && XINT (sub, 1) == UNSPEC_TOC)
18564 return 1;
18568 return 0;
18570 #endif
18573 create_TOC_reference (rtx symbol, rtx largetoc_reg)
18575 rtx tocrel, tocreg, hi;
18577 if (TARGET_DEBUG_ADDR)
18579 if (GET_CODE (symbol) == SYMBOL_REF)
18580 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
18581 XSTR (symbol, 0));
18582 else
18584 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
18585 GET_RTX_NAME (GET_CODE (symbol)));
18586 debug_rtx (symbol);
18590 if (!can_create_pseudo_p ())
18591 df_set_regs_ever_live (TOC_REGISTER, true);
18593 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
18594 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
18595 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
18596 return tocrel;
18598 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
18599 if (largetoc_reg != NULL)
18601 emit_move_insn (largetoc_reg, hi);
18602 hi = largetoc_reg;
18604 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
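/* Editorial sketch (not part of the build): the HIGH/LO_SUM pair used
   for the non-small code models relies on the usual PowerPC hi/lo
   address split.  Because the hardware sign-extends the low 16 bits,
   the high half must be rounded up whenever bit 15 of the offset is
   set.  Hypothetical standalone illustration:  */
#if 0
static void
split_hi_lo (unsigned int addr, unsigned int *hi, int *lo)
{
  *hi = (addr + 0x8000) >> 16;		/* the "addis" operand */
  *lo = (short) (addr & 0xffff);	/* sign-extended low half */
  /* Invariant: (*hi << 16) + *lo == addr.  */
}
#endif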
18607 /* Issue assembly directives that create a reference to the given DWARF
18608 FRAME_TABLE_LABEL from the current function section. */
18609 void
18610 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
18612 fprintf (asm_out_file, "\t.ref %s\n",
18613 (* targetm.strip_name_encoding) (frame_table_label));
18616 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18617 and the change to the stack pointer. */
18619 static void
18620 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
18622 rtvec p;
18623 int i;
18624 rtx regs[3];
18626 i = 0;
18627 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18628 if (hard_frame_needed)
18629 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
18630 if (!(REGNO (fp) == STACK_POINTER_REGNUM
18631 || (hard_frame_needed
18632 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
18633 regs[i++] = fp;
18635 p = rtvec_alloc (i);
18636 while (--i >= 0)
18638 rtx mem = gen_frame_mem (BLKmode, regs[i]);
18639 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
18642 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
18645 /* Emit the correct code for allocating stack space, as insns.
18646 If COPY_REG is non-null, leave a copy of the old frame base (plus
18647 COPY_OFF) in it.  The generated code may use hard register 0 as a temporary.  */
18649 static void
18650 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
18652 rtx insn;
18653 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18654 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18655 rtx todec = gen_int_mode (-size, Pmode);
18656 rtx par, set, mem;
18658 if (INTVAL (todec) != -size)
18660 warning (0, "stack frame too large");
18661 emit_insn (gen_trap ());
18662 return;
18665 if (crtl->limit_stack)
18667 if (REG_P (stack_limit_rtx)
18668 && REGNO (stack_limit_rtx) > 1
18669 && REGNO (stack_limit_rtx) <= 31)
18671 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18672 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18673 const0_rtx));
18675 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18676 && TARGET_32BIT
18677 && DEFAULT_ABI == ABI_V4)
18679 rtx toload = gen_rtx_CONST (VOIDmode,
18680 gen_rtx_PLUS (Pmode,
18681 stack_limit_rtx,
18682 GEN_INT (size)));
18684 emit_insn (gen_elf_high (tmp_reg, toload));
18685 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18686 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18687 const0_rtx));
18689 else
18690 warning (0, "stack limit expression is not supported");
18693 if (copy_reg)
18695 if (copy_off != 0)
18696 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
18697 else
18698 emit_move_insn (copy_reg, stack_reg);
18701 if (size > 32767)
18703 /* Need a note here so that try_split doesn't get confused. */
18704 if (get_last_insn () == NULL_RTX)
18705 emit_note (NOTE_INSN_DELETED);
18706 insn = emit_move_insn (tmp_reg, todec);
18707 try_split (PATTERN (insn), insn, 0);
18708 todec = tmp_reg;
18711 insn = emit_insn (TARGET_32BIT
18712 ? gen_movsi_update_stack (stack_reg, stack_reg,
18713 todec, stack_reg)
18714 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18715 todec, stack_reg));
18716 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18717 it now and set the alias set/attributes. The above gen_*_update
18718 calls will generate a PARALLEL with the MEM set being the first
18719 operation. */
18720 par = PATTERN (insn);
18721 gcc_assert (GET_CODE (par) == PARALLEL);
18722 set = XVECEXP (par, 0, 0);
18723 gcc_assert (GET_CODE (set) == SET);
18724 mem = SET_DEST (set);
18725 gcc_assert (MEM_P (mem));
18726 MEM_NOTRAP_P (mem) = 1;
18727 set_mem_alias_set (mem, get_frame_alias_set ());
18729 RTX_FRAME_RELATED_P (insn) = 1;
18730 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18731 gen_rtx_SET (VOIDmode, stack_reg,
18732 gen_rtx_PLUS (Pmode, stack_reg,
18733 GEN_INT (-size))));
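/* Editorial sketch (not part of the build): the INTVAL (todec) != -size
   test above catches frame sizes whose negation does not survive
   truncation to Pmode.  For a 32-bit Pmode it is equivalent to this
   hypothetical check:  */
#if 0
static bool
frame_size_fits_p (long long size)
{
  return (int) -size == -size;
}
#endif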
18736 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
18738 #if PROBE_INTERVAL > 32768
18739 #error Cannot use indexed addressing mode for stack probing
18740 #endif
18742 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
18743 inclusive. These are offsets from the current stack pointer. */
18745 static void
18746 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
18748 /* See if we have a constant small number of probes to generate. If so,
18749 that's the easy case. */
18750 if (first + size <= 32768)
18752 HOST_WIDE_INT i;
18754 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
18755 it exceeds SIZE. If only one probe is needed, this will not
18756 generate any code. Then probe at FIRST + SIZE. */
18757 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
18758 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18759 -(first + i)));
18761 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18762 -(first + size)));
18765 /* Otherwise, do the same as above, but in a loop. Note that we must be
18766 extra careful with variables wrapping around because we might be at
18767 the very top (or the very bottom) of the address space and we have
18768 to be able to handle this case properly; in particular, we use an
18769 equality test for the loop condition. */
18770 else
18772 HOST_WIDE_INT rounded_size;
18773 rtx r12 = gen_rtx_REG (Pmode, 12);
18774 rtx r0 = gen_rtx_REG (Pmode, 0);
18776 /* Sanity check for the addressing mode we're going to use. */
18777 gcc_assert (first <= 32768);
18779 /* Step 1: round SIZE to the previous multiple of the interval. */
18781 rounded_size = size & -PROBE_INTERVAL;
18784 /* Step 2: compute initial and final value of the loop counter. */
18786 /* TEST_ADDR = SP + FIRST. */
18787 emit_insn (gen_rtx_SET (VOIDmode, r12,
18788 plus_constant (Pmode, stack_pointer_rtx,
18789 -first)));
18791 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
18792 if (rounded_size > 32768)
18794 emit_move_insn (r0, GEN_INT (-rounded_size));
18795 emit_insn (gen_rtx_SET (VOIDmode, r0,
18796 gen_rtx_PLUS (Pmode, r12, r0)));
18798 else
18799 emit_insn (gen_rtx_SET (VOIDmode, r0,
18800 plus_constant (Pmode, r12, -rounded_size)));
18803 /* Step 3: the loop
18805 while (TEST_ADDR != LAST_ADDR)
18807 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
18808 probe at TEST_ADDR
18811 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
18812 until it is equal to ROUNDED_SIZE. */
18814 if (TARGET_64BIT)
18815 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
18816 else
18817 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
18820 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
18821 that SIZE is equal to ROUNDED_SIZE. */
18823 if (size != rounded_size)
18824 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
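/* Editorial sketch (not part of the build): for the constant-size case
   above, probes land at PROBE_INTERVAL steps below the stack pointer
   (the stack grows downwards, hence the negated offsets), with a final
   probe at FIRST + SIZE.  Hypothetical enumeration of the offsets:  */
#if 0
static int
collect_probe_offsets (long first, long size, long interval,
		       long *out, int max_out)
{
  int n = 0;
  long i;
  for (i = interval; i < size && n < max_out; i += interval)
    out[n++] = -(first + i);		/* intermediate probes */
  if (n < max_out)
    out[n++] = -(first + size);		/* final probe at FIRST + SIZE */
  return n;
}
#endif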
18828 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
18829 absolute addresses. */
18831 const char *
18832 output_probe_stack_range (rtx reg1, rtx reg2)
18834 static int labelno = 0;
18835 char loop_lab[32], end_lab[32];
18836 rtx xops[2];
18838 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
18839 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
18841 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
18843 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
18844 xops[0] = reg1;
18845 xops[1] = reg2;
18846 if (TARGET_64BIT)
18847 output_asm_insn ("cmpd 0,%0,%1", xops);
18848 else
18849 output_asm_insn ("cmpw 0,%0,%1", xops);
18851 fputs ("\tbeq 0,", asm_out_file);
18852 assemble_name_raw (asm_out_file, end_lab);
18853 fputc ('\n', asm_out_file);
18855 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
18856 xops[1] = GEN_INT (-PROBE_INTERVAL);
18857 output_asm_insn ("addi %0,%0,%1", xops);
18859 /* Probe at TEST_ADDR and branch. */
18860 xops[1] = gen_rtx_REG (Pmode, 0);
18861 output_asm_insn ("stw %1,0(%0)", xops);
18862 fprintf (asm_out_file, "\tb ");
18863 assemble_name_raw (asm_out_file, loop_lab);
18864 fputc ('\n', asm_out_file);
18866 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
18868 return "";
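/* Editorial note: assuming an ELF64 target and a PROBE_INTERVAL of
   4096 (illustrative values only), the loop emitted above looks
   roughly like this, with r12 as TEST_ADDR and r0 as LAST_ADDR:

	.LPSRL0:
		cmpd 0,12,0
		beq 0,.LPSRE0
		addi 12,12,-4096
		stw 0,0(12)
		b .LPSRL0
	.LPSRE0:

   i.e. TEST_ADDR walks down one interval per iteration, touching a
   word at each step, until it reaches LAST_ADDR.  */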
18871 /* Add to INSN a note which is PATTERN (INSN) but with REG replaced
18872 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
18873 is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
18874 deduce these equivalences by itself so it wasn't necessary to hold
18875 its hand so much.  Don't be tempted to always supply d2_f_d_e with
18876 the actual CFA register, i.e. r31 when we are using a hard frame
18877 pointer.  That fails when saving regs off r1, and sched moves the
18878 r31 setup past the reg saves.  */
18880 static rtx
18881 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
18882 rtx reg2, rtx rreg)
18884 rtx real, temp;
18886 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
18888 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
18889 int i;
18891 gcc_checking_assert (val == 0);
18892 real = PATTERN (insn);
18893 if (GET_CODE (real) == PARALLEL)
18894 for (i = 0; i < XVECLEN (real, 0); i++)
18895 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18897 rtx set = XVECEXP (real, 0, i);
18899 RTX_FRAME_RELATED_P (set) = 1;
18901 RTX_FRAME_RELATED_P (insn) = 1;
18902 return insn;
18905 /* copy_rtx will not make unique copies of registers, so we need to
18906 ensure we don't have unwanted sharing here. */
18907 if (reg == reg2)
18908 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18910 if (reg == rreg)
18911 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18913 real = copy_rtx (PATTERN (insn));
18915 if (reg2 != NULL_RTX)
18916 real = replace_rtx (real, reg2, rreg);
18918 if (REGNO (reg) == STACK_POINTER_REGNUM)
18919 gcc_checking_assert (val == 0);
18920 else
18921 real = replace_rtx (real, reg,
18922 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
18923 STACK_POINTER_REGNUM),
18924 GEN_INT (val)));
18926 /* We expect that 'real' is either a SET or a PARALLEL containing
18927 SETs (and possibly other stuff). In a PARALLEL, all the SETs
18928 are important so they all have to be marked RTX_FRAME_RELATED_P. */
18930 if (GET_CODE (real) == SET)
18932 rtx set = real;
18934 temp = simplify_rtx (SET_SRC (set));
18935 if (temp)
18936 SET_SRC (set) = temp;
18937 temp = simplify_rtx (SET_DEST (set));
18938 if (temp)
18939 SET_DEST (set) = temp;
18940 if (GET_CODE (SET_DEST (set)) == MEM)
18942 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18943 if (temp)
18944 XEXP (SET_DEST (set), 0) = temp;
18947 else
18949 int i;
18951 gcc_assert (GET_CODE (real) == PARALLEL);
18952 for (i = 0; i < XVECLEN (real, 0); i++)
18953 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18955 rtx set = XVECEXP (real, 0, i);
18957 temp = simplify_rtx (SET_SRC (set));
18958 if (temp)
18959 SET_SRC (set) = temp;
18960 temp = simplify_rtx (SET_DEST (set));
18961 if (temp)
18962 SET_DEST (set) = temp;
18963 if (GET_CODE (SET_DEST (set)) == MEM)
18965 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18966 if (temp)
18967 XEXP (SET_DEST (set), 0) = temp;
18969 RTX_FRAME_RELATED_P (set) = 1;
18973 RTX_FRAME_RELATED_P (insn) = 1;
18974 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
18976 return insn;
18979 /* Return an insn that sets VRSAVE, with the
18980 appropriate CLOBBERs.  */
18982 static rtx
18983 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
18985 int nclobs, i;
18986 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
18987 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
18989 clobs[0]
18990 = gen_rtx_SET (VOIDmode,
18991 vrsave,
18992 gen_rtx_UNSPEC_VOLATILE (SImode,
18993 gen_rtvec (2, reg, vrsave),
18994 UNSPECV_SET_VRSAVE));
18996 nclobs = 1;
18998 /* We need to clobber the registers in the mask so the scheduler
18999 does not move sets to VRSAVE before sets of AltiVec registers.
19001 However, if the function receives nonlocal gotos, reload will set
19002 all call saved registers live. We will end up with:
19004 (set (reg 999) (mem))
19005 (parallel [ (set (reg vrsave) (unspec blah))
19006 (clobber (reg 999))])
19008 The clobber will cause the store into reg 999 to be dead, and
19009 flow will attempt to delete an epilogue insn. In this case, we
19010 need an unspec use/set of the register. */
19012 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19013 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19015 if (!epiloguep || call_used_regs [i])
19016 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
19017 gen_rtx_REG (V4SImode, i));
19018 else
19020 rtx reg = gen_rtx_REG (V4SImode, i);
19022 clobs[nclobs++]
19023 = gen_rtx_SET (VOIDmode,
19024 reg,
19025 gen_rtx_UNSPEC (V4SImode,
19026 gen_rtvec (1, reg), 27));
19030 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
19032 for (i = 0; i < nclobs; ++i)
19033 XVECEXP (insn, 0, i) = clobs[i];
19035 return insn;
19038 static rtx
19039 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
19041 rtx addr, mem;
19043 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
19044 mem = gen_frame_mem (GET_MODE (reg), addr);
19045 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
19048 static rtx
19049 gen_frame_load (rtx reg, rtx frame_reg, int offset)
19051 return gen_frame_set (reg, frame_reg, offset, false);
19054 static rtx
19055 gen_frame_store (rtx reg, rtx frame_reg, int offset)
19057 return gen_frame_set (reg, frame_reg, offset, true);
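/* Editorial sketch: gen_frame_load and gen_frame_store differ only in
   which side of the SET the frame memory lands on:

     gen_frame_store (reg, sp, 8)  ->  (set (mem (plus sp 8)) reg)
     gen_frame_load  (reg, sp, 8)  ->  (set reg (mem (plus sp 8)))

   Hypothetical usage (not part of the build):  */
#if 0
static rtx
example_store_r0 (void)
{
  /* Build the SET that stores r0 at [r1 + 8].  */
  return gen_frame_store (gen_rtx_REG (Pmode, 0),
			  gen_rtx_REG (Pmode, 1), 8);
}
#endif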
19060 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
19061 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
19063 static rtx
19064 emit_frame_save (rtx frame_reg, enum machine_mode mode,
19065 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
19067 rtx reg, insn;
19069 /* Some cases that need register indexed addressing. */
19070 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
19071 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
19072 || (TARGET_E500_DOUBLE && mode == DFmode)
19073 || (TARGET_SPE_ABI
19074 && SPE_VECTOR_MODE (mode)
19075 && !SPE_CONST_OFFSET_OK (offset))));
19077 reg = gen_rtx_REG (mode, regno);
19078 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
19079 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
19080 NULL_RTX, NULL_RTX);
19083 /* Emit an offset memory reference suitable for a frame store, while
19084 converting to a valid addressing mode. */
19086 static rtx
19087 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
19089 rtx int_rtx, offset_rtx;
19091 int_rtx = GEN_INT (offset);
19093 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
19094 || (TARGET_E500_DOUBLE && mode == DFmode))
19096 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
19097 emit_move_insn (offset_rtx, int_rtx);
19099 else
19100 offset_rtx = int_rtx;
19102 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
19105 #ifndef TARGET_FIX_AND_CONTINUE
19106 #define TARGET_FIX_AND_CONTINUE 0
19107 #endif
19109 /* The first register handled out of line is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest of these.  */
19110 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
19111 #define LAST_SAVRES_REGISTER 31
19112 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
19114 enum {
19115 SAVRES_LR = 0x1,
19116 SAVRES_SAVE = 0x2,
19117 SAVRES_REG = 0x0c,
19118 SAVRES_GPR = 0,
19119 SAVRES_FPR = 4,
19120 SAVRES_VR = 8
19123 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
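/* Editorial sketch: SEL packs the direction, register class and LR
   handling into one small integer, with SAVRES_REG masking out the
   class field; e.g. (SAVRES_SAVE | SAVRES_FPR | SAVRES_LR) == 0x7
   names the FPR save routine that also stores the link register.
   Hypothetical decoder (not part of the build):  */
#if 0
static const char *
savres_class_name (int sel)
{
  switch (sel & SAVRES_REG)
    {
    case SAVRES_GPR: return "gpr";
    case SAVRES_FPR: return "fpr";
    case SAVRES_VR:  return "vr";
    default: return "?";
    }
}
#endif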
19125 /* Temporary holding space for an out-of-line register save/restore
19126 routine name. */
19127 static char savres_routine_name[30];
19129 /* Return the name for an out-of-line register save/restore routine.
19130 SEL selects the register class, the direction, and LR handling; see the SAVRES_* flags above.  */
19132 static char *
19133 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
19135 const char *prefix = "";
19136 const char *suffix = "";
19138 /* Different targets are supposed to define
19139 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
19140 routine name could be defined with:
19142 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
19144 This is a nice idea in theory, but in reality things are
19145 complicated in several ways:
19147 - ELF targets have save/restore routines for GPRs.
19149 - SPE targets use different prefixes for 32/64-bit registers, and
19150 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
19152 - PPC64 ELF targets have routines for save/restore of GPRs that
19153 differ in what they do with the link register, so having a set
19154 prefix doesn't work. (We only use one of the save routines at
19155 the moment, though.)
19157 - PPC32 ELF targets have "exit" versions of the restore routines
19158 that restore the link register and can save some extra space.
19159 These require an extra suffix. (There are also "tail" versions
19160 of the restore routines and "GOT" versions of the save routines,
19161 but we don't generate those at present. Same problems apply,
19162 though.)
19164 We deal with all this by synthesizing our own prefix/suffix and
19165 using that for the simple sprintf call shown above. */
19166 if (TARGET_SPE)
19168 /* No floating point saves on the SPE. */
19169 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
19171 if ((sel & SAVRES_SAVE))
19172 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
19173 else
19174 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
19176 if ((sel & SAVRES_LR))
19177 suffix = "_x";
19179 else if (DEFAULT_ABI == ABI_V4)
19181 if (TARGET_64BIT)
19182 goto aix_names;
19184 if ((sel & SAVRES_REG) == SAVRES_GPR)
19185 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
19186 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19187 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
19188 else if ((sel & SAVRES_REG) == SAVRES_VR)
19189 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19190 else
19191 abort ();
19193 if ((sel & SAVRES_LR))
19194 suffix = "_x";
19196 else if (DEFAULT_ABI == ABI_AIX)
19198 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
19199 /* No out-of-line save/restore routines for GPRs on AIX. */
19200 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
19201 #endif
19203 aix_names:
19204 if ((sel & SAVRES_REG) == SAVRES_GPR)
19205 prefix = ((sel & SAVRES_SAVE)
19206 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
19207 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
19208 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19210 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
19211 if ((sel & SAVRES_LR))
19212 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
19213 else
19214 #endif
19216 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
19217 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
19220 else if ((sel & SAVRES_REG) == SAVRES_VR)
19221 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19222 else
19223 abort ();
19226 if (DEFAULT_ABI == ABI_DARWIN)
19228 /* The Darwin approach is (slightly) different, in order to be
19229 compatible with code generated by the system toolchain. There is a
19230 single symbol for the start of save sequence, and the code here
19231 embeds an offset into that code on the basis of the first register
19232 to be saved. */
19233 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
19234 if ((sel & SAVRES_REG) == SAVRES_GPR)
19235 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
19236 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
19237 (regno - 13) * 4, prefix, regno);
19238 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19239 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
19240 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
19241 else if ((sel & SAVRES_REG) == SAVRES_VR)
19242 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
19243 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
19244 else
19245 abort ();
19247 else
19248 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
19250 return savres_routine_name;
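/* Editorial note: with the prefix/suffix synthesized above, the final
   sprintf yields names such as these (examples assuming 32-bit SVR4):

     _savegpr_29    save GPRs starting at r29
     _restgpr_29_x  restore GPRs from r29 and restore LR ("exit" form)
     _savefpr_14    save FPRs starting at f14

   A hypothetical standalone version of that last step:  */
#if 0
static void
make_savres_name (char buf[30], const char *prefix, int regno,
		  const char *suffix)
{
  sprintf (buf, "%s%d%s", prefix, regno, suffix);
}
#endif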
19253 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
19254 SEL selects the register class and direction, as for rs6000_savres_routine_name.  */
19256 static rtx
19257 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
19259 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
19260 ? info->first_gp_reg_save
19261 : (sel & SAVRES_REG) == SAVRES_FPR
19262 ? info->first_fp_reg_save - 32
19263 : (sel & SAVRES_REG) == SAVRES_VR
19264 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
19265 : -1);
19266 rtx sym;
19267 int select = sel;
19269 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
19270 versions of the gpr routines. */
19271 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
19272 && info->spe_64bit_regs_used)
19273 select ^= SAVRES_FPR ^ SAVRES_GPR;
19275 /* Don't generate bogus routine names. */
19276 gcc_assert (FIRST_SAVRES_REGISTER <= regno
19277 && regno <= LAST_SAVRES_REGISTER
19278 && select >= 0 && select <= 12);
19280 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
19282 if (sym == NULL)
19284 char *name;
19286 name = rs6000_savres_routine_name (info, regno, sel);
19288 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
19289 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
19290 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
19293 return sym;
19296 /* Emit a sequence of insns, including a stack tie if needed, for
19297 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
19298 reset the stack pointer, but move the base of the frame into
19299 reg UPDT_REGNO for use by out-of-line register restore routines. */
19301 static rtx
19302 rs6000_emit_stack_reset (rs6000_stack_t *info,
19303 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
19304 unsigned updt_regno)
19306 rtx updt_reg_rtx;
19308 /* This blockage is needed so that sched doesn't decide to move
19309 the sp change before the register restores. */
19310 if (DEFAULT_ABI == ABI_V4
19311 || (TARGET_SPE_ABI
19312 && info->spe_64bit_regs_used != 0
19313 && info->first_gp_reg_save != 32))
19314 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
19316 /* If we are restoring registers out-of-line, we will be using the
19317 "exit" variants of the restore routines, which will reset the
19318 stack for us. But we do need to point updt_reg into the
19319 right place for those routines. */
19320 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
19322 if (frame_off != 0)
19323 return emit_insn (gen_add3_insn (updt_reg_rtx,
19324 frame_reg_rtx, GEN_INT (frame_off)));
19325 else if (REGNO (frame_reg_rtx) != updt_regno)
19326 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
19328 return NULL_RTX;
19331 /* Return the register number used as a pointer by out-of-line
19332 save/restore functions. */
19334 static inline unsigned
19335 ptr_regno_for_savres (int sel)
19337 if (DEFAULT_ABI == ABI_AIX)
19338 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
19339 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
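/* Editorial sketch of the mapping above, as a table:

     ABI_AIX:     FPR routines or LR variants -> r1, otherwise -> r12
     ABI_DARWIN:  FPR routines -> r1, otherwise -> r11
     ABI_V4:      always r11

   Hypothetical self-check (not part of the build):  */
#if 0
static void
check_ptr_regno_examples (void)
{
  if (DEFAULT_ABI == ABI_AIX)
    gcc_assert (ptr_regno_for_savres (SAVRES_SAVE | SAVRES_GPR) == 12
		&& ptr_regno_for_savres (SAVRES_SAVE | SAVRES_FPR) == 1);
}
#endif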
19342 /* Construct a parallel rtx describing the effect of a call to an
19343 out-of-line register save/restore routine, and emit the insn
19344 or jump_insn as appropriate. */
19346 static rtx
19347 rs6000_emit_savres_rtx (rs6000_stack_t *info,
19348 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
19349 enum machine_mode reg_mode, int sel)
19351 int i;
19352 int offset, start_reg, end_reg, n_regs, use_reg;
19353 int reg_size = GET_MODE_SIZE (reg_mode);
19354 rtx sym;
19355 rtvec p;
19356 rtx par, insn;
19358 offset = 0;
19359 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19360 ? info->first_gp_reg_save
19361 : (sel & SAVRES_REG) == SAVRES_FPR
19362 ? info->first_fp_reg_save
19363 : (sel & SAVRES_REG) == SAVRES_VR
19364 ? info->first_altivec_reg_save
19365 : -1);
19366 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19367 ? 32
19368 : (sel & SAVRES_REG) == SAVRES_FPR
19369 ? 64
19370 : (sel & SAVRES_REG) == SAVRES_VR
19371 ? LAST_ALTIVEC_REGNO + 1
19372 : -1);
19373 n_regs = end_reg - start_reg;
19374 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
19375 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
19376 + n_regs);
19378 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19379 RTVEC_ELT (p, offset++) = ret_rtx;
19381 RTVEC_ELT (p, offset++)
19382 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
19384 sym = rs6000_savres_routine_sym (info, sel);
19385 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
19387 use_reg = ptr_regno_for_savres (sel);
19388 if ((sel & SAVRES_REG) == SAVRES_VR)
19390 /* Vector regs are saved/restored using [reg+reg] addressing. */
19391 RTVEC_ELT (p, offset++)
19392 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19393 RTVEC_ELT (p, offset++)
19394 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
19396 else
19397 RTVEC_ELT (p, offset++)
19398 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19400 for (i = 0; i < end_reg - start_reg; i++)
19401 RTVEC_ELT (p, i + offset)
19402 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
19403 frame_reg_rtx, save_area_offset + reg_size * i,
19404 (sel & SAVRES_SAVE) != 0);
19406 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19407 RTVEC_ELT (p, i + offset)
19408 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
19410 par = gen_rtx_PARALLEL (VOIDmode, p);
19412 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19414 insn = emit_jump_insn (par);
19415 JUMP_LABEL (insn) = ret_rtx;
19417 else
19418 insn = emit_insn (par);
19419 return insn;
19422 /* Determine whether the GP register REG is really used.  */
19424 static bool
19425 rs6000_reg_live_or_pic_offset_p (int reg)
19427 /* If the function calls eh_return, treat as used every register that
19428 would otherwise be checked for liveness.  This is required for the
19429 PIC offset register with -mminimal-toc on AIX, as it is advertised
19430 as "fixed" for register allocation purposes in that case.  */
19432 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
19433 && (!call_used_regs[reg]
19434 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19435 && !TARGET_SINGLE_PIC_BASE
19436 && TARGET_TOC && TARGET_MINIMAL_TOC)))
19437 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19438 && !TARGET_SINGLE_PIC_BASE
19439 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19440 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
19443 /* Emit function prologue as insns. */
19445 void
19446 rs6000_emit_prologue (void)
19448 rs6000_stack_t *info = rs6000_stack_info ();
19449 enum machine_mode reg_mode = Pmode;
19450 int reg_size = TARGET_32BIT ? 4 : 8;
19451 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19452 rtx frame_reg_rtx = sp_reg_rtx;
19453 unsigned int cr_save_regno;
19454 rtx cr_save_rtx = NULL_RTX;
19455 rtx insn;
19456 int strategy;
19457 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19458 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19459 && call_used_regs[STATIC_CHAIN_REGNUM]);
19460 /* Offset to top of frame for frame_reg and sp respectively. */
19461 HOST_WIDE_INT frame_off = 0;
19462 HOST_WIDE_INT sp_off = 0;
19464 #ifdef ENABLE_CHECKING
19465 /* Track and check usage of r0, r11, r12. */
19466 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
19467 #define START_USE(R) do \
19469 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19470 reg_inuse |= 1 << (R); \
19471 } while (0)
19472 #define END_USE(R) do \
19474 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19475 reg_inuse &= ~(1 << (R)); \
19476 } while (0)
19477 #define NOT_INUSE(R) do \
19479 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19480 } while (0)
19481 #else
19482 #define START_USE(R) do {} while (0)
19483 #define END_USE(R) do {} while (0)
19484 #define NOT_INUSE(R) do {} while (0)
19485 #endif
19487 if (flag_stack_usage_info)
19488 current_function_static_stack_size = info->total_size;
19490 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
19491 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
19493 if (TARGET_FIX_AND_CONTINUE)
19495 /* gdb on darwin arranges to forward a function from the old
19496 address by modifying the first 5 instructions of the function
19497 to branch to the overriding function. This is necessary to
19498 permit function pointers that point to the old function to
19499 actually forward to the new function. */
19500 emit_insn (gen_nop ());
19501 emit_insn (gen_nop ());
19502 emit_insn (gen_nop ());
19503 emit_insn (gen_nop ());
19504 emit_insn (gen_nop ());
19507 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
19509 reg_mode = V2SImode;
19510 reg_size = 8;
19513 /* Handle world saves specially here. */
19514 if (WORLD_SAVE_P (info))
19516 int i, j, sz;
19517 rtx treg;
19518 rtvec p;
19519 rtx reg0;
19521 /* save_world expects lr in r0. */
19522 reg0 = gen_rtx_REG (Pmode, 0);
19523 if (info->lr_save_p)
19525 insn = emit_move_insn (reg0,
19526 gen_rtx_REG (Pmode, LR_REGNO));
19527 RTX_FRAME_RELATED_P (insn) = 1;
19530 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
19531 assumptions about the offsets of various bits of the stack
19532 frame. */
19533 gcc_assert (info->gp_save_offset == -220
19534 && info->fp_save_offset == -144
19535 && info->lr_save_offset == 8
19536 && info->cr_save_offset == 4
19537 && info->push_p
19538 && info->lr_save_p
19539 && (!crtl->calls_eh_return
19540 || info->ehrd_offset == -432)
19541 && info->vrsave_save_offset == -224
19542 && info->altivec_save_offset == -416);
19544 treg = gen_rtx_REG (SImode, 11);
19545 emit_move_insn (treg, GEN_INT (-info->total_size));
19547 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
19548 in R11. It also clobbers R12, so beware! */
19550 /* Preserve CR2 for save_world prologues.  */
19551 sz = 5;
19552 sz += 32 - info->first_gp_reg_save;
19553 sz += 64 - info->first_fp_reg_save;
19554 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
19555 p = rtvec_alloc (sz);
19556 j = 0;
19557 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
19558 gen_rtx_REG (SImode,
19559 LR_REGNO));
19560 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
19561 gen_rtx_SYMBOL_REF (Pmode,
19562 "*save_world"));
19563 /* We do floats first so that the instruction pattern matches
19564 properly. */
19565 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19566 RTVEC_ELT (p, j++)
19567 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19568 ? DFmode : SFmode,
19569 info->first_fp_reg_save + i),
19570 frame_reg_rtx,
19571 info->fp_save_offset + frame_off + 8 * i);
19572 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
19573 RTVEC_ELT (p, j++)
19574 = gen_frame_store (gen_rtx_REG (V4SImode,
19575 info->first_altivec_reg_save + i),
19576 frame_reg_rtx,
19577 info->altivec_save_offset + frame_off + 16 * i);
19578 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19579 RTVEC_ELT (p, j++)
19580 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19581 frame_reg_rtx,
19582 info->gp_save_offset + frame_off + reg_size * i);
19584 /* CR register traditionally saved as CR2. */
19585 RTVEC_ELT (p, j++)
19586 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
19587 frame_reg_rtx, info->cr_save_offset + frame_off);
19588 /* Describe the save of R0 (which holds the old LR).  */
19589 if (info->lr_save_p)
19590 RTVEC_ELT (p, j++)
19591 = gen_frame_store (reg0,
19592 frame_reg_rtx, info->lr_save_offset + frame_off);
19593 /* Explain what happens to the stack pointer. */
19595 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
19596 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
19599 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19600 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19601 treg, GEN_INT (-info->total_size));
19602 sp_off = frame_off = info->total_size;
19605 strategy = info->savres_strategy;
19607 /* For V.4, update stack before we do any saving and set back pointer. */
19608 if (! WORLD_SAVE_P (info)
19609 && info->push_p
19610 && (DEFAULT_ABI == ABI_V4
19611 || crtl->calls_eh_return))
19613 bool need_r11 = (TARGET_SPE
19614 ? (!(strategy & SAVE_INLINE_GPRS)
19615 && info->spe_64bit_regs_used == 0)
19616 : (!(strategy & SAVE_INLINE_FPRS)
19617 || !(strategy & SAVE_INLINE_GPRS)
19618 || !(strategy & SAVE_INLINE_VRS)));
19619 int ptr_regno = -1;
19620 rtx ptr_reg = NULL_RTX;
19621 int ptr_off = 0;
19623 if (info->total_size < 32767)
19624 frame_off = info->total_size;
19625 else if (need_r11)
19626 ptr_regno = 11;
19627 else if (info->cr_save_p
19628 || info->lr_save_p
19629 || info->first_fp_reg_save < 64
19630 || info->first_gp_reg_save < 32
19631 || info->altivec_size != 0
19632 || info->vrsave_mask != 0
19633 || crtl->calls_eh_return)
19634 ptr_regno = 12;
19635 else
19637 /* The prologue won't be saving any regs so there is no need
19638 to set up a frame register to access any frame save area.
19639 We also won't be using frame_off anywhere below, but set
19640 the correct value anyway to protect against future
19641 changes to this function. */
19642 frame_off = info->total_size;
19644 if (ptr_regno != -1)
19646 /* Set up the frame offset to that needed by the first
19647 out-of-line save function. */
19648 START_USE (ptr_regno);
19649 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19650 frame_reg_rtx = ptr_reg;
19651 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
19652 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
19653 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
19654 ptr_off = info->gp_save_offset + info->gp_size;
19655 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
19656 ptr_off = info->altivec_save_offset + info->altivec_size;
19657 frame_off = -ptr_off;
19659 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
19660 sp_off = info->total_size;
19661 if (frame_reg_rtx != sp_reg_rtx)
19662 rs6000_emit_stack_tie (frame_reg_rtx, false);
19665 /* If we use the link register, get it into r0. */
19666 if (!WORLD_SAVE_P (info) && info->lr_save_p)
19668 rtx addr, reg, mem;
19670 reg = gen_rtx_REG (Pmode, 0);
19671 START_USE (0);
19672 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
19673 RTX_FRAME_RELATED_P (insn) = 1;
19675 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
19676 | SAVE_NOINLINE_FPRS_SAVES_LR)))
19678 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19679 GEN_INT (info->lr_save_offset + frame_off));
19680 mem = gen_rtx_MEM (Pmode, addr);
19681 /* This should not be of rs6000_sr_alias_set, because of
19682 __builtin_return_address. */
19684 insn = emit_move_insn (mem, reg);
19685 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19686 NULL_RTX, NULL_RTX);
19687 END_USE (0);
19691 /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
19692 r12 will be needed by the out-of-line GPR save.  */
19693 cr_save_regno = (DEFAULT_ABI == ABI_AIX
19694 && !(strategy & (SAVE_INLINE_GPRS
19695 | SAVE_NOINLINE_GPRS_SAVES_LR))
19696 ? 11 : 12);
19697 if (!WORLD_SAVE_P (info)
19698 && info->cr_save_p
19699 && REGNO (frame_reg_rtx) != cr_save_regno
19700 && !(using_static_chain_p && cr_save_regno == 11))
19702 rtx set;
19704 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
19705 START_USE (cr_save_regno);
19706 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19707 RTX_FRAME_RELATED_P (insn) = 1;
19708 /* Now, there's no way that dwarf2out_frame_debug_expr is going
19709 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
19710 But that's OK. All we have to do is specify that _one_ condition
19711 code register is saved in this stack slot. The thrower's epilogue
19712 will then restore all the call-saved registers.
19713 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
19714 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
19715 gen_rtx_REG (SImode, CR2_REGNO));
19716 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19719 /* Do any required saving of fpr's. If only one or two to save, do
19720 it ourselves. Otherwise, call function. */
19721 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
19723 int i;
19724 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19725 if (save_reg_p (info->first_fp_reg_save + i))
19726 emit_frame_save (frame_reg_rtx,
19727 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19728 ? DFmode : SFmode),
19729 info->first_fp_reg_save + i,
19730 info->fp_save_offset + frame_off + 8 * i,
19731 sp_off - frame_off);
19733 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
19735 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
19736 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
19737 unsigned ptr_regno = ptr_regno_for_savres (sel);
19738 rtx ptr_reg = frame_reg_rtx;
19740 if (REGNO (frame_reg_rtx) == ptr_regno)
19741 gcc_checking_assert (frame_off == 0);
19742 else
19744 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19745 NOT_INUSE (ptr_regno);
19746 emit_insn (gen_add3_insn (ptr_reg,
19747 frame_reg_rtx, GEN_INT (frame_off)));
19749 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19750 info->fp_save_offset,
19751 info->lr_save_offset,
19752 DFmode, sel);
19753 rs6000_frame_related (insn, ptr_reg, sp_off,
19754 NULL_RTX, NULL_RTX);
19755 if (lr)
19756 END_USE (0);
19759 /* Save GPRs. This is done as a PARALLEL if we are using
19760 the store-multiple instructions. */
19761 if (!WORLD_SAVE_P (info)
19762 && TARGET_SPE_ABI
19763 && info->spe_64bit_regs_used != 0
19764 && info->first_gp_reg_save != 32)
19766 int i;
19767 rtx spe_save_area_ptr;
19768 HOST_WIDE_INT save_off;
19769 int ool_adjust = 0;
19771 /* Determine whether we can address all of the registers that need
19772 to be saved with an offset from frame_reg_rtx that fits in
19773 the small const field for SPE memory instructions. */
19774 int spe_regs_addressable
19775 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
19776 + reg_size * (32 - info->first_gp_reg_save - 1))
19777 && (strategy & SAVE_INLINE_GPRS));
19779 if (spe_regs_addressable)
19781 spe_save_area_ptr = frame_reg_rtx;
19782 save_off = frame_off;
19784 else
19786 /* Make r11 point to the start of the SPE save area. We need
19787 to be careful here if r11 is holding the static chain. If
19788 it is, then temporarily save it in r0. */
19789 HOST_WIDE_INT offset;
19791 if (!(strategy & SAVE_INLINE_GPRS))
19792 ool_adjust = 8 * (info->first_gp_reg_save
19793 - (FIRST_SAVRES_REGISTER + 1));
19794 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
19795 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
19796 save_off = frame_off - offset;
19798 if (using_static_chain_p)
19800 rtx r0 = gen_rtx_REG (Pmode, 0);
19802 START_USE (0);
19803 gcc_assert (info->first_gp_reg_save > 11);
19805 emit_move_insn (r0, spe_save_area_ptr);
19807 else if (REGNO (frame_reg_rtx) != 11)
19808 START_USE (11);
19810 emit_insn (gen_addsi3 (spe_save_area_ptr,
19811 frame_reg_rtx, GEN_INT (offset)));
19812 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
19813 frame_off = -info->spe_gp_save_offset + ool_adjust;
19816 if ((strategy & SAVE_INLINE_GPRS))
19818 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19819 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19820 emit_frame_save (spe_save_area_ptr, reg_mode,
19821 info->first_gp_reg_save + i,
19822 (info->spe_gp_save_offset + save_off
19823 + reg_size * i),
19824 sp_off - save_off);
19826 else
19828 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
19829 info->spe_gp_save_offset + save_off,
19830 0, reg_mode,
19831 SAVRES_SAVE | SAVRES_GPR);
19833 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
19834 NULL_RTX, NULL_RTX);
19837 /* Move the static chain pointer back. */
19838 if (!spe_regs_addressable)
19840 if (using_static_chain_p)
19842 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
19843 END_USE (0);
19845 else if (REGNO (frame_reg_rtx) != 11)
19846 END_USE (11);
19849 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
19851 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
19852 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
19853 unsigned ptr_regno = ptr_regno_for_savres (sel);
19854 rtx ptr_reg = frame_reg_rtx;
19855 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
19856 int end_save = info->gp_save_offset + info->gp_size;
19857 int ptr_off;
19859 if (!ptr_set_up)
19860 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19862 /* Need to adjust r11 (r12) if we saved any FPRs. */
19863 if (end_save + frame_off != 0)
19865 rtx offset = GEN_INT (end_save + frame_off);
19867 if (ptr_set_up)
19868 frame_off = -end_save;
19869 else
19870 NOT_INUSE (ptr_regno);
19871 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
19873 else if (!ptr_set_up)
19875 NOT_INUSE (ptr_regno);
19876 emit_move_insn (ptr_reg, frame_reg_rtx);
19878 ptr_off = -end_save;
19879 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19880 info->gp_save_offset + ptr_off,
19881 info->lr_save_offset + ptr_off,
19882 reg_mode, sel);
19883 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
19884 NULL_RTX, NULL_RTX);
19885 if (lr)
19886 END_USE (0);
19888 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
19890 rtvec p;
19891 int i;
19892 p = rtvec_alloc (32 - info->first_gp_reg_save);
19893 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19894 RTVEC_ELT (p, i)
19895 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19896 frame_reg_rtx,
19897 info->gp_save_offset + frame_off + reg_size * i);
19898 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19899 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19900 NULL_RTX, NULL_RTX);
19902 else if (!WORLD_SAVE_P (info))
19904 int i;
19905 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19906 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19907 emit_frame_save (frame_reg_rtx, reg_mode,
19908 info->first_gp_reg_save + i,
19909 info->gp_save_offset + frame_off + reg_size * i,
19910 sp_off - frame_off);
19913 if (crtl->calls_eh_return)
19915 unsigned int i;
19916 rtvec p;
19918 for (i = 0; ; ++i)
19920 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19921 if (regno == INVALID_REGNUM)
19922 break;
19925 p = rtvec_alloc (i);
19927 for (i = 0; ; ++i)
19929 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19930 if (regno == INVALID_REGNUM)
19931 break;
19933 insn
19934 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
19935 sp_reg_rtx,
19936 info->ehrd_offset + sp_off + reg_size * (int) i);
19937 RTVEC_ELT (p, i) = insn;
19938 RTX_FRAME_RELATED_P (insn) = 1;
19941 insn = emit_insn (gen_blockage ());
19942 RTX_FRAME_RELATED_P (insn) = 1;
19943 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
19946 /* In the AIX ABI we need to make sure r2 is really saved.  */
19947 if (TARGET_AIX && crtl->calls_eh_return)
19949 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
19950 rtx save_insn, join_insn, note;
19951 long toc_restore_insn;
19953 tmp_reg = gen_rtx_REG (Pmode, 11);
19954 tmp_reg_si = gen_rtx_REG (SImode, 11);
19955 if (using_static_chain_p)
19957 START_USE (0);
19958 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
19960 else
19961 START_USE (11);
19962 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
19963 /* Peek at instruction to which this function returns. If it's
19964 restoring r2, then we know we've already saved r2. We can't
19965 unconditionally save r2 because the value we have will already
19966 be updated if we arrived at this function via a plt call or
19967 toc adjusting stub. */
19968 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
19969 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
19970 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
19971 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
19972 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
19973 validate_condition_mode (EQ, CCUNSmode);
19974 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
19975 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19976 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
19977 toc_save_done = gen_label_rtx ();
19978 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
19979 gen_rtx_EQ (VOIDmode, compare_result,
19980 const0_rtx),
19981 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
19982 pc_rtx);
19983 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
19984 JUMP_LABEL (jump) = toc_save_done;
19985 LABEL_NUSES (toc_save_done) += 1;
19987 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
19988 TOC_REGNUM, frame_off + 5 * reg_size,
19989 sp_off - frame_off);
19991 emit_label (toc_save_done);
19993 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
19994 have a CFG with different saves along different paths.
19995 Move the note to a dummy blockage insn, which describes that
19996 R2 is unconditionally saved after the label.  */
19997 /* ??? An alternate representation might be a special insn pattern
19998 containing both the branch and the store.  That might give the
19999 code that minimizes the number of DW_CFA_advance opcodes more
20000 freedom in placing the annotations.  */
20001 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
20002 if (note)
20003 remove_note (save_insn, note);
20004 else
20005 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
20006 copy_rtx (PATTERN (save_insn)), NULL_RTX);
20007 RTX_FRAME_RELATED_P (save_insn) = 0;
20009 join_insn = emit_insn (gen_blockage ());
20010 REG_NOTES (join_insn) = note;
20011 RTX_FRAME_RELATED_P (join_insn) = 1;
20013 if (using_static_chain_p)
20015 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
20016 END_USE (0);
20018 else
20019 END_USE (11);
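/* Editorial sketch (not part of the build): the xor/compare pair above
   is a 32-bit equality test against the expected TOC-restore opcode,
   split into 16-bit halves because PowerPC immediates are 16 bits:
   (insn ^ hi) == lo  <=>  insn == (hi | lo).  Hypothetical standalone
   version:  */
#if 0
static bool
is_toc_restore_insn (unsigned int insn, bool is_64bit)
{
  unsigned int op = is_64bit ? 0xE8410028u	/* ld r2,40(r1) */
			     : 0x80410014u;	/* lwz r2,20(r1) */
  return (insn ^ (op & ~0xffffu)) == (op & 0xffffu);
}
#endif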
20022 /* Save CR if we use any that must be preserved. */
20023 if (!WORLD_SAVE_P (info) && info->cr_save_p)
20025 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20026 GEN_INT (info->cr_save_offset + frame_off));
20027 rtx mem = gen_frame_mem (SImode, addr);
20028 /* See the large comment above about why CR2_REGNO is used. */
20029 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
20031 /* If we didn't copy cr before, do so now using r0. */
20032 if (cr_save_rtx == NULL_RTX)
20034 rtx set;
20036 START_USE (0);
20037 cr_save_rtx = gen_rtx_REG (SImode, 0);
20038 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20039 RTX_FRAME_RELATED_P (insn) = 1;
20040 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
20041 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20043 insn = emit_move_insn (mem, cr_save_rtx);
20044 END_USE (REGNO (cr_save_rtx));
20046 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20047 NULL_RTX, NULL_RTX);
20050 /* Update stack and set back pointer unless this is V.4,
20051 for which it was done previously. */
20052 if (!WORLD_SAVE_P (info) && info->push_p
20053 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
20055 rtx ptr_reg = NULL;
20056 int ptr_off = 0;
20058 /* If saving AltiVec regs, we need to be able to address all save
20059 locations using a 16-bit offset.  */
20060 if ((strategy & SAVE_INLINE_VRS) == 0
20061 || (info->altivec_size != 0
20062 && (info->altivec_save_offset + info->altivec_size - 16
20063 + info->total_size - frame_off) > 32767)
20064 || (info->vrsave_mask != 0
20065 && (info->vrsave_save_offset
20066 + info->total_size - frame_off) > 32767))
20068 int sel = SAVRES_SAVE | SAVRES_VR;
20069 unsigned ptr_regno = ptr_regno_for_savres (sel);
20071 if (using_static_chain_p
20072 && ptr_regno == STATIC_CHAIN_REGNUM)
20073 ptr_regno = 12;
20074 if (REGNO (frame_reg_rtx) != ptr_regno)
20075 START_USE (ptr_regno);
20076 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20077 frame_reg_rtx = ptr_reg;
20078 ptr_off = info->altivec_save_offset + info->altivec_size;
20079 frame_off = -ptr_off;
20081 else if (REGNO (frame_reg_rtx) == 1)
20082 frame_off = info->total_size;
20083 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20084 sp_off = info->total_size;
20085 if (frame_reg_rtx != sp_reg_rtx)
20086 rs6000_emit_stack_tie (frame_reg_rtx, false);
20089 /* Set frame pointer, if needed. */
20090 if (frame_pointer_needed)
20092 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
20093 sp_reg_rtx);
20094 RTX_FRAME_RELATED_P (insn) = 1;
20097 /* Save AltiVec registers if needed. Save here because the red zone does
20098 not always include AltiVec registers. */
20099 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20100 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
20102 int end_save = info->altivec_save_offset + info->altivec_size;
20103 int ptr_off;
20104 /* Oddly, the vector save/restore functions point r0 at the end
20105 of the save area, then use r11 or r12 to load offsets for
20106 [reg+reg] addressing. */
20107 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20108 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
20109 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20111 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20112 NOT_INUSE (0);
20113 if (end_save + frame_off != 0)
20115 rtx offset = GEN_INT (end_save + frame_off);
20117 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20119 else
20120 emit_move_insn (ptr_reg, frame_reg_rtx);
20122 ptr_off = -end_save;
20123 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20124 info->altivec_save_offset + ptr_off,
20125 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
20126 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
20127 NULL_RTX, NULL_RTX);
20128 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20130 /* The oddity mentioned above clobbered our frame reg. */
20131 emit_move_insn (frame_reg_rtx, ptr_reg);
20132 frame_off = ptr_off;
20135 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20136 && info->altivec_size != 0)
20138 int i;
20140 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20141 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20143 rtx areg, savereg, mem;
20144 int offset;
20146 offset = (info->altivec_save_offset + frame_off
20147 + 16 * (i - info->first_altivec_reg_save));
20149 savereg = gen_rtx_REG (V4SImode, i);
20151 NOT_INUSE (0);
20152 areg = gen_rtx_REG (Pmode, 0);
20153 emit_move_insn (areg, GEN_INT (offset));
20155 /* AltiVec addressing mode is [reg+reg]. */
20156 mem = gen_frame_mem (V4SImode,
20157 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
20159 insn = emit_move_insn (mem, savereg);
20161 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20162 areg, GEN_INT (offset));
20166 /* VRSAVE is a bit vector representing which AltiVec registers
20167 are used. The OS uses this to determine which vector
20168 registers to save on a context switch. We need to save
20169 VRSAVE on the stack frame, add whatever AltiVec registers we
20170 used in this function, and do the corresponding magic in the
20171 epilogue. */
20173 if (!WORLD_SAVE_P (info)
20174 && TARGET_ALTIVEC
20175 && TARGET_ALTIVEC_VRSAVE
20176 && info->vrsave_mask != 0)
20178 rtx reg, vrsave;
20179 int offset;
20180 int save_regno;
20182 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
20183 be using r12 as frame_reg_rtx and r11 as the static chain
20184 pointer for nested functions. */
20185 save_regno = 12;
20186 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
20187 save_regno = 11;
20188 else if (REGNO (frame_reg_rtx) == 12)
20190 save_regno = 11;
20191 if (using_static_chain_p)
20192 save_regno = 0;
20195 NOT_INUSE (save_regno);
20196 reg = gen_rtx_REG (SImode, save_regno);
20197 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20198 if (TARGET_MACHO)
20199 emit_insn (gen_get_vrsave_internal (reg));
20200 else
20201 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
20203 /* Save VRSAVE. */
20204 offset = info->vrsave_save_offset + frame_off;
20205 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
20207 /* Include the registers in the mask. */
20208 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
20210 insn = emit_insn (generate_set_vrsave (reg, info, 0));
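/* Illustrative sketch, not compiled in: VRSAVE is a 32-bit mask with
   v0 at the most-significant bit, so marking a vector register live
   is a single OR.  This assumes the usual ALTIVEC_REG_BIT layout
   (0x80000000 >> n); the helper name is hypothetical.  */
#if 0
static unsigned
mark_vr_live (unsigned vrsave_mask, int vr)	/* vr in [0, 31] */
{
  return vrsave_mask | (0x80000000u >> vr);	/* v20..v31 -> low 12 bits */
}
#endif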
20213 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
20214 if (!TARGET_SINGLE_PIC_BASE
20215 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
20216 || (DEFAULT_ABI == ABI_V4
20217 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
20218 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
20220 /* If emit_load_toc_table will use the link register, we need to save
20221 it. We use R12 for this purpose because emit_load_toc_table
20222 can use register 0. This allows us to use a plain 'blr' to return
20223 from the procedure more often. */
20224 int save_LR_around_toc_setup = (TARGET_ELF
20225 && DEFAULT_ABI != ABI_AIX
20226 && flag_pic
20227 && ! info->lr_save_p
20228 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
20229 if (save_LR_around_toc_setup)
20231 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20232 rtx tmp = gen_rtx_REG (Pmode, 12);
20234 insn = emit_move_insn (tmp, lr);
20235 RTX_FRAME_RELATED_P (insn) = 1;
20237 rs6000_emit_load_toc_table (TRUE);
20239 insn = emit_move_insn (lr, tmp);
20240 add_reg_note (insn, REG_CFA_RESTORE, lr);
20241 RTX_FRAME_RELATED_P (insn) = 1;
20243 else
20244 rs6000_emit_load_toc_table (TRUE);
20247 #if TARGET_MACHO
20248 if (!TARGET_SINGLE_PIC_BASE
20249 && DEFAULT_ABI == ABI_DARWIN
20250 && flag_pic && crtl->uses_pic_offset_table)
20252 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20253 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
20255 /* Save and restore LR locally around this call (in R0). */
20256 if (!info->lr_save_p)
20257 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
20259 emit_insn (gen_load_macho_picbase (src));
20261 emit_move_insn (gen_rtx_REG (Pmode,
20262 RS6000_PIC_OFFSET_TABLE_REGNUM),
20263 lr);
20265 if (!info->lr_save_p)
20266 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
20268 #endif
20270 /* If we need to, save the TOC register after doing the stack setup.
20271 Do not emit eh frame info for this save. The unwinder wants info,
20272 conceptually attached to instructions in this function, about
20273 register values in the caller of this function. This R2 may have
20274 already been changed from the value in the caller.
20275 We don't attempt to write accurate DWARF EH frame info for R2
20276 because code emitted by gcc for a (non-pointer) function call
20277 doesn't save and restore R2. Instead, R2 is managed out-of-line
20278 by a linker generated plt call stub when the function resides in
20279 a shared library. This behaviour is costly to describe in DWARF,
20280 both in terms of the size of DWARF info and the time taken in the
20281 unwinder to interpret it. R2 changes, apart from the
20282 calls_eh_return case earlier in this function, are handled by
20283 linux-unwind.h frob_update_context. */
20284 if (rs6000_save_toc_in_prologue_p ())
20286 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
20287 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
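/* Illustrative sketch, not compiled in: the store above targets the
   ABI-defined TOC slot in the linkage area, five words up from the
   new stack pointer.  */
#if 0
static int
toc_slot_offset (int reg_size)
{
  return 5 * reg_size;	/* 20(r1) for -m32 ("stw 2,20(1)"),
			   40(r1) for -m64 ("std 2,40(1)").  */
}
#endif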
20291 /* Write function prologue. */
20293 static void
20294 rs6000_output_function_prologue (FILE *file,
20295 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20297 rs6000_stack_t *info = rs6000_stack_info ();
20299 if (TARGET_DEBUG_STACK)
20300 debug_stack_info (info);
20302 /* Write .extern for any function we will call to save and restore
20303 fp values. */
20304 if (info->first_fp_reg_save < 64
20305 && !TARGET_MACHO
20306 && !TARGET_ELF)
20308 char *name;
20309 int regno = info->first_fp_reg_save - 32;
20311 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
20313 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20314 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20315 name = rs6000_savres_routine_name (info, regno, sel);
20316 fprintf (file, "\t.extern %s\n", name);
20318 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
20320 bool lr = (info->savres_strategy
20321 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20322 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20323 name = rs6000_savres_routine_name (info, regno, sel);
20324 fprintf (file, "\t.extern %s\n", name);
20328 rs6000_pic_labelno++;
20331 /* Non-zero if vmx regs are restored before the frame pop, zero if
20332 we restore after the pop when possible. */
20333 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
20335 /* Restoring cr is a two-step process: loading a reg from the frame
20336 save, then moving the reg to cr. For ABI_V4 we must let the
20337 unwinder know that the stack location is no longer valid at or
20338 before the stack deallocation, but we can't emit a cfa_restore for
20339 cr at the stack deallocation like we do for other registers.
20340 The trouble is that it is possible for the move to cr to be
20341 scheduled after the stack deallocation. So say exactly where cr
20342 is located on each of the two insns. */
20344 static rtx
20345 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
20347 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
20348 rtx reg = gen_rtx_REG (SImode, regno);
20349 rtx insn = emit_move_insn (reg, mem);
20351 if (!exit_func && DEFAULT_ABI == ABI_V4)
20353 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20354 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
20356 add_reg_note (insn, REG_CFA_REGISTER, set);
20357 RTX_FRAME_RELATED_P (insn) = 1;
20359 return reg;
20362 /* Reload CR from REG. */
20364 static void
20365 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
20367 int count = 0;
20368 int i;
20370 if (using_mfcr_multiple)
20372 for (i = 0; i < 8; i++)
20373 if (save_reg_p (CR0_REGNO + i))
20374 count++;
20375 gcc_assert (count);
20378 if (using_mfcr_multiple && count > 1)
20380 rtvec p;
20381 int ndx;
20383 p = rtvec_alloc (count);
20385 ndx = 0;
20386 for (i = 0; i < 8; i++)
20387 if (save_reg_p (CR0_REGNO + i))
20389 rtvec r = rtvec_alloc (2);
20390 RTVEC_ELT (r, 0) = reg;
20391 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
20392 RTVEC_ELT (p, ndx) =
20393 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
20394 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
20395 ndx++;
20397 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20398 gcc_assert (ndx == count);
20400 else
20401 for (i = 0; i < 8; i++)
20402 if (save_reg_p (CR0_REGNO + i))
20403 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
20404 reg));
20406 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20408 rtx insn = get_last_insn ();
20409 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20411 add_reg_note (insn, REG_CFA_RESTORE, cr);
20412 RTX_FRAME_RELATED_P (insn) = 1;
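/* Illustrative sketch, not compiled in: the 1 << (7-i) above builds the
   mtcrf/mtocrf field-select mask, in which CR field 0 occupies the
   most-significant bit of the 8-bit mask.  Helper name is hypothetical.  */
#if 0
static int
cr_field_mask (int i)	/* i = 0 (cr0) .. 7 (cr7) */
{
  return 1 << (7 - i);	/* cr0 -> 0x80, cr7 -> 0x01 */
}
#endif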
20416 /* Like cr, the move to lr instruction can be scheduled after the
20417 stack deallocation, but unlike cr, its stack frame save is still
20418 valid. So we only need to emit the cfa_restore on the correct
20419 instruction. */
20421 static void
20422 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
20424 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
20425 rtx reg = gen_rtx_REG (Pmode, regno);
20427 emit_move_insn (reg, mem);
20430 static void
20431 restore_saved_lr (int regno, bool exit_func)
20433 rtx reg = gen_rtx_REG (Pmode, regno);
20434 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20435 rtx insn = emit_move_insn (lr, reg);
20437 if (!exit_func && flag_shrink_wrap)
20439 add_reg_note (insn, REG_CFA_RESTORE, lr);
20440 RTX_FRAME_RELATED_P (insn) = 1;
20444 static rtx
20445 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
20447 if (info->cr_save_p)
20448 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20449 gen_rtx_REG (SImode, CR2_REGNO),
20450 cfa_restores);
20451 if (info->lr_save_p)
20452 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20453 gen_rtx_REG (Pmode, LR_REGNO),
20454 cfa_restores);
20455 return cfa_restores;
20458 /* Return true if OFFSET from stack pointer can be clobbered by signals.
20459 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
20460 below the stack pointer that are not clobbered by signals. */
20462 static inline bool
20463 offset_below_red_zone_p (HOST_WIDE_INT offset)
20465 return offset < (DEFAULT_ABI == ABI_V4
20466 ? 0
20467 : TARGET_32BIT ? -220 : -288);
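/* Illustrative sketch, not compiled in: the same predicate with the
   ABI choices spelled out as plain ints, plus a few spot checks.  The
   helper name and flag parameters are assumptions for the example.  */
#if 0
static int
below_red_zone (long offset, int abi_v4, int is_32bit)
{
  long cushion = abi_v4 ? 0 : is_32bit ? -220 : -288;
  return offset < cushion;
}
/* below_red_zone (-288, 0, 0) == 0  -- deepest protected byte, 64-bit AIX
   below_red_zone (-289, 0, 0) == 1  -- past the red zone, may be clobbered
   below_red_zone (-1, 1, 0)   == 1  -- V.4 protects nothing below the SP  */
#endif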
20470 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
20472 static void
20473 emit_cfa_restores (rtx cfa_restores)
20475 rtx insn = get_last_insn ();
20476 rtx *loc = &REG_NOTES (insn);
20478 while (*loc)
20479 loc = &XEXP (*loc, 1);
20480 *loc = cfa_restores;
20481 RTX_FRAME_RELATED_P (insn) = 1;
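/* Illustrative sketch, not compiled in: the loop above is the usual
   pointer-to-pointer append, shown here on an ordinary singly linked
   list.  The type and names are hypothetical.  */
#if 0
struct node { struct node *next; };

static void
append_chain (struct node **head, struct node *tail)
{
  while (*head)
    head = &(*head)->next;	/* walk to the terminating NULL */
  *head = tail;			/* splice the new chain there */
}
#endif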
20484 /* Emit function epilogue as insns. */
20486 void
20487 rs6000_emit_epilogue (int sibcall)
20489 rs6000_stack_t *info;
20490 int restoring_GPRs_inline;
20491 int restoring_FPRs_inline;
20492 int using_load_multiple;
20493 int using_mtcr_multiple;
20494 int use_backchain_to_restore_sp;
20495 int restore_lr;
20496 int strategy;
20497 HOST_WIDE_INT frame_off = 0;
20498 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
20499 rtx frame_reg_rtx = sp_reg_rtx;
20500 rtx cfa_restores = NULL_RTX;
20501 rtx insn;
20502 rtx cr_save_reg = NULL_RTX;
20503 enum machine_mode reg_mode = Pmode;
20504 int reg_size = TARGET_32BIT ? 4 : 8;
20505 int i;
20506 bool exit_func;
20507 unsigned ptr_regno;
20509 info = rs6000_stack_info ();
20511 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20513 reg_mode = V2SImode;
20514 reg_size = 8;
20517 strategy = info->savres_strategy;
20518 using_load_multiple = strategy & SAVRES_MULTIPLE;
20519 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
20520 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
20521 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
20522 || rs6000_cpu == PROCESSOR_PPC603
20523 || rs6000_cpu == PROCESSOR_PPC750
20524 || optimize_size);
20525 /* Restore via the backchain when we have a large frame, since this
20526 is more efficient than an addis, addi pair. The second condition
20527 here will not trigger at the moment; we don't actually need a
20528 frame pointer for alloca, but the generic parts of the compiler
20529 give us one anyway. */
20530 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
20531 || (cfun->calls_alloca
20532 && !frame_pointer_needed));
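/* Illustrative sketch, not compiled in: "restore via the backchain"
   works because the word at 0(r1) always holds the caller's r1, so a
   single load recovers the frame top however large the frame is.  */
#if 0
static void *
frame_top_via_backchain (void *sp)
{
  return *(void **) sp;		/* lwz/ld rN,0(r1) */
}
#endif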
20533 restore_lr = (info->lr_save_p
20534 && (restoring_FPRs_inline
20535 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
20536 && (restoring_GPRs_inline
20537 || info->first_fp_reg_save < 64));
20539 if (WORLD_SAVE_P (info))
20541 int i, j;
20542 char rname[30];
20543 const char *alloc_rname;
20544 rtvec p;
20546 /* eh_rest_world_r10 will return to the location saved in the LR
20547 stack slot (which is not likely to be our caller).
20548 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
20549 rest_world is similar, except any R10 parameter is ignored.
20550 The exception-handling stuff that was here in 2.95 is no
20551 longer necessary. */
20553 p = rtvec_alloc (9
20554 + 1
20555 + 32 - info->first_gp_reg_save
20556 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
20557 + 63 + 1 - info->first_fp_reg_save);
20559 strcpy (rname, ((crtl->calls_eh_return) ?
20560 "*eh_rest_world_r10" : "*rest_world"));
20561 alloc_rname = ggc_strdup (rname);
20563 j = 0;
20564 RTVEC_ELT (p, j++) = ret_rtx;
20565 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20566 gen_rtx_REG (Pmode,
20567 LR_REGNO));
20568 RTVEC_ELT (p, j++)
20569 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
20570 /* The instruction pattern requires a clobber here;
20571 it is shared with the restVEC helper. */
20572 RTVEC_ELT (p, j++)
20573 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
20576 /* CR register traditionally saved as CR2. */
20577 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
20578 RTVEC_ELT (p, j++)
20579 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
20580 if (flag_shrink_wrap)
20582 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20583 gen_rtx_REG (Pmode, LR_REGNO),
20584 cfa_restores);
20585 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20589 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20591 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20592 RTVEC_ELT (p, j++)
20593 = gen_frame_load (reg,
20594 frame_reg_rtx, info->gp_save_offset + reg_size * i);
20595 if (flag_shrink_wrap)
20596 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20598 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20600 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
20601 RTVEC_ELT (p, j++)
20602 = gen_frame_load (reg,
20603 frame_reg_rtx, info->altivec_save_offset + 16 * i);
20604 if (flag_shrink_wrap)
20605 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20607 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
20609 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20610 ? DFmode : SFmode),
20611 info->first_fp_reg_save + i);
20612 RTVEC_ELT (p, j++)
20613 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
20614 if (flag_shrink_wrap)
20615 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20617 RTVEC_ELT (p, j++)
20618 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
20619 RTVEC_ELT (p, j++)
20620 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
20621 RTVEC_ELT (p, j++)
20622 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
20623 RTVEC_ELT (p, j++)
20624 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
20625 RTVEC_ELT (p, j++)
20626 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
20627 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
20629 if (flag_shrink_wrap)
20631 REG_NOTES (insn) = cfa_restores;
20632 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20633 RTX_FRAME_RELATED_P (insn) = 1;
20635 return;
20638 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
20639 if (info->push_p)
20640 frame_off = info->total_size;
20642 /* Restore AltiVec registers if we must do so before adjusting the
20643 stack. */
20644 if (TARGET_ALTIVEC_ABI
20645 && info->altivec_size != 0
20646 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20647 || (DEFAULT_ABI != ABI_V4
20648 && offset_below_red_zone_p (info->altivec_save_offset))))
20650 int i;
20651 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20653 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20654 if (use_backchain_to_restore_sp)
20656 int frame_regno = 11;
20658 if ((strategy & REST_INLINE_VRS) == 0)
20660 /* Of r11 and r12, select the one not clobbered by an
20661 out-of-line restore function for the frame register. */
20662 frame_regno = 11 + 12 - scratch_regno;
20664 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
20665 emit_move_insn (frame_reg_rtx,
20666 gen_rtx_MEM (Pmode, sp_reg_rtx));
20667 frame_off = 0;
20669 else if (frame_pointer_needed)
20670 frame_reg_rtx = hard_frame_pointer_rtx;
20672 if ((strategy & REST_INLINE_VRS) == 0)
20674 int end_save = info->altivec_save_offset + info->altivec_size;
20675 int ptr_off;
20676 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20677 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20679 if (end_save + frame_off != 0)
20681 rtx offset = GEN_INT (end_save + frame_off);
20683 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20685 else
20686 emit_move_insn (ptr_reg, frame_reg_rtx);
20688 ptr_off = -end_save;
20689 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20690 info->altivec_save_offset + ptr_off,
20691 0, V4SImode, SAVRES_VR);
20693 else
20695 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20696 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20698 rtx addr, areg, mem, reg;
20700 areg = gen_rtx_REG (Pmode, 0);
20701 emit_move_insn
20702 (areg, GEN_INT (info->altivec_save_offset
20703 + frame_off
20704 + 16 * (i - info->first_altivec_reg_save)));
20706 /* AltiVec addressing mode is [reg+reg]. */
20707 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20708 mem = gen_frame_mem (V4SImode, addr);
20710 reg = gen_rtx_REG (V4SImode, i);
20711 emit_move_insn (reg, mem);
20715 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20716 if (((strategy & REST_INLINE_VRS) == 0
20717 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20718 && (flag_shrink_wrap
20719 || (offset_below_red_zone_p
20720 (info->altivec_save_offset
20721 + 16 * (i - info->first_altivec_reg_save)))))
20723 rtx reg = gen_rtx_REG (V4SImode, i);
20724 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20728 /* Restore VRSAVE if we must do so before adjusting the stack. */
20729 if (TARGET_ALTIVEC
20730 && TARGET_ALTIVEC_VRSAVE
20731 && info->vrsave_mask != 0
20732 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20733 || (DEFAULT_ABI != ABI_V4
20734 && offset_below_red_zone_p (info->vrsave_save_offset))))
20736 rtx reg;
20738 if (frame_reg_rtx == sp_reg_rtx)
20740 if (use_backchain_to_restore_sp)
20742 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20743 emit_move_insn (frame_reg_rtx,
20744 gen_rtx_MEM (Pmode, sp_reg_rtx));
20745 frame_off = 0;
20747 else if (frame_pointer_needed)
20748 frame_reg_rtx = hard_frame_pointer_rtx;
20751 reg = gen_rtx_REG (SImode, 12);
20752 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20753 info->vrsave_save_offset + frame_off));
20755 emit_insn (generate_set_vrsave (reg, info, 1));
20758 insn = NULL_RTX;
20759 /* If we have a large stack frame, restore the old stack pointer
20760 using the backchain. */
20761 if (use_backchain_to_restore_sp)
20763 if (frame_reg_rtx == sp_reg_rtx)
20765 /* Under V.4, don't reset the stack pointer until after we're done
20766 loading the saved registers. */
20767 if (DEFAULT_ABI == ABI_V4)
20768 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20770 insn = emit_move_insn (frame_reg_rtx,
20771 gen_rtx_MEM (Pmode, sp_reg_rtx));
20772 frame_off = 0;
20774 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20775 && DEFAULT_ABI == ABI_V4)
20776 /* frame_reg_rtx has been set up by the altivec restore. */
20778 else
20780 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
20781 frame_reg_rtx = sp_reg_rtx;
20784 /* If we have a frame pointer, we can restore the old stack pointer
20785 from it. */
20786 else if (frame_pointer_needed)
20788 frame_reg_rtx = sp_reg_rtx;
20789 if (DEFAULT_ABI == ABI_V4)
20790 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20791 /* Prevent reordering memory accesses against stack pointer restore. */
20792 else if (cfun->calls_alloca
20793 || offset_below_red_zone_p (-info->total_size))
20794 rs6000_emit_stack_tie (frame_reg_rtx, true);
20796 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
20797 GEN_INT (info->total_size)));
20798 frame_off = 0;
20800 else if (info->push_p
20801 && DEFAULT_ABI != ABI_V4
20802 && !crtl->calls_eh_return)
20804 /* Prevent reordering memory accesses against stack pointer restore. */
20805 if (cfun->calls_alloca
20806 || offset_below_red_zone_p (-info->total_size))
20807 rs6000_emit_stack_tie (frame_reg_rtx, false);
20808 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
20809 GEN_INT (info->total_size)));
20810 frame_off = 0;
20812 if (insn && frame_reg_rtx == sp_reg_rtx)
20814 if (cfa_restores)
20816 REG_NOTES (insn) = cfa_restores;
20817 cfa_restores = NULL_RTX;
20819 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20820 RTX_FRAME_RELATED_P (insn) = 1;
20823 /* Restore AltiVec registers if we have not done so already. */
20824 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20825 && TARGET_ALTIVEC_ABI
20826 && info->altivec_size != 0
20827 && (DEFAULT_ABI == ABI_V4
20828 || !offset_below_red_zone_p (info->altivec_save_offset)))
20830 int i;
20832 if ((strategy & REST_INLINE_VRS) == 0)
20834 int end_save = info->altivec_save_offset + info->altivec_size;
20835 int ptr_off;
20836 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20837 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20838 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20840 if (end_save + frame_off != 0)
20842 rtx offset = GEN_INT (end_save + frame_off);
20844 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20846 else
20847 emit_move_insn (ptr_reg, frame_reg_rtx);
20849 ptr_off = -end_save;
20850 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20851 info->altivec_save_offset + ptr_off,
20852 0, V4SImode, SAVRES_VR);
20853 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20855 /* Frame reg was clobbered by out-of-line save. Restore it
20856 from ptr_reg, and if we are calling an out-of-line gpr or
20857 fpr restore, set up the correct pointer and offset. */
20858 unsigned newptr_regno = 1;
20859 if (!restoring_GPRs_inline)
20861 bool lr = info->gp_save_offset + info->gp_size == 0;
20862 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20863 newptr_regno = ptr_regno_for_savres (sel);
20864 end_save = info->gp_save_offset + info->gp_size;
20866 else if (!restoring_FPRs_inline)
20868 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
20869 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20870 newptr_regno = ptr_regno_for_savres (sel);
20871 end_save = info->gp_save_offset + info->gp_size;
20874 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
20875 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
20877 if (end_save + ptr_off != 0)
20879 rtx offset = GEN_INT (end_save + ptr_off);
20881 frame_off = -end_save;
20882 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
20884 else
20886 frame_off = ptr_off;
20887 emit_move_insn (frame_reg_rtx, ptr_reg);
20891 else
20893 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20894 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20896 rtx addr, areg, mem, reg;
20898 areg = gen_rtx_REG (Pmode, 0);
20899 emit_move_insn
20900 (areg, GEN_INT (info->altivec_save_offset
20901 + frame_off
20902 + 16 * (i - info->first_altivec_reg_save)));
20904 /* AltiVec addressing mode is [reg+reg]. */
20905 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20906 mem = gen_frame_mem (V4SImode, addr);
20908 reg = gen_rtx_REG (V4SImode, i);
20909 emit_move_insn (reg, mem);
20913 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20914 if (((strategy & REST_INLINE_VRS) == 0
20915 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20916 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20918 rtx reg = gen_rtx_REG (V4SImode, i);
20919 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20923 /* Restore VRSAVE if we have not done so already. */
20924 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20925 && TARGET_ALTIVEC
20926 && TARGET_ALTIVEC_VRSAVE
20927 && info->vrsave_mask != 0
20928 && (DEFAULT_ABI == ABI_V4
20929 || !offset_below_red_zone_p (info->vrsave_save_offset)))
20931 rtx reg;
20933 reg = gen_rtx_REG (SImode, 12);
20934 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20935 info->vrsave_save_offset + frame_off));
20937 emit_insn (generate_set_vrsave (reg, info, 1));
20940 /* If we exit by an out-of-line restore function on ABI_V4 then that
20941 function will deallocate the stack, so we don't need to worry
20942 about the unwinder restoring cr from an invalid stack frame
20943 location. */
20944 exit_func = (!restoring_FPRs_inline
20945 || (!restoring_GPRs_inline
20946 && info->first_fp_reg_save == 64));
20948 /* Get the old lr if we saved it. If we are restoring registers
20949 out-of-line, then the out-of-line routines can do this for us. */
20950 if (restore_lr && restoring_GPRs_inline)
20951 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
20953 /* Get the old cr if we saved it. */
20954 if (info->cr_save_p)
20956 unsigned cr_save_regno = 12;
20958 if (!restoring_GPRs_inline)
20960 /* Ensure we don't use the register used by the out-of-line
20961 gpr register restore below. */
20962 bool lr = info->gp_save_offset + info->gp_size == 0;
20963 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20964 int gpr_ptr_regno = ptr_regno_for_savres (sel);
20966 if (gpr_ptr_regno == 12)
20967 cr_save_regno = 11;
20968 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
20970 else if (REGNO (frame_reg_rtx) == 12)
20971 cr_save_regno = 11;
20973 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
20974 info->cr_save_offset + frame_off,
20975 exit_func);
20978 /* Set LR here to try to overlap restores below. */
20979 if (restore_lr && restoring_GPRs_inline)
20980 restore_saved_lr (0, exit_func);
20982 /* Load exception handler data registers, if needed. */
20983 if (crtl->calls_eh_return)
20985 unsigned int i, regno;
20987 if (TARGET_AIX)
20989 rtx reg = gen_rtx_REG (reg_mode, 2);
20990 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20991 frame_off + 5 * reg_size));
20994 for (i = 0; ; ++i)
20996 rtx mem;
20998 regno = EH_RETURN_DATA_REGNO (i);
20999 if (regno == INVALID_REGNUM)
21000 break;
21002 /* Note: possible use of r0 here to address SPE regs. */
21003 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
21004 info->ehrd_offset + frame_off
21005 + reg_size * (int) i);
21007 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
21011 /* Restore GPRs. This is done as a PARALLEL if we are using
21012 the load-multiple instructions. */
21013 if (TARGET_SPE_ABI
21014 && info->spe_64bit_regs_used
21015 && info->first_gp_reg_save != 32)
21017 /* Determine whether we can address all of the registers that need
21018 to be saved with an offset from frame_reg_rtx that fits in
21019 the small const field for SPE memory instructions. */
21020 int spe_regs_addressable
21021 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21022 + reg_size * (32 - info->first_gp_reg_save - 1))
21023 && restoring_GPRs_inline);
21025 if (!spe_regs_addressable)
21027 int ool_adjust = 0;
21028 rtx old_frame_reg_rtx = frame_reg_rtx;
21029 /* Make r11 point to the start of the SPE save area. We worried about
21030 not clobbering it when we were saving registers in the prologue.
21031 There's no need to worry here because the static chain is passed
21032 anew to every function. */
21034 if (!restoring_GPRs_inline)
21035 ool_adjust = 8 * (info->first_gp_reg_save
21036 - (FIRST_SAVRES_REGISTER + 1));
21037 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21038 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
21039 GEN_INT (info->spe_gp_save_offset
21040 + frame_off
21041 - ool_adjust)));
21042 /* Keep the invariant that frame_reg_rtx + frame_off points
21043 at the top of the stack frame. */
21044 frame_off = -info->spe_gp_save_offset + ool_adjust;
21047 if (restoring_GPRs_inline)
21049 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
21051 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21052 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21054 rtx offset, addr, mem, reg;
21056 /* We're doing all this to ensure that the immediate offset
21057 fits into the immediate field of 'evldd'. */
21058 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
21060 offset = GEN_INT (spe_offset + reg_size * i);
21061 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
21062 mem = gen_rtx_MEM (V2SImode, addr);
21063 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21065 emit_move_insn (reg, mem);
21068 else
21069 rs6000_emit_savres_rtx (info, frame_reg_rtx,
21070 info->spe_gp_save_offset + frame_off,
21071 info->lr_save_offset + frame_off,
21072 reg_mode,
21073 SAVRES_GPR | SAVRES_LR);
21075 else if (!restoring_GPRs_inline)
21077 /* We are jumping to an out-of-line function. */
21078 rtx ptr_reg;
21079 int end_save = info->gp_save_offset + info->gp_size;
21080 bool can_use_exit = end_save == 0;
21081 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
21082 int ptr_off;
21084 /* Emit stack reset code if we need it. */
21085 ptr_regno = ptr_regno_for_savres (sel);
21086 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21087 if (can_use_exit)
21088 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21089 else if (end_save + frame_off != 0)
21090 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
21091 GEN_INT (end_save + frame_off)));
21092 else if (REGNO (frame_reg_rtx) != ptr_regno)
21093 emit_move_insn (ptr_reg, frame_reg_rtx);
21094 if (REGNO (frame_reg_rtx) == ptr_regno)
21095 frame_off = -end_save;
21097 if (can_use_exit && info->cr_save_p)
21098 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
21100 ptr_off = -end_save;
21101 rs6000_emit_savres_rtx (info, ptr_reg,
21102 info->gp_save_offset + ptr_off,
21103 info->lr_save_offset + ptr_off,
21104 reg_mode, sel);
21106 else if (using_load_multiple)
21108 rtvec p;
21109 p = rtvec_alloc (32 - info->first_gp_reg_save);
21110 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21111 RTVEC_ELT (p, i)
21112 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21113 frame_reg_rtx,
21114 info->gp_save_offset + frame_off + reg_size * i);
21115 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21117 else
21119 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21120 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21121 emit_insn (gen_frame_load
21122 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21123 frame_reg_rtx,
21124 info->gp_save_offset + frame_off + reg_size * i));
21127 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21129 /* If the frame pointer was used then we can't delay emitting
21130 a REG_CFA_DEF_CFA note. This must happen on the insn that
21131 restores the frame pointer, r31. We may have already emitted
21132 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
21133 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
21134 be harmless if emitted. */
21135 if (frame_pointer_needed)
21137 insn = get_last_insn ();
21138 add_reg_note (insn, REG_CFA_DEF_CFA,
21139 plus_constant (Pmode, frame_reg_rtx, frame_off));
21140 RTX_FRAME_RELATED_P (insn) = 1;
21143 /* Set up cfa_restores. We always need these when
21144 shrink-wrapping. If not shrink-wrapping then we only need
21145 the cfa_restore when the stack location is no longer valid.
21146 The cfa_restores must be emitted on or before the insn that
21147 invalidates the stack, and of course must not be emitted
21148 before the insn that actually does the restore. The latter
21149 is why it is a bad idea to emit the cfa_restores as a group
21150 on the last instruction here that actually does a restore:
21151 That insn may be reordered with respect to others doing
21152 restores. */
21153 if (flag_shrink_wrap
21154 && !restoring_GPRs_inline
21155 && info->first_fp_reg_save == 64)
21156 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21158 for (i = info->first_gp_reg_save; i < 32; i++)
21159 if (!restoring_GPRs_inline
21160 || using_load_multiple
21161 || rs6000_reg_live_or_pic_offset_p (i))
21163 rtx reg = gen_rtx_REG (reg_mode, i);
21165 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21169 if (!restoring_GPRs_inline
21170 && info->first_fp_reg_save == 64)
21172 /* We are jumping to an out-of-line function. */
21173 if (cfa_restores)
21174 emit_cfa_restores (cfa_restores);
21175 return;
21178 if (restore_lr && !restoring_GPRs_inline)
21180 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21181 restore_saved_lr (0, exit_func);
21184 /* Restore fpr's if we need to do it without calling a function. */
21185 if (restoring_FPRs_inline)
21186 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21187 if (save_reg_p (info->first_fp_reg_save + i))
21189 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21190 ? DFmode : SFmode),
21191 info->first_fp_reg_save + i);
21192 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21193 info->fp_save_offset + frame_off + 8 * i));
21194 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21195 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21198 /* If we saved cr, restore it here. Just those that were used. */
21199 if (info->cr_save_p)
21200 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
21202 /* If this is V.4, unwind the stack pointer after all of the loads
21203 have been done, or set up r11 if we are restoring fp out of line. */
21204 ptr_regno = 1;
21205 if (!restoring_FPRs_inline)
21207 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21208 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21209 ptr_regno = ptr_regno_for_savres (sel);
21212 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21213 if (REGNO (frame_reg_rtx) == ptr_regno)
21214 frame_off = 0;
21216 if (insn && restoring_FPRs_inline)
21218 if (cfa_restores)
21220 REG_NOTES (insn) = cfa_restores;
21221 cfa_restores = NULL_RTX;
21223 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21224 RTX_FRAME_RELATED_P (insn) = 1;
21227 if (crtl->calls_eh_return)
21229 rtx sa = EH_RETURN_STACKADJ_RTX;
21230 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21233 if (!sibcall)
21235 rtvec p;
21236 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21237 if (! restoring_FPRs_inline)
21239 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21240 RTVEC_ELT (p, 0) = ret_rtx;
21242 else
21244 if (cfa_restores)
21246 /* We can't hang the cfa_restores off a simple return,
21247 since the shrink-wrap code sometimes uses an existing
21248 return. This means there might be a path from
21249 pre-prologue code to this return, and dwarf2cfi code
21250 wants the eh_frame unwinder state to be the same on
21251 all paths to any point. So we need to emit the
21252 cfa_restores before the return. For -m64 we really
21253 don't need epilogue cfa_restores at all, except for
21254 this irritating dwarf2cfi-with-shrink-wrap
21255 requirement; the stack red-zone means eh_frame info
21256 from the prologue telling the unwinder to restore
21257 from the stack is perfectly good right to the end of
21258 the function. */
21259 emit_insn (gen_blockage ());
21260 emit_cfa_restores (cfa_restores);
21261 cfa_restores = NULL_RTX;
21263 p = rtvec_alloc (2);
21264 RTVEC_ELT (p, 0) = simple_return_rtx;
21267 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21268 ? gen_rtx_USE (VOIDmode,
21269 gen_rtx_REG (Pmode, LR_REGNO))
21270 : gen_rtx_CLOBBER (VOIDmode,
21271 gen_rtx_REG (Pmode, LR_REGNO)));
21273 /* If we have to restore more than two FP registers, branch to the
21274 restore function. It will return to our caller. */
21275 if (! restoring_FPRs_inline)
21277 int i;
21278 rtx sym;
21280 if (flag_shrink_wrap)
21281 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21283 sym = rs6000_savres_routine_sym (info,
21284 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21285 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21286 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21287 gen_rtx_REG (Pmode,
21288 DEFAULT_ABI == ABI_AIX
21289 ? 1 : 11));
21290 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21292 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21294 RTVEC_ELT (p, i + 4)
21295 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21296 if (flag_shrink_wrap)
21297 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21298 cfa_restores);
21302 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21305 if (cfa_restores)
21307 if (sibcall)
21308 /* Ensure the cfa_restores are hung off an insn that won't
21309 be reordered above other restores. */
21310 emit_insn (gen_blockage ());
21312 emit_cfa_restores (cfa_restores);
21316 /* Write function epilogue. */
21318 static void
21319 rs6000_output_function_epilogue (FILE *file,
21320 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21322 #if TARGET_MACHO
21323 macho_branch_islands ();
21324 /* Mach-O doesn't support labels at the end of objects, so if
21325 it looks like we might want one, insert a NOP. */
21327 rtx insn = get_last_insn ();
21328 rtx deleted_debug_label = NULL_RTX;
21329 while (insn
21330 && NOTE_P (insn)
21331 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21333 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
21334 notes; instead set their CODE_LABEL_NUMBER to -1.
21335 Otherwise there would be code generation differences
21336 between -g and -g0. */
21337 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21338 deleted_debug_label = insn;
21339 insn = PREV_INSN (insn);
21341 if (insn
21342 && (LABEL_P (insn)
21343 || (NOTE_P (insn)
21344 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21345 fputs ("\tnop\n", file);
21346 else if (deleted_debug_label)
21347 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21348 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21349 CODE_LABEL_NUMBER (insn) = -1;
21351 #endif
21353 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21354 on its format.
21356 We don't output a traceback table if -finhibit-size-directive was
21357 used. The documentation for -finhibit-size-directive reads
21358 ``don't output a @code{.size} assembler directive, or anything
21359 else that would cause trouble if the function is split in the
21360 middle, and the two halves are placed at locations far apart in
21361 memory.'' The traceback table has this property, since it
21362 includes the offset from the start of the function to the
21363 traceback table itself.
21365 System V.4 PowerPCs (and the embedded ABI derived from it) use a
21366 different traceback table. */
21367 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21368 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21370 const char *fname = NULL;
21371 const char *language_string = lang_hooks.name;
21372 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21373 int i;
21374 int optional_tbtab;
21375 rs6000_stack_t *info = rs6000_stack_info ();
21377 if (rs6000_traceback == traceback_full)
21378 optional_tbtab = 1;
21379 else if (rs6000_traceback == traceback_part)
21380 optional_tbtab = 0;
21381 else
21382 optional_tbtab = !optimize_size && !TARGET_ELF;
21384 if (optional_tbtab)
21386 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21387 while (*fname == '.') /* V.4 encodes . in the name */
21388 fname++;
21390 /* Need label immediately before tbtab, so we can compute
21391 its offset from the function start. */
21392 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21393 ASM_OUTPUT_LABEL (file, fname);
21396 /* The .tbtab pseudo-op can only be used for the first eight
21397 expressions, since it can't handle the possibly variable
21398 length fields that follow. However, if you omit the optional
21399 fields, the assembler outputs zeros for all optional fields
21400 anyway, giving each variable-length field its minimum length
21401 (as defined in sys/debug.h). Thus we cannot use the .tbtab
21402 pseudo-op at all. */
21404 /* An all-zero word flags the start of the tbtab, for debuggers
21405 that have to find it by searching forward from the entry
21406 point or from the current pc. */
21407 fputs ("\t.long 0\n", file);
21409 /* Tbtab format type. Use format type 0. */
21410 fputs ("\t.byte 0,", file);
21412 /* Language type. Unfortunately, there does not seem to be any
21413 official way to discover the language being compiled, so we
21414 use language_string.
21415 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21416 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21417 a number, so for now use 9. LTO and Go aren't assigned numbers
21418 either, so for now use 0. */
21419 if (! strcmp (language_string, "GNU C")
21420 || ! strcmp (language_string, "GNU GIMPLE")
21421 || ! strcmp (language_string, "GNU Go"))
21422 i = 0;
21423 else if (! strcmp (language_string, "GNU F77")
21424 || ! strcmp (language_string, "GNU Fortran"))
21425 i = 1;
21426 else if (! strcmp (language_string, "GNU Pascal"))
21427 i = 2;
21428 else if (! strcmp (language_string, "GNU Ada"))
21429 i = 3;
21430 else if (! strcmp (language_string, "GNU C++")
21431 || ! strcmp (language_string, "GNU Objective-C++"))
21432 i = 9;
21433 else if (! strcmp (language_string, "GNU Java"))
21434 i = 13;
21435 else if (! strcmp (language_string, "GNU Objective-C"))
21436 i = 14;
21437 else
21438 gcc_unreachable ();
21439 fprintf (file, "%d,", i);
21441 /* 8 single bit fields: global linkage (not set for C extern linkage,
21442 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21443 from start of procedure stored in tbtab, internal function, function
21444 has controlled storage, function has no toc, function uses fp,
21445 function logs/aborts fp operations. */
21446 /* Assume that fp operations are used if any fp reg must be saved. */
21447 fprintf (file, "%d,",
21448 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
21450 /* 6 bitfields: function is interrupt handler, name present in
21451 proc table, function calls alloca, on condition directives
21452 (controls stack walks, 3 bits), saves condition reg, saves
21453 link reg. */
21454 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21455 set up as a frame pointer, even when there is no alloca call. */
21456 fprintf (file, "%d,",
21457 ((optional_tbtab << 6)
21458 | ((optional_tbtab & frame_pointer_needed) << 5)
21459 | (info->cr_save_p << 1)
21460 | (info->lr_save_p)));
21462 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21463 (6 bits). */
21464 fprintf (file, "%d,",
21465 (info->push_p << 7) | (64 - info->first_fp_reg_save));
21467 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21468 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21470 if (optional_tbtab)
21472 /* Compute the parameter info from the function decl argument
21473 list. */
21474 tree decl;
21475 int next_parm_info_bit = 31;
21477 for (decl = DECL_ARGUMENTS (current_function_decl);
21478 decl; decl = DECL_CHAIN (decl))
21480 rtx parameter = DECL_INCOMING_RTL (decl);
21481 enum machine_mode mode = GET_MODE (parameter);
21483 if (GET_CODE (parameter) == REG)
21485 if (SCALAR_FLOAT_MODE_P (mode))
21487 int bits;
21489 float_parms++;
21491 switch (mode)
21493 case SFmode:
21494 case SDmode:
21495 bits = 0x2;
21496 break;
21498 case DFmode:
21499 case DDmode:
21500 case TFmode:
21501 case TDmode:
21502 bits = 0x3;
21503 break;
21505 default:
21506 gcc_unreachable ();
21509 /* If only one bit will fit, don't or in this entry. */
21510 if (next_parm_info_bit > 0)
21511 parm_info |= (bits << (next_parm_info_bit - 1));
21512 next_parm_info_bit -= 2;
21514 else
21516 fixed_parms += ((GET_MODE_SIZE (mode)
21517 + (UNITS_PER_WORD - 1))
21518 / UNITS_PER_WORD);
21519 next_parm_info_bit -= 1;
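/* Worked example of the packing above (illustrative): for
   f (double, int, float) the fields fill from bit 31 downward --
   bits 31..30 = 11 (double), bit 29 = 0 (one fixed word), bits
   28..27 = 10 (single float) -- giving parm_info = 0xD0000000.  */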
21525 /* Number of fixed point parameters. */
21526 /* This is actually the number of words of fixed point parameters; thus
21527 an 8-byte struct counts as 2, and thus the maximum value is 8. */
21528 fprintf (file, "%d,", fixed_parms);
21530 /* 2 bitfields: number of floating point parameters (7 bits), parameters
21531 all on stack. */
21532 /* This is actually the number of fp registers that hold parameters;
21533 and thus the maximum value is 13. */
21534 /* Set parameters on stack bit if parameters are not in their original
21535 registers, regardless of whether they are on the stack? Xlc
21536 seems to set the bit when not optimizing. */
21537 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
21539 if (! optional_tbtab)
21540 return;
21542 /* Optional fields follow. Some are variable length. */
21544 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
21545 11 double float. */
21546 /* There is an entry for each parameter in a register, in the order that
21547 they occur in the parameter list. Any intervening arguments on the
21548 stack are ignored. If the list overflows a long (max possible length
21549 34 bits) then completely leave off all elements that don't fit. */
21550 /* Only emit this long if there was at least one parameter. */
21551 if (fixed_parms || float_parms)
21552 fprintf (file, "\t.long %d\n", parm_info);
21554 /* Offset from start of code to tb table. */
21555 fputs ("\t.long ", file);
21556 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21557 RS6000_OUTPUT_BASENAME (file, fname);
21558 putc ('-', file);
21559 rs6000_output_function_entry (file, fname);
21560 putc ('\n', file);
21562 /* Interrupt handler mask. */
21563 /* Omit this long, since we never set the interrupt handler bit
21564 above. */
21566 /* Number of CTL (controlled storage) anchors. */
21567 /* Omit this long, since the has_ctl bit is never set above. */
21569 /* Displacement into stack of each CTL anchor. */
21570 /* Omit this list of longs, because there are no CTL anchors. */
21572 /* Length of function name. */
21573 if (*fname == '*')
21574 ++fname;
21575 fprintf (file, "\t.short %d\n", (int) strlen (fname));
21577 /* Function name. */
21578 assemble_string (fname, strlen (fname));
21580 /* Register for alloca automatic storage; this is always reg 31.
21581 Only emit this if the alloca bit was set above. */
21582 if (frame_pointer_needed)
21583 fputs ("\t.byte 31\n", file);
21585 fputs ("\t.align 2\n", file);
21589 /* A C compound statement that outputs the assembler code for a thunk
21590 function, used to implement C++ virtual function calls with
21591 multiple inheritance. The thunk acts as a wrapper around a virtual
21592 function, adjusting the implicit object parameter before handing
21593 control off to the real function.
21595 First, emit code to add the integer DELTA to the location that
21596 contains the incoming first argument. Assume that this argument
21597 contains a pointer, and is the one used to pass the `this' pointer
21598 in C++. This is the incoming argument *before* the function
21599 prologue, e.g. `%o0' on a sparc. The addition must preserve the
21600 values of all other incoming arguments.
21602 After the addition, emit code to jump to FUNCTION, which is a
21603 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
21604 not touch the return address. Hence returning from FUNCTION will
21605 return to whoever called the current `thunk'.
21607 The effect must be as if FUNCTION had been called directly with the
21608 adjusted first argument. This macro is responsible for emitting
21609 all of the code for a thunk function; output_function_prologue()
21610 and output_function_epilogue() are not invoked.
21612 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
21613 been extracted from it.) It might possibly be useful on some
21614 targets, but probably not.
21616 If you do not define this macro, the target-independent code in the
21617 C++ frontend will generate a less efficient heavyweight thunk that
21618 calls FUNCTION instead of jumping to it. The generic approach does
21619 not support varargs. */
21621 static void
21622 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
21623 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
21624 tree function)
21626 rtx this_rtx, insn, funexp;
21628 reload_completed = 1;
21629 epilogue_completed = 1;
21631 /* Mark the end of the (empty) prologue. */
21632 emit_note (NOTE_INSN_PROLOGUE_END);
21634 /* Find the "this" pointer. If the function returns a structure,
21635 the structure return pointer is in r3. */
21636 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
21637 this_rtx = gen_rtx_REG (Pmode, 4);
21638 else
21639 this_rtx = gen_rtx_REG (Pmode, 3);
21641 /* Apply the constant offset, if required. */
21642 if (delta)
21643 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
21645 /* Apply the offset from the vtable, if required. */
21646 if (vcall_offset)
21648 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
21649 rtx tmp = gen_rtx_REG (Pmode, 12);
21651 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
21652 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
21654 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
21655 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
21657 else
21659 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
21661 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
21663 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
21666 /* Generate a tail call to the target function. */
21667 if (!TREE_USED (function))
21669 assemble_external (function);
21670 TREE_USED (function) = 1;
21672 funexp = XEXP (DECL_RTL (function), 0);
21673 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
21675 #if TARGET_MACHO
21676 if (MACHOPIC_INDIRECT)
21677 funexp = machopic_indirect_call_target (funexp);
21678 #endif
21680 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
21681 generate sibcall RTL explicitly. */
21682 insn = emit_call_insn (
21683 gen_rtx_PARALLEL (VOIDmode,
21684 gen_rtvec (4,
21685 gen_rtx_CALL (VOIDmode,
21686 funexp, const0_rtx),
21687 gen_rtx_USE (VOIDmode, const0_rtx),
21688 gen_rtx_USE (VOIDmode,
21689 gen_rtx_REG (SImode,
21690 LR_REGNO)),
21691 simple_return_rtx)));
21692 SIBLING_CALL_P (insn) = 1;
21693 emit_barrier ();
21695 /* Run just enough of rest_of_compilation to get the insns emitted.
21696 There's not really enough bulk here to make other passes such as
21697 instruction scheduling worthwhile. Note that use_thunk calls
21698 assemble_start_function and assemble_end_function. */
21699 insn = get_insns ();
21700 shorten_branches (insn);
21701 final_start_function (insn, file, 1);
21702 final (insn, file, 1);
21703 final_end_function ();
21705 reload_completed = 0;
21706 epilogue_completed = 0;
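/* Illustrative sketch, not compiled in: what the emitted thunk
   computes, written as plain C.  Assumes pointer-sized vtable slots;
   the helper name is hypothetical.  */
#if 0
static void *
thunk_adjust (void *this_ptr, long delta, long vcall_offset)
{
  this_ptr = (char *) this_ptr + delta;	/* constant adjustment */
  if (vcall_offset)
    {
      char *vptr = *(char **) this_ptr;	/* load the vtable pointer */
      long off = *(long *) (vptr + vcall_offset);
      this_ptr = (char *) this_ptr + off;	/* vcall adjustment */
    }
  return this_ptr;	/* then jump, not call, to FUNCTION */
}
#endif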
21709 /* A quick summary of the various types of 'constant-pool tables'
21710 under PowerPC:
21712 Target Flags Name One table per
21713 AIX (none) AIX TOC object file
21714 AIX -mfull-toc AIX TOC object file
21715 AIX -mminimal-toc AIX minimal TOC translation unit
21716 SVR4/EABI (none) SVR4 SDATA object file
21717 SVR4/EABI -fpic SVR4 pic object file
21718 SVR4/EABI -fPIC SVR4 PIC translation unit
21719 SVR4/EABI -mrelocatable EABI TOC function
21720 SVR4/EABI -maix AIX TOC object file
21721 SVR4/EABI -maix -mminimal-toc
21722 AIX minimal TOC translation unit
21724 Name Reg. Set by entries contains:
21725 made by addrs? fp? sum?
21727 AIX TOC 2 crt0 as Y option option
21728 AIX minimal TOC 30 prolog gcc Y Y option
21729 SVR4 SDATA 13 crt0 gcc N Y N
21730 SVR4 pic 30 prolog ld Y not yet N
21731 SVR4 PIC 30 prolog gcc Y option option
21732 EABI TOC 30 prolog gcc Y option option
21736 /* Hash functions for the hash table. */
21738 static unsigned
21739 rs6000_hash_constant (rtx k)
21741 enum rtx_code code = GET_CODE (k);
21742 enum machine_mode mode = GET_MODE (k);
21743 unsigned result = (code << 3) ^ mode;
21744 const char *format;
21745 int flen, fidx;
21747 format = GET_RTX_FORMAT (code);
21748 flen = strlen (format);
21749 fidx = 0;
21751 switch (code)
21753 case LABEL_REF:
21754 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
21756 case CONST_DOUBLE:
21757 if (mode != VOIDmode)
21758 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
21759 flen = 2;
21760 break;
21762 case CODE_LABEL:
21763 fidx = 3;
21764 break;
21766 default:
21767 break;
21770 for (; fidx < flen; fidx++)
21771 switch (format[fidx])
21773 case 's':
21775 unsigned i, len;
21776 const char *str = XSTR (k, fidx);
21777 len = strlen (str);
21778 result = result * 613 + len;
21779 for (i = 0; i < len; i++)
21780 result = result * 613 + (unsigned) str[i];
21781 break;
21783 case 'u':
21784 case 'e':
21785 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
21786 break;
21787 case 'i':
21788 case 'n':
21789 result = result * 613 + (unsigned) XINT (k, fidx);
21790 break;
21791 case 'w':
21792 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
21793 result = result * 613 + (unsigned) XWINT (k, fidx);
21794 else
21796 size_t i;
21797 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
21798 result = result * 613 + (unsigned) (XWINT (k, fidx)
21799 >> CHAR_BIT * i);
21801 break;
21802 case '0':
21803 break;
21804 default:
21805 gcc_unreachable ();
21808 return result;
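/* Illustrative sketch, not compiled in: the multiplicative folding
   used above, isolated for the string case.  613 and 1231 are the
   same odd multipliers the real function uses; strlen is available
   here via system.h.  */
#if 0
static unsigned
hash_string_613 (unsigned result, const char *str)
{
  unsigned i, len = strlen (str);
  result = result * 613 + len;
  for (i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif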
21811 static unsigned
21812 toc_hash_function (const void *hash_entry)
21814 const struct toc_hash_struct *thc =
21815 (const struct toc_hash_struct *) hash_entry;
21816 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
21819 /* Compare H1 and H2 for equivalence. */
21821 static int
21822 toc_hash_eq (const void *h1, const void *h2)
21824 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
21825 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
21827 if (((const struct toc_hash_struct *) h1)->key_mode
21828 != ((const struct toc_hash_struct *) h2)->key_mode)
21829 return 0;
21831 return rtx_equal_p (r1, r2);
21834 /* These are the names given by the C++ front-end to vtables and
21835 vtable-like objects. Ideally, this logic should not be here;
21836 instead, there should be some programmatic way of inquiring as
21837 to whether or not an object is a vtable. */
21839 #define VTABLE_NAME_P(NAME) \
21840 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
21841 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
21842 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
21843 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
21844 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
21846 #ifdef NO_DOLLAR_IN_LABEL
21847 /* Return a GGC-allocated character string translating dollar signs in
21848 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
21850 const char *
21851 rs6000_xcoff_strip_dollar (const char *name)
21853 char *strip, *p;
21854 const char *q;
21855 size_t len;
21857 q = (const char *) strchr (name, '$');
21859 if (q == 0 || q == name)
21860 return name;
21862 len = strlen (name);
21863 strip = XALLOCAVEC (char, len + 1);
21864 strcpy (strip, name);
21865 p = strip + (q - name);
21866 while (p)
21868 *p = '_';
21869 p = strchr (p + 1, '$');
21872 return ggc_alloc_string (strip, len);
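/* Examples (illustrative): "tls$local" -> "tls_local",
   "a$b$c" -> "a_b_c".  A name beginning with '$' is returned
   untouched, since then q == name above.  */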
21874 #endif
21876 void
21877 rs6000_output_symbol_ref (FILE *file, rtx x)
21879 /* Currently C++ toc references to vtables can be emitted before it
21880 is decided whether the vtable is public or private. If this is
21881 the case, then the linker will eventually complain that there is
21882 a reference to an unknown section. Thus, for vtables only,
21883 we emit the TOC reference to reference the symbol and not the
21884 section. */
21885 const char *name = XSTR (x, 0);
21887 if (VTABLE_NAME_P (name))
21889 RS6000_OUTPUT_BASENAME (file, name);
21891 else
21892 assemble_name (file, name);
21895 /* Output a TOC entry. We derive the entry name from what is being
21896 written. */
21898 void
21899 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
21901 char buf[256];
21902 const char *name = buf;
21903 rtx base = x;
21904 HOST_WIDE_INT offset = 0;
21906 gcc_assert (!TARGET_NO_TOC);
21908 /* When the linker won't eliminate them, don't output duplicate
21909 TOC entries (this happens on AIX if there is any kind of TOC,
21910 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
21911 CODE_LABELs. */
21912 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
21914 struct toc_hash_struct *h;
21915 void * * found;
21917 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
21918 time because GGC is not initialized at that point. */
21919 if (toc_hash_table == NULL)
21920 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
21921 toc_hash_eq, NULL);
21923 h = ggc_alloc_toc_hash_struct ();
21924 h->key = x;
21925 h->key_mode = mode;
21926 h->labelno = labelno;
21928 found = htab_find_slot (toc_hash_table, h, INSERT);
21929 if (*found == NULL)
21930 *found = h;
21931 else /* This is indeed a duplicate.
21932 Set this label equal to that label. */
21934 fputs ("\t.set ", file);
21935 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21936 fprintf (file, "%d,", labelno);
21937 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21938 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
21939 found)->labelno));
21940 return;
21944 /* If we're going to put a double constant in the TOC, make sure it's
21945 aligned properly when strict alignment is on. */
21946 if (GET_CODE (x) == CONST_DOUBLE
21947 && STRICT_ALIGNMENT
21948 && GET_MODE_BITSIZE (mode) >= 64
21949 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
21950 ASM_OUTPUT_ALIGN (file, 3);
21953 (*targetm.asm_out.internal_label) (file, "LC", labelno);
21955 /* Handle FP constants specially. Note that if we have a minimal
21956 TOC, things we put here aren't actually in the TOC, so we can allow
21957 FP constants. */
21958 if (GET_CODE (x) == CONST_DOUBLE
21959 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
21961 REAL_VALUE_TYPE rv;
21962 long k[4];
21964 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21965 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21966 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
21967 else
21968 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
21970 if (TARGET_64BIT)
21972 if (TARGET_MINIMAL_TOC)
21973 fputs (DOUBLE_INT_ASM_OP, file);
21974 else
21975 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21976 k[0] & 0xffffffff, k[1] & 0xffffffff,
21977 k[2] & 0xffffffff, k[3] & 0xffffffff);
21978 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
21979 k[0] & 0xffffffff, k[1] & 0xffffffff,
21980 k[2] & 0xffffffff, k[3] & 0xffffffff);
21981 return;
21983 else
21985 if (TARGET_MINIMAL_TOC)
21986 fputs ("\t.long ", file);
21987 else
21988 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21989 k[0] & 0xffffffff, k[1] & 0xffffffff,
21990 k[2] & 0xffffffff, k[3] & 0xffffffff);
21991 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
21992 k[0] & 0xffffffff, k[1] & 0xffffffff,
21993 k[2] & 0xffffffff, k[3] & 0xffffffff);
21994 return;
21997 else if (GET_CODE (x) == CONST_DOUBLE
21998 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
22000 REAL_VALUE_TYPE rv;
22001 long k[2];
22003 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22005 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22006 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
22007 else
22008 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
22010 if (TARGET_64BIT)
22012 if (TARGET_MINIMAL_TOC)
22013 fputs (DOUBLE_INT_ASM_OP, file);
22014 else
22015 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22016 k[0] & 0xffffffff, k[1] & 0xffffffff);
22017 fprintf (file, "0x%lx%08lx\n",
22018 k[0] & 0xffffffff, k[1] & 0xffffffff);
22019 return;
22021 else
22023 if (TARGET_MINIMAL_TOC)
22024 fputs ("\t.long ", file);
22025 else
22026 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22027 k[0] & 0xffffffff, k[1] & 0xffffffff);
22028 fprintf (file, "0x%lx,0x%lx\n",
22029 k[0] & 0xffffffff, k[1] & 0xffffffff);
22030 return;
22033 else if (GET_CODE (x) == CONST_DOUBLE
22034 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
22036 REAL_VALUE_TYPE rv;
22037 long l;
22039 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22040 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22041 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
22042 else
22043 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
22045 if (TARGET_64BIT)
22047 if (TARGET_MINIMAL_TOC)
22048 fputs (DOUBLE_INT_ASM_OP, file);
22049 else
22050 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22051 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
22052 return;
22054 else
22056 if (TARGET_MINIMAL_TOC)
22057 fputs ("\t.long ", file);
22058 else
22059 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22060 fprintf (file, "0x%lx\n", l & 0xffffffff);
22061 return;
22064 else if (GET_MODE (x) == VOIDmode
22065 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
22067 unsigned HOST_WIDE_INT low;
22068 HOST_WIDE_INT high;
22070 if (GET_CODE (x) == CONST_DOUBLE)
22072 low = CONST_DOUBLE_LOW (x);
22073 high = CONST_DOUBLE_HIGH (x);
22075 else
22076 #if HOST_BITS_PER_WIDE_INT == 32
22078 low = INTVAL (x);
22079 high = (low & 0x80000000) ? ~0 : 0;
22081 #else
22083 low = INTVAL (x) & 0xffffffff;
22084 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
22086 #endif
22088 /* TOC entries are always Pmode-sized, but since this
22089 is a big-endian machine, if we're putting smaller
22090 integer constants in the TOC we have to pad them.
22091 (This is still a win over putting the constants in
22092 a separate constant pool, because then we'd have
22093 to have both a TOC entry _and_ the actual constant.)
22095 For a 32-bit target, CONST_INT values are loaded and shifted
22096 entirely within `low' and can be stored in one TOC entry. */
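/* For example, a 32-bit SImode constant on a 64-bit target is shifted
   left by POINTER_SIZE - 32 = 32 bits below, so that it occupies the
   most-significant half of the 64-bit TOC word.  */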
22098 /* It would be easy to make this work, but it doesn't now. */
22099 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
22101 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
22103 #if HOST_BITS_PER_WIDE_INT == 32
22104 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
22105 POINTER_SIZE, &low, &high, 0);
22106 #else
22107 low |= high << 32;
22108 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
22109 high = (HOST_WIDE_INT) low >> 32;
22110 low &= 0xffffffff;
22111 #endif
22114 if (TARGET_64BIT)
22116 if (TARGET_MINIMAL_TOC)
22117 fputs (DOUBLE_INT_ASM_OP, file);
22118 else
22119 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22120 (long) high & 0xffffffff, (long) low & 0xffffffff);
22121 fprintf (file, "0x%lx%08lx\n",
22122 (long) high & 0xffffffff, (long) low & 0xffffffff);
22123 return;
22125 else
22127 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
22129 if (TARGET_MINIMAL_TOC)
22130 fputs ("\t.long ", file);
22131 else
22132 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22133 (long) high & 0xffffffff, (long) low & 0xffffffff);
22134 fprintf (file, "0x%lx,0x%lx\n",
22135 (long) high & 0xffffffff, (long) low & 0xffffffff);
22137 else
22139 if (TARGET_MINIMAL_TOC)
22140 fputs ("\t.long ", file);
22141 else
22142 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
22143 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
22145 return;
22149 if (GET_CODE (x) == CONST)
22151 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
22152 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
22154 base = XEXP (XEXP (x, 0), 0);
22155 offset = INTVAL (XEXP (XEXP (x, 0), 1));
22158 switch (GET_CODE (base))
22160 case SYMBOL_REF:
22161 name = XSTR (base, 0);
22162 break;
22164 case LABEL_REF:
22165 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
22166 CODE_LABEL_NUMBER (XEXP (base, 0)));
22167 break;
22169 case CODE_LABEL:
22170 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
22171 break;
22173 default:
22174 gcc_unreachable ();
22177 if (TARGET_MINIMAL_TOC)
22178 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
22179 else
22181 fputs ("\t.tc ", file);
22182 RS6000_OUTPUT_BASENAME (file, name);
22184 if (offset < 0)
22185 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
22186 else if (offset)
22187 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
22189 fputs ("[TC],", file);
22192 /* Currently C++ TOC references to vtables can be emitted before it
22193 is decided whether the vtable is public or private. If this is
22194 the case, then the linker will eventually complain that there is
22195 a TOC reference to an unknown section. Thus, for vtables only,
22196 we emit the TOC reference to reference the symbol and not the
22197 section. */
22198 if (VTABLE_NAME_P (name))
22200 RS6000_OUTPUT_BASENAME (file, name);
22201 if (offset < 0)
22202 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
22203 else if (offset > 0)
22204 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
22206 else
22207 output_addr_const (file, x);
22208 putc ('\n', file);
22211 /* Output an assembler pseudo-op to write an ASCII string of N characters
22212 starting at P to FILE.
22214 On the RS/6000, we have to do this using the .byte operation and
22215 write out special characters outside the quoted string.
22216 Also, the assembler is broken; very long strings are truncated,
22217 so we must artificially break them up early. */
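/* For example, output_ascii (file, "ab\"c\n", 5) emits roughly:

	.byte "ab""c"
	.byte 10

   printable characters go into a quoted .byte string (with '"'
   doubled), while unprintable ones are written as decimal values.  */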
22219 void
22220 output_ascii (FILE *file, const char *p, int n)
22222 char c;
22223 int i, count_string;
22224 const char *for_string = "\t.byte \"";
22225 const char *for_decimal = "\t.byte ";
22226 const char *to_close = NULL;
22228 count_string = 0;
22229 for (i = 0; i < n; i++)
22231 c = *p++;
22232 if (c >= ' ' && c < 0177)
22234 if (for_string)
22235 fputs (for_string, file);
22236 putc (c, file);
22238 /* Write two quotes to get one. */
22239 if (c == '"')
22241 putc (c, file);
22242 ++count_string;
22245 for_string = NULL;
22246 for_decimal = "\"\n\t.byte ";
22247 to_close = "\"\n";
22248 ++count_string;
22250 if (count_string >= 512)
22252 fputs (to_close, file);
22254 for_string = "\t.byte \"";
22255 for_decimal = "\t.byte ";
22256 to_close = NULL;
22257 count_string = 0;
22260 else
22262 if (for_decimal)
22263 fputs (for_decimal, file);
22264 fprintf (file, "%d", c);
22266 for_string = "\n\t.byte \"";
22267 for_decimal = ", ";
22268 to_close = "\n";
22269 count_string = 0;
22273 /* Now close the string if we have written one. Then end the line. */
22274 if (to_close)
22275 fputs (to_close, file);
22278 /* Generate a unique section name for FILENAME for a section type
22279 represented by SECTION_DESC. Output goes into BUF.
22281 SECTION_DESC can be any string, as long as it is different for each
22282 possible section type.
22284 We name the section in the same manner as xlc. The name begins with an
22285 underscore followed by the filename (after stripping any leading directory
22286 names) with the last period replaced by the string SECTION_DESC. If
22287 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22288 the name. */
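/* For example, FILENAME "src/foo.c" with SECTION_DESC "bss" yields
   "_foobss": the directory part and the suffix after the last period
   are dropped, and non-alphanumeric characters are skipped.  */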
22290 void
22291 rs6000_gen_section_name (char **buf, const char *filename,
22292 const char *section_desc)
22294 const char *q, *after_last_slash, *last_period = 0;
22295 char *p;
22296 int len;
22298 after_last_slash = filename;
22299 for (q = filename; *q; q++)
22301 if (*q == '/')
22302 after_last_slash = q + 1;
22303 else if (*q == '.')
22304 last_period = q;
22307 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22308 *buf = (char *) xmalloc (len);
22310 p = *buf;
22311 *p++ = '_';
22313 for (q = after_last_slash; *q; q++)
22315 if (q == last_period)
22317 strcpy (p, section_desc);
22318 p += strlen (section_desc);
22319 break;
22322 else if (ISALNUM (*q))
22323 *p++ = *q;
22326 if (last_period == 0)
22327 strcpy (p, section_desc);
22328 else
22329 *p = '\0';
22332 /* Emit profile function. */
22334 void
22335 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22337 /* Non-standard profiling for kernels, which just saves LR then calls
22338 _mcount without worrying about arg saves. The idea is to change
22339 the function prologue as little as possible as it isn't easy to
22340 account for arg save/restore code added just for _mcount. */
22341 if (TARGET_PROFILE_KERNEL)
22342 return;
22344 if (DEFAULT_ABI == ABI_AIX)
22346 #ifndef NO_PROFILE_COUNTERS
22347 # define NO_PROFILE_COUNTERS 0
22348 #endif
22349 if (NO_PROFILE_COUNTERS)
22350 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22351 LCT_NORMAL, VOIDmode, 0);
22352 else
22354 char buf[30];
22355 const char *label_name;
22356 rtx fun;
22358 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22359 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22360 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22362 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22363 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22366 else if (DEFAULT_ABI == ABI_DARWIN)
22368 const char *mcount_name = RS6000_MCOUNT;
22369 int caller_addr_regno = LR_REGNO;
22371 /* Be conservative and always set this, at least for now. */
22372 crtl->uses_pic_offset_table = 1;
22374 #if TARGET_MACHO
22375 /* For PIC code, set up a stub and collect the caller's address
22376 from r0, which is where the prologue puts it. */
22377 if (MACHOPIC_INDIRECT
22378 && crtl->uses_pic_offset_table)
22379 caller_addr_regno = 0;
22380 #endif
22381 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22382 LCT_NORMAL, VOIDmode, 1,
22383 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22387 /* Write function profiler code. */
22389 void
22390 output_function_profiler (FILE *file, int labelno)
22392 char buf[100];
22394 switch (DEFAULT_ABI)
22396 default:
22397 gcc_unreachable ();
22399 case ABI_V4:
22400 if (!TARGET_32BIT)
22402 warning (0, "no profiling of 64-bit code for this ABI");
22403 return;
22405 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22406 fprintf (file, "\tmflr %s\n", reg_names[0]);
22407 if (NO_PROFILE_COUNTERS)
22409 asm_fprintf (file, "\tstw %s,4(%s)\n",
22410 reg_names[0], reg_names[1]);
22412 else if (TARGET_SECURE_PLT && flag_pic)
22414 if (TARGET_LINK_STACK)
22416 char name[32];
22417 get_ppc476_thunk_name (name);
22418 asm_fprintf (file, "\tbl %s\n", name);
22420 else
22421 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22422 asm_fprintf (file, "\tstw %s,4(%s)\n",
22423 reg_names[0], reg_names[1]);
22424 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22425 asm_fprintf (file, "\taddis %s,%s,",
22426 reg_names[12], reg_names[12]);
22427 assemble_name (file, buf);
22428 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
22429 assemble_name (file, buf);
22430 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22432 else if (flag_pic == 1)
22434 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22435 asm_fprintf (file, "\tstw %s,4(%s)\n",
22436 reg_names[0], reg_names[1]);
22437 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22438 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
22439 assemble_name (file, buf);
22440 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22442 else if (flag_pic > 1)
22444 asm_fprintf (file, "\tstw %s,4(%s)\n",
22445 reg_names[0], reg_names[1]);
22446 /* Now, we need to get the address of the label. */
22447 if (TARGET_LINK_STACK)
22449 char name[32];
22450 get_ppc476_thunk_name (name);
22451 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22452 assemble_name (file, buf);
22453 fputs ("-.\n1:", file);
22454 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22455 asm_fprintf (file, "\taddi %s,%s,4\n",
22456 reg_names[11], reg_names[11]);
22458 else
22460 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22461 assemble_name (file, buf);
22462 fputs ("-.\n1:", file);
22463 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22465 asm_fprintf (file, "\tlwz %s,0(%s)\n",
22466 reg_names[0], reg_names[11]);
22467 asm_fprintf (file, "\tadd %s,%s,%s\n",
22468 reg_names[0], reg_names[0], reg_names[11]);
22470 else
22472 asm_fprintf (file, "\tlis %s,", reg_names[12]);
22473 assemble_name (file, buf);
22474 fputs ("@ha\n", file);
22475 asm_fprintf (file, "\tstw %s,4(%s)\n",
22476 reg_names[0], reg_names[1]);
22477 asm_fprintf (file, "\tla %s,", reg_names[0]);
22478 assemble_name (file, buf);
22479 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
22482 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
22483 fprintf (file, "\tbl %s%s\n",
22484 RS6000_MCOUNT, flag_pic ? "@plt" : "");
22485 break;
22487 case ABI_AIX:
22488 case ABI_DARWIN:
22489 if (!TARGET_PROFILE_KERNEL)
22491 /* Don't do anything, done in output_profile_hook (). */
22493 else
22495 gcc_assert (!TARGET_32BIT);
22497 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
22498 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
22500 if (cfun->static_chain_decl != NULL)
22502 asm_fprintf (file, "\tstd %s,24(%s)\n",
22503 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22504 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22505 asm_fprintf (file, "\tld %s,24(%s)\n",
22506 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22508 else
22509 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22511 break;
22517 /* The following variable value is the last issued insn. */
22519 static rtx last_scheduled_insn;
22521 /* The following variable helps to balance issuing of load and
22522 store instructions */
22524 static int load_store_pendulum;
22526 /* Power4 load update and store update instructions are cracked into a
22527 load or store and an integer insn which are executed in the same cycle.
22528 Branches have their own dispatch slot which does not count against the
22529 GCC issue rate, but it changes the program flow so there are no other
22530 instructions to issue in this cycle. */
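/* For example, on a dispatch-group target (Power4/5) a cracked insn
   consumes two issue slots, so with MORE == 3 the function below
   leaves cached_can_issue_more == 1, while a microcoded insn ends the
   group outright by returning 0.  */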
22532 static int
22533 rs6000_variable_issue_1 (rtx insn, int more)
22535 last_scheduled_insn = insn;
22536 if (GET_CODE (PATTERN (insn)) == USE
22537 || GET_CODE (PATTERN (insn)) == CLOBBER)
22539 cached_can_issue_more = more;
22540 return cached_can_issue_more;
22543 if (insn_terminates_group_p (insn, current_group))
22545 cached_can_issue_more = 0;
22546 return cached_can_issue_more;
22549 /* If the insn has no reservation, but we got here anyway.  */
22550 if (recog_memoized (insn) < 0)
22551 return more;
22553 if (rs6000_sched_groups)
22555 if (is_microcoded_insn (insn))
22556 cached_can_issue_more = 0;
22557 else if (is_cracked_insn (insn))
22558 cached_can_issue_more = more > 2 ? more - 2 : 0;
22559 else
22560 cached_can_issue_more = more - 1;
22562 return cached_can_issue_more;
22565 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
22566 return 0;
22568 cached_can_issue_more = more - 1;
22569 return cached_can_issue_more;
22572 static int
22573 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
22575 int r = rs6000_variable_issue_1 (insn, more);
22576 if (verbose)
22577 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
22578 return r;
22581 /* Adjust the cost of a scheduling dependency. Return the new cost of
22582 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
22584 static int
22585 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22587 enum attr_type attr_type;
22589 if (! recog_memoized (insn))
22590 return 0;
22592 switch (REG_NOTE_KIND (link))
22594 case REG_DEP_TRUE:
22596 /* Data dependency; DEP_INSN writes a register that INSN reads
22597 some cycles later. */
22599 /* Separate a load from a narrower, dependent store. */
22600 if (rs6000_sched_groups
22601 && GET_CODE (PATTERN (insn)) == SET
22602 && GET_CODE (PATTERN (dep_insn)) == SET
22603 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
22604 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
22605 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
22606 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
22607 return cost + 14;
22609 attr_type = get_attr_type (insn);
22611 switch (attr_type)
22613 case TYPE_JMPREG:
22614 /* Tell the first scheduling pass about the latency between
22615 a mtctr and bctr (and mtlr and br/blr). The first
22616 scheduling pass will not know about this latency since
22617 the mtctr instruction, which has the latency associated
22618 to it, will be generated by reload. */
22619 return 4;
22620 case TYPE_BRANCH:
22621 /* Leave some extra cycles between a compare and its
22622 dependent branch, to inhibit expensive mispredicts. */
22623 if ((rs6000_cpu_attr == CPU_PPC603
22624 || rs6000_cpu_attr == CPU_PPC604
22625 || rs6000_cpu_attr == CPU_PPC604E
22626 || rs6000_cpu_attr == CPU_PPC620
22627 || rs6000_cpu_attr == CPU_PPC630
22628 || rs6000_cpu_attr == CPU_PPC750
22629 || rs6000_cpu_attr == CPU_PPC7400
22630 || rs6000_cpu_attr == CPU_PPC7450
22631 || rs6000_cpu_attr == CPU_PPCE5500
22632 || rs6000_cpu_attr == CPU_PPCE6500
22633 || rs6000_cpu_attr == CPU_POWER4
22634 || rs6000_cpu_attr == CPU_POWER5
22635 || rs6000_cpu_attr == CPU_POWER7
22636 || rs6000_cpu_attr == CPU_CELL)
22637 && recog_memoized (dep_insn)
22638 && (INSN_CODE (dep_insn) >= 0))
22640 switch (get_attr_type (dep_insn))
22642 case TYPE_CMP:
22643 case TYPE_COMPARE:
22644 case TYPE_DELAYED_COMPARE:
22645 case TYPE_IMUL_COMPARE:
22646 case TYPE_LMUL_COMPARE:
22647 case TYPE_FPCOMPARE:
22648 case TYPE_CR_LOGICAL:
22649 case TYPE_DELAYED_CR:
22650 return cost + 2;
22651 default:
22652 break;
22654 break;
22656 case TYPE_STORE:
22657 case TYPE_STORE_U:
22658 case TYPE_STORE_UX:
22659 case TYPE_FPSTORE:
22660 case TYPE_FPSTORE_U:
22661 case TYPE_FPSTORE_UX:
22662 if ((rs6000_cpu == PROCESSOR_POWER6)
22663 && recog_memoized (dep_insn)
22664 && (INSN_CODE (dep_insn) >= 0))
22667 if (GET_CODE (PATTERN (insn)) != SET)
22668 /* If this happens, we have to extend this to schedule
22669 optimally. Return default for now. */
22670 return cost;
22672 /* Adjust the cost for the case where the value written
22673 by a fixed point operation is used as the address
22674 gen value on a store. */
22675 switch (get_attr_type (dep_insn))
22677 case TYPE_LOAD:
22678 case TYPE_LOAD_U:
22679 case TYPE_LOAD_UX:
22680 case TYPE_CNTLZ:
22682 if (! store_data_bypass_p (dep_insn, insn))
22683 return 4;
22684 break;
22686 case TYPE_LOAD_EXT:
22687 case TYPE_LOAD_EXT_U:
22688 case TYPE_LOAD_EXT_UX:
22689 case TYPE_VAR_SHIFT_ROTATE:
22690 case TYPE_VAR_DELAYED_COMPARE:
22692 if (! store_data_bypass_p (dep_insn, insn))
22693 return 6;
22694 break;
22696 case TYPE_INTEGER:
22697 case TYPE_COMPARE:
22698 case TYPE_FAST_COMPARE:
22699 case TYPE_EXTS:
22700 case TYPE_SHIFT:
22701 case TYPE_INSERT_WORD:
22702 case TYPE_INSERT_DWORD:
22703 case TYPE_FPLOAD_U:
22704 case TYPE_FPLOAD_UX:
22705 case TYPE_STORE_U:
22706 case TYPE_STORE_UX:
22707 case TYPE_FPSTORE_U:
22708 case TYPE_FPSTORE_UX:
22710 if (! store_data_bypass_p (dep_insn, insn))
22711 return 3;
22712 break;
22714 case TYPE_IMUL:
22715 case TYPE_IMUL2:
22716 case TYPE_IMUL3:
22717 case TYPE_LMUL:
22718 case TYPE_IMUL_COMPARE:
22719 case TYPE_LMUL_COMPARE:
22721 if (! store_data_bypass_p (dep_insn, insn))
22722 return 17;
22723 break;
22725 case TYPE_IDIV:
22727 if (! store_data_bypass_p (dep_insn, insn))
22728 return 45;
22729 break;
22731 case TYPE_LDIV:
22733 if (! store_data_bypass_p (dep_insn, insn))
22734 return 57;
22735 break;
22737 default:
22738 break;
22741 break;
22743 case TYPE_LOAD:
22744 case TYPE_LOAD_U:
22745 case TYPE_LOAD_UX:
22746 case TYPE_LOAD_EXT:
22747 case TYPE_LOAD_EXT_U:
22748 case TYPE_LOAD_EXT_UX:
22749 if ((rs6000_cpu == PROCESSOR_POWER6)
22750 && recog_memoized (dep_insn)
22751 && (INSN_CODE (dep_insn) >= 0))
22754 /* Adjust the cost for the case where the value written
22755 by a fixed point instruction is used within the address
22756 gen portion of a subsequent load(u)(x) */
22757 switch (get_attr_type (dep_insn))
22759 case TYPE_LOAD:
22760 case TYPE_LOAD_U:
22761 case TYPE_LOAD_UX:
22762 case TYPE_CNTLZ:
22764 if (set_to_load_agen (dep_insn, insn))
22765 return 4;
22766 break;
22768 case TYPE_LOAD_EXT:
22769 case TYPE_LOAD_EXT_U:
22770 case TYPE_LOAD_EXT_UX:
22771 case TYPE_VAR_SHIFT_ROTATE:
22772 case TYPE_VAR_DELAYED_COMPARE:
22774 if (set_to_load_agen (dep_insn, insn))
22775 return 6;
22776 break;
22778 case TYPE_INTEGER:
22779 case TYPE_COMPARE:
22780 case TYPE_FAST_COMPARE:
22781 case TYPE_EXTS:
22782 case TYPE_SHIFT:
22783 case TYPE_INSERT_WORD:
22784 case TYPE_INSERT_DWORD:
22785 case TYPE_FPLOAD_U:
22786 case TYPE_FPLOAD_UX:
22787 case TYPE_STORE_U:
22788 case TYPE_STORE_UX:
22789 case TYPE_FPSTORE_U:
22790 case TYPE_FPSTORE_UX:
22792 if (set_to_load_agen (dep_insn, insn))
22793 return 3;
22794 break;
22796 case TYPE_IMUL:
22797 case TYPE_IMUL2:
22798 case TYPE_IMUL3:
22799 case TYPE_LMUL:
22800 case TYPE_IMUL_COMPARE:
22801 case TYPE_LMUL_COMPARE:
22803 if (set_to_load_agen (dep_insn, insn))
22804 return 17;
22805 break;
22807 case TYPE_IDIV:
22809 if (set_to_load_agen (dep_insn, insn))
22810 return 45;
22811 break;
22813 case TYPE_LDIV:
22815 if (set_to_load_agen (dep_insn, insn))
22816 return 57;
22817 break;
22819 default:
22820 break;
22823 break;
22825 case TYPE_FPLOAD:
22826 if ((rs6000_cpu == PROCESSOR_POWER6)
22827 && recog_memoized (dep_insn)
22828 && (INSN_CODE (dep_insn) >= 0)
22829 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
22830 return 2;
22832 default:
22833 break;
22836 /* Fall out to return default cost. */
22838 break;
22840 case REG_DEP_OUTPUT:
22841 /* Output dependency; DEP_INSN writes a register that INSN writes some
22842 cycles later. */
22843 if ((rs6000_cpu == PROCESSOR_POWER6)
22844 && recog_memoized (dep_insn)
22845 && (INSN_CODE (dep_insn) >= 0))
22847 attr_type = get_attr_type (insn);
22849 switch (attr_type)
22851 case TYPE_FP:
22852 if (get_attr_type (dep_insn) == TYPE_FP)
22853 return 1;
22854 break;
22855 case TYPE_FPLOAD:
22856 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
22857 return 2;
22858 break;
22859 default:
22860 break;
22863 case REG_DEP_ANTI:
22864 /* Anti dependency; DEP_INSN reads a register that INSN writes some
22865 cycles later. */
22866 return 0;
22868 default:
22869 gcc_unreachable ();
22872 return cost;
22875 /* Debug version of rs6000_adjust_cost. */
22877 static int
22878 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22880 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
22882 if (ret != cost)
22884 const char *dep;
22886 switch (REG_NOTE_KIND (link))
22888 default: dep = "unknown dependency"; break;
22889 case REG_DEP_TRUE: dep = "data dependency"; break;
22890 case REG_DEP_OUTPUT: dep = "output dependency"; break;
22891 case REG_DEP_ANTI: dep = "anti dependency"; break;
22894 fprintf (stderr,
22895 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
22896 "%s, insn:\n", ret, cost, dep);
22898 debug_rtx (insn);
22901 return ret;
22904 /* The function returns true if INSN is microcoded.
22905 Return false otherwise. */
22907 static bool
22908 is_microcoded_insn (rtx insn)
22910 if (!insn || !NONDEBUG_INSN_P (insn)
22911 || GET_CODE (PATTERN (insn)) == USE
22912 || GET_CODE (PATTERN (insn)) == CLOBBER)
22913 return false;
22915 if (rs6000_cpu_attr == CPU_CELL)
22916 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
22918 if (rs6000_sched_groups)
22920 enum attr_type type = get_attr_type (insn);
22921 if (type == TYPE_LOAD_EXT_U
22922 || type == TYPE_LOAD_EXT_UX
22923 || type == TYPE_LOAD_UX
22924 || type == TYPE_STORE_UX
22925 || type == TYPE_MFCR)
22926 return true;
22929 return false;
22932 /* The function returns true if INSN is cracked into 2 instructions
22933 by the processor (and therefore occupies 2 issue slots). */
22935 static bool
22936 is_cracked_insn (rtx insn)
22938 if (!insn || !NONDEBUG_INSN_P (insn)
22939 || GET_CODE (PATTERN (insn)) == USE
22940 || GET_CODE (PATTERN (insn)) == CLOBBER)
22941 return false;
22943 if (rs6000_sched_groups)
22945 enum attr_type type = get_attr_type (insn);
22946 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
22947 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
22948 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
22949 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
22950 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
22951 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
22952 || type == TYPE_IDIV || type == TYPE_LDIV
22953 || type == TYPE_INSERT_WORD)
22954 return true;
22957 return false;
22960 /* The function returns true if INSN can be issued only from
22961 the branch slot. */
22963 static bool
22964 is_branch_slot_insn (rtx insn)
22966 if (!insn || !NONDEBUG_INSN_P (insn)
22967 || GET_CODE (PATTERN (insn)) == USE
22968 || GET_CODE (PATTERN (insn)) == CLOBBER)
22969 return false;
22971 if (rs6000_sched_groups)
22973 enum attr_type type = get_attr_type (insn);
22974 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
22975 return true;
22976 return false;
22979 return false;
22982 /* The function returns true if OUT_INSN sets a value that is
22983 used in the address generation computation of IN_INSN. */
22984 static bool
22985 set_to_load_agen (rtx out_insn, rtx in_insn)
22987 rtx out_set, in_set;
22989 /* For performance reasons, only handle the simple case where
22990 both insns are a single_set. */
22991 out_set = single_set (out_insn);
22992 if (out_set)
22994 in_set = single_set (in_insn);
22995 if (in_set)
22996 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
22999 return false;
23002 /* Try to determine base/offset/size parts of the given MEM.
23003 Return true if successful, false if any of the values couldn't
23004 be determined.
23006 This function only looks for REG or REG+CONST address forms.
23007 REG+REG address form will return false. */
23009 static bool
23010 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
23011 HOST_WIDE_INT *size)
23013 rtx addr_rtx;
23014 if (MEM_SIZE_KNOWN_P (mem))
23015 *size = MEM_SIZE (mem);
23016 else
23017 return false;
23019 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
23020 addr_rtx = XEXP (XEXP (mem, 0), 1);
23021 else
23022 addr_rtx = (XEXP (mem, 0));
23024 if (GET_CODE (addr_rtx) == REG)
23026 *base = addr_rtx;
23027 *offset = 0;
23029 else if (GET_CODE (addr_rtx) == PLUS
23030 && CONST_INT_P (XEXP (addr_rtx, 1)))
23032 *base = XEXP (addr_rtx, 0);
23033 *offset = INTVAL (XEXP (addr_rtx, 1));
23035 else
23036 return false;
23038 return true;
23041 /* Return true if the target storage location of MEM1 is adjacent
23042 to the target storage location of MEM2. */
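/* For example, an 8-byte store at 16(r9) is adjacent to one at 24(r9)
   (16 + 8 == 24); accesses based on different registers are never
   treated as adjacent, even if they happen to alias.  */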
23045 static bool
23046 adjacent_mem_locations (rtx mem1, rtx mem2)
23048 rtx reg1, reg2;
23049 HOST_WIDE_INT off1, size1, off2, size2;
23051 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23052 && get_memref_parts (mem2, &reg2, &off2, &size2))
23053 return ((REGNO (reg1) == REGNO (reg2))
23054 && ((off1 + size1 == off2)
23055 || (off2 + size2 == off1)));
23057 return false;
23060 /* This function returns true if it can be determined that the two MEM
23061 locations overlap by at least 1 byte based on base reg/offset/size. */
23063 static bool
23064 mem_locations_overlap (rtx mem1, rtx mem2)
23066 rtx reg1, reg2;
23067 HOST_WIDE_INT off1, size1, off2, size2;
23069 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23070 && get_memref_parts (mem2, &reg2, &off2, &size2))
23071 return ((REGNO (reg1) == REGNO (reg2))
23072 && (((off1 <= off2) && (off1 + size1 > off2))
23073 || ((off2 <= off1) && (off2 + size2 > off1))));
23075 return false;
23078 /* A C statement (sans semicolon) to update the integer scheduling
23079 priority INSN_PRIORITY (INSN). Increase the priority to execute the
23080 INSN earlier, reduce the priority to execute INSN later. Do not
23081 define this macro if you do not need to adjust the scheduling
23082 priorities of insns. */
23084 static int
23085 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
23087 rtx load_mem, str_mem;
23088 /* On machines (like the 750) which have asymmetric integer units,
23089 where one integer unit can do multiplies and divides and the other
23090 can't, reduce the priority of multiply/divide so it is scheduled
23091 after other integer operations. */
23093 #if 0
23094 if (! INSN_P (insn))
23095 return priority;
23097 if (GET_CODE (PATTERN (insn)) == USE)
23098 return priority;
23100 switch (rs6000_cpu_attr) {
23101 case CPU_PPC750:
23102 switch (get_attr_type (insn))
23104 default:
23105 break;
23107 case TYPE_IMUL:
23108 case TYPE_IDIV:
23109 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
23110 priority, priority);
23111 if (priority >= 0 && priority < 0x01000000)
23112 priority >>= 3;
23113 break;
23116 #endif
23118 if (insn_must_be_first_in_group (insn)
23119 && reload_completed
23120 && current_sched_info->sched_max_insns_priority
23121 && rs6000_sched_restricted_insns_priority)
23124 /* Prioritize insns that can be dispatched only in the first
23125 dispatch slot. */
23126 if (rs6000_sched_restricted_insns_priority == 1)
23127 /* Attach highest priority to insn. This means that in
23128 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
23129 precede 'priority' (critical path) considerations. */
23130 return current_sched_info->sched_max_insns_priority;
23131 else if (rs6000_sched_restricted_insns_priority == 2)
23132 /* Increase priority of insn by a minimal amount. This means that in
23133 haifa-sched.c:ready_sort(), only 'priority' (critical path)
23134 considerations precede dispatch-slot restriction considerations. */
23135 return (priority + 1);
23138 if (rs6000_cpu == PROCESSOR_POWER6
23139 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
23140 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
23141 /* Attach highest priority to insn if the scheduler has just issued two
23142 stores and this instruction is a load, or two loads and this instruction
23143 is a store. Power6 wants loads and stores scheduled alternately
23144 when possible. */
23145 return current_sched_info->sched_max_insns_priority;
23147 return priority;
23150 /* Return true if the instruction is nonpipelined on the Cell. */
23151 static bool
23152 is_nonpipeline_insn (rtx insn)
23154 enum attr_type type;
23155 if (!insn || !NONDEBUG_INSN_P (insn)
23156 || GET_CODE (PATTERN (insn)) == USE
23157 || GET_CODE (PATTERN (insn)) == CLOBBER)
23158 return false;
23160 type = get_attr_type (insn);
23161 if (type == TYPE_IMUL
23162 || type == TYPE_IMUL2
23163 || type == TYPE_IMUL3
23164 || type == TYPE_LMUL
23165 || type == TYPE_IDIV
23166 || type == TYPE_LDIV
23167 || type == TYPE_SDIV
23168 || type == TYPE_DDIV
23169 || type == TYPE_SSQRT
23170 || type == TYPE_DSQRT
23171 || type == TYPE_MFCR
23172 || type == TYPE_MFCRF
23173 || type == TYPE_MFJMPR)
23175 return true;
23177 return false;
23181 /* Return how many instructions the machine can issue per cycle. */
23183 static int
23184 rs6000_issue_rate (void)
23186 /* Unless scheduling for register pressure, use issue rate of 1 for
23187 first scheduling pass to decrease degradation. */
23188 if (!reload_completed && !flag_sched_pressure)
23189 return 1;
23191 switch (rs6000_cpu_attr) {
23192 case CPU_RS64A:
23193 case CPU_PPC601: /* ? */
23194 case CPU_PPC7450:
23195 return 3;
23196 case CPU_PPC440:
23197 case CPU_PPC603:
23198 case CPU_PPC750:
23199 case CPU_PPC7400:
23200 case CPU_PPC8540:
23201 case CPU_PPC8548:
23202 case CPU_CELL:
23203 case CPU_PPCE300C2:
23204 case CPU_PPCE300C3:
23205 case CPU_PPCE500MC:
23206 case CPU_PPCE500MC64:
23207 case CPU_PPCE5500:
23208 case CPU_PPCE6500:
23209 case CPU_TITAN:
23210 return 2;
23211 case CPU_PPC476:
23212 case CPU_PPC604:
23213 case CPU_PPC604E:
23214 case CPU_PPC620:
23215 case CPU_PPC630:
23216 return 4;
23217 case CPU_POWER4:
23218 case CPU_POWER5:
23219 case CPU_POWER6:
23220 case CPU_POWER7:
23221 return 5;
23222 default:
23223 return 1;
23227 /* Return how many instructions to look ahead for better insn
23228 scheduling. */
23230 static int
23231 rs6000_use_sched_lookahead (void)
23233 switch (rs6000_cpu_attr)
23235 case CPU_PPC8540:
23236 case CPU_PPC8548:
23237 return 4;
23239 case CPU_CELL:
23240 return (reload_completed ? 8 : 0);
23242 default:
23243 return 0;
23247 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
23248 static int
23249 rs6000_use_sched_lookahead_guard (rtx insn)
23251 if (rs6000_cpu_attr != CPU_CELL)
23252 return 1;
23254 if (insn == NULL_RTX || !INSN_P (insn))
23255 abort ();
23257 if (!reload_completed
23258 || is_nonpipeline_insn (insn)
23259 || is_microcoded_insn (insn))
23260 return 0;
23262 return 1;
23265 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23266 and return true. */
23268 static bool
23269 find_mem_ref (rtx pat, rtx *mem_ref)
23271 const char * fmt;
23272 int i, j;
23274 /* stack_tie does not produce any real memory traffic. */
23275 if (tie_operand (pat, VOIDmode))
23276 return false;
23278 if (GET_CODE (pat) == MEM)
23280 *mem_ref = pat;
23281 return true;
23284 /* Recursively process the pattern. */
23285 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23287 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23289 if (fmt[i] == 'e')
23291 if (find_mem_ref (XEXP (pat, i), mem_ref))
23292 return true;
23294 else if (fmt[i] == 'E')
23295 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23297 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23298 return true;
23302 return false;
23305 /* Determine if PAT is a PATTERN of a load insn. */
23307 static bool
23308 is_load_insn1 (rtx pat, rtx *load_mem)
23310 if (pat == NULL_RTX)
23311 return false;
23313 if (GET_CODE (pat) == SET)
23314 return find_mem_ref (SET_SRC (pat), load_mem);
23316 if (GET_CODE (pat) == PARALLEL)
23318 int i;
23320 for (i = 0; i < XVECLEN (pat, 0); i++)
23321 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23322 return true;
23325 return false;
23328 /* Determine if INSN loads from memory. */
23330 static bool
23331 is_load_insn (rtx insn, rtx *load_mem)
23333 if (!insn || !INSN_P (insn))
23334 return false;
23336 if (GET_CODE (insn) == CALL_INSN)
23337 return false;
23339 return is_load_insn1 (PATTERN (insn), load_mem);
23342 /* Determine if PAT is a PATTERN of a store insn. */
23344 static bool
23345 is_store_insn1 (rtx pat, rtx *str_mem)
23347 if (pat == NULL_RTX)
23348 return false;
23350 if (GET_CODE (pat) == SET)
23351 return find_mem_ref (SET_DEST (pat), str_mem);
23353 if (GET_CODE (pat) == PARALLEL)
23355 int i;
23357 for (i = 0; i < XVECLEN (pat, 0); i++)
23358 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23359 return true;
23362 return false;
23365 /* Determine if INSN stores to memory. */
23367 static bool
23368 is_store_insn (rtx insn, rtx *str_mem)
23370 if (!insn || !INSN_P (insn))
23371 return false;
23373 return is_store_insn1 (PATTERN (insn), str_mem);
23376 /* Returns whether the dependence between INSN and NEXT is considered
23377 costly by the given target. */
23379 static bool
23380 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23382 rtx insn;
23383 rtx next;
23384 rtx load_mem, str_mem;
23386 /* If the flag is not enabled - no dependence is considered costly;
23387 allow all dependent insns in the same group.
23388 This is the most aggressive option. */
23389 if (rs6000_sched_costly_dep == no_dep_costly)
23390 return false;
23392 /* If the flag is set to 1 - a dependence is always considered costly;
23393 do not allow dependent instructions in the same group.
23394 This is the most conservative option. */
23395 if (rs6000_sched_costly_dep == all_deps_costly)
23396 return true;
23398 insn = DEP_PRO (dep);
23399 next = DEP_CON (dep);
23401 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23402 && is_load_insn (next, &load_mem)
23403 && is_store_insn (insn, &str_mem))
23404 /* Prevent load after store in the same group. */
23405 return true;
23407 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23408 && is_load_insn (next, &load_mem)
23409 && is_store_insn (insn, &str_mem)
23410 && DEP_TYPE (dep) == REG_DEP_TRUE
23411 && mem_locations_overlap(str_mem, load_mem))
23412 /* Prevent load after store in the same group if it is a true
23413 dependence. */
23414 return true;
23416 /* The flag is set to X; dependences with latency >= X are considered costly,
23417 and will not be scheduled in the same group. */
23418 if (rs6000_sched_costly_dep <= max_dep_latency
23419 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23420 return true;
23422 return false;
23425 /* Return the next insn after INSN that is found before TAIL is reached,
23426 skipping any "non-active" insns - insns that will not actually occupy
23427 an issue slot. Return NULL_RTX if such an insn is not found. */
23429 static rtx
23430 get_next_active_insn (rtx insn, rtx tail)
23432 if (insn == NULL_RTX || insn == tail)
23433 return NULL_RTX;
23435 while (1)
23437 insn = NEXT_INSN (insn);
23438 if (insn == NULL_RTX || insn == tail)
23439 return NULL_RTX;
23441 if (CALL_P (insn)
23442 || JUMP_P (insn)
23443 || (NONJUMP_INSN_P (insn)
23444 && GET_CODE (PATTERN (insn)) != USE
23445 && GET_CODE (PATTERN (insn)) != CLOBBER
23446 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23447 break;
23449 return insn;
23452 /* We are about to begin issuing insns for this clock cycle. */
23454 static int
23455 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23456 rtx *ready ATTRIBUTE_UNUSED,
23457 int *pn_ready ATTRIBUTE_UNUSED,
23458 int clock_var ATTRIBUTE_UNUSED)
23460 int n_ready = *pn_ready;
23462 if (sched_verbose)
23463 fprintf (dump, "// rs6000_sched_reorder :\n");
23465 /* Reorder the ready list, if the insn about to issue next
23466 is a non-pipelined insn. */
23467 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23469 if (is_nonpipeline_insn (ready[n_ready - 1])
23470 && (recog_memoized (ready[n_ready - 2]) > 0))
23471 /* Simply swap first two insns. */
23473 rtx tmp = ready[n_ready - 1];
23474 ready[n_ready - 1] = ready[n_ready - 2];
23475 ready[n_ready - 2] = tmp;
23479 if (rs6000_cpu == PROCESSOR_POWER6)
23480 load_store_pendulum = 0;
23482 return rs6000_issue_rate ();
23485 /* Like rs6000_sched_reorder, but called after issuing each insn. */
23487 static int
23488 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
23489 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
23491 if (sched_verbose)
23492 fprintf (dump, "// rs6000_sched_reorder2 :\n");
23494 /* For Power6, we need to handle some special cases to try and keep the
23495 store queue from overflowing and triggering expensive flushes.
23497 This code monitors how load and store instructions are being issued
23498 and skews the ready list one way or the other to increase the likelihood
23499 that a desired instruction is issued at the proper time.
23501 A couple of things are done. First, we maintain a "load_store_pendulum"
23502 to track the current state of load/store issue.
23504 - If the pendulum is at zero, then no loads or stores have been
23505 issued in the current cycle so we do nothing.
23507 - If the pendulum is 1, then a single load has been issued in this
23508 cycle and we attempt to locate another load in the ready list to
23509 issue with it.
23511 - If the pendulum is -2, then two stores have already been
23512 issued in this cycle, so we increase the priority of the first load
23513 in the ready list to increase its likelihood of being chosen first
23514 in the next cycle.
23516 - If the pendulum is -1, then a single store has been issued in this
23517 cycle and we attempt to locate another store in the ready list to
23518 issue with it, preferring a store to an adjacent memory location to
23519 facilitate store pairing in the store queue.
23521 - If the pendulum is 2, then two loads have already been
23522 issued in this cycle, so we increase the priority of the first store
23523 in the ready list to increase its likelihood of being chosen first
23524 in the next cycle.
23526 - If the pendulum < -2 or > 2, then do nothing.
23528 Note: This code covers the most common scenarios. There exist non
23529 load/store instructions which make use of the LSU and which
23530 would need to be accounted for to strictly model the behavior
23531 of the machine. Those instructions are currently unaccounted
23532 for to help minimize compile time overhead of this code.  */
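/* For example, after issuing load, load in one cycle the pendulum
   sits at 2, so the code below bumps the priority of the first store
   on the ready list to encourage a store in the next cycle.  */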
23534 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
23536 int pos;
23537 int i;
23538 rtx tmp, load_mem, str_mem;
23540 if (is_store_insn (last_scheduled_insn, &str_mem))
23541 /* Issuing a store, swing the load_store_pendulum to the left */
23542 load_store_pendulum--;
23543 else if (is_load_insn (last_scheduled_insn, &load_mem))
23544 /* Issuing a load, swing the load_store_pendulum to the right */
23545 load_store_pendulum++;
23546 else
23547 return cached_can_issue_more;
23549 /* If the pendulum is balanced, or there is only one instruction on
23550 the ready list, then all is well, so return. */
23551 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
23552 return cached_can_issue_more;
23554 if (load_store_pendulum == 1)
23556 /* A load has been issued in this cycle. Scan the ready list
23557 for another load to issue with it */
23558 pos = *pn_ready-1;
23560 while (pos >= 0)
23562 if (is_load_insn (ready[pos], &load_mem))
23564 /* Found a load. Move it to the head of the ready list,
23565 and adjust its priority so that it is more likely to
23566 stay there */
23567 tmp = ready[pos];
23568 for (i=pos; i<*pn_ready-1; i++)
23569 ready[i] = ready[i + 1];
23570 ready[*pn_ready-1] = tmp;
23572 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23573 INSN_PRIORITY (tmp)++;
23574 break;
23576 pos--;
23579 else if (load_store_pendulum == -2)
23581 /* Two stores have been issued in this cycle. Increase the
23582 priority of the first load in the ready list to favor it for
23583 issuing in the next cycle. */
23584 pos = *pn_ready-1;
23586 while (pos >= 0)
23588 if (is_load_insn (ready[pos], &load_mem)
23589 && !sel_sched_p ()
23590 && INSN_PRIORITY_KNOWN (ready[pos]))
23592 INSN_PRIORITY (ready[pos])++;
23594 /* Adjust the pendulum to account for the fact that a load
23595 was found and increased in priority. This is to prevent
23596 increasing the priority of multiple loads */
23597 load_store_pendulum--;
23599 break;
23601 pos--;
23604 else if (load_store_pendulum == -1)
23606 /* A store has been issued in this cycle. Scan the ready list for
23607 another store to issue with it, preferring a store to an adjacent
23608 memory location */
23609 int first_store_pos = -1;
23611 pos = *pn_ready-1;
23613 while (pos >= 0)
23615 if (is_store_insn (ready[pos], &str_mem))
23617 rtx str_mem2;
23618 /* Maintain the index of the first store found on the
23619 list */
23620 if (first_store_pos == -1)
23621 first_store_pos = pos;
23623 if (is_store_insn (last_scheduled_insn, &str_mem2)
23624 && adjacent_mem_locations (str_mem, str_mem2))
23626 /* Found an adjacent store. Move it to the head of the
23627 ready list, and adjust its priority so that it is
23628 more likely to stay there */
23629 tmp = ready[pos];
23630 for (i=pos; i<*pn_ready-1; i++)
23631 ready[i] = ready[i + 1];
23632 ready[*pn_ready-1] = tmp;
23634 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23635 INSN_PRIORITY (tmp)++;
23637 first_store_pos = -1;
23639 break;
23642 pos--;
23645 if (first_store_pos >= 0)
23647 /* An adjacent store wasn't found, but a non-adjacent store was,
23648 so move the non-adjacent store to the front of the ready
23649 list, and adjust its priority so that it is more likely to
23650 stay there. */
23651 tmp = ready[first_store_pos];
23652 for (i=first_store_pos; i<*pn_ready-1; i++)
23653 ready[i] = ready[i + 1];
23654 ready[*pn_ready-1] = tmp;
23655 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23656 INSN_PRIORITY (tmp)++;
23659 else if (load_store_pendulum == 2)
23661 /* Two loads have been issued in this cycle. Increase the priority
23662 of the first store in the ready list to favor it for issuing in
23663 the next cycle. */
23664 pos = *pn_ready-1;
23666 while (pos >= 0)
23668 if (is_store_insn (ready[pos], &str_mem)
23669 && !sel_sched_p ()
23670 && INSN_PRIORITY_KNOWN (ready[pos]))
23672 INSN_PRIORITY (ready[pos])++;
23674 /* Adjust the pendulum to account for the fact that a store
23675 was found and increased in priority. This is to prevent
23676 increasing the priority of multiple stores */
23677 load_store_pendulum++;
23679 break;
23681 pos--;
23686 return cached_can_issue_more;
23689 /* Return whether the presence of INSN causes a dispatch group termination
23690 of group WHICH_GROUP.
23692 If WHICH_GROUP == current_group, this function will return true if INSN
23693 causes the termination of the current group (i.e., the dispatch group to
23694 which INSN belongs). This means that INSN will be the last insn in the
23695 group it belongs to.
23697 If WHICH_GROUP == previous_group, this function will return true if INSN
23698 causes the termination of the previous group (i.e., the dispatch group that
23699 precedes the group to which INSN belongs). This means that INSN will be
23700 the first insn in the group it belongs to. */
23702 static bool
23703 insn_terminates_group_p (rtx insn, enum group_termination which_group)
23705 bool first, last;
23707 if (! insn)
23708 return false;
23710 first = insn_must_be_first_in_group (insn);
23711 last = insn_must_be_last_in_group (insn);
23713 if (first && last)
23714 return true;
23716 if (which_group == current_group)
23717 return last;
23718 else if (which_group == previous_group)
23719 return first;
23721 return false;
23725 static bool
23726 insn_must_be_first_in_group (rtx insn)
23728 enum attr_type type;
23730 if (!insn
23731 || GET_CODE (insn) == NOTE
23732 || DEBUG_INSN_P (insn)
23733 || GET_CODE (PATTERN (insn)) == USE
23734 || GET_CODE (PATTERN (insn)) == CLOBBER)
23735 return false;
23737 switch (rs6000_cpu)
23739 case PROCESSOR_POWER5:
23740 if (is_cracked_insn (insn))
23741 return true;
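/* FALLTHRU: POWER5 is also subject to the POWER4 checks below.  */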
23742 case PROCESSOR_POWER4:
23743 if (is_microcoded_insn (insn))
23744 return true;
23746 if (!rs6000_sched_groups)
23747 return false;
23749 type = get_attr_type (insn);
23751 switch (type)
23753 case TYPE_MFCR:
23754 case TYPE_MFCRF:
23755 case TYPE_MTCR:
23756 case TYPE_DELAYED_CR:
23757 case TYPE_CR_LOGICAL:
23758 case TYPE_MTJMPR:
23759 case TYPE_MFJMPR:
23760 case TYPE_IDIV:
23761 case TYPE_LDIV:
23762 case TYPE_LOAD_L:
23763 case TYPE_STORE_C:
23764 case TYPE_ISYNC:
23765 case TYPE_SYNC:
23766 return true;
23767 default:
23768 break;
23770 break;
23771 case PROCESSOR_POWER6:
23772 type = get_attr_type (insn);
23774 switch (type)
23776 case TYPE_INSERT_DWORD:
23777 case TYPE_EXTS:
23778 case TYPE_CNTLZ:
23779 case TYPE_SHIFT:
23780 case TYPE_VAR_SHIFT_ROTATE:
23781 case TYPE_TRAP:
23782 case TYPE_IMUL:
23783 case TYPE_IMUL2:
23784 case TYPE_IMUL3:
23785 case TYPE_LMUL:
23786 case TYPE_IDIV:
23787 case TYPE_INSERT_WORD:
23788 case TYPE_DELAYED_COMPARE:
23789 case TYPE_IMUL_COMPARE:
23790 case TYPE_LMUL_COMPARE:
23791 case TYPE_FPCOMPARE:
23792 case TYPE_MFCR:
23793 case TYPE_MTCR:
23794 case TYPE_MFJMPR:
23795 case TYPE_MTJMPR:
23796 case TYPE_ISYNC:
23797 case TYPE_SYNC:
23798 case TYPE_LOAD_L:
23799 case TYPE_STORE_C:
23800 case TYPE_LOAD_U:
23801 case TYPE_LOAD_UX:
23802 case TYPE_LOAD_EXT_UX:
23803 case TYPE_STORE_U:
23804 case TYPE_STORE_UX:
23805 case TYPE_FPLOAD_U:
23806 case TYPE_FPLOAD_UX:
23807 case TYPE_FPSTORE_U:
23808 case TYPE_FPSTORE_UX:
23809 return true;
23810 default:
23811 break;
23813 break;
23814 case PROCESSOR_POWER7:
23815 type = get_attr_type (insn);
23817 switch (type)
23819 case TYPE_CR_LOGICAL:
23820 case TYPE_MFCR:
23821 case TYPE_MFCRF:
23822 case TYPE_MTCR:
23823 case TYPE_IDIV:
23824 case TYPE_LDIV:
23825 case TYPE_COMPARE:
23826 case TYPE_DELAYED_COMPARE:
23827 case TYPE_VAR_DELAYED_COMPARE:
23828 case TYPE_ISYNC:
23829 case TYPE_LOAD_L:
23830 case TYPE_STORE_C:
23831 case TYPE_LOAD_U:
23832 case TYPE_LOAD_UX:
23833 case TYPE_LOAD_EXT:
23834 case TYPE_LOAD_EXT_U:
23835 case TYPE_LOAD_EXT_UX:
23836 case TYPE_STORE_U:
23837 case TYPE_STORE_UX:
23838 case TYPE_FPLOAD_U:
23839 case TYPE_FPLOAD_UX:
23840 case TYPE_FPSTORE_U:
23841 case TYPE_FPSTORE_UX:
23842 case TYPE_MFJMPR:
23843 case TYPE_MTJMPR:
23844 return true;
23845 default:
23846 break;
23848 break;
23849 default:
23850 break;
23853 return false;
23856 static bool
23857 insn_must_be_last_in_group (rtx insn)
23859 enum attr_type type;
23861 if (!insn
23862 || GET_CODE (insn) == NOTE
23863 || DEBUG_INSN_P (insn)
23864 || GET_CODE (PATTERN (insn)) == USE
23865 || GET_CODE (PATTERN (insn)) == CLOBBER)
23866 return false;
23868 switch (rs6000_cpu) {
23869 case PROCESSOR_POWER4:
23870 case PROCESSOR_POWER5:
23871 if (is_microcoded_insn (insn))
23872 return true;
23874 if (is_branch_slot_insn (insn))
23875 return true;
23877 break;
23878 case PROCESSOR_POWER6:
23879 type = get_attr_type (insn);
23881 switch (type)
23883 case TYPE_EXTS:
23884 case TYPE_CNTLZ:
23885 case TYPE_SHIFT:
23886 case TYPE_VAR_SHIFT_ROTATE:
23887 case TYPE_TRAP:
23888 case TYPE_IMUL:
23889 case TYPE_IMUL2:
23890 case TYPE_IMUL3:
23891 case TYPE_LMUL:
23892 case TYPE_IDIV:
23893 case TYPE_DELAYED_COMPARE:
23894 case TYPE_IMUL_COMPARE:
23895 case TYPE_LMUL_COMPARE:
23896 case TYPE_FPCOMPARE:
23897 case TYPE_MFCR:
23898 case TYPE_MTCR:
23899 case TYPE_MFJMPR:
23900 case TYPE_MTJMPR:
23901 case TYPE_ISYNC:
23902 case TYPE_SYNC:
23903 case TYPE_LOAD_L:
23904 case TYPE_STORE_C:
23905 return true;
23906 default:
23907 break;
23909 break;
23910 case PROCESSOR_POWER7:
23911 type = get_attr_type (insn);
23913 switch (type)
23915 case TYPE_ISYNC:
23916 case TYPE_SYNC:
23917 case TYPE_LOAD_L:
23918 case TYPE_STORE_C:
23919 case TYPE_LOAD_EXT_U:
23920 case TYPE_LOAD_EXT_UX:
23921 case TYPE_STORE_UX:
23922 return true;
23923 default:
23924 break;
23926 break;
23927 default:
23928 break;
23931 return false;
23934 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
23935 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
23937 static bool
23938 is_costly_group (rtx *group_insns, rtx next_insn)
23940 int i;
23941 int issue_rate = rs6000_issue_rate ();
23943 for (i = 0; i < issue_rate; i++)
23945 sd_iterator_def sd_it;
23946 dep_t dep;
23947 rtx insn = group_insns[i];
23949 if (!insn)
23950 continue;
23952 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
23954 rtx next = DEP_CON (dep);
23956 if (next == next_insn
23957 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
23958 return true;
23962 return false;
23965 /* Utility function used by redefine_groups.
23966 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
23967 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
23968 to keep it "far" (in a separate group) from GROUP_INSNS, following
23969 one of these schemes, depending on the value of the flag
23970 -minsert-sched-nops = X:
23971 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
23972 in order to force NEXT_INSN into a separate group.
23973 (2) X < sched_finish_regroup_exact: insert exactly X nops.
23974 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
23975 insertion (has a group just ended, how many vacant issue slots remain in the
23976 last group, and how many dispatch groups were encountered so far). */
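/* For example, with scheme (2) and -minsert-sched-nops = 2, exactly
   two nops are emitted before NEXT_INSN whenever the dependence is
   costly, regardless of how many issue slots remain vacant.  */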
23978 static int
23979 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
23980 rtx next_insn, bool *group_end, int can_issue_more,
23981 int *group_count)
23983 rtx nop;
23984 bool force;
23985 int issue_rate = rs6000_issue_rate ();
23986 bool end = *group_end;
23987 int i;
23989 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
23990 return can_issue_more;
23992 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
23993 return can_issue_more;
23995 force = is_costly_group (group_insns, next_insn);
23996 if (!force)
23997 return can_issue_more;
23999 if (sched_verbose > 6)
24000 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
24001 *group_count ,can_issue_more);
24003 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
24005 if (*group_end)
24006 can_issue_more = 0;
24008 /* Since only a branch can be issued in the last issue_slot, it is
24009 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
24010 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
24011 in this case the last nop will start a new group and the branch
24012 will be forced to the new group. */
24013 if (can_issue_more && !is_branch_slot_insn (next_insn))
24014 can_issue_more--;
24016 /* Power6 and Power7 have special group ending nop. */
24017 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
24019 nop = gen_group_ending_nop ();
24020 emit_insn_before (nop, next_insn);
24021 can_issue_more = 0;
24023 else
24024 while (can_issue_more > 0)
24026 nop = gen_nop ();
24027 emit_insn_before (nop, next_insn);
24028 can_issue_more--;
24031 *group_end = true;
24032 return 0;
24035 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
24037 int n_nops = rs6000_sched_insert_nops;
24039 /* Nops can't be issued from the branch slot, so the effective
24040 issue_rate for nops is 'issue_rate - 1'. */
24041 if (can_issue_more == 0)
24042 can_issue_more = issue_rate;
24043 can_issue_more--;
24044 if (can_issue_more == 0)
24046 can_issue_more = issue_rate - 1;
24047 (*group_count)++;
24048 end = true;
24049 for (i = 0; i < issue_rate; i++)
24051 group_insns[i] = 0;
24055 while (n_nops > 0)
24057 nop = gen_nop ();
24058 emit_insn_before (nop, next_insn);
24059 if (can_issue_more == issue_rate - 1) /* new group begins */
24060 end = false;
24061 can_issue_more--;
24062 if (can_issue_more == 0)
24064 can_issue_more = issue_rate - 1;
24065 (*group_count)++;
24066 end = true;
24067 for (i = 0; i < issue_rate; i++)
24069 group_insns[i] = 0;
24072 n_nops--;
24075 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
24076 can_issue_more++;
24078 /* Is next_insn going to start a new group? */
24079 *group_end
24080 = (end
24081 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24082 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24083 || (can_issue_more < issue_rate &&
24084 insn_terminates_group_p (next_insn, previous_group)));
24085 if (*group_end && end)
24086 (*group_count)--;
24088 if (sched_verbose > 6)
24089 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
24090 *group_count, can_issue_more);
24091 return can_issue_more;
24094 return can_issue_more;
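/* Illustrative sketch (not part of GCC): the nop count chosen by the
   sched_finish_regroup_exact scheme above, pulled out as a standalone
   helper.  The function name and stand-alone framing are hypothetical;
   the logic mirrors the branch vs. non-branch distinction explained in
   the comments above.  Kept inert under #if 0.  */
#if 0
#include <assert.h>

/* Return how many nops must precede the next insn so that it starts a
   new dispatch group.  A branch can still use the last (branch) slot,
   so forcing it out needs a nop in every vacant slot; any other insn
   only needs the remaining non-branch slots filled.  */
static int
nops_to_force_new_group (int can_issue_more, int next_is_branch)
{
  assert (can_issue_more >= 0);
  if (can_issue_more == 0)
    return 0;                 /* The current group is already full.  */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}
#endif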
24097 /* This function tries to synch the dispatch groups that the compiler "sees"
24098 with the dispatch groups that the processor dispatcher is expected to
24099 form in practice. It tries to achieve this synchronization by forcing the
24100 estimated processor grouping on the compiler (as opposed to the function
24101 'pad_groups', which tries to force the scheduler's grouping on the processor).
24103 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
24104 examines the (estimated) dispatch groups that will be formed by the processor
24105 dispatcher. It marks these group boundaries to reflect the estimated
24106 processor grouping, overriding the grouping that the scheduler had marked.
24107 Depending on the value of the flag '-minsert-sched-nops' this function can
24108 force certain insns into separate groups or force a certain distance between
24109 them by inserting nops, for example, if there exists a "costly dependence"
24110 between the insns.
24112 The function estimates the group boundaries that the processor will form as
24113 follows: It keeps track of how many vacant issue slots are available after
24114 each insn. A subsequent insn will start a new group if one of the following
24115 4 cases applies:
24116 - no more vacant issue slots remain in the current dispatch group.
24117 - only the last issue slot, which is the branch slot, is vacant, but the next
24118 insn is not a branch.
24119 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
24120 which means that a cracked insn (which occupies two issue slots) can't be
24121 issued in this group.
24122 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
24123 start a new group. */
24125 static int
24126 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24128 rtx insn, next_insn;
24129 int issue_rate;
24130 int can_issue_more;
24131 int slot, i;
24132 bool group_end;
24133 int group_count = 0;
24134 rtx *group_insns;
24136 /* Initialize. */
24137 issue_rate = rs6000_issue_rate ();
24138 group_insns = XALLOCAVEC (rtx, issue_rate);
24139 for (i = 0; i < issue_rate; i++)
24141 group_insns[i] = 0;
24143 can_issue_more = issue_rate;
24144 slot = 0;
24145 insn = get_next_active_insn (prev_head_insn, tail);
24146 group_end = false;
24148 while (insn != NULL_RTX)
24150 slot = (issue_rate - can_issue_more);
24151 group_insns[slot] = insn;
24152 can_issue_more =
24153 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24154 if (insn_terminates_group_p (insn, current_group))
24155 can_issue_more = 0;
24157 next_insn = get_next_active_insn (insn, tail);
24158 if (next_insn == NULL_RTX)
24159 return group_count + 1;
24161 /* Is next_insn going to start a new group? */
24162 group_end
24163 = (can_issue_more == 0
24164 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24165 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24166 || (can_issue_more < issue_rate &&
24167 insn_terminates_group_p (next_insn, previous_group)));
24169 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
24170 next_insn, &group_end, can_issue_more,
24171 &group_count);
24173 if (group_end)
24175 group_count++;
24176 can_issue_more = 0;
24177 for (i = 0; i < issue_rate; i++)
24179 group_insns[i] = 0;
24183 if (GET_MODE (next_insn) == TImode && can_issue_more)
24184 PUT_MODE (next_insn, VOIDmode);
24185 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
24186 PUT_MODE (next_insn, TImode);
24188 insn = next_insn;
24189 if (can_issue_more == 0)
24190 can_issue_more = issue_rate;
24191 } /* while */
24193 return group_count;
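/* Illustrative sketch (not part of GCC): the four group-boundary tests
   used by redefine_groups and force_new_group, collected into a single
   predicate.  The callback parameters are hypothetical stand-ins for
   is_branch_slot_insn, is_cracked_insn and insn_terminates_group_p.
   Kept inert under #if 0.  */
#if 0
typedef int (*insn_pred) (void *insn);

static int
starts_new_group_p (int can_issue_more, int issue_rate, void *next_insn,
                    insn_pred is_branch, insn_pred is_cracked,
                    insn_pred must_start_group)
{
  return (can_issue_more == 0                                /* no slots left */
          || (can_issue_more == 1 && !is_branch (next_insn)) /* only branch slot */
          || (can_issue_more <= 2 && is_cracked (next_insn)) /* needs 2 slots */
          || (can_issue_more < issue_rate
              && must_start_group (next_insn)));             /* forced boundary */
}
#endif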
24196 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
24197 dispatch group boundaries that the scheduler had marked. Pad with nops
24198 any dispatch groups which have vacant issue slots, in order to force the
24199 scheduler's grouping on the processor dispatcher. The function
24200 returns the number of dispatch groups found. */
24202 static int
24203 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24205 rtx insn, next_insn;
24206 rtx nop;
24207 int issue_rate;
24208 int can_issue_more;
24209 int group_end;
24210 int group_count = 0;
24212 /* Initialize issue_rate. */
24213 issue_rate = rs6000_issue_rate ();
24214 can_issue_more = issue_rate;
24216 insn = get_next_active_insn (prev_head_insn, tail);
24217 next_insn = get_next_active_insn (insn, tail);
24219 while (insn != NULL_RTX)
24221 can_issue_more =
24222 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24224 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24226 if (next_insn == NULL_RTX)
24227 break;
24229 if (group_end)
24231 /* If the scheduler had marked group termination at this location
24232 (between insn and next_insn), and neither insn nor next_insn will
24233 force group termination, pad the group with nops to force group
24234 termination. */
24235 if (can_issue_more
24236 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24237 && !insn_terminates_group_p (insn, current_group)
24238 && !insn_terminates_group_p (next_insn, previous_group))
24240 if (!is_branch_slot_insn (next_insn))
24241 can_issue_more--;
24243 while (can_issue_more)
24245 nop = gen_nop ();
24246 emit_insn_before (nop, next_insn);
24247 can_issue_more--;
24251 can_issue_more = issue_rate;
24252 group_count++;
24255 insn = next_insn;
24256 next_insn = get_next_active_insn (insn, tail);
24259 return group_count;
24262 /* We're beginning a new block. Initialize data structures as necessary. */
24264 static void
24265 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24266 int sched_verbose ATTRIBUTE_UNUSED,
24267 int max_ready ATTRIBUTE_UNUSED)
24269 last_scheduled_insn = NULL_RTX;
24270 load_store_pendulum = 0;
24273 /* The following function is called at the end of scheduling BB.
24274 After reload, it inserts nops to enforce insn group bundling. */
24276 static void
24277 rs6000_sched_finish (FILE *dump, int sched_verbose)
24279 int n_groups;
24281 if (sched_verbose)
24282 fprintf (dump, "=== Finishing schedule.\n");
24284 if (reload_completed && rs6000_sched_groups)
24286 /* Do not run sched_finish hook when selective scheduling enabled. */
24287 if (sel_sched_p ())
24288 return;
24290 if (rs6000_sched_insert_nops == sched_finish_none)
24291 return;
24293 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24294 n_groups = pad_groups (dump, sched_verbose,
24295 current_sched_info->prev_head,
24296 current_sched_info->next_tail);
24297 else
24298 n_groups = redefine_groups (dump, sched_verbose,
24299 current_sched_info->prev_head,
24300 current_sched_info->next_tail);
24302 if (sched_verbose >= 6)
24304 fprintf (dump, "ngroups = %d\n", n_groups);
24305 print_rtl (dump, current_sched_info->prev_head);
24306 fprintf (dump, "Done finish_sched\n");
24311 struct _rs6000_sched_context
24312 {
24313 short cached_can_issue_more;
24314 rtx last_scheduled_insn;
24315 int load_store_pendulum;
24316 };
24318 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24319 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24321 /* Allocate storage for a new scheduling context. */
24322 static void *
24323 rs6000_alloc_sched_context (void)
24325 return xmalloc (sizeof (rs6000_sched_context_def));
24328 /* If CLEAN_P is true, initialize _SC with clean data;
24329 otherwise, initialize it from the global context. */
24330 static void
24331 rs6000_init_sched_context (void *_sc, bool clean_p)
24333 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24335 if (clean_p)
24337 sc->cached_can_issue_more = 0;
24338 sc->last_scheduled_insn = NULL_RTX;
24339 sc->load_store_pendulum = 0;
24341 else
24343 sc->cached_can_issue_more = cached_can_issue_more;
24344 sc->last_scheduled_insn = last_scheduled_insn;
24345 sc->load_store_pendulum = load_store_pendulum;
24349 /* Sets the global scheduling context to the one pointed to by _SC. */
24350 static void
24351 rs6000_set_sched_context (void *_sc)
24353 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24355 gcc_assert (sc != NULL);
24357 cached_can_issue_more = sc->cached_can_issue_more;
24358 last_scheduled_insn = sc->last_scheduled_insn;
24359 load_store_pendulum = sc->load_store_pendulum;
24362 /* Free _SC. */
24363 static void
24364 rs6000_free_sched_context (void *_sc)
24366 gcc_assert (_sc != NULL);
24368 free (_sc);
24372 /* Length in units of the trampoline for entering a nested function. */
24374 int
24375 rs6000_trampoline_size (void)
24377 int ret = 0;
24379 switch (DEFAULT_ABI)
24381 default:
24382 gcc_unreachable ();
24384 case ABI_AIX:
24385 ret = (TARGET_32BIT) ? 12 : 24;
24386 break;
24388 case ABI_DARWIN:
24389 case ABI_V4:
24390 ret = (TARGET_32BIT) ? 40 : 48;
24391 break;
24394 return ret;
24397 /* Emit RTL insns to initialize the variable parts of a trampoline.
24398 FNDECL is the decl of the nested function; the address of its pure
24399 code is taken from its DECL_RTL. CXT is an RTX for the static chain. */
24401 static void
24402 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24404 int regsize = (TARGET_32BIT) ? 4 : 8;
24405 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24406 rtx ctx_reg = force_reg (Pmode, cxt);
24407 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24409 switch (DEFAULT_ABI)
24411 default:
24412 gcc_unreachable ();
24414 /* Under AIX, just build the 3-word function descriptor. */
24415 case ABI_AIX:
24417 rtx fnmem, fn_reg, toc_reg;
24419 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24420 error ("You cannot take the address of a nested function if you use "
24421 "the -mno-pointers-to-nested-functions option.");
24423 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24424 fn_reg = gen_reg_rtx (Pmode);
24425 toc_reg = gen_reg_rtx (Pmode);
24427 /* Macro to shorten the code expansions below. */
24428 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24430 m_tramp = replace_equiv_address (m_tramp, addr);
24432 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24433 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24434 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24435 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24436 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24438 # undef MEM_PLUS
24440 break;
24442 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24443 case ABI_DARWIN:
24444 case ABI_V4:
24445 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24446 LCT_NORMAL, VOIDmode, 4,
24447 addr, Pmode,
24448 GEN_INT (rs6000_trampoline_size ()), SImode,
24449 fnaddr, Pmode,
24450 ctx_reg, Pmode);
24451 break;
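/* Illustrative sketch (not part of GCC): the 3-word AIX function
   descriptor initialized by the ABI_AIX case above, shown as a C struct
   for a 64-bit target (for 32-bit, the words are 4 bytes and the total
   is 12).  The struct name and demo driver are hypothetical.  Kept
   inert under #if 0.  */
#if 0
#include <stdint.h>
#include <stdio.h>

struct aix_func_desc
{
  uint64_t code_addr;     /* word 0: address of the function's code  */
  uint64_t toc_value;     /* word 1: TOC pointer for the callee      */
  uint64_t static_chain;  /* word 2: static chain for nested access  */
};

int
main (void)
{
  /* A trampoline is such a descriptor with the static chain filled in;
     its size matches rs6000_trampoline_size (): 3 * 8 = 24 bytes.  */
  printf ("%zu\n", sizeof (struct aix_func_desc));
  return 0;
}
#endif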
24456 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24457 identifier as an argument, so the front end shouldn't look it up. */
24459 static bool
24460 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24462 return is_attribute_p ("altivec", attr_id);
24465 /* Handle the "altivec" attribute. The attribute may have
24466 arguments as follows:
24468 __attribute__((altivec(vector__)))
24469 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24470 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24472 and may appear more than once (e.g., 'vector bool char') in a
24473 given declaration. */
24475 static tree
24476 rs6000_handle_altivec_attribute (tree *node,
24477 tree name ATTRIBUTE_UNUSED,
24478 tree args,
24479 int flags ATTRIBUTE_UNUSED,
24480 bool *no_add_attrs)
24482 tree type = *node, result = NULL_TREE;
24483 enum machine_mode mode;
24484 int unsigned_p;
24485 char altivec_type
24486 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
24487 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
24488 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
24489 : '?');
24491 while (POINTER_TYPE_P (type)
24492 || TREE_CODE (type) == FUNCTION_TYPE
24493 || TREE_CODE (type) == METHOD_TYPE
24494 || TREE_CODE (type) == ARRAY_TYPE)
24495 type = TREE_TYPE (type);
24497 mode = TYPE_MODE (type);
24499 /* Check for invalid AltiVec type qualifiers. */
24500 if (type == long_double_type_node)
24501 error ("use of %<long double%> in AltiVec types is invalid");
24502 else if (type == boolean_type_node)
24503 error ("use of boolean types in AltiVec types is invalid");
24504 else if (TREE_CODE (type) == COMPLEX_TYPE)
24505 error ("use of %<complex%> in AltiVec types is invalid");
24506 else if (DECIMAL_FLOAT_MODE_P (mode))
24507 error ("use of decimal floating point types in AltiVec types is invalid");
24508 else if (!TARGET_VSX)
24510 if (type == long_unsigned_type_node || type == long_integer_type_node)
24512 if (TARGET_64BIT)
24513 error ("use of %<long%> in AltiVec types is invalid for "
24514 "64-bit code without -mvsx");
24515 else if (rs6000_warn_altivec_long)
24516 warning (0, "use of %<long%> in AltiVec types is deprecated; "
24517 "use %<int%>");
24519 else if (type == long_long_unsigned_type_node
24520 || type == long_long_integer_type_node)
24521 error ("use of %<long long%> in AltiVec types is invalid without "
24522 "-mvsx");
24523 else if (type == double_type_node)
24524 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
24527 switch (altivec_type)
24529 case 'v':
24530 unsigned_p = TYPE_UNSIGNED (type);
24531 switch (mode)
24533 case DImode:
24534 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
24535 break;
24536 case SImode:
24537 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
24538 break;
24539 case HImode:
24540 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
24541 break;
24542 case QImode:
24543 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
24544 break;
24545 case SFmode: result = V4SF_type_node; break;
24546 case DFmode: result = V2DF_type_node; break;
24547 /* If the user says 'vector int bool', we may be handed the 'bool'
24548 attribute _before_ the 'vector' attribute, and so select the
24549 proper type in the 'b' case below. */
24550 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
24551 case V2DImode: case V2DFmode:
24552 result = type;
24553 default: break;
24555 break;
24556 case 'b':
24557 switch (mode)
24559 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
24560 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
24561 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
24562 case QImode: case V16QImode: result = bool_V16QI_type_node;
24563 default: break;
24565 break;
24566 case 'p':
24567 switch (mode)
24569 case V8HImode: result = pixel_V8HI_type_node;
24570 default: break;
24572 default: break;
24575 /* Propagate qualifiers attached to the element type
24576 onto the vector type. */
24577 if (result && result != type && TYPE_QUALS (type))
24578 result = build_qualified_type (result, TYPE_QUALS (type));
24580 *no_add_attrs = true; /* No need to hang on to the attribute. */
24582 if (result)
24583 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
24585 return NULL_TREE;
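/* Illustrative sketch (not part of GCC): how the attribute forms listed
   above correspond to user-visible AltiVec types.  The spellings come
   from the comment before the handler (bool__ attaches to 'unsigned',
   pixel__ to 'unsigned short'); the typedef names are hypothetical.
   Kept inert under #if 0.  */
#if 0
/* 'vector unsigned int' */
typedef __attribute__ ((altivec (vector__))) unsigned int demo_vui;
/* 'vector bool short' */
typedef __attribute__ ((altivec (bool__))) unsigned short demo_vbs;
/* 'vector pixel' */
typedef __attribute__ ((altivec (pixel__))) unsigned short demo_vp;
#endif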
24588 /* AltiVec defines a set of built-in scalar types that serve as vector
24589 elements; we must teach the compiler how to mangle them. */
24591 static const char *
24592 rs6000_mangle_type (const_tree type)
24594 type = TYPE_MAIN_VARIANT (type);
24596 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24597 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24598 return NULL;
24600 if (type == bool_char_type_node) return "U6__boolc";
24601 if (type == bool_short_type_node) return "U6__bools";
24602 if (type == pixel_type_node) return "u7__pixel";
24603 if (type == bool_int_type_node) return "U6__booli";
24604 if (type == bool_long_type_node) return "U6__booll";
24606 /* Mangle IBM extended float long double as `g' (__float128) on
24607 powerpc*-linux where long-double-64 previously was the default. */
24608 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
24609 && TARGET_ELF
24610 && TARGET_LONG_DOUBLE_128
24611 && !TARGET_IEEEQUAD)
24612 return "g";
24614 /* For all other types, use normal C++ mangling. */
24615 return NULL;
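/* Illustrative sketch (not part of GCC): the scalar-type manglings above
   as a toy name -> mangling lookup, matching the table in
   rs6000_mangle_type.  The function and the spellings of the
   source-level names are hypothetical.  Kept inert under #if 0.  */
#if 0
#include <string.h>

static const char *
mangle_altivec_scalar (const char *type_name)
{
  if (!strcmp (type_name, "__bool char"))  return "U6__boolc";
  if (!strcmp (type_name, "__bool short")) return "U6__bools";
  if (!strcmp (type_name, "__pixel"))      return "u7__pixel";
  if (!strcmp (type_name, "__bool int"))   return "U6__booli";
  if (!strcmp (type_name, "__bool long"))  return "U6__booll";
  return NULL;   /* Everything else gets normal C++ mangling.  */
}
#endif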
24618 /* Handle a "longcall" or "shortcall" attribute; arguments as in
24619 struct attribute_spec.handler. */
24621 static tree
24622 rs6000_handle_longcall_attribute (tree *node, tree name,
24623 tree args ATTRIBUTE_UNUSED,
24624 int flags ATTRIBUTE_UNUSED,
24625 bool *no_add_attrs)
24627 if (TREE_CODE (*node) != FUNCTION_TYPE
24628 && TREE_CODE (*node) != FIELD_DECL
24629 && TREE_CODE (*node) != TYPE_DECL)
24631 warning (OPT_Wattributes, "%qE attribute only applies to functions",
24632 name);
24633 *no_add_attrs = true;
24636 return NULL_TREE;
24639 /* Set longcall attributes on all functions declared when
24640 rs6000_default_long_calls is true. */
24641 static void
24642 rs6000_set_default_type_attributes (tree type)
24644 if (rs6000_default_long_calls
24645 && (TREE_CODE (type) == FUNCTION_TYPE
24646 || TREE_CODE (type) == METHOD_TYPE))
24647 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
24648 NULL_TREE,
24649 TYPE_ATTRIBUTES (type));
24651 #if TARGET_MACHO
24652 darwin_set_default_type_attributes (type);
24653 #endif
24656 /* Return a reference suitable for calling a function with the
24657 longcall attribute. */
24659 rtx
24660 rs6000_longcall_ref (rtx call_ref)
24662 const char *call_name;
24663 tree node;
24665 if (GET_CODE (call_ref) != SYMBOL_REF)
24666 return call_ref;
24668 /* System V adds '.' to the internal name, so skip any leading dots. */
24669 call_name = XSTR (call_ref, 0);
24670 if (*call_name == '.')
24672 while (*call_name == '.')
24673 call_name++;
24675 node = get_identifier (call_name);
24676 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
24679 return force_reg (Pmode, call_ref);
24682 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
24683 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
24684 #endif
24686 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24687 struct attribute_spec.handler. */
24688 static tree
24689 rs6000_handle_struct_attribute (tree *node, tree name,
24690 tree args ATTRIBUTE_UNUSED,
24691 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24693 tree *type = NULL;
24694 if (DECL_P (*node))
24696 if (TREE_CODE (*node) == TYPE_DECL)
24697 type = &TREE_TYPE (*node);
24699 else
24700 type = node;
24702 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24703 || TREE_CODE (*type) == UNION_TYPE)))
24705 warning (OPT_Wattributes, "%qE attribute ignored", name);
24706 *no_add_attrs = true;
24709 else if ((is_attribute_p ("ms_struct", name)
24710 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24711 || ((is_attribute_p ("gcc_struct", name)
24712 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24714 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
24715 name);
24716 *no_add_attrs = true;
24719 return NULL_TREE;
24722 static bool
24723 rs6000_ms_bitfield_layout_p (const_tree record_type)
24725 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
24726 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24727 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
24730 #ifdef USING_ELFOS_H
24732 /* A get_unnamed_section callback, used for switching to toc_section. */
24734 static void
24735 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24737 if (DEFAULT_ABI == ABI_AIX
24738 && TARGET_MINIMAL_TOC
24739 && !TARGET_RELOCATABLE)
24741 if (!toc_initialized)
24743 toc_initialized = 1;
24744 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24745 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
24746 fprintf (asm_out_file, "\t.tc ");
24747 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
24748 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24749 fprintf (asm_out_file, "\n");
24751 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24752 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24753 fprintf (asm_out_file, " = .+32768\n");
24755 else
24756 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24758 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
24759 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24760 else
24762 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24763 if (!toc_initialized)
24765 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24766 fprintf (asm_out_file, " = .+32768\n");
24767 toc_initialized = 1;
24772 /* Implement TARGET_ASM_INIT_SECTIONS. */
24774 static void
24775 rs6000_elf_asm_init_sections (void)
24777 toc_section
24778 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
24780 sdata2_section
24781 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
24782 SDATA2_SECTION_ASM_OP);
24785 /* Implement TARGET_SELECT_RTX_SECTION. */
24787 static section *
24788 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
24789 unsigned HOST_WIDE_INT align)
24791 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
24792 return toc_section;
24793 else
24794 return default_elf_select_rtx_section (mode, x, align);
24797 /* For a SYMBOL_REF, set generic flags and then perform some
24798 target-specific processing.
24800 When the AIX ABI is requested on a non-AIX system, replace the
24801 function name with the real name (with a leading .) rather than the
24802 function descriptor name. This avoids a lot of overriding code that
24803 would otherwise be needed to handle the prefixes. */
24805 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
24806 static void
24807 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
24809 default_encode_section_info (decl, rtl, first);
24811 if (first
24812 && TREE_CODE (decl) == FUNCTION_DECL
24813 && !TARGET_AIX
24814 && DEFAULT_ABI == ABI_AIX)
24816 rtx sym_ref = XEXP (rtl, 0);
24817 size_t len = strlen (XSTR (sym_ref, 0));
24818 char *str = XALLOCAVEC (char, len + 2);
24819 str[0] = '.';
24820 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
24821 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
24825 static inline bool
24826 compare_section_name (const char *section, const char *templ)
24828 int len;
24830 len = strlen (templ);
24831 return (strncmp (section, templ, len) == 0
24832 && (section[len] == 0 || section[len] == '.'));
24835 bool
24836 rs6000_elf_in_small_data_p (const_tree decl)
24838 if (rs6000_sdata == SDATA_NONE)
24839 return false;
24841 /* We want to merge strings, so we never consider them small data. */
24842 if (TREE_CODE (decl) == STRING_CST)
24843 return false;
24845 /* Functions are never in the small data area. */
24846 if (TREE_CODE (decl) == FUNCTION_DECL)
24847 return false;
24849 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
24851 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
24852 if (compare_section_name (section, ".sdata")
24853 || compare_section_name (section, ".sdata2")
24854 || compare_section_name (section, ".gnu.linkonce.s")
24855 || compare_section_name (section, ".sbss")
24856 || compare_section_name (section, ".sbss2")
24857 || compare_section_name (section, ".gnu.linkonce.sb")
24858 || strcmp (section, ".PPC.EMB.sdata0") == 0
24859 || strcmp (section, ".PPC.EMB.sbss0") == 0)
24860 return true;
24862 else
24864 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
24866 if (size > 0
24867 && size <= g_switch_value
24868 /* If it's not public, and we're not going to reference it there,
24869 there's no need to put it in the small data section. */
24870 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
24871 return true;
24874 return false;
24877 #endif /* USING_ELFOS_H */
24879 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
24881 static bool
24882 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
24884 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
24887 /* Return a REG that occurs in ADDR with coefficient 1.
24888 ADDR can be effectively incremented by incrementing REG.
24890 r0 is special and we must not select it as an address
24891 register by this routine since our caller will try to
24892 increment the returned register via an "la" instruction. */
24894 rtx
24895 find_addr_reg (rtx addr)
24897 while (GET_CODE (addr) == PLUS)
24899 if (GET_CODE (XEXP (addr, 0)) == REG
24900 && REGNO (XEXP (addr, 0)) != 0)
24901 addr = XEXP (addr, 0);
24902 else if (GET_CODE (XEXP (addr, 1)) == REG
24903 && REGNO (XEXP (addr, 1)) != 0)
24904 addr = XEXP (addr, 1);
24905 else if (CONSTANT_P (XEXP (addr, 0)))
24906 addr = XEXP (addr, 1);
24907 else if (CONSTANT_P (XEXP (addr, 1)))
24908 addr = XEXP (addr, 0);
24909 else
24910 gcc_unreachable ();
24912 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
24913 return addr;
24916 void
24917 rs6000_fatal_bad_address (rtx op)
24919 fatal_insn ("bad address", op);
24922 #if TARGET_MACHO
24924 typedef struct branch_island_d {
24925 tree function_name;
24926 tree label_name;
24927 int line_number;
24928 } branch_island;
24930 DEF_VEC_O(branch_island);
24931 DEF_VEC_ALLOC_O(branch_island,gc);
24933 static VEC(branch_island,gc) *branch_islands;
24935 /* Remember to generate a branch island for far calls to the given
24936 function. */
24938 static void
24939 add_compiler_branch_island (tree label_name, tree function_name,
24940 int line_number)
24942 branch_island bi = {function_name, label_name, line_number};
24943 VEC_safe_push (branch_island, gc, branch_islands, bi);
24946 /* Generate far-jump branch islands for everything recorded in
24947 branch_islands. Invoked immediately after the last instruction of
24948 the epilogue has been emitted; the branch islands must be appended
24949 to, and contiguous with, the function body. Mach-O stubs are
24950 generated in machopic_output_stub(). */
24952 static void
24953 macho_branch_islands (void)
24955 char tmp_buf[512];
24957 while (!VEC_empty (branch_island, branch_islands))
24959 branch_island *bi = &VEC_last (branch_island, branch_islands);
24960 const char *label = IDENTIFIER_POINTER (bi->label_name);
24961 const char *name = IDENTIFIER_POINTER (bi->function_name);
24962 char name_buf[512];
24963 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
24964 if (name[0] == '*' || name[0] == '&')
24965 strcpy (name_buf, name+1);
24966 else
24968 name_buf[0] = '_';
24969 strcpy (name_buf+1, name);
24971 strcpy (tmp_buf, "\n");
24972 strcat (tmp_buf, label);
24973 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
24974 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
24975 dbxout_stabd (N_SLINE, bi->line_number);
24976 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
24977 if (flag_pic)
24979 if (TARGET_LINK_STACK)
24981 char name[32];
24982 get_ppc476_thunk_name (name);
24983 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
24984 strcat (tmp_buf, name);
24985 strcat (tmp_buf, "\n");
24986 strcat (tmp_buf, label);
24987 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24989 else
24991 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
24992 strcat (tmp_buf, label);
24993 strcat (tmp_buf, "_pic\n");
24994 strcat (tmp_buf, label);
24995 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24998 strcat (tmp_buf, "\taddis r11,r11,ha16(");
24999 strcat (tmp_buf, name_buf);
25000 strcat (tmp_buf, " - ");
25001 strcat (tmp_buf, label);
25002 strcat (tmp_buf, "_pic)\n");
25004 strcat (tmp_buf, "\tmtlr r0\n");
25006 strcat (tmp_buf, "\taddi r12,r11,lo16(");
25007 strcat (tmp_buf, name_buf);
25008 strcat (tmp_buf, " - ");
25009 strcat (tmp_buf, label);
25010 strcat (tmp_buf, "_pic)\n");
25012 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
25014 else
25016 strcat (tmp_buf, ":\nlis r12,hi16(");
25017 strcat (tmp_buf, name_buf);
25018 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
25019 strcat (tmp_buf, name_buf);
25020 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
25022 output_asm_insn (tmp_buf, 0);
25023 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25024 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25025 dbxout_stabd (N_SLINE, bi->line_number);
25026 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25027 VEC_pop (branch_island, branch_islands);
25031 /* NO_PREVIOUS_DEF checks whether the function name is already in the
25032 list of branch islands. */
25034 static int
25035 no_previous_def (tree function_name)
25037 branch_island *bi;
25038 unsigned ix;
25040 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25041 if (function_name == bi->function_name)
25042 return 0;
25043 return 1;
25046 /* GET_PREV_LABEL gets the label name from the previous definition of
25047 the function. */
25049 static tree
25050 get_prev_label (tree function_name)
25052 branch_island *bi;
25053 unsigned ix;
25055 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25056 if (function_name == bi->function_name)
25057 return bi->label_name;
25058 return NULL_TREE;
25061 /* INSN is either a function call or a millicode call. It may have an
25062 unconditional jump in its delay slot.
25064 CALL_DEST is the routine we are calling. */
25066 char *
25067 output_call (rtx insn, rtx *operands, int dest_operand_number,
25068 int cookie_operand_number)
25070 static char buf[256];
25071 if (darwin_emit_branch_islands
25072 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
25073 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
25075 tree labelname;
25076 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
25078 if (no_previous_def (funname))
25080 rtx label_rtx = gen_label_rtx ();
25081 char *label_buf, temp_buf[256];
25082 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
25083 CODE_LABEL_NUMBER (label_rtx));
25084 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
25085 labelname = get_identifier (label_buf);
25086 add_compiler_branch_island (labelname, funname, insn_line (insn));
25088 else
25089 labelname = get_prev_label (funname);
25091 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
25092 instruction will reach 'foo', otherwise link as 'bl L42'".
25093 "L42" should be a 'branch island', that will do a far jump to
25094 'foo'. Branch islands are generated in
25095 macho_branch_islands(). */
25096 sprintf (buf, "jbsr %%z%d,%.246s",
25097 dest_operand_number, IDENTIFIER_POINTER (labelname));
25099 else
25100 sprintf (buf, "bl %%z%d", dest_operand_number);
25101 return buf;
25104 /* Generate PIC and indirect symbol stubs. */
25106 void
25107 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25109 unsigned int length;
25110 char *symbol_name, *lazy_ptr_name;
25111 char *local_label_0;
25112 static int label = 0;
25114 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25115 symb = (*targetm.strip_name_encoding) (symb);
25118 length = strlen (symb);
25119 symbol_name = XALLOCAVEC (char, length + 32);
25120 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25122 lazy_ptr_name = XALLOCAVEC (char, length + 32);
25123 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
25125 if (flag_pic == 2)
25126 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
25127 else
25128 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
25130 if (flag_pic == 2)
25132 fprintf (file, "\t.align 5\n");
25134 fprintf (file, "%s:\n", stub);
25135 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25137 label++;
25138 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
25139 sprintf (local_label_0, "\"L%011d$spb\"", label);
25141 fprintf (file, "\tmflr r0\n");
25142 if (TARGET_LINK_STACK)
25144 char name[32];
25145 get_ppc476_thunk_name (name);
25146 fprintf (file, "\tbl %s\n", name);
25147 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25149 else
25151 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
25152 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25154 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
25155 lazy_ptr_name, local_label_0);
25156 fprintf (file, "\tmtlr r0\n");
25157 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
25158 (TARGET_64BIT ? "ldu" : "lwzu"),
25159 lazy_ptr_name, local_label_0);
25160 fprintf (file, "\tmtctr r12\n");
25161 fprintf (file, "\tbctr\n");
25163 else
25165 fprintf (file, "\t.align 4\n");
25167 fprintf (file, "%s:\n", stub);
25168 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25170 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
25171 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
25172 (TARGET_64BIT ? "ldu" : "lwzu"),
25173 lazy_ptr_name);
25174 fprintf (file, "\tmtctr r12\n");
25175 fprintf (file, "\tbctr\n");
25178 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25179 fprintf (file, "%s:\n", lazy_ptr_name);
25180 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25181 fprintf (file, "%sdyld_stub_binding_helper\n",
25182 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
25185 /* Legitimize PIC addresses. If the address is already
25186 position-independent, we return ORIG. Newly generated
25187 position-independent addresses go into a reg. This is REG if non
25188 zero, otherwise we allocate register(s) as necessary. */
25190 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
25192 rtx
25193 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
25194 rtx reg)
25196 rtx base, offset;
25198 if (reg == NULL && ! reload_in_progress && ! reload_completed)
25199 reg = gen_reg_rtx (Pmode);
25201 if (GET_CODE (orig) == CONST)
25203 rtx reg_temp;
25205 if (GET_CODE (XEXP (orig, 0)) == PLUS
25206 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
25207 return orig;
25209 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
25211 /* Use a different reg for the intermediate value, as
25212 it will be marked UNCHANGING. */
25213 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25214 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25215 Pmode, reg_temp);
25216 offset =
25217 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25218 Pmode, reg);
25220 if (GET_CODE (offset) == CONST_INT)
25222 if (SMALL_INT (offset))
25223 return plus_constant (Pmode, base, INTVAL (offset));
25224 else if (! reload_in_progress && ! reload_completed)
25225 offset = force_reg (Pmode, offset);
25226 else
25228 rtx mem = force_const_mem (Pmode, orig);
25229 return machopic_legitimize_pic_address (mem, Pmode, reg);
25232 return gen_rtx_PLUS (Pmode, base, offset);
25235 /* Fall back on generic machopic code. */
25236 return machopic_legitimize_pic_address (orig, mode, reg);
25239 /* Output a .machine directive for the Darwin assembler, and call
25240 the generic start_file routine. */
25242 static void
25243 rs6000_darwin_file_start (void)
25245 static const struct
25246 {
25247 const char *arg;
25248 const char *name;
25249 int if_set;
25250 } mapping[] = {
25251 { "ppc64", "ppc64", MASK_64BIT },
25252 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25253 { "power4", "ppc970", 0 },
25254 { "G5", "ppc970", 0 },
25255 { "7450", "ppc7450", 0 },
25256 { "7400", "ppc7400", MASK_ALTIVEC },
25257 { "G4", "ppc7400", 0 },
25258 { "750", "ppc750", 0 },
25259 { "740", "ppc750", 0 },
25260 { "G3", "ppc750", 0 },
25261 { "604e", "ppc604e", 0 },
25262 { "604", "ppc604", 0 },
25263 { "603e", "ppc603", 0 },
25264 { "603", "ppc603", 0 },
25265 { "601", "ppc601", 0 },
25266 { NULL, "ppc", 0 } };
25267 const char *cpu_id = "";
25268 size_t i;
25270 rs6000_file_start ();
25271 darwin_file_start ();
25273 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25275 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25276 cpu_id = rs6000_default_cpu;
25278 if (global_options_set.x_rs6000_cpu_index)
25279 cpu_id = processor_target_table[rs6000_cpu_index].name;
25281 /* Look through the mapping array. Pick the first name that either
25282 matches the argument, has a bit set in IF_SET that is also set
25283 in the target flags, or has a NULL name. */
25285 i = 0;
25286 while (mapping[i].arg != NULL
25287 && strcmp (mapping[i].arg, cpu_id) != 0
25288 && (mapping[i].if_set & target_flags) == 0)
25289 i++;
25291 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
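/* Illustrative sketch (not part of GCC): the table walk above stops at
   the first entry whose arg matches the cpu id, whose IF_SET bit is set
   in the target flags, or whose arg is NULL (the "ppc" fallback).  Toy
   copy with a tiny driver; kept inert under #if 0.  */
#if 0
#include <stdio.h>
#include <string.h>

struct map_demo { const char *arg; const char *name; int if_set; };

static const char *
pick_machine (const struct map_demo *m, const char *cpu_id, int flags)
{
  size_t i = 0;
  while (m[i].arg != NULL
         && strcmp (m[i].arg, cpu_id) != 0
         && (m[i].if_set & flags) == 0)
    i++;
  return m[i].name;
}

int
main (void)
{
  static const struct map_demo m[] = {
    { "G4", "ppc7400", 0 },
    { "G3", "ppc750", 0 },
    { NULL, "ppc", 0 } };
  printf ("%s\n", pick_machine (m, "G4", 0));   /* prints "ppc7400" */
  printf ("%s\n", pick_machine (m, "486", 0));  /* prints "ppc" */
  return 0;
}
#endif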
25294 #endif /* TARGET_MACHO */
25296 #if TARGET_ELF
25297 static int
25298 rs6000_elf_reloc_rw_mask (void)
25300 if (flag_pic)
25301 return 3;
25302 else if (DEFAULT_ABI == ABI_AIX)
25303 return 2;
25304 else
25305 return 0;
25308 /* Record an element in the table of global constructors. SYMBOL is
25309 a SYMBOL_REF of the function to be called; PRIORITY is a number
25310 between 0 and MAX_INIT_PRIORITY.
25312 This differs from default_named_section_asm_out_constructor in
25313 that we have special handling for -mrelocatable. */
25315 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25316 static void
25317 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25319 const char *section = ".ctors";
25320 char buf[16];
25322 if (priority != DEFAULT_INIT_PRIORITY)
25324 sprintf (buf, ".ctors.%.5u",
25325 /* Invert the numbering so the linker puts us in the proper
25326 order; constructors are run from right to left, and the
25327 linker sorts in increasing order. */
25328 MAX_INIT_PRIORITY - priority);
25329 section = buf;
25332 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25333 assemble_align (POINTER_SIZE);
25335 if (TARGET_RELOCATABLE)
25337 fputs ("\t.long (", asm_out_file);
25338 output_addr_const (asm_out_file, symbol);
25339 fputs (")@fixup\n", asm_out_file);
25341 else
25342 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
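/* Illustrative sketch (not part of GCC): the inverted priority encoding
   used above.  The linker sorts .ctors.* sections in increasing order
   and constructors run right to left, so subtracting from the maximum
   makes higher-priority constructors run first.  MAX_INIT_PRIORITY is
   assumed to be 65535 for this demo; kept inert under #if 0.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const unsigned max_init_priority = 65535;
  unsigned priority = 101;
  char buf[16];
  sprintf (buf, ".ctors.%.5u", max_init_priority - priority);
  printf ("%s\n", buf);   /* prints ".ctors.65434" */
  return 0;
}
#endif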
25345 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25346 static void
25347 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25349 const char *section = ".dtors";
25350 char buf[16];
25352 if (priority != DEFAULT_INIT_PRIORITY)
25354 sprintf (buf, ".dtors.%.5u",
25355 /* Invert the numbering so the linker puts us in the proper
25356 order; constructors are run from right to left, and the
25357 linker sorts in increasing order. */
25358 MAX_INIT_PRIORITY - priority);
25359 section = buf;
25362 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25363 assemble_align (POINTER_SIZE);
25365 if (TARGET_RELOCATABLE)
25367 fputs ("\t.long (", asm_out_file);
25368 output_addr_const (asm_out_file, symbol);
25369 fputs (")@fixup\n", asm_out_file);
25371 else
25372 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25375 void
25376 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25378 if (TARGET_64BIT)
25380 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25381 ASM_OUTPUT_LABEL (file, name);
25382 fputs (DOUBLE_INT_ASM_OP, file);
25383 rs6000_output_function_entry (file, name);
25384 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25385 if (DOT_SYMBOLS)
25387 fputs ("\t.size\t", file);
25388 assemble_name (file, name);
25389 fputs (",24\n\t.type\t.", file);
25390 assemble_name (file, name);
25391 fputs (",@function\n", file);
25392 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25394 fputs ("\t.globl\t.", file);
25395 assemble_name (file, name);
25396 putc ('\n', file);
25399 else
25400 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25401 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25402 rs6000_output_function_entry (file, name);
25403 fputs (":\n", file);
25404 return;
25407 if (TARGET_RELOCATABLE
25408 && !TARGET_SECURE_PLT
25409 && (get_pool_size () != 0 || crtl->profile)
25410 && uses_TOC ())
25412 char buf[256];
25414 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25416 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25417 fprintf (file, "\t.long ");
25418 assemble_name (file, buf);
25419 putc ('-', file);
25420 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25421 assemble_name (file, buf);
25422 putc ('\n', file);
25425 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25426 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25428 if (DEFAULT_ABI == ABI_AIX)
25430 const char *desc_name, *orig_name;
25432 orig_name = (*targetm.strip_name_encoding) (name);
25433 desc_name = orig_name;
25434 while (*desc_name == '.')
25435 desc_name++;
25437 if (TREE_PUBLIC (decl))
25438 fprintf (file, "\t.globl %s\n", desc_name);
25440 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25441 fprintf (file, "%s:\n", desc_name);
25442 fprintf (file, "\t.long %s\n", orig_name);
25443 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25444 if (DEFAULT_ABI == ABI_AIX)
25445 fputs ("\t.long 0\n", file);
25446 fprintf (file, "\t.previous\n");
25448 ASM_OUTPUT_LABEL (file, name);
25451 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25452 static void
25453 rs6000_elf_file_end (void)
25455 #ifdef HAVE_AS_GNU_ATTRIBUTE
25456 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
25458 if (rs6000_passes_float)
25459 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25460 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25461 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25462 : 2));
25463 if (rs6000_passes_vector)
25464 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25465 (TARGET_ALTIVEC_ABI ? 2
25466 : TARGET_SPE_ABI ? 3
25467 : 1));
25468 if (rs6000_returns_struct)
25469 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
25470 aix_struct_return ? 2 : 1);
25472 #endif
25473 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25474 if (TARGET_32BIT)
25475 file_end_indicate_exec_stack ();
25476 #endif
25478 #endif
25480 #if TARGET_XCOFF
25481 static void
25482 rs6000_xcoff_asm_output_anchor (rtx symbol)
25484 char buffer[100];
25486 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
25487 SYMBOL_REF_BLOCK_OFFSET (symbol));
25488 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
25491 static void
25492 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
25494 fputs (GLOBAL_ASM_OP, stream);
25495 RS6000_OUTPUT_BASENAME (stream, name);
25496 putc ('\n', stream);
25499 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
25500 points to the section string variable. */
25502 static void
25503 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
25505 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
25506 *(const char *const *) directive,
25507 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25510 /* Likewise for read-write sections. */
25512 static void
25513 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
25515 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
25516 *(const char *const *) directive,
25517 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25520 /* A get_unnamed_section callback, used for switching to toc_section. */
25522 static void
25523 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25525 if (TARGET_MINIMAL_TOC)
25527 /* toc_section is always selected at least once from
25528 rs6000_xcoff_file_start, so this is guaranteed to
25529 always be defined once and only once in each file. */
25530 if (!toc_initialized)
25532 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
25533 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
25534 toc_initialized = 1;
25536 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
25537 (TARGET_32BIT ? "" : ",3"));
25539 else
25540 fputs ("\t.toc\n", asm_out_file);
25543 /* Implement TARGET_ASM_INIT_SECTIONS. */
25545 static void
25546 rs6000_xcoff_asm_init_sections (void)
25548 read_only_data_section
25549 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25550 &xcoff_read_only_section_name);
25552 private_data_section
25553 = get_unnamed_section (SECTION_WRITE,
25554 rs6000_xcoff_output_readwrite_section_asm_op,
25555 &xcoff_private_data_section_name);
25557 read_only_private_data_section
25558 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25559 &xcoff_private_data_section_name);
25561 toc_section
25562 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
25564 readonly_data_section = read_only_data_section;
25565 exception_section = data_section;
25568 static int
25569 rs6000_xcoff_reloc_rw_mask (void)
25571 return 3;
25574 static void
25575 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
25576 tree decl ATTRIBUTE_UNUSED)
25578 int smclass;
25579 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
25581 if (flags & SECTION_CODE)
25582 smclass = 0;
25583 else if (flags & SECTION_TLS)
25584 smclass = 3;
25585 else if (flags & SECTION_WRITE)
25586 smclass = 2;
25587 else
25588 smclass = 1;
25590 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
25591 (flags & SECTION_CODE) ? "." : "",
25592 name, suffix[smclass], flags & SECTION_ENTSIZE);
25595 static section *
25596 rs6000_xcoff_select_section (tree decl, int reloc,
25597 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25599 if (decl_readonly_section (decl, reloc))
25601 if (TREE_PUBLIC (decl))
25602 return read_only_data_section;
25603 else
25604 return read_only_private_data_section;
25606 else
25608 if (TREE_PUBLIC (decl))
25609 return data_section;
25610 else
25611 return private_data_section;
25615 static void
25616 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
25618 const char *name;
25620 /* Use select_section for private and uninitialized data. */
25621 if (!TREE_PUBLIC (decl)
25622 || DECL_COMMON (decl)
25623 || DECL_INITIAL (decl) == NULL_TREE
25624 || DECL_INITIAL (decl) == error_mark_node
25625 || (flag_zero_initialized_in_bss
25626 && initializer_zerop (DECL_INITIAL (decl))))
25627 return;
25629 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
25630 name = (*targetm.strip_name_encoding) (name);
25631 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
25634 /* Select section for constant in constant pool.
25636 On RS/6000, all constants are in the private read-only data area.
25637 However, if this is being placed in the TOC it must be output as a
25638 toc entry. */
25640 static section *
25641 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
25642 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25644 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25645 return toc_section;
25646 else
25647 return read_only_private_data_section;
25650 /* Remove any trailing [DS] or the like from the symbol name. */
25652 static const char *
25653 rs6000_xcoff_strip_name_encoding (const char *name)
25655 size_t len;
25656 if (*name == '*')
25657 name++;
25658 len = strlen (name);
25659 if (name[len - 1] == ']')
25660 return ggc_alloc_string (name, len - 4);
25661 else
25662 return name;
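/* Illustrative sketch (not part of GCC): the stripping above removes a
   leading '*' and a trailing four-character "[XX]" storage-mapping-class
   suffix such as "[DS]".  Standalone copy with a tiny driver; kept
   inert under #if 0.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
strip_name_demo (const char *name, char *out)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (len >= 4 && name[len - 1] == ']')
    len -= 4;               /* Drop "[DS]", "[RW]" and friends.  */
  memcpy (out, name, len);
  out[len] = '\0';
}

int
main (void)
{
  char buf[64];
  strip_name_demo ("*foo[DS]", buf);
  printf ("%s\n", buf);     /* prints "foo" */
  return 0;
}
#endif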
25665 /* Section attributes. AIX is always PIC. */
25667 static unsigned int
25668 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
25670 unsigned int align;
25671 unsigned int flags = default_section_type_flags (decl, name, reloc);
25673 /* Align to at least UNIT size. */
25674 if (flags & SECTION_CODE || !decl)
25675 align = MIN_UNITS_PER_WORD;
25676 else
25677 /* Increase alignment of large objects if not already stricter. */
25678 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
25679 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
25680 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
25682 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
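/* Illustrative sketch (not part of GCC): the return above packs log2 of
   the section alignment into the SECTION_ENTSIZE bits of the flags
   word.  Toy encoder under the assumption that the entsize field is the
   low 8 bits; kept inert under #if 0.  */
#if 0
#include <assert.h>

#define ENTSIZE_MASK_DEMO 0xff   /* stand-in for SECTION_ENTSIZE */

static unsigned int
encode_align_demo (unsigned int flags, unsigned int align_bytes)
{
  unsigned int log2a = 0;
  while ((1u << log2a) < align_bytes)
    log2a++;
  return flags | (log2a & ENTSIZE_MASK_DEMO);
}

int
main (void)
{
  /* 16-byte alignment encodes as 4 in the low bits.  */
  assert ((encode_align_demo (0, 16) & ENTSIZE_MASK_DEMO) == 4);
  return 0;
}
#endif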
25685 /* Output at beginning of assembler file.
25687 Initialize the section names for the RS/6000 at this point.
25689 Specify filename, including full path, to assembler.
25691 We want to go into the TOC section so at least one .toc will be emitted.
25692 Also, in order to output proper .bs/.es pairs, we need at least one static
25693 [RW] section emitted.
25695 Finally, declare mcount when profiling to make the assembler happy. */
25697 static void
25698 rs6000_xcoff_file_start (void)
25700 rs6000_gen_section_name (&xcoff_bss_section_name,
25701 main_input_filename, ".bss_");
25702 rs6000_gen_section_name (&xcoff_private_data_section_name,
25703 main_input_filename, ".rw_");
25704 rs6000_gen_section_name (&xcoff_read_only_section_name,
25705 main_input_filename, ".ro_");
25707 fputs ("\t.file\t", asm_out_file);
25708 output_quoted_string (asm_out_file, main_input_filename);
25709 fputc ('\n', asm_out_file);
25710 if (write_symbols != NO_DEBUG)
25711 switch_to_section (private_data_section);
25712 switch_to_section (text_section);
25713 if (profile_flag)
25714 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
25715 rs6000_file_start ();
25718 /* Output at end of assembler file.
25719 On the RS/6000, referencing data should automatically pull in text. */
25721 static void
25722 rs6000_xcoff_file_end (void)
25724 switch_to_section (text_section);
25725 fputs ("_section_.text:\n", asm_out_file);
25726 switch_to_section (data_section);
25727 fputs (TARGET_32BIT
25728 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
25729 asm_out_file);
25731 #endif /* TARGET_XCOFF */
25733 /* Compute a (partial) cost for rtx X. Return true if the complete
25734 cost has been computed, and false if subexpressions should be
25735 scanned. In either case, *TOTAL contains the cost result. */
25737 static bool
25738 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
25739 int *total, bool speed)
25741 enum machine_mode mode = GET_MODE (x);
25743 switch (code)
25745 /* On the RS/6000, if it is valid in the insn, it is free. */
25746 case CONST_INT:
25747 if (((outer_code == SET
25748 || outer_code == PLUS
25749 || outer_code == MINUS)
25750 && (satisfies_constraint_I (x)
25751 || satisfies_constraint_L (x)))
25752 || (outer_code == AND
25753 && (satisfies_constraint_K (x)
25754 || (mode == SImode
25755 ? satisfies_constraint_L (x)
25756 : satisfies_constraint_J (x))
25757 || mask_operand (x, mode)
25758 || (mode == DImode
25759 && mask64_operand (x, DImode))))
25760 || ((outer_code == IOR || outer_code == XOR)
25761 && (satisfies_constraint_K (x)
25762 || (mode == SImode
25763 ? satisfies_constraint_L (x)
25764 : satisfies_constraint_J (x))))
25765 || outer_code == ASHIFT
25766 || outer_code == ASHIFTRT
25767 || outer_code == LSHIFTRT
25768 || outer_code == ROTATE
25769 || outer_code == ROTATERT
25770 || outer_code == ZERO_EXTRACT
25771 || (outer_code == MULT
25772 && satisfies_constraint_I (x))
25773 || ((outer_code == DIV || outer_code == UDIV
25774 || outer_code == MOD || outer_code == UMOD)
25775 && exact_log2 (INTVAL (x)) >= 0)
25776 || (outer_code == COMPARE
25777 && (satisfies_constraint_I (x)
25778 || satisfies_constraint_K (x)))
25779 || ((outer_code == EQ || outer_code == NE)
25780 && (satisfies_constraint_I (x)
25781 || satisfies_constraint_K (x)
25782 || (mode == SImode
25783 ? satisfies_constraint_L (x)
25784 : satisfies_constraint_J (x))))
25785 || (outer_code == GTU
25786 && satisfies_constraint_I (x))
25787 || (outer_code == LTU
25788 && satisfies_constraint_P (x)))
25790 *total = 0;
25791 return true;
25793 else if ((outer_code == PLUS
25794 && reg_or_add_cint_operand (x, VOIDmode))
25795 || (outer_code == MINUS
25796 && reg_or_sub_cint_operand (x, VOIDmode))
25797 || ((outer_code == SET
25798 || outer_code == IOR
25799 || outer_code == XOR)
25800 && (INTVAL (x)
25801 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
25803 *total = COSTS_N_INSNS (1);
25804 return true;
25806 /* FALLTHRU */
25808 case CONST_DOUBLE:
25809 if (mode == DImode && code == CONST_DOUBLE)
25811 if ((outer_code == IOR || outer_code == XOR)
25812 && CONST_DOUBLE_HIGH (x) == 0
25813 && (CONST_DOUBLE_LOW (x)
25814 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
25816 *total = 0;
25817 return true;
25819 else if ((outer_code == AND && and64_2_operand (x, DImode))
25820 || ((outer_code == SET
25821 || outer_code == IOR
25822 || outer_code == XOR)
25823 && CONST_DOUBLE_HIGH (x) == 0))
25825 *total = COSTS_N_INSNS (1);
25826 return true;
25829 /* FALLTHRU */
25831 case CONST:
25832 case HIGH:
25833 case SYMBOL_REF:
25834 case MEM:
25835 /* When optimizing for size, a MEM should be slightly more expensive
25836 than generating the address, e.g., (plus (reg) (const)).
25837 L1 cache latency is about two instructions. */
25838 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
25839 return true;
25841 case LABEL_REF:
25842 *total = 0;
25843 return true;
25845 case PLUS:
25846 case MINUS:
25847 if (FLOAT_MODE_P (mode))
25848 *total = rs6000_cost->fp;
25849 else
25850 *total = COSTS_N_INSNS (1);
25851 return false;
25853 case MULT:
25854 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25855 && satisfies_constraint_I (XEXP (x, 1)))
25857 if (INTVAL (XEXP (x, 1)) >= -256
25858 && INTVAL (XEXP (x, 1)) <= 255)
25859 *total = rs6000_cost->mulsi_const9;
25860 else
25861 *total = rs6000_cost->mulsi_const;
25863 else if (mode == SFmode)
25864 *total = rs6000_cost->fp;
25865 else if (FLOAT_MODE_P (mode))
25866 *total = rs6000_cost->dmul;
25867 else if (mode == DImode)
25868 *total = rs6000_cost->muldi;
25869 else
25870 *total = rs6000_cost->mulsi;
25871 return false;
25873 case FMA:
25874 if (mode == SFmode)
25875 *total = rs6000_cost->fp;
25876 else
25877 *total = rs6000_cost->dmul;
25878 break;
25880 case DIV:
25881 case MOD:
25882 if (FLOAT_MODE_P (mode))
25884 *total = mode == DFmode ? rs6000_cost->ddiv
25885 : rs6000_cost->sdiv;
25886 return false;
25888 /* FALLTHRU */
25890 case UDIV:
25891 case UMOD:
25892 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25893 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
25895 if (code == DIV || code == MOD)
25896 /* Shift, addze */
25897 *total = COSTS_N_INSNS (2);
25898 else
25899 /* Shift */
25900 *total = COSTS_N_INSNS (1);
25902 else
25904 if (GET_MODE (XEXP (x, 1)) == DImode)
25905 *total = rs6000_cost->divdi;
25906 else
25907 *total = rs6000_cost->divsi;
25909 /* Add in shift and subtract for MOD. */
25910 if (code == MOD || code == UMOD)
25911 *total += COSTS_N_INSNS (2);
25912 return false;
25914 case CTZ:
25915 case FFS:
25916 *total = COSTS_N_INSNS (4);
25917 return false;
25919 case POPCOUNT:
25920 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
25921 return false;
25923 case PARITY:
25924 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
25925 return false;
25927 case NOT:
25928 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
25930 *total = 0;
25931 return false;
25933 /* FALLTHRU */
25935 case AND:
25936 case CLZ:
25937 case IOR:
25938 case XOR:
25939 case ZERO_EXTRACT:
25940 *total = COSTS_N_INSNS (1);
25941 return false;
25943 case ASHIFT:
25944 case ASHIFTRT:
25945 case LSHIFTRT:
25946 case ROTATE:
25947 case ROTATERT:
25948 /* Handle mul_highpart. */
25949 if (outer_code == TRUNCATE
25950 && GET_CODE (XEXP (x, 0)) == MULT)
25952 if (mode == DImode)
25953 *total = rs6000_cost->muldi;
25954 else
25955 *total = rs6000_cost->mulsi;
25956 return true;
25958 else if (outer_code == AND)
25959 *total = 0;
25960 else
25961 *total = COSTS_N_INSNS (1);
25962 return false;
25964 case SIGN_EXTEND:
25965 case ZERO_EXTEND:
25966 if (GET_CODE (XEXP (x, 0)) == MEM)
25967 *total = 0;
25968 else
25969 *total = COSTS_N_INSNS (1);
25970 return false;
25972 case COMPARE:
25973 case NEG:
25974 case ABS:
25975 if (!FLOAT_MODE_P (mode))
25977 *total = COSTS_N_INSNS (1);
25978 return false;
25980 /* FALLTHRU */
25982 case FLOAT:
25983 case UNSIGNED_FLOAT:
25984 case FIX:
25985 case UNSIGNED_FIX:
25986 case FLOAT_TRUNCATE:
25987 *total = rs6000_cost->fp;
25988 return false;
25990 case FLOAT_EXTEND:
25991 if (mode == DFmode)
25992 *total = 0;
25993 else
25994 *total = rs6000_cost->fp;
25995 return false;
25997 case UNSPEC:
25998 switch (XINT (x, 1))
26000 case UNSPEC_FRSP:
26001 *total = rs6000_cost->fp;
26002 return true;
26004 default:
26005 break;
26007 break;
26009 case CALL:
26010 case IF_THEN_ELSE:
26011 if (!speed)
26013 *total = COSTS_N_INSNS (1);
26014 return true;
26016 else if (FLOAT_MODE_P (mode)
26017 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
26019 *total = rs6000_cost->fp;
26020 return false;
26022 break;
26024 case EQ:
26025 case GTU:
26026 case LTU:
26027 /* Carry bit requires mode == Pmode.
26028 NEG or PLUS already counted so only add one. */
26029 if (mode == Pmode
26030 && (outer_code == NEG || outer_code == PLUS))
26032 *total = COSTS_N_INSNS (1);
26033 return true;
26035 if (outer_code == SET)
26037 if (XEXP (x, 1) == const0_rtx)
26039 if (TARGET_ISEL && !TARGET_MFCRF)
26040 *total = COSTS_N_INSNS (8);
26041 else
26042 *total = COSTS_N_INSNS (2);
26043 return true;
26045 else if (mode == Pmode)
26047 *total = COSTS_N_INSNS (3);
26048 return false;
26051 /* FALLTHRU */
26053 case GT:
26054 case LT:
26055 case UNORDERED:
26056 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
26058 if (TARGET_ISEL && !TARGET_MFCRF)
26059 *total = COSTS_N_INSNS (8);
26060 else
26061 *total = COSTS_N_INSNS (2);
26062 return true;
26064 /* CC COMPARE. */
26065 if (outer_code == COMPARE)
26067 *total = 0;
26068 return true;
26070 break;
26072 default:
26073 break;
26076 return false;
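/* Annotation (not in the original source): the boolean result above
follows the rtx_costs hook convention -- returning true means *total
already covers the whole expression, while returning false asks the
caller to recurse and add the costs of the sub-rtxes as well. The
debug wrapper below prints these two cases as "complete" and "scan
inner". */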
26079 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
26081 static bool
26082 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
26083 bool speed)
26085 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
26087 fprintf (stderr,
26088 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
26089 "opno = %d, total = %d, speed = %s, x:\n",
26090 ret ? "complete" : "scan inner",
26091 GET_RTX_NAME (code),
26092 GET_RTX_NAME (outer_code),
26093 opno,
26094 *total,
26095 speed ? "true" : "false");
26097 debug_rtx (x);
26099 return ret;
26102 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
26104 static int
26105 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
26106 addr_space_t as, bool speed)
26108 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
26110 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
26111 ret, speed ? "true" : "false");
26112 debug_rtx (x);
26114 return ret;
26118 /* A C expression returning the cost of moving data from a register of class
26119 CLASS1 to one of CLASS2. */
26121 static int
26122 rs6000_register_move_cost (enum machine_mode mode,
26123 reg_class_t from, reg_class_t to)
26125 int ret;
26127 if (TARGET_DEBUG_COST)
26128 dbg_cost_ctrl++;
26130 /* Moves from/to GENERAL_REGS. */
26131 if (reg_classes_intersect_p (to, GENERAL_REGS)
26132 || reg_classes_intersect_p (from, GENERAL_REGS))
26134 reg_class_t rclass = from;
26136 if (! reg_classes_intersect_p (to, GENERAL_REGS))
26137 rclass = to;
26139 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
26140 ret = (rs6000_memory_move_cost (mode, rclass, false)
26141 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
26143 /* It's more expensive to move CR_REGS than CR0_REGS because of the
26144 shift. */
26145 else if (rclass == CR_REGS)
26146 ret = 4;
26148 /* For those processors that have slow LR/CTR moves, make them more
26149 expensive than memory in order to bias spills to memory. */
26150 else if ((rs6000_cpu == PROCESSOR_POWER6
26151 || rs6000_cpu == PROCESSOR_POWER7)
26152 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
26153 ret = 6 * hard_regno_nregs[0][mode];
26155 else
26156 /* A move will cost one instruction per GPR moved. */
26157 ret = 2 * hard_regno_nregs[0][mode];
26160 /* If we have VSX, we can easily move between FPR or Altivec registers. */
26161 else if (VECTOR_UNIT_VSX_P (mode)
26162 && reg_classes_intersect_p (to, VSX_REGS)
26163 && reg_classes_intersect_p (from, VSX_REGS))
26164 ret = 2 * hard_regno_nregs[32][mode];
26166 /* Moving between two similar registers is just one instruction. */
26167 else if (reg_classes_intersect_p (to, from))
26168 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
26170 /* Everything else has to go through GENERAL_REGS. */
26171 else
26172 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
26173 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
26175 if (TARGET_DEBUG_COST)
26177 if (dbg_cost_ctrl == 1)
26178 fprintf (stderr,
26179 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
26180 ret, GET_MODE_NAME (mode), reg_class_names[from],
26181 reg_class_names[to]);
26182 dbg_cost_ctrl--;
26185 return ret;
26188 /* A C expression returning the cost of moving data of MODE from a register to
26189 or from memory. */
26191 static int
26192 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
26193 bool in ATTRIBUTE_UNUSED)
26195 int ret;
26197 if (TARGET_DEBUG_COST)
26198 dbg_cost_ctrl++;
26200 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
26201 ret = 4 * hard_regno_nregs[0][mode];
26202 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
26203 ret = 4 * hard_regno_nregs[32][mode];
26204 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
26205 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
26206 else
26207 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
26209 if (TARGET_DEBUG_COST)
26211 if (dbg_cost_ctrl == 1)
26212 fprintf (stderr,
26213 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
26214 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
26215 dbg_cost_ctrl--;
26218 return ret;
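/* Annotation: both cost routines above use the relative scale the
register allocator expects, where 2 is nominally one register-to-
register move instruction; memory traffic is costed at 4 per hard
register that MODE occupies (hard_regno_nregs), so a spill plus a
reload costs roughly four moves per register. */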
26221 /* Returns a code for a target-specific builtin that implements
26222 reciprocal of the function, or NULL_TREE if not available. */
26224 static tree
26225 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
26226 bool sqrt ATTRIBUTE_UNUSED)
26228 if (optimize_insn_for_size_p ())
26229 return NULL_TREE;
26231 if (md_fn)
26232 switch (fn)
26234 case VSX_BUILTIN_XVSQRTDP:
26235 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
26236 return NULL_TREE;
26238 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
26240 case VSX_BUILTIN_XVSQRTSP:
26241 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
26242 return NULL_TREE;
26244 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
26246 default:
26247 return NULL_TREE;
26250 else
26251 switch (fn)
26253 case BUILT_IN_SQRT:
26254 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
26255 return NULL_TREE;
26257 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
26259 case BUILT_IN_SQRTF:
26260 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
26261 return NULL_TREE;
26263 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
26265 default:
26266 return NULL_TREE;
26270 /* Load up a constant. If the mode is a vector mode, splat the value across
26271 all of the vector elements. */
26273 static rtx
26274 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
26276 rtx reg;
26278 if (mode == SFmode || mode == DFmode)
26280 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
26281 reg = force_reg (mode, d);
26283 else if (mode == V4SFmode)
26285 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
26286 rtvec v = gen_rtvec (4, d, d, d, d);
26287 reg = gen_reg_rtx (mode);
26288 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26290 else if (mode == V2DFmode)
26292 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
26293 rtvec v = gen_rtvec (2, d, d);
26294 reg = gen_reg_rtx (mode);
26295 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26297 else
26298 gcc_unreachable ();
26300 return reg;
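/* Annotation: the software divide/rsqrt expanders below use this
helper to materialize the constants 1.0 and 1.5 once per expansion --
as a scalar for SFmode/DFmode, or splatted into every lane for
V4SFmode/V2DFmode -- so the same Newton-Raphson code serves both the
scalar and the vector paths. */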
26303 /* Generate an FMA instruction. */
26305 static void
26306 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
26308 enum machine_mode mode = GET_MODE (target);
26309 rtx dst;
26311 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26312 gcc_assert (dst != NULL);
26314 if (dst != target)
26315 emit_move_insn (target, dst);
26318 /* Generate an FMSUB instruction: dst = fma(m1, m2, -a). */
26320 static void
26321 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
26323 enum machine_mode mode = GET_MODE (target);
26324 rtx dst;
26326 /* Altivec does not support fms directly;
26327 generate in terms of fma in that case. */
26328 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
26329 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
26330 else
26332 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
26333 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26335 gcc_assert (dst != NULL);
26337 if (dst != target)
26338 emit_move_insn (target, dst);
26341 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
26343 static void
26344 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
26346 enum machine_mode mode = GET_MODE (dst);
26347 rtx r;
26349 /* This is a tad more complicated, since the fnma_optab is for
26350 a different expression: fma(-m1, m2, a), which is the same
26351 thing except in the case of signed zeros.
26353 Fortunately we know that if FMA is supported, FNMSUB is
26354 also supported in the ISA. Just expand it directly. */
26356 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
26358 r = gen_rtx_NEG (mode, a);
26359 r = gen_rtx_FMA (mode, m1, m2, r);
26360 r = gen_rtx_NEG (mode, r);
26361 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
26364 /* Newton-Raphson approximation of floating point divide with just 2 passes
26365 (either single precision floating point, or newer machines with higher
26366 accuracy estimates). Support both scalar and vector divide. Assumes no
26367 trapping math and finite arguments. */
26369 static void
26370 rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
26372 enum machine_mode mode = GET_MODE (dst);
26373 rtx x0, e0, e1, y1, u0, v0;
26374 enum insn_code code = optab_handler (smul_optab, mode);
26375 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26376 rtx one = rs6000_load_constant_and_splat (mode, dconst1);
26378 gcc_assert (code != CODE_FOR_nothing);
26380 /* x0 = 1./d estimate */
26381 x0 = gen_reg_rtx (mode);
26382 emit_insn (gen_rtx_SET (VOIDmode, x0,
26383 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26384 UNSPEC_FRES)));
26386 e0 = gen_reg_rtx (mode);
26387 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - (d * x0) */
26389 e1 = gen_reg_rtx (mode);
26390 rs6000_emit_madd (e1, e0, e0, e0); /* e1 = (e0 * e0) + e0 */
26392 y1 = gen_reg_rtx (mode);
26393 rs6000_emit_madd (y1, e1, x0, x0); /* y1 = (e1 * x0) + x0 */
26395 u0 = gen_reg_rtx (mode);
26396 emit_insn (gen_mul (u0, n, y1)); /* u0 = n * y1 */
26398 v0 = gen_reg_rtx (mode);
26399 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - (d * u0) */
26401 rs6000_emit_madd (dst, v0, y1, u0); /* dst = (v0 * y1) + u0 */
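/* Annotation: a minimal scalar sketch of the recurrence above, for
reference only ("recip_estimate" is a hypothetical stand-in for the
hardware fres-style estimate):

float swdiv (float n, float d)
{
float x0 = recip_estimate (d); // x0 ~= 1/d
float e0 = 1.0f - d * x0; // nmsub
float y1 = (e0 * e0 + e0) * x0 + x0; // x0 * (1 + e0 + e0^2)
float u0 = n * y1;
float v0 = n - d * u0; // nmsub: quotient residual
return v0 * y1 + u0; // one Newton step on n * y1
}

Since d*x0 = 1 - e0, the exact reciprocal is x0 / (1 - e0)
= x0 * (1 + e0 + e0^2 + ...); y1 matches that series through the
e0^2 term, and the final u0/v0 step refines the full quotient so the
last rounding happens on n/d rather than on 1/d. */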
26404 /* Newton-Raphson approximation of floating point divide that has a low
26405 precision estimate. Assumes no trapping math and finite arguments. */
26407 static void
26408 rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
26410 enum machine_mode mode = GET_MODE (dst);
26411 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
26412 enum insn_code code = optab_handler (smul_optab, mode);
26413 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26415 gcc_assert (code != CODE_FOR_nothing);
26417 one = rs6000_load_constant_and_splat (mode, dconst1);
26419 /* x0 = 1./d estimate */
26420 x0 = gen_reg_rtx (mode);
26421 emit_insn (gen_rtx_SET (VOIDmode, x0,
26422 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26423 UNSPEC_FRES)));
26425 e0 = gen_reg_rtx (mode);
26426 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - d * x0 */
26428 y1 = gen_reg_rtx (mode);
26429 rs6000_emit_madd (y1, e0, x0, x0); /* y1 = x0 + e0 * x0 */
26431 e1 = gen_reg_rtx (mode);
26432 emit_insn (gen_mul (e1, e0, e0)); /* e1 = e0 * e0 */
26434 y2 = gen_reg_rtx (mode);
26435 rs6000_emit_madd (y2, e1, y1, y1); /* y2 = y1 + e1 * y1 */
26437 e2 = gen_reg_rtx (mode);
26438 emit_insn (gen_mul (e2, e1, e1)); /* e2 = e1 * e1 */
26440 y3 = gen_reg_rtx (mode);
26441 rs6000_emit_madd (y3, e2, y2, y2); /* y3 = y2 + e2 * y2 */
26443 u0 = gen_reg_rtx (mode);
26444 emit_insn (gen_mul (u0, n, y3)); /* u0 = n * y3 */
26446 v0 = gen_reg_rtx (mode);
26447 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - d * u0 */
26449 rs6000_emit_madd (dst, v0, y3, u0); /* dst = u0 + v0 * y3 */
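/* Annotation: each madd/mul pair above multiplies one more factor
into a truncated geometric series:

y1 = x0 * (1 + e)
y2 = y1 * (1 + e^2) = x0 * (1 + e + e^2 + e^3)
y3 = y2 * (1 + e^4) = x0 * (1 + e + ... + e^7)

with e = 1 - d*x0, so every pass doubles the number of accurate bits;
a low-precision estimate therefore needs the third doubling that the
high-precision variant above can skip. */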
26452 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
26453 add a reg_note saying that this was a division. Support both scalar and
26454 vector divide. Assumes no trapping math and finite arguments. */
26456 void
26457 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
26459 enum machine_mode mode = GET_MODE (dst);
26461 if (RS6000_RECIP_HIGH_PRECISION_P (mode))
26462 rs6000_emit_swdiv_high_precision (dst, n, d);
26463 else
26464 rs6000_emit_swdiv_low_precision (dst, n, d);
26466 if (note_p)
26467 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
26470 /* Newton-Raphson approximation of single/double-precision floating point
26471 rsqrt. Assumes no trapping math and finite arguments. */
26473 void
26474 rs6000_emit_swrsqrt (rtx dst, rtx src)
26476 enum machine_mode mode = GET_MODE (src);
26477 rtx x0 = gen_reg_rtx (mode);
26478 rtx y = gen_reg_rtx (mode);
26479 int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
26480 REAL_VALUE_TYPE dconst3_2;
26481 int i;
26482 rtx halfthree;
26483 enum insn_code code = optab_handler (smul_optab, mode);
26484 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26486 gcc_assert (code != CODE_FOR_nothing);
26488 /* Load up the constant 1.5 either as a scalar, or as a vector. */
26489 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
26490 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
26492 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
26494 /* x0 = rsqrt estimate */
26495 emit_insn (gen_rtx_SET (VOIDmode, x0,
26496 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
26497 UNSPEC_RSQRT)));
26499 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
26500 rs6000_emit_msub (y, src, halfthree, src);
26502 for (i = 0; i < passes; i++)
26504 rtx x1 = gen_reg_rtx (mode);
26505 rtx u = gen_reg_rtx (mode);
26506 rtx v = gen_reg_rtx (mode);
26508 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
26509 emit_insn (gen_mul (u, x0, x0));
26510 rs6000_emit_nmsub (v, y, u, halfthree);
26511 emit_insn (gen_mul (x1, x0, v));
26512 x0 = x1;
26515 emit_move_insn (dst, x0);
26516 return;
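/* Annotation: the loop above is the standard Newton-Raphson step for
f(x) = 1/(x*x) - src, namely

x1 = x0 * (3 - src * x0 * x0) / 2
   = x0 * (1.5 - (0.5 * src) * x0 * x0),

which is why only the constant 1.5 and y = 0.5 * src need to be
materialized. Each pass roughly doubles the accurate bits, hence two
passes with the higher-accuracy estimate (TARGET_RECIP_PRECISION) and
three otherwise. */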
26519 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
26520 (Power7) targets. DST is the target, and SRC is the argument operand. */
26522 void
26523 rs6000_emit_popcount (rtx dst, rtx src)
26525 enum machine_mode mode = GET_MODE (dst);
26526 rtx tmp1, tmp2;
26528 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
26529 if (TARGET_POPCNTD)
26531 if (mode == SImode)
26532 emit_insn (gen_popcntdsi2 (dst, src));
26533 else
26534 emit_insn (gen_popcntddi2 (dst, src));
26535 return;
26538 tmp1 = gen_reg_rtx (mode);
26540 if (mode == SImode)
26542 emit_insn (gen_popcntbsi2 (tmp1, src));
26543 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
26544 NULL_RTX, 0);
26545 tmp2 = force_reg (SImode, tmp2);
26546 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
26548 else
26550 emit_insn (gen_popcntbdi2 (tmp1, src));
26551 tmp2 = expand_mult (DImode, tmp1,
26552 GEN_INT ((HOST_WIDE_INT)
26553 0x01010101 << 32 | 0x01010101),
26554 NULL_RTX, 0);
26555 tmp2 = force_reg (DImode, tmp2);
26556 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
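/* Annotation: in the popcntb fallback above, popcntb leaves the
population count of each byte in that byte; multiplying by 0x01010101
(or the 64-bit analogue) shifts-and-adds every byte count into the
most significant byte, which the final right shift by 24 (or 56)
extracts. A hypothetical 32-bit scalar equivalent:

x = per_byte_popcount (x); // what popcntb computes
x = x * 0x01010101; // byte sums collect in the top byte
return x >> 24; // total population count
*/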
26561 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
26562 target, and SRC is the argument operand. */
26564 void
26565 rs6000_emit_parity (rtx dst, rtx src)
26567 enum machine_mode mode = GET_MODE (dst);
26568 rtx tmp;
26570 tmp = gen_reg_rtx (mode);
26572 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
26573 if (TARGET_CMPB)
26575 if (mode == SImode)
26577 emit_insn (gen_popcntbsi2 (tmp, src));
26578 emit_insn (gen_paritysi2_cmpb (dst, tmp));
26580 else
26582 emit_insn (gen_popcntbdi2 (tmp, src));
26583 emit_insn (gen_paritydi2_cmpb (dst, tmp));
26585 return;
26588 if (mode == SImode)
26590 /* Is mult+shift >= shift+xor+shift+xor? */
26591 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
26593 rtx tmp1, tmp2, tmp3, tmp4;
26595 tmp1 = gen_reg_rtx (SImode);
26596 emit_insn (gen_popcntbsi2 (tmp1, src));
26598 tmp2 = gen_reg_rtx (SImode);
26599 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
26600 tmp3 = gen_reg_rtx (SImode);
26601 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
26603 tmp4 = gen_reg_rtx (SImode);
26604 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
26605 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
26607 else
26608 rs6000_emit_popcount (tmp, src);
26609 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
26611 else
26613 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
26614 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
26616 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
26618 tmp1 = gen_reg_rtx (DImode);
26619 emit_insn (gen_popcntbdi2 (tmp1, src));
26621 tmp2 = gen_reg_rtx (DImode);
26622 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
26623 tmp3 = gen_reg_rtx (DImode);
26624 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
26626 tmp4 = gen_reg_rtx (DImode);
26627 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
26628 tmp5 = gen_reg_rtx (DImode);
26629 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
26631 tmp6 = gen_reg_rtx (DImode);
26632 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
26633 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
26635 else
26636 rs6000_emit_popcount (tmp, src);
26637 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
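/* Annotation: parity is the low bit of the population count, and xor
preserves parity, so the shift/xor ladder above folds the per-byte
counts from popcntb in half repeatedly (>> 16 then >> 8 for SImode;
>> 32, >> 16, >> 8 for DImode) until all byte counts are xor-ed into
the low byte, whose bit 0 is the answer. When a multiply is cheap
enough -- the rs6000_cost comparisons above -- the full
popcount-by-multiply sequence is reused instead. */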
26641 /* Expand an Altivec constant permutation. Return true if we match
26642 an efficient implementation; false to fall back to VPERM. */
26644 bool
26645 altivec_expand_vec_perm_const (rtx operands[4])
26647 struct altivec_perm_insn {
26648 enum insn_code impl;
26649 unsigned char perm[16];
26651 static const struct altivec_perm_insn patterns[] = {
26652 { CODE_FOR_altivec_vpkuhum,
26653 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
26654 { CODE_FOR_altivec_vpkuwum,
26655 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
26656 { CODE_FOR_altivec_vmrghb,
26657 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
26658 { CODE_FOR_altivec_vmrghh,
26659 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
26660 { CODE_FOR_altivec_vmrghw,
26661 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
26662 { CODE_FOR_altivec_vmrglb,
26663 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
26664 { CODE_FOR_altivec_vmrglh,
26665 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
26666 { CODE_FOR_altivec_vmrglw,
26667 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
26670 unsigned int i, j, elt, which;
26671 unsigned char perm[16];
26672 rtx target, op0, op1, sel, x;
26673 bool one_vec;
26675 target = operands[0];
26676 op0 = operands[1];
26677 op1 = operands[2];
26678 sel = operands[3];
26680 /* Unpack the constant selector. */
26681 for (i = which = 0; i < 16; ++i)
26683 rtx e = XVECEXP (sel, 0, i);
26684 elt = INTVAL (e) & 31;
26685 which |= (elt < 16 ? 1 : 2);
26686 perm[i] = elt;
26689 /* Simplify the constant selector based on operands. */
26690 switch (which)
26692 default:
26693 gcc_unreachable ();
26695 case 3:
26696 one_vec = false;
26697 if (!rtx_equal_p (op0, op1))
26698 break;
26699 /* FALLTHRU */
26701 case 2:
26702 for (i = 0; i < 16; ++i)
26703 perm[i] &= 15;
26704 op0 = op1;
26705 one_vec = true;
26706 break;
26708 case 1:
26709 op1 = op0;
26710 one_vec = true;
26711 break;
26714 /* Look for splat patterns. */
26715 if (one_vec)
26717 elt = perm[0];
26719 for (i = 0; i < 16; ++i)
26720 if (perm[i] != elt)
26721 break;
26722 if (i == 16)
26724 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
26725 return true;
26728 if (elt % 2 == 0)
26730 for (i = 0; i < 16; i += 2)
26731 if (perm[i] != elt || perm[i + 1] != elt + 1)
26732 break;
26733 if (i == 16)
26735 x = gen_reg_rtx (V8HImode);
26736 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
26737 GEN_INT (elt / 2)));
26738 emit_move_insn (target, gen_lowpart (V16QImode, x));
26739 return true;
26743 if (elt % 4 == 0)
26745 for (i = 0; i < 16; i += 4)
26746 if (perm[i] != elt
26747 || perm[i + 1] != elt + 1
26748 || perm[i + 2] != elt + 2
26749 || perm[i + 3] != elt + 3)
26750 break;
26751 if (i == 16)
26753 x = gen_reg_rtx (V4SImode);
26754 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
26755 GEN_INT (elt / 4)));
26756 emit_move_insn (target, gen_lowpart (V16QImode, x));
26757 return true;
26762 /* Look for merge and pack patterns. */
26763 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
26765 bool swapped;
26767 elt = patterns[j].perm[0];
26768 if (perm[0] == elt)
26769 swapped = false;
26770 else if (perm[0] == elt + 16)
26771 swapped = true;
26772 else
26773 continue;
26774 for (i = 1; i < 16; ++i)
26776 elt = patterns[j].perm[i];
26777 if (swapped)
26778 elt = (elt >= 16 ? elt - 16 : elt + 16);
26779 else if (one_vec && elt >= 16)
26780 elt -= 16;
26781 if (perm[i] != elt)
26782 break;
26784 if (i == 16)
26786 enum insn_code icode = patterns[j].impl;
26787 enum machine_mode omode = insn_data[icode].operand[0].mode;
26788 enum machine_mode imode = insn_data[icode].operand[1].mode;
26790 if (swapped)
26791 x = op0, op0 = op1, op1 = x;
26792 if (imode != V16QImode)
26794 op0 = gen_lowpart (imode, op0);
26795 op1 = gen_lowpart (imode, op1);
26797 if (omode == V16QImode)
26798 x = target;
26799 else
26800 x = gen_reg_rtx (omode);
26801 emit_insn (GEN_FCN (icode) (x, op0, op1));
26802 if (omode != V16QImode)
26803 emit_move_insn (target, gen_lowpart (V16QImode, x));
26804 return true;
26808 return false;
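/* Annotation: a constant vperm selector names, for each of the 16
result bytes, one byte of the 32-byte concatenation of the two
inputs. The matcher above therefore recognizes a splat as a selector
repeating a single element index, and a merge/pack as a selector
equal to one of the fixed byte patterns in the table (possibly with
the inputs swapped, i.e. every index toggled by 16); anything else
falls back to an actual vperm with the selector loaded as a vector
constant. */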
26811 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
26812 Return true if we match an efficient implementation. */
26814 static bool
26815 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
26816 unsigned char perm0, unsigned char perm1)
26818 rtx x;
26820 /* If both selectors come from the same operand, fold to single op. */
26821 if ((perm0 & 2) == (perm1 & 2))
26823 if (perm0 & 2)
26824 op0 = op1;
26825 else
26826 op1 = op0;
26828 /* If both operands are equal, fold to simpler permutation. */
26829 if (rtx_equal_p (op0, op1))
26831 perm0 = perm0 & 1;
26832 perm1 = (perm1 & 1) + 2;
26834 /* If the first selector comes from the second operand, swap. */
26835 else if (perm0 & 2)
26837 if (perm1 & 2)
26838 return false;
26839 perm0 -= 2;
26840 perm1 += 2;
26841 x = op0, op0 = op1, op1 = x;
26843 /* If the second selector does not come from the second operand, fail. */
26844 else if ((perm1 & 2) == 0)
26845 return false;
26847 /* Success! */
26848 if (target != NULL)
26850 enum machine_mode vmode, dmode;
26851 rtvec v;
26853 vmode = GET_MODE (target);
26854 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
26855 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
26857 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
26858 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
26859 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
26860 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26862 return true;
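/* Annotation: for these two-element vectors each selector is a value
0..3, where bit 1 picks the operand and bit 0 the element within it.
The folding above canonicalizes until element perm0 comes from op0
and element perm1 from op1, at which point the permutation is a
simple VEC_SELECT of the four-element concatenation built here. */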
26865 bool
26866 rs6000_expand_vec_perm_const (rtx operands[4])
26868 rtx target, op0, op1, sel;
26869 unsigned char perm0, perm1;
26871 target = operands[0];
26872 op0 = operands[1];
26873 op1 = operands[2];
26874 sel = operands[3];
26876 /* Unpack the constant selector. */
26877 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
26878 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
26880 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
26883 /* Test whether a constant permutation is supported. */
26885 static bool
26886 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
26887 const unsigned char *sel)
26889 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
26890 if (TARGET_ALTIVEC)
26891 return true;
26893 /* Check for ps_merge* or evmerge* insns. */
26894 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
26895 || (TARGET_SPE && vmode == V2SImode))
26897 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
26898 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
26899 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
26902 return false;
26905 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
26907 static void
26908 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
26909 enum machine_mode vmode, unsigned nelt, rtx perm[])
26911 enum machine_mode imode;
26912 rtx x;
26914 imode = vmode;
26915 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
26917 imode = GET_MODE_INNER (vmode);
26918 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
26919 imode = mode_for_vector (imode, nelt);
26922 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
26923 x = expand_vec_perm (vmode, op0, op1, x, target);
26924 if (x != target)
26925 emit_move_insn (target, x);
26928 /* Expand an extract even operation. */
26930 void
26931 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
26933 enum machine_mode vmode = GET_MODE (target);
26934 unsigned i, nelt = GET_MODE_NUNITS (vmode);
26935 rtx perm[16];
26937 for (i = 0; i < nelt; i++)
26938 perm[i] = GEN_INT (i * 2);
26940 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
26943 /* Expand a vector interleave operation. */
26945 void
26946 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
26948 enum machine_mode vmode = GET_MODE (target);
26949 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
26950 rtx perm[16];
26952 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
26953 for (i = 0; i < nelt / 2; i++)
26955 perm[i * 2] = GEN_INT (i + high);
26956 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
26959 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
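/* Annotation: for a big-endian four-element vector, the "high"
interleave above builds the selector { 0, nelt, 1, nelt+1 }
= { 0, 4, 1, 5 }, alternating elements from the two inputs starting
at the chosen half; expand_vec_perm then maps this constant
permutation onto whatever merge instructions the target provides. */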
26962 /* Return an RTX representing where to find the function value of a
26963 function returning MODE. */
26964 static rtx
26965 rs6000_complex_function_value (enum machine_mode mode)
26967 unsigned int regno;
26968 rtx r1, r2;
26969 enum machine_mode inner = GET_MODE_INNER (mode);
26970 unsigned int inner_bytes = GET_MODE_SIZE (inner);
26972 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26973 regno = FP_ARG_RETURN;
26974 else
26976 regno = GP_ARG_RETURN;
26978 /* 32-bit is OK since it'll go in r3/r4. */
26979 if (TARGET_32BIT && inner_bytes >= 4)
26980 return gen_rtx_REG (mode, regno);
26983 if (inner_bytes >= 8)
26984 return gen_rtx_REG (mode, regno);
26986 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
26987 const0_rtx);
26988 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
26989 GEN_INT (inner_bytes));
26990 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
26993 /* Target hook for TARGET_FUNCTION_VALUE.
26995 On the SPE, both FPs and vectors are returned in r3.
26997 On RS/6000 an integer value is in r3 and a floating-point value is in
26998 fp1, unless -msoft-float. */
27000 static rtx
27001 rs6000_function_value (const_tree valtype,
27002 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
27003 bool outgoing ATTRIBUTE_UNUSED)
27005 enum machine_mode mode;
27006 unsigned int regno;
27008 /* Special handling for structs in darwin64. */
27009 if (TARGET_MACHO
27010 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
27012 CUMULATIVE_ARGS valcum;
27013 rtx valret;
27015 valcum.words = 0;
27016 valcum.fregno = FP_ARG_MIN_REG;
27017 valcum.vregno = ALTIVEC_ARG_MIN_REG;
27018 /* Do a trial code generation as if this were going to be passed as
27019 an argument; if any part goes in memory, we return NULL. */
27020 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
27021 if (valret)
27022 return valret;
27023 /* Otherwise fall through to standard ABI rules. */
27026 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
27028 /* Long long return values need to be split in the -mpowerpc64 32-bit ABI. */
27029 return gen_rtx_PARALLEL (DImode,
27030 gen_rtvec (2,
27031 gen_rtx_EXPR_LIST (VOIDmode,
27032 gen_rtx_REG (SImode, GP_ARG_RETURN),
27033 const0_rtx),
27034 gen_rtx_EXPR_LIST (VOIDmode,
27035 gen_rtx_REG (SImode,
27036 GP_ARG_RETURN + 1),
27037 GEN_INT (4))));
27039 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
27041 return gen_rtx_PARALLEL (DCmode,
27042 gen_rtvec (4,
27043 gen_rtx_EXPR_LIST (VOIDmode,
27044 gen_rtx_REG (SImode, GP_ARG_RETURN),
27045 const0_rtx),
27046 gen_rtx_EXPR_LIST (VOIDmode,
27047 gen_rtx_REG (SImode,
27048 GP_ARG_RETURN + 1),
27049 GEN_INT (4)),
27050 gen_rtx_EXPR_LIST (VOIDmode,
27051 gen_rtx_REG (SImode,
27052 GP_ARG_RETURN + 2),
27053 GEN_INT (8)),
27054 gen_rtx_EXPR_LIST (VOIDmode,
27055 gen_rtx_REG (SImode,
27056 GP_ARG_RETURN + 3),
27057 GEN_INT (12))));
27060 mode = TYPE_MODE (valtype);
27061 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
27062 || POINTER_TYPE_P (valtype))
27063 mode = TARGET_32BIT ? SImode : DImode;
27065 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27066 /* _Decimal128 must use an even/odd register pair. */
27067 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27068 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
27069 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
27070 regno = FP_ARG_RETURN;
27071 else if (TREE_CODE (valtype) == COMPLEX_TYPE
27072 && targetm.calls.split_complex_arg)
27073 return rs6000_complex_function_value (mode);
27074 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27075 return register is used in both cases, and we won't see V2DImode/V2DFmode
27076 for pure altivec, combine the two cases. */
27077 else if (TREE_CODE (valtype) == VECTOR_TYPE
27078 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
27079 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
27080 regno = ALTIVEC_ARG_RETURN;
27081 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27082 && (mode == DFmode || mode == DCmode
27083 || mode == TFmode || mode == TCmode))
27084 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27085 else
27086 regno = GP_ARG_RETURN;
27088 return gen_rtx_REG (mode, regno);
27091 /* Define how to find the value returned by a library function
27092 assuming the value has mode MODE. */
27093 rtx
27094 rs6000_libcall_value (enum machine_mode mode)
27096 unsigned int regno;
27098 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
27100 /* Long long return values need to be split in the -mpowerpc64 32-bit ABI. */
27101 return gen_rtx_PARALLEL (DImode,
27102 gen_rtvec (2,
27103 gen_rtx_EXPR_LIST (VOIDmode,
27104 gen_rtx_REG (SImode, GP_ARG_RETURN),
27105 const0_rtx),
27106 gen_rtx_EXPR_LIST (VOIDmode,
27107 gen_rtx_REG (SImode,
27108 GP_ARG_RETURN + 1),
27109 GEN_INT (4))));
27112 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27113 /* _Decimal128 must use an even/odd register pair. */
27114 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27115 else if (SCALAR_FLOAT_MODE_P (mode)
27116 && TARGET_HARD_FLOAT && TARGET_FPRS
27117 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
27118 regno = FP_ARG_RETURN;
27119 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27120 return register is used in both cases, and we won't see V2DImode/V2DFmode
27121 for pure altivec, combine the two cases. */
27122 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
27123 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
27124 regno = ALTIVEC_ARG_RETURN;
27125 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
27126 return rs6000_complex_function_value (mode);
27127 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27128 && (mode == DFmode || mode == DCmode
27129 || mode == TFmode || mode == TCmode))
27130 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27131 else
27132 regno = GP_ARG_RETURN;
27134 return gen_rtx_REG (mode, regno);
27138 /* Given FROM and TO register numbers, say whether this elimination is allowed.
27139 Frame pointer elimination is automatically handled.
27141 For the RS/6000, if frame pointer elimination is being done, we would like
27142 to convert ap into fp, not sp.
27144 We need r30 if -mminimal-toc was specified, and there are constant pool
27145 references. */
27147 static bool
27148 rs6000_can_eliminate (const int from, const int to)
27150 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
27151 ? ! frame_pointer_needed
27152 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
27153 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
27154 : true);
27157 /* Define the offset between two registers, FROM to be eliminated and its
27158 replacement TO, at the start of a routine. */
27159 HOST_WIDE_INT
27160 rs6000_initial_elimination_offset (int from, int to)
27162 rs6000_stack_t *info = rs6000_stack_info ();
27163 HOST_WIDE_INT offset;
27165 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27166 offset = info->push_p ? 0 : -info->total_size;
27167 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27169 offset = info->push_p ? 0 : -info->total_size;
27170 if (FRAME_GROWS_DOWNWARD)
27171 offset += info->fixed_size + info->vars_size + info->parm_size;
27173 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27174 offset = FRAME_GROWS_DOWNWARD
27175 ? info->fixed_size + info->vars_size + info->parm_size
27176 : 0;
27177 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27178 offset = info->total_size;
27179 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27180 offset = info->push_p ? info->total_size : 0;
27181 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
27182 offset = 0;
27183 else
27184 gcc_unreachable ();
27186 return offset;
27189 static rtx
27190 rs6000_dwarf_register_span (rtx reg)
27192 rtx parts[8];
27193 int i, words;
27194 unsigned regno = REGNO (reg);
27195 enum machine_mode mode = GET_MODE (reg);
27197 if (TARGET_SPE
27198 && regno < 32
27199 && (SPE_VECTOR_MODE (GET_MODE (reg))
27200 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
27201 && mode != SFmode && mode != SDmode && mode != SCmode)))
27203 else
27204 return NULL_RTX;
27206 regno = REGNO (reg);
27208 /* The duality of the SPE register size wreaks all kinds of havoc.
27209 This is a way of distinguishing r0 in 32-bits from r0 in
27210 64-bits. */
27211 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
27212 gcc_assert (words <= 4);
27213 for (i = 0; i < words; i++, regno++)
27215 if (BYTES_BIG_ENDIAN)
27217 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
27218 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
27220 else
27222 parts[2 * i] = gen_rtx_REG (SImode, regno);
27223 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
27227 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
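/* Annotation: the fictitious register numbers 1200+N above stand for
the upper 32-bit halves of the 64-bit SPE GPRs, letting the unwinder
describe a 64-bit value held in a nominally 32-bit register;
rs6000_dbx_register_number below asserts exactly this 1200..1231
range, and rs6000_init_dwarf_reg_sizes_extra records the 4-byte size
of the high parts in the unwind tables. */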
27230 /* Fill in sizes for SPE register high parts in table used by unwinder. */
27232 static void
27233 rs6000_init_dwarf_reg_sizes_extra (tree address)
27235 if (TARGET_SPE)
27237 int i;
27238 enum machine_mode mode = TYPE_MODE (char_type_node);
27239 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
27240 rtx mem = gen_rtx_MEM (BLKmode, addr);
27241 rtx value = gen_int_mode (4, mode);
27243 for (i = 1201; i < 1232; i++)
27245 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
27246 HOST_WIDE_INT offset
27247 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
27249 emit_move_insn (adjust_address (mem, mode, offset), value);
27254 /* Map internal gcc register numbers to DWARF2 register numbers. */
27256 unsigned int
27257 rs6000_dbx_register_number (unsigned int regno)
27259 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
27260 return regno;
27261 if (regno == LR_REGNO)
27262 return 108;
27263 if (regno == CTR_REGNO)
27264 return 109;
27265 if (CR_REGNO_P (regno))
27266 return regno - CR0_REGNO + 86;
27267 if (regno == CA_REGNO)
27268 return 101; /* XER */
27269 if (ALTIVEC_REGNO_P (regno))
27270 return regno - FIRST_ALTIVEC_REGNO + 1124;
27271 if (regno == VRSAVE_REGNO)
27272 return 356;
27273 if (regno == VSCR_REGNO)
27274 return 67;
27275 if (regno == SPE_ACC_REGNO)
27276 return 99;
27277 if (regno == SPEFSCR_REGNO)
27278 return 612;
27279 /* SPE high reg number. We get these values of regno from
27280 rs6000_dwarf_register_span. */
27281 gcc_assert (regno >= 1200 && regno < 1232);
27282 return regno;
27285 /* Target hook for eh_return_filter_mode. */
27286 static enum machine_mode
27287 rs6000_eh_return_filter_mode (void)
27289 return TARGET_32BIT ? SImode : word_mode;
27292 /* Target hook for scalar_mode_supported_p. */
27293 static bool
27294 rs6000_scalar_mode_supported_p (enum machine_mode mode)
27296 if (DECIMAL_FLOAT_MODE_P (mode))
27297 return default_decimal_float_supported_p ();
27298 else
27299 return default_scalar_mode_supported_p (mode);
27302 /* Target hook for vector_mode_supported_p. */
27303 static bool
27304 rs6000_vector_mode_supported_p (enum machine_mode mode)
27307 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
27308 return true;
27310 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
27311 return true;
27313 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
27314 return true;
27316 else
27317 return false;
27320 /* Target hook for invalid_arg_for_unprototyped_fn. */
27321 static const char *
27322 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
27324 return (!rs6000_darwin64_abi
27325 && typelist == 0
27326 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
27327 && (funcdecl == NULL_TREE
27328 || (TREE_CODE (funcdecl) == FUNCTION_DECL
27329 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
27330 ? N_("AltiVec argument passed to unprototyped function")
27331 : NULL;
27334 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
27335 setup by using the __stack_chk_fail_local hidden function instead of
27336 calling __stack_chk_fail directly. Otherwise it is better to call
27337 __stack_chk_fail directly. */
27339 static tree ATTRIBUTE_UNUSED
27340 rs6000_stack_protect_fail (void)
27342 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
27343 ? default_hidden_stack_protect_fail ()
27344 : default_external_stack_protect_fail ();
27347 void
27348 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
27349 int num_operands ATTRIBUTE_UNUSED)
27351 if (rs6000_warn_cell_microcode)
27353 const char *temp;
27354 int insn_code_number = recog_memoized (insn);
27355 location_t location = INSN_LOCATION (insn);
27357 /* Punt on insns we cannot recognize. */
27358 if (insn_code_number < 0)
27359 return;
27361 temp = get_insn_template (insn_code_number, insn);
27363 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
27364 warning_at (location, OPT_mwarn_cell_microcode,
27365 "emitting microcode insn %s\t[%s] #%d",
27366 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27367 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
27368 warning_at (location, OPT_mwarn_cell_microcode,
27369 "emitting conditional microcode insn %s\t[%s] #%d",
27370 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27375 /* Mask options that we want to support inside attribute((target)) and
27376 #pragma GCC target operations. Note, we do not include things like
27377 64/32-bit, endianness, hard/soft floating point, etc. that would have
27378 different calling sequences. */
27380 struct rs6000_opt_mask {
27381 const char *name; /* option name */
27382 int mask; /* mask to set */
27383 bool invert; /* invert sense of mask */
27384 bool valid_target; /* option is a target option */
27387 static struct rs6000_opt_mask const rs6000_opt_masks[] =
27389 { "altivec", MASK_ALTIVEC, false, true },
27390 { "cmpb", MASK_CMPB, false, true },
27391 { "dlmzb", MASK_DLMZB, false, true },
27392 { "fprnd", MASK_FPRND, false, true },
27393 { "hard-dfp", MASK_DFP, false, true },
27394 { "isel", MASK_ISEL, false, true },
27395 { "mfcrf", MASK_MFCRF, false, true },
27396 { "mfpgpr", MASK_MFPGPR, false, true },
27397 { "mulhw", MASK_MULHW, false, true },
27398 { "multiple", MASK_MULTIPLE, false, true },
27399 { "update", MASK_NO_UPDATE, true , true },
27400 { "popcntb", MASK_POPCNTB, false, true },
27401 { "popcntd", MASK_POPCNTD, false, true },
27402 { "powerpc-gfxopt", MASK_PPC_GFXOPT, false, true },
27403 { "powerpc-gpopt", MASK_PPC_GPOPT, false, true },
27404 { "recip-precision", MASK_RECIP_PRECISION, false, true },
27405 { "string", MASK_STRING, false, true },
27406 { "vsx", MASK_VSX, false, true },
27407 #ifdef MASK_64BIT
27408 #if TARGET_AIX_OS
27409 { "aix64", MASK_64BIT, false, false },
27410 { "aix32", MASK_64BIT, true, false },
27411 #else
27412 { "64", MASK_64BIT, false, false },
27413 { "32", MASK_64BIT, true, false },
27414 #endif
27415 #endif
27416 #ifdef MASK_EABI
27417 { "eabi", MASK_EABI, false, false },
27418 #endif
27419 #ifdef MASK_LITTLE_ENDIAN
27420 { "little", MASK_LITTLE_ENDIAN, false, false },
27421 { "big", MASK_LITTLE_ENDIAN, true, false },
27422 #endif
27423 #ifdef MASK_RELOCATABLE
27424 { "relocatable", MASK_RELOCATABLE, false, false },
27425 #endif
27426 #ifdef MASK_STRICT_ALIGN
27427 { "strict-align", MASK_STRICT_ALIGN, false, false },
27428 #endif
27429 { "soft-float", MASK_SOFT_FLOAT, false, false },
27430 { "string", MASK_STRING, false, false },
27433 /* Builtin mask mapping for printing the flags. */
27434 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
27436 { "altivec", RS6000_BTM_ALTIVEC, false, false },
27437 { "vsx", RS6000_BTM_VSX, false, false },
27438 { "spe", RS6000_BTM_SPE, false, false },
27439 { "paired", RS6000_BTM_PAIRED, false, false },
27440 { "fre", RS6000_BTM_FRE, false, false },
27441 { "fres", RS6000_BTM_FRES, false, false },
27442 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
27443 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
27444 { "popcntd", RS6000_BTM_POPCNTD, false, false },
27445 { "cell", RS6000_BTM_CELL, false, false },
27448 /* Option variables that we want to support inside attribute((target)) and
27449 #pragma GCC target operations. */
27451 struct rs6000_opt_var {
27452 const char *name; /* option name */
27453 size_t global_offset; /* offset of the option in global_options. */
27454 size_t target_offset; /* offset of the option in target options. */
27457 static struct rs6000_opt_var const rs6000_opt_vars[] =
27459 { "friz",
27460 offsetof (struct gcc_options, x_TARGET_FRIZ),
27461 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
27462 { "avoid-indexed-addresses",
27463 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
27464 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
27465 { "paired",
27466 offsetof (struct gcc_options, x_rs6000_paired_float),
27467 offsetof (struct cl_target_option, x_rs6000_paired_float), },
27468 { "longcall",
27469 offsetof (struct gcc_options, x_rs6000_default_long_calls),
27470 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
27473 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
27474 parsing. Return true if there were no errors. */
27476 static bool
27477 rs6000_inner_target_options (tree args, bool attr_p)
27479 bool ret = true;
27481 if (args == NULL_TREE)
27484 else if (TREE_CODE (args) == STRING_CST)
27486 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27487 char *q;
27489 while ((q = strtok (p, ",")) != NULL)
27491 bool error_p = false;
27492 bool not_valid_p = false;
27493 const char *cpu_opt = NULL;
27495 p = NULL;
27496 if (strncmp (q, "cpu=", 4) == 0)
27498 int cpu_index = rs6000_cpu_name_lookup (q+4);
27499 if (cpu_index >= 0)
27500 rs6000_cpu_index = cpu_index;
27501 else
27503 error_p = true;
27504 cpu_opt = q+4;
27507 else if (strncmp (q, "tune=", 5) == 0)
27509 int tune_index = rs6000_cpu_name_lookup (q+5);
27510 if (tune_index >= 0)
27511 rs6000_tune_index = tune_index;
27512 else
27514 error_p = true;
27515 cpu_opt = q+5;
27518 else
27520 size_t i;
27521 bool invert = false;
27522 char *r = q;
27524 error_p = true;
27525 if (strncmp (r, "no-", 3) == 0)
27527 invert = true;
27528 r += 3;
27531 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27532 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
27534 int mask = rs6000_opt_masks[i].mask;
27536 if (!rs6000_opt_masks[i].valid_target)
27537 not_valid_p = true;
27538 else
27540 error_p = false;
27541 target_flags_explicit |= mask;
27543 /* VSX needs altivec, so -mvsx automagically sets
27544 altivec. */
27545 if (mask == MASK_VSX && !invert)
27546 mask |= MASK_ALTIVEC;
27548 if (rs6000_opt_masks[i].invert)
27549 invert = !invert;
27551 if (invert)
27552 target_flags &= ~mask;
27553 else
27554 target_flags |= mask;
27556 break;
27559 if (error_p && !not_valid_p)
27561 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27562 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
27564 size_t j = rs6000_opt_vars[i].global_offset;
27565 *((int *) ((char *)&global_options + j)) = !invert;
27566 error_p = false;
27567 break;
27572 if (error_p)
27574 const char *eprefix, *esuffix;
27576 ret = false;
27577 if (attr_p)
27579 eprefix = "__attribute__((__target__(";
27580 esuffix = ")))";
27582 else
27584 eprefix = "#pragma GCC target ";
27585 esuffix = "";
27588 if (cpu_opt)
27589 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
27590 q, esuffix);
27591 else if (not_valid_p)
27592 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
27593 else
27594 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
27599 else if (TREE_CODE (args) == TREE_LIST)
27603 tree value = TREE_VALUE (args);
27604 if (value)
27606 bool ret2 = rs6000_inner_target_options (value, attr_p);
27607 if (!ret2)
27608 ret = false;
27610 args = TREE_CHAIN (args);
27612 while (args != NULL_TREE);
27615 else
27616 gcc_unreachable ();
27618 return ret;
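/* Annotation: the parser above accepts the same comma-separated
strings in both spellings, e.g.

#pragma GCC target ("cpu=power7,no-vsx")
void f (void) __attribute__ ((__target__ ("popcntd,isel")));

where each token is cpu=/tune= followed by a processor name, a mask
name from rs6000_opt_masks (optionally prefixed with "no-"), or one
of the variable options in rs6000_opt_vars such as "friz". */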
27621 /* Print out the target options as a list for -mdebug=target. */
27623 static void
27624 rs6000_debug_target_options (tree args, const char *prefix)
27626 if (args == NULL_TREE)
27627 fprintf (stderr, "%s<NULL>", prefix);
27629 else if (TREE_CODE (args) == STRING_CST)
27631 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27632 char *q;
27634 while ((q = strtok (p, ",")) != NULL)
27636 p = NULL;
27637 fprintf (stderr, "%s\"%s\"", prefix, q);
27638 prefix = ", ";
27642 else if (TREE_CODE (args) == TREE_LIST)
27646 tree value = TREE_VALUE (args);
27647 if (value)
27649 rs6000_debug_target_options (value, prefix);
27650 prefix = ", ";
27652 args = TREE_CHAIN (args);
27654 while (args != NULL_TREE);
27657 else
27658 gcc_unreachable ();
27660 return;
27664 /* Hook to validate attribute((target("..."))). */
27666 static bool
27667 rs6000_valid_attribute_p (tree fndecl,
27668 tree ARG_UNUSED (name),
27669 tree args,
27670 int flags)
27672 struct cl_target_option cur_target;
27673 bool ret;
27674 tree old_optimize = build_optimization_node ();
27675 tree new_target, new_optimize;
27676 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27678 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
27680 if (TARGET_DEBUG_TARGET)
27682 tree tname = DECL_NAME (fndecl);
27683 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
27684 if (tname)
27685 fprintf (stderr, "function: %.*s\n",
27686 (int) IDENTIFIER_LENGTH (tname),
27687 IDENTIFIER_POINTER (tname));
27688 else
27689 fprintf (stderr, "function: unknown\n");
27691 fprintf (stderr, "args:");
27692 rs6000_debug_target_options (args, " ");
27693 fprintf (stderr, "\n");
27695 if (flags)
27696 fprintf (stderr, "flags: 0x%x\n", flags);
27698 fprintf (stderr, "--------------------\n");
27701 old_optimize = build_optimization_node ();
27702 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27704 /* If the function changed the optimization levels as well as setting target
27705 options, start with the optimizations specified. */
27706 if (func_optimize && func_optimize != old_optimize)
27707 cl_optimization_restore (&global_options,
27708 TREE_OPTIMIZATION (func_optimize));
27710 /* The target attributes may also change some optimization flags, so update
27711 the optimization options if necessary. */
27712 cl_target_option_save (&cur_target, &global_options);
27713 rs6000_cpu_index = rs6000_tune_index = -1;
27714 ret = rs6000_inner_target_options (args, true);
27716 /* Set up any additional state. */
27717 if (ret)
27719 ret = rs6000_option_override_internal (false);
27720 new_target = build_target_option_node ();
27722 else
27723 new_target = NULL;
27725 new_optimize = build_optimization_node ();
27727 if (!new_target)
27728 ret = false;
27730 else if (fndecl)
27732 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
27734 if (old_optimize != new_optimize)
27735 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
27738 cl_target_option_restore (&global_options, &cur_target);
27740 if (old_optimize != new_optimize)
27741 cl_optimization_restore (&global_options,
27742 TREE_OPTIMIZATION (old_optimize));
27744 return ret;
27748 /* Hook to validate the current #pragma GCC target and set the state, and
27749 update the macros based on what was changed. If ARGS is NULL, then
27750 POP_TARGET is used to reset the options. */
27752 bool
27753 rs6000_pragma_target_parse (tree args, tree pop_target)
27755 tree prev_tree = build_target_option_node ();
27756 tree cur_tree;
27757 struct cl_target_option *prev_opt, *cur_opt;
27758 unsigned prev_bumask, cur_bumask, diff_bumask;
27759 int prev_flags, cur_flags, diff_flags;
27761 if (TARGET_DEBUG_TARGET)
27763 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
27764 fprintf (stderr, "args:");
27765 rs6000_debug_target_options (args, " ");
27766 fprintf (stderr, "\n");
27768 if (pop_target)
27770 fprintf (stderr, "pop_target:\n");
27771 debug_tree (pop_target);
27773 else
27774 fprintf (stderr, "pop_target: <NULL>\n");
27776 fprintf (stderr, "--------------------\n");
27779 if (! args)
27781 cur_tree = ((pop_target)
27782 ? pop_target
27783 : target_option_default_node);
27784 cl_target_option_restore (&global_options,
27785 TREE_TARGET_OPTION (cur_tree));
27787 else
27789 rs6000_cpu_index = rs6000_tune_index = -1;
27790 if (!rs6000_inner_target_options (args, false)
27791 || !rs6000_option_override_internal (false)
27792 || (cur_tree = build_target_option_node ()) == NULL_TREE)
27794 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
27795 fprintf (stderr, "invalid pragma\n");
27797 return false;
27801 target_option_current_node = cur_tree;
27803 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
27804 change the macros that are defined. */
27805 if (rs6000_target_modify_macros_ptr)
27807 prev_opt = TREE_TARGET_OPTION (prev_tree);
27808 prev_bumask = prev_opt->x_rs6000_builtin_mask;
27809 prev_flags = prev_opt->x_target_flags;
27811 cur_opt = TREE_TARGET_OPTION (cur_tree);
27812 cur_flags = cur_opt->x_target_flags;
27813 cur_bumask = cur_opt->x_rs6000_builtin_mask;
27815 diff_bumask = (prev_bumask ^ cur_bumask);
27816 diff_flags = (prev_flags ^ cur_flags);
27818 if ((diff_flags != 0) || (diff_bumask != 0))
27820 /* Delete old macros. */
27821 rs6000_target_modify_macros_ptr (false,
27822 prev_flags & diff_flags,
27823 prev_bumask & diff_bumask);
27825 /* Define new macros. */
27826 rs6000_target_modify_macros_ptr (true,
27827 cur_flags & diff_flags,
27828 cur_bumask & diff_bumask);
27832 return true;
27836 /* Remember the last target of rs6000_set_current_function. */
27837 static GTY(()) tree rs6000_previous_fndecl;
27839 /* Establish appropriate back-end context for processing the function
27840 FNDECL. The argument might be NULL to indicate processing at top
27841 level, outside of any function scope. */
27842 static void
27843 rs6000_set_current_function (tree fndecl)
27845 tree old_tree = (rs6000_previous_fndecl
27846 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
27847 : NULL_TREE);
27849 tree new_tree = (fndecl
27850 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
27851 : NULL_TREE);
27853 if (TARGET_DEBUG_TARGET)
27855 bool print_final = false;
27856 fprintf (stderr, "\n==================== rs6000_set_current_function");
27858 if (fndecl)
27859 fprintf (stderr, ", fndecl %s (%p)",
27860 (DECL_NAME (fndecl)
27861 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
27862 : "<unknown>"), (void *)fndecl);
27864 if (rs6000_previous_fndecl)
27865 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
27867 fprintf (stderr, "\n");
27868 if (new_tree)
27870 fprintf (stderr, "\nnew fndecl target specific options:\n");
27871 debug_tree (new_tree);
27872 print_final = true;
27875 if (old_tree)
27877 fprintf (stderr, "\nold fndecl target specific options:\n");
27878 debug_tree (old_tree);
27879 print_final = true;
27882 if (print_final)
27883 fprintf (stderr, "--------------------\n");
27886 /* Only change the context if the function changes. This hook is called
27887 several times in the course of compiling a function, and we don't want to
27888 slow things down too much or call target_reinit when it isn't safe. */
27889 if (fndecl && fndecl != rs6000_previous_fndecl)
27891 rs6000_previous_fndecl = fndecl;
27892 if (old_tree == new_tree)
27895 else if (new_tree)
27897 cl_target_option_restore (&global_options,
27898 TREE_TARGET_OPTION (new_tree));
27899 target_reinit ();
27902 else if (old_tree)
27904 struct cl_target_option *def
27905 = TREE_TARGET_OPTION (target_option_current_node);
27907 cl_target_option_restore (&global_options, def);
27908 target_reinit ();
27914 /* Save the current options */
27916 static void
27917 rs6000_function_specific_save (struct cl_target_option *ptr)
27919 ptr->rs6000_target_flags_explicit = target_flags_explicit;
27922 /* Restore the current options */
27924 static void
27925 rs6000_function_specific_restore (struct cl_target_option *ptr)
27927 target_flags_explicit = ptr->rs6000_target_flags_explicit;
27928 (void) rs6000_option_override_internal (false);
27931 /* Print the current options */
27933 static void
27934 rs6000_function_specific_print (FILE *file, int indent,
27935 struct cl_target_option *ptr)
27937 size_t i;
27938 int flags = ptr->x_target_flags;
27939 unsigned bu_mask = ptr->x_rs6000_builtin_mask;
27941 /* Print the various mask options. */
27942 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27943 if ((flags & rs6000_opt_masks[i].mask) != 0)
27945 flags &= ~ rs6000_opt_masks[i].mask;
27946 fprintf (file, "%*s-m%s%s\n", indent, "",
27947 rs6000_opt_masks[i].invert ? "no-" : "",
27948 rs6000_opt_masks[i].name);
27951 /* Print the various options that are variables. */
27952 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27954 size_t j = rs6000_opt_vars[i].target_offset;
27955 if (((signed char *) ptr)[j])
27956 fprintf (file, "%*s-m%s\n", indent, "",
27957 rs6000_opt_vars[i].name);
27960 /* Print the various builtin flags. */
27961 fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
27962 for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
27963 if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
27965 fprintf (file, "%*s%s builtins supported\n", indent, "",
27966 rs6000_builtin_mask_names[i].name);
27971 /* Hook to determine if one function can safely inline another. */
27973 static bool
27974 rs6000_can_inline_p (tree caller, tree callee)
27976 bool ret = false;
27977 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
27978 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
27980 /* If callee has no option attributes, then it is ok to inline. */
27981 if (!callee_tree)
27982 ret = true;
27984 /* If caller has no option attributes, but callee does then it is not ok to
27985 inline. */
27986 else if (!caller_tree)
27987 ret = false;
27989 else
27991 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
27992 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
27994 /* Callee's options should be a subset of the caller's, i.e. a vsx function
27995 can inline an altivec function but a non-vsx function can't inline a
27996 vsx function. */
27997 if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
27998 == callee_opts->x_target_flags)
27999 ret = true;
28002 if (TARGET_DEBUG_TARGET)
28003 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
28004 (DECL_NAME (caller)
28005 ? IDENTIFIER_POINTER (DECL_NAME (caller))
28006 : "<unknown>"),
28007 (DECL_NAME (callee)
28008 ? IDENTIFIER_POINTER (DECL_NAME (callee))
28009 : "<unknown>"),
28010 (ret ? "can" : "cannot"));
28012 return ret;
28015 /* Allocate a stack temp and fix up the address so it meets the particular
28016    memory requirements (either offsettable or REG+REG addressing).  */
28017
28018 rtx
28019 rs6000_allocate_stack_temp (enum machine_mode mode,
28020                             bool offsettable_p,
28021                             bool reg_reg_p)
28022 {
28023   rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
28024   rtx addr = XEXP (stack, 0);
28025   int strict_p = (reload_in_progress || reload_completed);
28026
28027   if (!legitimate_indirect_address_p (addr, strict_p))
28028     {
28029       if (offsettable_p
28030           && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
28031         stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
28032
28033       else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
28034         stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
28035     }
28036
28037   return stack;
28038 }
28039
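/* Editor's sketch of the two address shapes this helper guarantees
   (illustrative RTL, not emitted verbatim):

        offsettable:  (mem:DF (plus (reg 1) (const_int 64)))
        reg+reg:      (mem:DF (plus (reg 9) (reg 10)))

   When the slot's raw address fits neither requested form, it is forced
   into a base register so the reference degenerates to (mem (reg)).  */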
28040 /* Given a memory reference, if it is not reg or reg+reg addressing, convert
28041    it to such a form so it can be used with memory reference instructions
28042    like STFIWX that only take reg+reg addressing.  */
28043
28044 rtx
28045 rs6000_address_for_fpconvert (rtx x)
28046 {
28047   int strict_p = (reload_in_progress || reload_completed);
28048   rtx addr;
28049
28050   gcc_assert (MEM_P (x));
28051   addr = XEXP (x, 0);
28052   if (! legitimate_indirect_address_p (addr, strict_p)
28053       && ! legitimate_indexed_address_p (addr, strict_p))
28054     {
28055       if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
28056         {
28057           rtx reg = XEXP (addr, 0);
28058           HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
28059           rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
28060           gcc_assert (REG_P (reg));
28061           emit_insn (gen_add3_insn (reg, reg, size_rtx));
28062           addr = reg;
28063         }
28064       else if (GET_CODE (addr) == PRE_MODIFY)
28065         {
28066           rtx reg = XEXP (addr, 0);
28067           rtx expr = XEXP (addr, 1);
28068           gcc_assert (REG_P (reg));
28069           gcc_assert (GET_CODE (expr) == PLUS);
28070           emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
28071           addr = reg;
28072         }
28073
28074       x = replace_equiv_address (x, copy_addr_to_reg (addr));
28075     }
28076
28077   return x;
28078 }
28079
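/* Editor's example of the PRE_INC rewrite above (illustrative): a
   reference such as

        (mem:SI (pre_inc:SI (reg 9)))

   becomes an explicit "r9 = r9 + 4" followed by (mem:SI (reg 9)), the
   plain indirect form that stfiwx-class instructions accept.  */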
28080 /* Given a memory reference, if it is not in the form for altivec memory
28081    reference instructions (i.e. reg or reg+reg addressing with AND of -16),
28082    convert to the altivec format.  */
28083
28084 rtx
28085 rs6000_address_for_altivec (rtx x)
28086 {
28087   gcc_assert (MEM_P (x));
28088   if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
28089     {
28090       rtx addr = XEXP (x, 0);
28091       int strict_p = (reload_in_progress || reload_completed);
28092
28093       if (!legitimate_indexed_address_p (addr, strict_p)
28094           && !legitimate_indirect_address_p (addr, strict_p))
28095         addr = copy_to_mode_reg (Pmode, addr);
28096
28097       addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
28098       x = change_address (x, GET_MODE (x), addr);
28099     }
28100
28101   return x;
28102 }
28103
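/* Editor's note: the AND with -16 mirrors what the AltiVec lvx/stvx
   instructions do in hardware, clearing the low four address bits; for
   example, an address of 0x1003 accesses the 16-byte-aligned vector at
   0x1000.  */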
28104 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
28105
28106    On the RS/6000, all integer constants are acceptable; most won't be valid
28107    for particular insns, though.  Only easy FP constants are acceptable.  */
28108
28109 static bool
28110 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
28111 {
28112   if (rs6000_tls_referenced_p (x))
28113     return false;
28114
28115   return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
28116           || GET_MODE (x) == VOIDmode
28117           || (TARGET_POWERPC64 && mode == DImode)
28118           || easy_fp_constant (x, mode)
28119           || easy_vector_constant (x, mode));
28120 }
28121
28122
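/* Editor's example: a CONST_VECTOR of all zeros is an easy vector
   constant (a single vspltisw can materialize it) and so is legitimate
   here, while an arbitrary V4SI constant fails the test and must be
   loaded from the constant pool instead.  */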
28123 /* A function pointer under AIX is a pointer to a data area whose first word
28124 contains the actual address of the function, whose second word contains a
28125 pointer to its TOC, and whose third word contains a value to place in the
28126 static chain register (r11). Note that if we load the static chain, our
28127 "trampoline" need not have any executable code. */
28128
28129 void
28130 rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
28131 {
28132   rtx func_addr;
28133   rtx toc_reg;
28134   rtx sc_reg;
28135   rtx stack_ptr;
28136   rtx stack_toc_offset;
28137   rtx stack_toc_mem;
28138   rtx func_toc_offset;
28139   rtx func_toc_mem;
28140   rtx func_sc_offset;
28141   rtx func_sc_mem;
28142   rtx insn;
28143   rtx (*call_func) (rtx, rtx, rtx, rtx);
28144   rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);
28145
28146   stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28147   toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
28148
28149   /* Load up address of the actual function.  */
28150   func_desc = force_reg (Pmode, func_desc);
28151   func_addr = gen_reg_rtx (Pmode);
28152   emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
28153
28154   if (TARGET_32BIT)
28155     {
28156
28157       stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
28158       func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
28159       func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
28160       if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28161         {
28162           call_func = gen_call_indirect_aix32bit;
28163           call_value_func = gen_call_value_indirect_aix32bit;
28164         }
28165       else
28166         {
28167           call_func = gen_call_indirect_aix32bit_nor11;
28168           call_value_func = gen_call_value_indirect_aix32bit_nor11;
28169         }
28170     }
28171   else
28172     {
28173       stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
28174       func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
28175       func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
28176       if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28177         {
28178           call_func = gen_call_indirect_aix64bit;
28179           call_value_func = gen_call_value_indirect_aix64bit;
28180         }
28181       else
28182         {
28183           call_func = gen_call_indirect_aix64bit_nor11;
28184           call_value_func = gen_call_value_indirect_aix64bit_nor11;
28185         }
28186     }
28187
28188   /* Reserved spot to store the TOC.  */
28189   stack_toc_mem = gen_frame_mem (Pmode,
28190                                  gen_rtx_PLUS (Pmode,
28191                                                stack_ptr,
28192                                                stack_toc_offset));
28193
28194   gcc_assert (cfun);
28195   gcc_assert (cfun->machine);
28196
28197   /* Can we optimize saving the TOC in the prologue or do we need to do it at
28198      every call?  */
28199   if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
28200     cfun->machine->save_toc_in_prologue = true;
28201
28202   else
28203     {
28204       MEM_VOLATILE_P (stack_toc_mem) = 1;
28205       emit_move_insn (stack_toc_mem, toc_reg);
28206     }
28207
28208   /* Calculate the address to load the TOC of the called function.  We don't
28209      actually load this until the split after reload.  */
28210   func_toc_mem = gen_rtx_MEM (Pmode,
28211                               gen_rtx_PLUS (Pmode,
28212                                             func_desc,
28213                                             func_toc_offset));
28214
28215   /* If we have a static chain, load it up.  */
28216   if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28217     {
28218       func_sc_mem = gen_rtx_MEM (Pmode,
28219                                  gen_rtx_PLUS (Pmode,
28220                                                func_desc,
28221                                                func_sc_offset));
28222
28223       sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
28224       emit_move_insn (sc_reg, func_sc_mem);
28225     }
28226
28227   /* Create the call.  */
28228   if (value)
28229     insn = call_value_func (value, func_addr, flag, func_toc_mem,
28230                             stack_toc_mem);
28231   else
28232     insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);
28233
28234   emit_call_insn (insn);
28235 }
28236
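/* Editor's sketch of the kind of 64-bit sequence this expands to once the
   call pattern is split after reload (illustrative only; register choices
   and the 40(r1) TOC save slot are assumptions about the 64-bit ABI):

        ld    r12,0(r3)     slot 0: function address
        std   r2,40(r1)     save the caller's TOC
        mtctr r12
        ld    r11,16(r3)    slot 2: static chain, when loaded
        ld    r2,8(r3)      slot 1: callee's TOC
        bctrl
        ld    r2,40(r1)     restore the caller's TOC  */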
28237 /* Return whether we need to always update the saved TOC pointer when we
28238    update the stack pointer.  */
28239
28240 static bool
28241 rs6000_save_toc_in_prologue_p (void)
28242 {
28243   return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
28244 }
28245
28246 #ifdef HAVE_GAS_HIDDEN
28247 # define USE_HIDDEN_LINKONCE 1
28248 #else
28249 # define USE_HIDDEN_LINKONCE 0
28250 #endif
28251
28252 /* Fill in the label name that should be used for a 476 link stack thunk.  */
28253
28254 void
28255 get_ppc476_thunk_name (char name[32])
28256 {
28257   gcc_assert (TARGET_LINK_STACK);
28258
28259   if (USE_HIDDEN_LINKONCE)
28260     sprintf (name, "__ppc476.get_thunk");
28261   else
28262     ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
28263 }
28264
28265 /* This function emits the simple thunk routine that is used to preserve
28266    the link stack on the 476 cpu.  */
28267
28268 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
28269 static void
28270 rs6000_code_end (void)
28271 {
28272   char name[32];
28273   tree decl;
28274
28275   if (!TARGET_LINK_STACK)
28276     return;
28277
28278   get_ppc476_thunk_name (name);
28279
28280   decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
28281                      build_function_type_list (void_type_node, NULL_TREE));
28282   DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
28283                                    NULL_TREE, void_type_node);
28284   TREE_PUBLIC (decl) = 1;
28285   TREE_STATIC (decl) = 1;
28286
28287   if (USE_HIDDEN_LINKONCE)
28288     {
28289       DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
28290       targetm.asm_out.unique_section (decl, 0);
28291       switch_to_section (get_named_section (decl, NULL, 0));
28292       DECL_WEAK (decl) = 1;
28293       ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
28294       targetm.asm_out.globalize_label (asm_out_file, name);
28295       targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
28296       ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
28297     }
28298   else
28299     {
28300       switch_to_section (text_section);
28301       ASM_OUTPUT_LABEL (asm_out_file, name);
28302     }
28303
28304   DECL_INITIAL (decl) = make_node (BLOCK);
28305   current_function_decl = decl;
28306   init_function_start (decl);
28307   first_function_block_is_cold = false;
28308   /* Make sure unwind info is emitted for the thunk if needed.  */
28309   final_start_function (emit_barrier (), asm_out_file, 1);
28310
28311   fputs ("\tblr\n", asm_out_file);
28312
28313   final_end_function ();
28314   init_insn_lengths ();
28315   free_after_compilation (cfun);
28316   set_cfun (NULL);
28317   current_function_decl = NULL;
28318 }
28319
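/* Editor's note on how the thunk is used (a sketch, not from the source):
   code that needs its own address can say

        bl __ppc476.get_thunk
        mflr r30

   and because the thunk is a single blr, the bl/blr pair stays balanced
   on the 476's link stack, unlike the classic "bcl 20,31,$+4" idiom.  */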
28320 /* Add r30 to the hard reg set if the prologue sets it up and it is not
28321    pic_offset_table_rtx.  */
28322
28323 static void
28324 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
28325 {
28326   if (!TARGET_SINGLE_PIC_BASE
28327       && TARGET_TOC
28328       && TARGET_MINIMAL_TOC
28329       && get_pool_size () != 0)
28330     add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
28331 }
28332
28333 struct gcc_target targetm = TARGET_INITIALIZER;
28334
28335 #include "gt-rs6000.h"