/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2013 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
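
/* Note that min and max are function-like macros: both arguments are
   evaluated twice, so neither may be used with side-effecting
   arguments (e.g. min (i++, n)).  */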
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
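
/* Worked example of how the table above composes masks (illustrative
   only; nothing in the compiler stores this particular value):
   -mrecip=divf,rsqrtd ORs together the "divf" and "rsqrtd" entries:

     (RECIP_SF_DIV | RECIP_V4SF_DIV)      == 0x005
     (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT)  == 0x0a0
     combined mask                        == 0x0a5

   i.e. single-precision divide estimates plus double-precision
   reciprocal square root estimates, both scalar and vector.  */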
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add).  */
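
/* All entries below are scaled by COSTS_N_INSNS, which converts an
   instruction count into GCC's internal cost units (rtl.h defines it as
   (N) * 4, with COSTS_N_INSNS (1) being the cost of a simple add).  So
   COSTS_N_INSNS (17) for sdiv on a given core means a single-precision
   divide is modeled as costing roughly 17 times an add there.  */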
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
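
/* The #include above is the usual "X macro" idiom: rs6000-builtin.def
   invokes one of the RS6000_BUILTIN_* macros per builtin, and the
   definitions above turn each invocation into one initializer of this
   table.  As a sketch with placeholder names (the real entries live in
   rs6000-builtin.def), a .def line of the form

     RS6000_BUILTIN_2 (SOME_ENUM, "__builtin_some_name",
                       SOME_MASK, SOME_ATTR, CODE_FOR_some_insn)

   expands to the entry

     { "__builtin_some_name", CODE_FOR_some_insn, SOME_MASK, SOME_ATTR },

   i.e. the macro reorders its (ENUM, NAME, MASK, ATTR, ICODE) parameters
   into the struct's name/icode/mask/attr field order and drops the enum.  */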
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
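
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) yields 0x80000000
   (the %v0 bit) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) yields
   0x20000000 (the %v2 bit), matching the most-significant-bit-first
   layout of the VRSAVE mask described above.  */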
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
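
/* This is the same X-macro idiom used for rs6000_builtin_info above:
   every RS6000_CPU line in rs6000-cpus.def becomes one table row.  With
   placeholder names (the real lines live in rs6000-cpus.def), a .def line

     RS6000_CPU ("somecpu", PROCESSOR_SOMECPU, SOME_FLAGS)

   expands to the row

     { "somecpu", PROCESSOR_SOMECPU, SOME_FLAGS },  */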
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
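
/* Usage sketch: for a name present in the table, say "power7",
   rs6000_cpu_name_lookup ("power7") returns that row's index; for an
   unknown name it returns -1, which callers treat as an invalid
   -mcpu=/-mtune= value.  */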
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */
static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
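
/* The return expression is a round-up division.  For example, an 8-byte
   DFmode value held in 4-byte GPRs needs (8 + 4 - 1) / 4 = 2 consecutive
   registers, while the same value in an 8-byte PowerPC64 GPR needs
   (8 + 8 - 1) / 8 = 1.  */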
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  */
  if (VECTOR_MEM_VSX_P (mode))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	return ALTIVEC_REGNO_P (last_regno);
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  This excludes the 32-bit decimal float mode for
     now.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec modes only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be able
     to fit within the register set.  In the future, allow TImode in the
     Altivec or VSX registers.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
1681 #define DEBUG_FMT_ID "%-32s= "
1682 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1683 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1684 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
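/* For example, fprintf (stderr, DEBUG_FMT_D, "tls_size", 32) left-justifies
   the label in a 32-column field and prints:
   "tls_size                        = 32".  */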
1686 /* Print various interesting information with -mdebug=reg. */
1687 static void
1688 rs6000_debug_reg_global (void)
1690 static const char *const tf[2] = { "false", "true" };
1691 const char *nl = (const char *)0;
1692 int m;
1693 char costly_num[20];
1694 char nop_num[20];
1695 char flags_buffer[40];
1696 const char *costly_str;
1697 const char *nop_str;
1698 const char *trace_str;
1699 const char *abi_str;
1700 const char *cmodel_str;
1701 struct cl_target_option cl_opts;
1703 /* Map enum rs6000_vector to string. */
1704 static const char *rs6000_debug_vector_unit[] = {
1705 "none",
1706 "altivec",
1707 "vsx",
1708 "paired",
1709 "spe",
1710 "other"
1713 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1714 LAST_VIRTUAL_REGISTER);
1715 rs6000_debug_reg_print (0, 31, "gr");
1716 rs6000_debug_reg_print (32, 63, "fp");
1717 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1718 LAST_ALTIVEC_REGNO,
1719 "vs");
1720 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1721 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1722 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1723 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1724 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1725 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1726 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1727 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1729 fprintf (stderr,
1730 "\n"
1731 "d reg_class = %s\n"
1732 "f reg_class = %s\n"
1733 "v reg_class = %s\n"
1734 "wa reg_class = %s\n"
1735 "wd reg_class = %s\n"
1736 "wf reg_class = %s\n"
1737 "ws reg_class = %s\n\n",
1738 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1739 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1740 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1741 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1742 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1743 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1744 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1746 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1747 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1749 nl = "\n";
1750 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1751 GET_MODE_NAME (m),
1752 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1753 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1756 if (nl)
1757 fputs (nl, stderr);
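  /* With -mdebug=reg on a VSX target the loop above prints lines such as
     "Vector mode: V2DF  arithmetic: vsx      move: vsx" (illustrative; the
     exact spacing comes from the %-5s and %-8s formats).  */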
1759 if (rs6000_recip_control)
1761 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1763 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1764 if (rs6000_recip_bits[m])
1766 fprintf (stderr,
1767 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1768 GET_MODE_NAME (m),
1769 (RS6000_RECIP_AUTO_RE_P (m)
1770 ? "auto"
1771 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1772 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1773 ? "auto"
1774 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1777 fputs ("\n", stderr);
1780 if (rs6000_cpu_index >= 0)
1782 const char *name = processor_target_table[rs6000_cpu_index].name;
1783 HOST_WIDE_INT flags
1784 = processor_target_table[rs6000_cpu_index].target_enable;
1786 sprintf (flags_buffer, "-mcpu=%s flags", name);
1787 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
1789 else
1790 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
1792 if (rs6000_tune_index >= 0)
1794 const char *name = processor_target_table[rs6000_tune_index].name;
1795 HOST_WIDE_INT flags
1796 = processor_target_table[rs6000_tune_index].target_enable;
1798 sprintf (flags_buffer, "-mtune=%s flags", name);
1799 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
1801 else
1802 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
1804 cl_target_option_save (&cl_opts, &global_options);
1805 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
1806 rs6000_isa_flags);
1808 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
1809 rs6000_isa_flags_explicit);
1811 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
1812 rs6000_builtin_mask);
1814 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
1816 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
1817 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
1819 switch (rs6000_sched_costly_dep)
1821 case max_dep_latency:
1822 costly_str = "max_dep_latency";
1823 break;
1825 case no_dep_costly:
1826 costly_str = "no_dep_costly";
1827 break;
1829 case all_deps_costly:
1830 costly_str = "all_deps_costly";
1831 break;
1833 case true_store_to_load_dep_costly:
1834 costly_str = "true_store_to_load_dep_costly";
1835 break;
1837 case store_to_load_dep_costly:
1838 costly_str = "store_to_load_dep_costly";
1839 break;
1841 default:
1842 costly_str = costly_num;
1843 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1844 break;
1847 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
1849 switch (rs6000_sched_insert_nops)
1851 case sched_finish_regroup_exact:
1852 nop_str = "sched_finish_regroup_exact";
1853 break;
1855 case sched_finish_pad_groups:
1856 nop_str = "sched_finish_pad_groups";
1857 break;
1859 case sched_finish_none:
1860 nop_str = "sched_finish_none";
1861 break;
1863 default:
1864 nop_str = nop_num;
1865 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1866 break;
1869 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
1871 switch (rs6000_sdata)
1873 default:
1874 case SDATA_NONE:
1875 break;
1877 case SDATA_DATA:
1878 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
1879 break;
1881 case SDATA_SYSV:
1882 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
1883 break;
1885 case SDATA_EABI:
1886 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
1887 break;
1891 switch (rs6000_traceback)
1893 case traceback_default: trace_str = "default"; break;
1894 case traceback_none: trace_str = "none"; break;
1895 case traceback_part: trace_str = "part"; break;
1896 case traceback_full: trace_str = "full"; break;
1897 default: trace_str = "unknown"; break;
1900 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
1902 switch (rs6000_current_cmodel)
1904 case CMODEL_SMALL: cmodel_str = "small"; break;
1905 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
1906 case CMODEL_LARGE: cmodel_str = "large"; break;
1907 default: cmodel_str = "unknown"; break;
1910 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
1912 switch (rs6000_current_abi)
1914 case ABI_NONE: abi_str = "none"; break;
1915 case ABI_AIX: abi_str = "aix"; break;
1916 case ABI_V4: abi_str = "V4"; break;
1917 case ABI_DARWIN: abi_str = "darwin"; break;
1918 default: abi_str = "unknown"; break;
1921 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
1923 if (rs6000_altivec_abi)
1924 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
1926 if (rs6000_spe_abi)
1927 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
1929 if (rs6000_darwin64_abi)
1930 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
1932 if (rs6000_float_gprs)
1933 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
1935 if (TARGET_LINK_STACK)
1936 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
1938 fprintf (stderr, DEBUG_FMT_S, "plt-format",
1939 TARGET_SECURE_PLT ? "secure" : "bss");
1940 fprintf (stderr, DEBUG_FMT_S, "struct-return",
1941 aix_struct_return ? "aix" : "sysv");
1942 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
1943 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
1944 fprintf (stderr, DEBUG_FMT_S, "align_branch",
1945 tf[!!rs6000_align_branch_targets]);
1946 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
1947 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
1948 rs6000_long_double_type_size);
1949 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
1950 (int)rs6000_sched_restricted_insns_priority);
1951 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
1952 (int)END_BUILTINS);
1953 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
1954 (int)RS6000_BUILTIN_COUNT);
1957 /* Initialize the various global tables that are based on register size. */
1958 static void
1959 rs6000_init_hard_regno_mode_ok (bool global_init_p)
1961 int r, m, c;
1962 int align64;
1963 int align32;
1965 /* Precalculate REGNO_REG_CLASS. */
1966 rs6000_regno_regclass[0] = GENERAL_REGS;
1967 for (r = 1; r < 32; ++r)
1968 rs6000_regno_regclass[r] = BASE_REGS;
1970 for (r = 32; r < 64; ++r)
1971 rs6000_regno_regclass[r] = FLOAT_REGS;
1973 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1974 rs6000_regno_regclass[r] = NO_REGS;
1976 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1977 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1979 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1980 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1981 rs6000_regno_regclass[r] = CR_REGS;
1983 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1984 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1985 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1986 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1987 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1988 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1989 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1990 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1991 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1993 /* Precalculate vector information; this must be set up before the
1994 rs6000_hard_regno_nregs_internal calls below. */
1995 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1997 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1998 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1999 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
2002 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2003 rs6000_constraints[c] = NO_REGS;
2005 /* The VSX hardware allows native alignment for vectors, but we control whether
2006 the compiler believes it can use native alignment or must still use 128-bit alignment. */
2007 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2009 align64 = 64;
2010 align32 = 32;
2012 else
2014 align64 = 128;
2015 align32 = 128;
2018 /* V2DF mode, VSX only. */
2019 if (TARGET_VSX)
2021 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2022 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2023 rs6000_vector_align[V2DFmode] = align64;
2026 /* V4SF mode, either VSX or Altivec. */
2027 if (TARGET_VSX)
2029 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2030 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2031 rs6000_vector_align[V4SFmode] = align32;
2033 else if (TARGET_ALTIVEC)
2035 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2036 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2037 rs6000_vector_align[V4SFmode] = align32;
2040 /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
2041 and stores. */
2042 if (TARGET_ALTIVEC)
2044 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2045 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2046 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2047 rs6000_vector_align[V4SImode] = align32;
2048 rs6000_vector_align[V8HImode] = align32;
2049 rs6000_vector_align[V16QImode] = align32;
2051 if (TARGET_VSX)
2053 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2054 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2055 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2057 else
2059 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2060 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2061 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2065 /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
2066 Altivec doesn't have 64-bit support. */
2067 if (TARGET_VSX)
2069 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2070 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
2071 rs6000_vector_align[V2DImode] = align64;
2074 /* DFmode, see if we want to use the VSX unit. */
2075 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2077 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2078 rs6000_vector_mem[DFmode]
2079 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2080 rs6000_vector_align[DFmode] = align64;
2083 /* TODO add SPE and paired floating point vector support. */
2085 /* Register class constraints for the constraints that depend on compile
2086 switches. */
2087 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2088 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2090 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2091 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2093 if (TARGET_VSX)
2095 /* At present, we just use VSX_REGS, but we have different constraints
2096 based on the use, in case we want to fine tune the default register
2097 class used. wa = any VSX register, wf = register class to use for
2098 V4SF, wd = register class to use for V2DF, and ws = register class to
2099 use for DF scalars. */
2100 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2101 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2102 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2103 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2104 ? VSX_REGS
2105 : FLOAT_REGS);
2108 if (TARGET_ALTIVEC)
2109 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2111 /* Set up the reload helper functions. */
2112 if (TARGET_VSX || TARGET_ALTIVEC)
2114 if (TARGET_64BIT)
2116 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2117 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2118 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2119 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2120 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2121 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2122 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2123 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2124 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2125 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2126 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2127 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2128 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2130 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2131 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2134 else
2136 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2137 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2138 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2139 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2140 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2141 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2142 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2143 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2144 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2145 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2146 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2147 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2148 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2150 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2151 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2156 /* Precalculate HARD_REGNO_NREGS. */
2157 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2158 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2159 rs6000_hard_regno_nregs[m][r]
2160 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2162 /* Precalculate HARD_REGNO_MODE_OK. */
2163 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2164 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2165 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2166 rs6000_hard_regno_mode_ok_p[m][r] = true;
2168 /* Precalculate CLASS_MAX_NREGS sizes. */
2169 for (c = 0; c < LIM_REG_CLASSES; ++c)
2171 int reg_size;
2173 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2174 reg_size = UNITS_PER_VSX_WORD;
2176 else if (c == ALTIVEC_REGS)
2177 reg_size = UNITS_PER_ALTIVEC_WORD;
2179 else if (c == FLOAT_REGS)
2180 reg_size = UNITS_PER_FP_WORD;
2182 else
2183 reg_size = UNITS_PER_WORD;
2185 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2187 int reg_size2 = reg_size;
2189 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2190 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2191 && (m == TDmode || m == TFmode))
2192 reg_size2 = UNITS_PER_FP_WORD;
2194 rs6000_class_max_nregs[m][c]
2195 = (GET_MODE_SIZE (m) + reg_size2 - 1) / reg_size2;
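	/* Worked example: TFmode is 16 bytes, so for a VSX class whose
	   reg_size is UNITS_PER_VSX_WORD (16) the special case above forces
	   reg_size2 back to UNITS_PER_FP_WORD (8), giving
	   (16 + 8 - 1) / 8 = 2 registers rather than 1.  */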
2199 if (TARGET_E500_DOUBLE)
2200 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2202 /* Calculate the modes for which to automatically generate code using the
2203 reciprocal divide and square root instructions. In the future, possibly
2204 automatically generate the instructions even if the user did not specify
2205 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
2206 not accurate enough. */
2207 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2208 if (TARGET_FRES)
2209 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2210 if (TARGET_FRE)
2211 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2212 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2213 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2214 if (VECTOR_UNIT_VSX_P (V2DFmode))
2215 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2217 if (TARGET_FRSQRTES)
2218 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2219 if (TARGET_FRSQRTE)
2220 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2221 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2222 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2223 if (VECTOR_UNIT_VSX_P (V2DFmode))
2224 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2226 if (rs6000_recip_control)
2228 if (!flag_finite_math_only)
2229 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2230 if (flag_trapping_math)
2231 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2232 if (!flag_reciprocal_math)
2233 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2234 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2236 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2237 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2238 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2240 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2241 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2242 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2244 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2245 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2246 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2248 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2249 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2250 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2252 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2253 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2254 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2256 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2257 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2258 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2260 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2261 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2262 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2264 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2265 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2266 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
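  /* Illustrative flow, assuming -ffast-math so the warnings above do not
     fire: with TARGET_FRE set, DFmode has RS6000_RECIP_MASK_HAVE_RE, and if
     RECIP_DF_DIV is set in rs6000_recip_control the code above also sets
     RS6000_RECIP_MASK_AUTO_RE, so reciprocal-divide sequences are generated
     for DFmode automatically.  */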
2270 if (global_init_p || TARGET_DEBUG_TARGET)
2272 if (TARGET_DEBUG_REG)
2273 rs6000_debug_reg_global ();
2275 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2276 fprintf (stderr,
2277 "SImode variable mult cost = %d\n"
2278 "SImode constant mult cost = %d\n"
2279 "SImode short constant mult cost = %d\n"
2280 "DImode multipliciation cost = %d\n"
2281 "SImode division cost = %d\n"
2282 "DImode division cost = %d\n"
2283 "Simple fp operation cost = %d\n"
2284 "DFmode multiplication cost = %d\n"
2285 "SFmode division cost = %d\n"
2286 "DFmode division cost = %d\n"
2287 "cache line size = %d\n"
2288 "l1 cache size = %d\n"
2289 "l2 cache size = %d\n"
2290 "simultaneous prefetches = %d\n"
2291 "\n",
2292 rs6000_cost->mulsi,
2293 rs6000_cost->mulsi_const,
2294 rs6000_cost->mulsi_const9,
2295 rs6000_cost->muldi,
2296 rs6000_cost->divsi,
2297 rs6000_cost->divdi,
2298 rs6000_cost->fp,
2299 rs6000_cost->dmul,
2300 rs6000_cost->sdiv,
2301 rs6000_cost->ddiv,
2302 rs6000_cost->cache_line_size,
2303 rs6000_cost->l1_cache_size,
2304 rs6000_cost->l2_cache_size,
2305 rs6000_cost->simultaneous_prefetches);
2309 #if TARGET_MACHO
2310 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2312 static void
2313 darwin_rs6000_override_options (void)
2315 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2316 off. */
2317 rs6000_altivec_abi = 1;
2318 TARGET_ALTIVEC_VRSAVE = 1;
2319 rs6000_current_abi = ABI_DARWIN;
2321 if (DEFAULT_ABI == ABI_DARWIN
2322 && TARGET_64BIT)
2323 darwin_one_byte_bool = 1;
2325 if (TARGET_64BIT && ! TARGET_POWERPC64)
2327 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2328 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2330 if (flag_mkernel)
2332 rs6000_default_long_calls = 1;
2333 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
2336 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2337 Altivec. */
2338 if (!flag_mkernel && !flag_apple_kext
2339 && TARGET_64BIT
2340 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
2341 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2343 /* Unless the user (not the configurer) has explicitly overridden
2344 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2345 G4 unless targeting the kernel. */
2346 if (!flag_mkernel
2347 && !flag_apple_kext
2348 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2349 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
2350 && ! global_options_set.x_rs6000_cpu_index)
2352 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
2355 #endif
2357 /* If not otherwise specified by a target, make 'long double' equivalent to
2358 'double'. */
2360 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2361 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2362 #endif
2364 /* Return the builtin mask of the various options used that could affect which
2365 builtins were used. In the past we used target_flags, but we've run out of
2366 bits, and some options like SPE and PAIRED are no longer in
2367 target_flags. */
2369 HOST_WIDE_INT
2370 rs6000_builtin_mask_calculate (void)
2372 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2373 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2374 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2375 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2376 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2377 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2378 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2379 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2380 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2381 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
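/* For example, compiling with -maltivec -mvsx yields a mask containing
   RS6000_BTM_ALTIVEC | RS6000_BTM_VSX; adding -mcpu=cell would also set
   RS6000_BTM_CELL.  (Illustrative; the actual mask depends on every flag
   tested above.)  */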
2384 /* Override command line options. Mostly we process the processor type and
2385 sometimes adjust other TARGET_ options. */
2387 static bool
2388 rs6000_option_override_internal (bool global_init_p)
2390 bool ret = true;
2391 bool have_cpu = false;
2393 /* The default cpu requested at configure time, if any. */
2394 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2396 HOST_WIDE_INT set_masks;
2397 int cpu_index;
2398 int tune_index;
2399 struct cl_target_option *main_target_opt
2400 = ((global_init_p || target_option_default_node == NULL)
2401 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2403 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2404 library functions, so warn about it. The flag may be useful for
2405 performance studies from time to time though, so don't disable it
2406 entirely. */
2407 if (global_options_set.x_rs6000_alignment_flags
2408 && rs6000_alignment_flags == MASK_ALIGN_POWER
2409 && DEFAULT_ABI == ABI_DARWIN
2410 && TARGET_64BIT)
2411 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2412 " it is incompatible with the installed C and C++ libraries");
2414 /* Numerous experiments show that IRA-based loop pressure
2415 calculation works better for RTL loop invariant motion on targets
2416 with enough (>= 32) registers. It is an expensive optimization,
2417 so it is on only for peak performance. */
2418 if (optimize >= 3 && global_init_p)
2419 flag_ira_loop_pressure = 1;
2421 /* Set the pointer size. */
2422 if (TARGET_64BIT)
2424 rs6000_pmode = (int)DImode;
2425 rs6000_pointer_size = 64;
2427 else
2429 rs6000_pmode = (int)SImode;
2430 rs6000_pointer_size = 32;
2433 /* Some OSs don't support saving the high part of 64-bit registers on context
2434 switch. Other OSs don't support saving Altivec registers. On those OSs,
2435 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
2436 if the user wants either, the user must explicitly specify them and we
2437 won't interfere with the user's specification. */
2439 set_masks = POWERPC_MASKS;
2440 #ifdef OS_MISSING_POWERPC64
2441 if (OS_MISSING_POWERPC64)
2442 set_masks &= ~OPTION_MASK_POWERPC64;
2443 #endif
2444 #ifdef OS_MISSING_ALTIVEC
2445 if (OS_MISSING_ALTIVEC)
2446 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
2447 #endif
2450 /* Don't let the processor default override flags given explicitly. */
2450 set_masks &= ~rs6000_isa_flags_explicit;
2452 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
2453 the cpu in a target attribute or pragma, but did not specify a tuning
2454 option, use the cpu for the tuning option rather than the option specified
2455 with -mtune on the command line. Process a '--with-cpu' configuration
2456 request as an implicit --cpu. */
2457 if (rs6000_cpu_index >= 0)
2459 cpu_index = rs6000_cpu_index;
2460 have_cpu = true;
2462 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2464 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2465 have_cpu = true;
2467 else if (implicit_cpu)
2469 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
2470 have_cpu = true;
2472 else
2474 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
2475 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2476 have_cpu = false;
2479 gcc_assert (cpu_index >= 0);
2481 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
2482 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
2483 with those from the cpu, except for options that were explicitly set. If
2484 we don't have a cpu, do not override the target bits set in
2485 TARGET_DEFAULT. */
2486 if (have_cpu)
2488 rs6000_isa_flags &= ~set_masks;
2489 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2490 & set_masks);
2492 else
2493 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
2494 & ~rs6000_isa_flags_explicit);
2496 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
2497 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
2498 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
2499 to using rs6000_isa_flags, we need to do the initialization here. */
2500 if (!have_cpu)
2501 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
2503 if (rs6000_tune_index >= 0)
2504 tune_index = rs6000_tune_index;
2505 else if (have_cpu)
2506 rs6000_tune_index = tune_index = cpu_index;
2507 else
2509 size_t i;
2510 enum processor_type tune_proc
2511 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2513 tune_index = -1;
2514 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2515 if (processor_target_table[i].processor == tune_proc)
2517 rs6000_tune_index = tune_index = i;
2518 break;
2522 gcc_assert (tune_index >= 0);
2523 rs6000_cpu = processor_target_table[tune_index].processor;
2525 /* Pick defaults for SPE related control flags. Do this early to make sure
2526 that the TARGET_ macros are representative ASAP. */
2528 int spe_capable_cpu =
2529 (rs6000_cpu == PROCESSOR_PPC8540
2530 || rs6000_cpu == PROCESSOR_PPC8548);
2532 if (!global_options_set.x_rs6000_spe_abi)
2533 rs6000_spe_abi = spe_capable_cpu;
2535 if (!global_options_set.x_rs6000_spe)
2536 rs6000_spe = spe_capable_cpu;
2538 if (!global_options_set.x_rs6000_float_gprs)
2539 rs6000_float_gprs =
2540 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2541 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2542 : 0);
2545 if (global_options_set.x_rs6000_spe_abi
2546 && rs6000_spe_abi
2547 && !TARGET_SPE_ABI)
2548 error ("not configured for SPE ABI");
2550 if (global_options_set.x_rs6000_spe
2551 && rs6000_spe
2552 && !TARGET_SPE)
2553 error ("not configured for SPE instruction set");
2555 if (main_target_opt != NULL
2556 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2557 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2558 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2559 error ("target attribute or pragma changes SPE ABI");
2561 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2562 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2563 || rs6000_cpu == PROCESSOR_PPCE5500)
2565 if (TARGET_ALTIVEC)
2566 error ("AltiVec not supported in this target");
2567 if (TARGET_SPE)
2568 error ("SPE not supported in this target");
2570 if (rs6000_cpu == PROCESSOR_PPCE6500)
2572 if (TARGET_SPE)
2573 error ("SPE not supported in this target");
2576 /* Disable Cell microcode if we are optimizing for the Cell
2577 and not optimizing for size. */
2578 if (rs6000_gen_cell_microcode == -1)
2579 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2580 && !optimize_size);
2582 /* If we are optimizing big endian systems for space and it's OK to
2583 use instructions that would be microcoded on the Cell, use the
2584 load/store multiple and string instructions. */
2585 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2586 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
2587 | OPTION_MASK_STRING);
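  /* The & ~rs6000_isa_flags_explicit above means each of the two options is
     turned on only if the user did not set it explicitly on the command
     line.  */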
2589 /* Don't allow -mmultiple or -mstring on little endian systems
2590 unless the cpu is a 750, because the hardware doesn't support the
2591 instructions used in little endian mode, and causes an alignment
2592 trap. The 750 does not cause an alignment trap (except when the
2593 target is unaligned). */
2595 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2597 if (TARGET_MULTIPLE)
2599 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
2600 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
2601 warning (0, "-mmultiple is not supported on little endian systems");
2604 if (TARGET_STRING)
2606 rs6000_isa_flags &= ~OPTION_MASK_STRING;
2607 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
2608 warning (0, "-mstring is not supported on little endian systems");
2612 /* Add some warnings for VSX. */
2613 if (TARGET_VSX)
2615 const char *msg = NULL;
2616 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2617 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2619 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
2620 msg = N_("-mvsx requires hardware floating point");
2621 else
2622 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
2624 else if (TARGET_PAIRED_FLOAT)
2625 msg = N_("-mvsx and -mpaired are incompatible");
2626 /* The hardware will allow VSX and little endian, but until we make sure
2627 things like vector select, etc. work, don't allow VSX on little-endian
2628 systems at this point. */
2629 else if (!BYTES_BIG_ENDIAN)
2630 msg = N_("-mvsx used with little endian code");
2631 else if (TARGET_AVOID_XFORM > 0)
2632 msg = N_("-mvsx needs indexed addressing");
2633 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
2634 & OPTION_MASK_ALTIVEC))
2636 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
2637 msg = N_("-mvsx and -mno-altivec are incompatible");
2638 else
2639 msg = N_("-mno-altivec disables vsx");
2642 if (msg)
2644 warning (0, msg);
2645 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
2646 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
2650 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2651 unless the user explicitly used the -mno-<option> to disable the code. */
2652 if (TARGET_VSX)
2653 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2654 else if (TARGET_POPCNTD)
2655 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
2656 else if (TARGET_DFP)
2657 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
2658 else if (TARGET_CMPB)
2659 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
2660 else if (TARGET_FPRND)
2661 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
2662 else if (TARGET_POPCNTB)
2663 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
2664 else if (TARGET_ALTIVEC)
2665 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
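  /* So, for example, a plain -mvsx also turns on AltiVec, popcntd and the
     earlier ISA options, except any the user explicitly disabled; the exact
     set is whatever ISA_2_6_MASKS_SERVER contains.  */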
2667 /* E500mc does "better" if we inline more aggressively. Respect the
2668 user's opinion, though. */
2669 if (rs6000_block_move_inline_limit == 0
2670 && (rs6000_cpu == PROCESSOR_PPCE500MC
2671 || rs6000_cpu == PROCESSOR_PPCE500MC64
2672 || rs6000_cpu == PROCESSOR_PPCE5500
2673 || rs6000_cpu == PROCESSOR_PPCE6500))
2674 rs6000_block_move_inline_limit = 128;
2676 /* store_one_arg depends on expand_block_move to handle at least the
2677 size of reg_parm_stack_space. */
2678 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2679 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
2681 if (global_init_p)
2683 /* If the appropriate debug option is enabled, replace the target hooks
2684 with debug versions that call the real version and then print
2685 debugging information. */
2686 if (TARGET_DEBUG_COST)
2688 targetm.rtx_costs = rs6000_debug_rtx_costs;
2689 targetm.address_cost = rs6000_debug_address_cost;
2690 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2693 if (TARGET_DEBUG_ADDR)
2695 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2696 targetm.legitimize_address = rs6000_debug_legitimize_address;
2697 rs6000_secondary_reload_class_ptr
2698 = rs6000_debug_secondary_reload_class;
2699 rs6000_secondary_memory_needed_ptr
2700 = rs6000_debug_secondary_memory_needed;
2701 rs6000_cannot_change_mode_class_ptr
2702 = rs6000_debug_cannot_change_mode_class;
2703 rs6000_preferred_reload_class_ptr
2704 = rs6000_debug_preferred_reload_class;
2705 rs6000_legitimize_reload_address_ptr
2706 = rs6000_debug_legitimize_reload_address;
2707 rs6000_mode_dependent_address_ptr
2708 = rs6000_debug_mode_dependent_address;
2711 if (rs6000_veclibabi_name)
2713 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2714 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2715 else
2717 error ("unknown vectorization library ABI type (%s) for "
2718 "-mveclibabi= switch", rs6000_veclibabi_name);
2719 ret = false;
2724 if (!global_options_set.x_rs6000_long_double_type_size)
2726 if (main_target_opt != NULL
2727 && (main_target_opt->x_rs6000_long_double_type_size
2728 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2729 error ("target attribute or pragma changes long double size");
2730 else
2731 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2734 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2735 if (!global_options_set.x_rs6000_ieeequad)
2736 rs6000_ieeequad = 1;
2737 #endif
2739 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2740 target attribute or pragma which automatically enables both options,
2741 unless the altivec ABI was set. This is set by default for 64-bit, but
2742 not for 32-bit. */
2743 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2744 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
2745 & ~rs6000_isa_flags_explicit);
2747 /* Enable Altivec ABI for AIX -maltivec. */
2748 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2750 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2751 error ("target attribute or pragma changes AltiVec ABI");
2752 else
2753 rs6000_altivec_abi = 1;
2756 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2757 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2758 be explicitly overridden in either case. */
2759 if (TARGET_ELF)
2761 if (!global_options_set.x_rs6000_altivec_abi
2762 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2764 if (main_target_opt != NULL &&
2765 !main_target_opt->x_rs6000_altivec_abi)
2766 error ("target attribute or pragma changes AltiVec ABI");
2767 else
2768 rs6000_altivec_abi = 1;
2772 /* Set the Darwin64 ABI as default for 64-bit Darwin.
2773 So far, the only darwin64 targets are also MACH-O. */
2774 if (TARGET_MACHO
2775 && DEFAULT_ABI == ABI_DARWIN
2776 && TARGET_64BIT)
2778 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
2779 error ("target attribute or pragma changes darwin64 ABI");
2780 else
2782 rs6000_darwin64_abi = 1;
2783 /* Default to natural alignment, for better performance. */
2784 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2788 /* Place FP constants in the constant pool instead of TOC
2789 if section anchors are enabled. */
2790 if (flag_section_anchors)
2791 TARGET_NO_FP_IN_TOC = 1;
2793 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2794 SUBTARGET_OVERRIDE_OPTIONS;
2795 #endif
2796 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2797 SUBSUBTARGET_OVERRIDE_OPTIONS;
2798 #endif
2799 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2800 SUB3TARGET_OVERRIDE_OPTIONS;
2801 #endif
2803 /* For the E500 family of cores, reset the single/double FP flags to let us
2804 check that they remain constant across attributes or pragmas. Also,
2805 clear a possible request for string instructions, which are not supported
2806 and which we might have silently enabled above for -Os.
2808 For other families, clear ISEL in case it was set implicitly. */
2811 switch (rs6000_cpu)
2813 case PROCESSOR_PPC8540:
2814 case PROCESSOR_PPC8548:
2815 case PROCESSOR_PPCE500MC:
2816 case PROCESSOR_PPCE500MC64:
2817 case PROCESSOR_PPCE5500:
2818 case PROCESSOR_PPCE6500:
2820 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
2821 rs6000_double_float = TARGET_E500_DOUBLE;
2823 rs6000_isa_flags &= ~OPTION_MASK_STRING;
2825 break;
2827 default:
2829 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
2830 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
2832 break;
2835 if (main_target_opt)
2837 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
2838 error ("target attribute or pragma changes single precision floating "
2839 "point");
2840 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
2841 error ("target attribute or pragma changes double precision floating "
2842 "point");
2845 /* Detect invalid option combinations with E500. */
2846 CHECK_E500_OPTIONS;
2848 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2849 && rs6000_cpu != PROCESSOR_POWER5
2850 && rs6000_cpu != PROCESSOR_POWER6
2851 && rs6000_cpu != PROCESSOR_POWER7
2852 && rs6000_cpu != PROCESSOR_PPCA2
2853 && rs6000_cpu != PROCESSOR_CELL
2854 && rs6000_cpu != PROCESSOR_PPC476);
2855 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2856 || rs6000_cpu == PROCESSOR_POWER5
2857 || rs6000_cpu == PROCESSOR_POWER7);
2858 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2859 || rs6000_cpu == PROCESSOR_POWER5
2860 || rs6000_cpu == PROCESSOR_POWER6
2861 || rs6000_cpu == PROCESSOR_POWER7
2862 || rs6000_cpu == PROCESSOR_PPCE500MC
2863 || rs6000_cpu == PROCESSOR_PPCE500MC64
2864 || rs6000_cpu == PROCESSOR_PPCE5500
2865 || rs6000_cpu == PROCESSOR_PPCE6500);
2867 /* Allow debug switches to override the above settings. These are set to -1
2868 in rs6000.opt to indicate the user hasn't directly set the switch. */
2869 if (TARGET_ALWAYS_HINT >= 0)
2870 rs6000_always_hint = TARGET_ALWAYS_HINT;
2872 if (TARGET_SCHED_GROUPS >= 0)
2873 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2875 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
2876 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2878 rs6000_sched_restricted_insns_priority
2879 = (rs6000_sched_groups ? 1 : 0);
2881 /* Handle -msched-costly-dep option. */
2882 rs6000_sched_costly_dep
2883 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
2885 if (rs6000_sched_costly_dep_str)
2887 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2888 rs6000_sched_costly_dep = no_dep_costly;
2889 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2890 rs6000_sched_costly_dep = all_deps_costly;
2891 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2892 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2893 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2894 rs6000_sched_costly_dep = store_to_load_dep_costly;
2895 else
2896 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2897 atoi (rs6000_sched_costly_dep_str));
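  /* E.g. -msched-costly-dep=true_store_to_load selects
     true_store_to_load_dep_costly, while any other string falls through to
     atoi above and is treated as a numeric dependence cost.  */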
2900 /* Handle -minsert-sched-nops option. */
2901 rs6000_sched_insert_nops
2902 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2904 if (rs6000_sched_insert_nops_str)
2906 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2907 rs6000_sched_insert_nops = sched_finish_none;
2908 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2909 rs6000_sched_insert_nops = sched_finish_pad_groups;
2910 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2911 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2912 else
2913 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2914 atoi (rs6000_sched_insert_nops_str));
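  /* E.g. -minsert-sched-nops=pad selects sched_finish_pad_groups, while any
     other string falls through to atoi above and is used as a numeric
     insertion scheme.  */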
2917 if (global_init_p)
2919 #ifdef TARGET_REGNAMES
2920 /* If the user desires alternate register names, copy in the
2921 alternate names now. */
2922 if (TARGET_REGNAMES)
2923 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2924 #endif
2926 /* Set aix_struct_return last, after the ABI is determined.
2927 If -maix-struct-return or -msvr4-struct-return was explicitly
2928 used, don't override with the ABI default. */
2929 if (!global_options_set.x_aix_struct_return)
2930 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2932 #if 0
2933 /* IBM XL compiler defaults to unsigned bitfields. */
2934 if (TARGET_XL_COMPAT)
2935 flag_signed_bitfields = 0;
2936 #endif
2938 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2939 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2941 if (TARGET_TOC)
2942 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2944 /* We can only guarantee the availability of DI pseudo-ops when
2945 assembling for 64-bit targets. */
2946 if (!TARGET_64BIT)
2948 targetm.asm_out.aligned_op.di = NULL;
2949 targetm.asm_out.unaligned_op.di = NULL;
2953 /* Set branch target alignment, if not optimizing for size. */
2954 if (!optimize_size)
2956 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
2957 8-byte aligned to avoid misprediction by the branch predictor. */
2958 if (rs6000_cpu == PROCESSOR_TITAN
2959 || rs6000_cpu == PROCESSOR_CELL)
2961 if (align_functions <= 0)
2962 align_functions = 8;
2963 if (align_jumps <= 0)
2964 align_jumps = 8;
2965 if (align_loops <= 0)
2966 align_loops = 8;
2968 if (rs6000_align_branch_targets)
2970 if (align_functions <= 0)
2971 align_functions = 16;
2972 if (align_jumps <= 0)
2973 align_jumps = 16;
2974 if (align_loops <= 0)
2976 can_override_loop_align = 1;
2977 align_loops = 16;
2980 if (align_jumps_max_skip <= 0)
2981 align_jumps_max_skip = 15;
2982 if (align_loops_max_skip <= 0)
2983 align_loops_max_skip = 15;
2986 /* Arrange to save and restore machine status around nested functions. */
2987 init_machine_status = rs6000_init_machine_status;
2989 /* We should always be splitting complex arguments, but we can't break
2990 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2991 if (DEFAULT_ABI != ABI_AIX)
2992 targetm.calls.split_complex_arg = NULL;
2995 /* Initialize rs6000_cost with the appropriate target costs. */
2996 if (optimize_size)
2997 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2998 else
2999 switch (rs6000_cpu)
3001 case PROCESSOR_RS64A:
3002 rs6000_cost = &rs64a_cost;
3003 break;
3005 case PROCESSOR_MPCCORE:
3006 rs6000_cost = &mpccore_cost;
3007 break;
3009 case PROCESSOR_PPC403:
3010 rs6000_cost = &ppc403_cost;
3011 break;
3013 case PROCESSOR_PPC405:
3014 rs6000_cost = &ppc405_cost;
3015 break;
3017 case PROCESSOR_PPC440:
3018 rs6000_cost = &ppc440_cost;
3019 break;
3021 case PROCESSOR_PPC476:
3022 rs6000_cost = &ppc476_cost;
3023 break;
3025 case PROCESSOR_PPC601:
3026 rs6000_cost = &ppc601_cost;
3027 break;
3029 case PROCESSOR_PPC603:
3030 rs6000_cost = &ppc603_cost;
3031 break;
3033 case PROCESSOR_PPC604:
3034 rs6000_cost = &ppc604_cost;
3035 break;
3037 case PROCESSOR_PPC604e:
3038 rs6000_cost = &ppc604e_cost;
3039 break;
3041 case PROCESSOR_PPC620:
3042 rs6000_cost = &ppc620_cost;
3043 break;
3045 case PROCESSOR_PPC630:
3046 rs6000_cost = &ppc630_cost;
3047 break;
3049 case PROCESSOR_CELL:
3050 rs6000_cost = &ppccell_cost;
3051 break;
3053 case PROCESSOR_PPC750:
3054 case PROCESSOR_PPC7400:
3055 rs6000_cost = &ppc750_cost;
3056 break;
3058 case PROCESSOR_PPC7450:
3059 rs6000_cost = &ppc7450_cost;
3060 break;
3062 case PROCESSOR_PPC8540:
3063 case PROCESSOR_PPC8548:
3064 rs6000_cost = &ppc8540_cost;
3065 break;
3067 case PROCESSOR_PPCE300C2:
3068 case PROCESSOR_PPCE300C3:
3069 rs6000_cost = &ppce300c2c3_cost;
3070 break;
3072 case PROCESSOR_PPCE500MC:
3073 rs6000_cost = &ppce500mc_cost;
3074 break;
3076 case PROCESSOR_PPCE500MC64:
3077 rs6000_cost = &ppce500mc64_cost;
3078 break;
3080 case PROCESSOR_PPCE5500:
3081 rs6000_cost = &ppce5500_cost;
3082 break;
3084 case PROCESSOR_PPCE6500:
3085 rs6000_cost = &ppce6500_cost;
3086 break;
3088 case PROCESSOR_TITAN:
3089 rs6000_cost = &titan_cost;
3090 break;
3092 case PROCESSOR_POWER4:
3093 case PROCESSOR_POWER5:
3094 rs6000_cost = &power4_cost;
3095 break;
3097 case PROCESSOR_POWER6:
3098 rs6000_cost = &power6_cost;
3099 break;
3101 case PROCESSOR_POWER7:
3102 rs6000_cost = &power7_cost;
3103 break;
3105 case PROCESSOR_PPCA2:
3106 rs6000_cost = &ppca2_cost;
3107 break;
3109 default:
3110 gcc_unreachable ();
3113 if (global_init_p)
3115 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3116 rs6000_cost->simultaneous_prefetches,
3117 global_options.x_param_values,
3118 global_options_set.x_param_values);
3119 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3120 global_options.x_param_values,
3121 global_options_set.x_param_values);
3122 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3123 rs6000_cost->cache_line_size,
3124 global_options.x_param_values,
3125 global_options_set.x_param_values);
3126 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3127 global_options.x_param_values,
3128 global_options_set.x_param_values);
3130 /* Increase loop peeling limits based on performance analysis. */
3131 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3132 global_options.x_param_values,
3133 global_options_set.x_param_values);
3134 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3135 global_options.x_param_values,
3136 global_options_set.x_param_values);
3138 /* If using typedef char *va_list, signal that
3139 __builtin_va_start (&ap, 0) can be optimized to
3140 ap = __builtin_next_arg (0). */
3141 if (DEFAULT_ABI != ABI_V4)
3142 targetm.expand_builtin_va_start = NULL;
3145 /* Set up single/double float flags.
3146 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3147 then set both flags. */
3148 if (TARGET_HARD_FLOAT && TARGET_FPRS
3149 && rs6000_single_float == 0 && rs6000_double_float == 0)
3150 rs6000_single_float = rs6000_double_float = 1;
3152 /* If not explicitly specified via option, decide whether to generate indexed
3153 load/store instructions. */
3154 if (TARGET_AVOID_XFORM == -1)
3155 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3156 DERAT mispredict penalty. However, the LVE and STVE AltiVec instructions
3157 need indexed accesses, and the type used is the scalar type of the element
3158 being loaded or stored. */
3159 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3160 && !TARGET_ALTIVEC);
3162 /* Set the -mrecip options. */
3163 if (rs6000_recip_name)
3165 char *p = ASTRDUP (rs6000_recip_name);
3166 char *q;
3167 unsigned int mask, i;
3168 bool invert;
3170 while ((q = strtok (p, ",")) != NULL)
3172 p = NULL;
3173 if (*q == '!')
3175 invert = true;
3176 q++;
3178 else
3179 invert = false;
3181 if (!strcmp (q, "default"))
3182 mask = ((TARGET_RECIP_PRECISION)
3183 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3184 else
3186 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3187 if (!strcmp (q, recip_options[i].string))
3189 mask = recip_options[i].mask;
3190 break;
3193 if (i == ARRAY_SIZE (recip_options))
3195 error ("unknown option for -mrecip=%s", q);
3196 invert = false;
3197 mask = 0;
3198 ret = false;
3202 if (invert)
3203 rs6000_recip_control &= ~mask;
3204 else
3205 rs6000_recip_control |= mask;
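  /* Illustrative parse, assuming "rsqrtd" is the recip_options entry for the
     double-precision rsqrt bits: -mrecip=default,!rsqrtd first ORs in the
     default mask (high or low precision, per TARGET_RECIP_PRECISION) and
     then, because of the leading '!', clears the rsqrtd bits from
     rs6000_recip_control.  */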
3209 /* Set the builtin mask of the various options used that could affect which
3210 builtins were used. In the past we used target_flags, but we've run out
3211 of bits, and some options like SPE and PAIRED are no longer in
3212 target_flags. */
3213 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3214 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3216 fprintf (stderr,
3217 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
3218 rs6000_builtin_mask);
3219 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
3222 /* Initialize all of the registers. */
3223 rs6000_init_hard_regno_mode_ok (global_init_p);
3225 /* Save the initial options in case the user uses function-specific options. */
3226 if (global_init_p)
3227 target_option_default_node = target_option_current_node
3228 = build_target_option_node ();
3230 /* If not explicitly specified via option, decide whether to generate the
3231 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3232 if (TARGET_LINK_STACK == -1)
3233 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3235 return ret;
3238 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3239 define the target cpu type. */
3241 static void
3242 rs6000_option_override (void)
3244 (void) rs6000_option_override_internal (true);
3248 /* Implement targetm.vectorize.builtin_mask_for_load. */
3249 static tree
3250 rs6000_builtin_mask_for_load (void)
3252 if (TARGET_ALTIVEC || TARGET_VSX)
3253 return altivec_builtin_mask_for_load;
3254 else
3255 return 0;
3258 /* Implement LOOP_ALIGN. */
3259 int
3260 rs6000_loop_align (rtx label)
3262 basic_block bb;
3263 int ninsns;
3265 /* Don't override loop alignment if -falign-loops was specified. */
3266 if (!can_override_loop_align)
3267 return align_loops_log;
3269 bb = BLOCK_FOR_INSN (label);
3270 ninsns = num_loop_insns(bb->loop_father);
3272 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3273 if (ninsns > 4 && ninsns <= 8
3274 && (rs6000_cpu == PROCESSOR_POWER4
3275 || rs6000_cpu == PROCESSOR_POWER5
3276 || rs6000_cpu == PROCESSOR_POWER6
3277 || rs6000_cpu == PROCESSOR_POWER7))
3278 return 5;
3279 else
3280 return align_loops_log;
3283 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3284 static int
3285 rs6000_loop_align_max_skip (rtx label)
3287 return (1 << rs6000_loop_align (label)) - 1;
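/* E.g. when rs6000_loop_align returns 5 (the 32-byte case above), the
   maximum skip is (1 << 5) - 1 = 31 bytes.  */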
3290 /* Return true iff a data reference of type TYPE can reach vector alignment
3291 (16) after applying N iterations. This routine does not determine
3292 how many iterations are required to reach the desired alignment. */
3294 static bool
3295 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3297 if (is_packed)
3298 return false;
3300 if (TARGET_32BIT)
3302 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3303 return true;
3305 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3306 return true;
3308 return false;
3310 else
3312 if (TARGET_MACHO)
3313 return false;
3315 /* Assume that all other types are naturally aligned. CHECKME! */
3316 return true;
3320 /* Return true if the vector misalignment factor is supported by the
3321 target. */
3322 static bool
3323 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3324 const_tree type,
3325 int misalignment,
3326 bool is_packed)
3328 if (TARGET_VSX)
3330 /* Return false if the movmisalign pattern is not supported for this mode. */
3331 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3332 return false;
3334 if (misalignment == -1)
3336 /* Misalignment factor is unknown at compile time but we know
3337 it's word aligned. */
3338 if (rs6000_vector_alignment_reachable (type, is_packed))
3340 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3342 if (element_size == 64 || element_size == 32)
3343 return true;
3346 return false;
3349 /* VSX supports word-aligned vectors. */
3350 if (misalignment % 4 == 0)
3351 return true;
3353 return false;
3356 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3357 static int
3358 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3359 tree vectype, int misalign)
3361 unsigned elements;
3362 tree elem_type;
3364 switch (type_of_cost)
3366 case scalar_stmt:
3367 case scalar_load:
3368 case scalar_store:
3369 case vector_stmt:
3370 case vector_load:
3371 case vector_store:
3372 case vec_to_scalar:
3373 case scalar_to_vec:
3374 case cond_branch_not_taken:
3375 return 1;
3377 case vec_perm:
3378 if (TARGET_VSX)
3379 return 3;
3380 else
3381 return 1;
3383 case vec_promote_demote:
3384 if (TARGET_VSX)
3385 return 4;
3386 else
3387 return 1;
3389 case cond_branch_taken:
3390 return 3;
3392 case unaligned_load:
3393 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3395 elements = TYPE_VECTOR_SUBPARTS (vectype);
3396 if (elements == 2)
3397 /* Double word aligned. */
3398 return 2;
3400 if (elements == 4)
3402 switch (misalign)
3404 case 8:
3405 /* Double word aligned. */
3406 return 2;
3408 case -1:
3409 /* Unknown misalignment. */
3410 case 4:
3411 case 12:
3412 /* Word aligned. */
3413 return 22;
3415 default:
3416 gcc_unreachable ();
3421 if (TARGET_ALTIVEC)
3422 /* Misaligned loads are not supported. */
3423 gcc_unreachable ();
3425 return 2;
3427 case unaligned_store:
3428 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3430 elements = TYPE_VECTOR_SUBPARTS (vectype);
3431 if (elements == 2)
3432 /* Double word aligned. */
3433 return 2;
3435 if (elements == 4)
3437 switch (misalign)
3439 case 8:
3440 /* Double word aligned. */
3441 return 2;
3443 case -1:
3444 /* Unknown misalignment. */
3445 case 4:
3446 case 12:
3447 /* Word aligned. */
3448 return 23;
3450 default:
3451 gcc_unreachable ();
3456 if (TARGET_ALTIVEC)
3457 /* Misaligned stores are not supported. */
3458 gcc_unreachable ();
3460 return 2;
3462 case vec_construct:
3463 elements = TYPE_VECTOR_SUBPARTS (vectype);
3464 elem_type = TREE_TYPE (vectype);
3465 /* 32-bit vectors loaded into registers are stored as double
3466 precision, so we need n/2 converts in addition to the usual
3467 n/2 merges to construct a vector of short floats from them. */
3468 if (SCALAR_FLOAT_TYPE_P (elem_type)
3469 && TYPE_PRECISION (elem_type) == 32)
3470 return elements + 1;
3471 else
3472 return elements / 2 + 1;
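      /* Worked example: constructing a V4SF vector (four 32-bit floats)
	 costs 4 + 1 = 5, covering the n/2 converts plus n/2 merges noted
	 above, while a V2DI vector costs 2 / 2 + 1 = 2.  */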
3474 default:
3475 gcc_unreachable ();
3479 /* Implement targetm.vectorize.preferred_simd_mode. */
3481 static enum machine_mode
3482 rs6000_preferred_simd_mode (enum machine_mode mode)
3484 if (TARGET_VSX)
3485 switch (mode)
3487 case DFmode:
3488 return V2DFmode;
3489 default:;
3491 if (TARGET_ALTIVEC || TARGET_VSX)
3492 switch (mode)
3494 case SFmode:
3495 return V4SFmode;
3496 case DImode:
3497 return V2DImode;
3498 case SImode:
3499 return V4SImode;
3500 case HImode:
3501 return V8HImode;
3502 case QImode:
3503 return V16QImode;
3504 default:;
3506 if (TARGET_SPE)
3507 switch (mode)
3509 case SFmode:
3510 return V2SFmode;
3511 case SImode:
3512 return V2SImode;
3513 default:;
3515 if (TARGET_PAIRED_FLOAT
3516 && mode == SFmode)
3517 return V2SFmode;
3518 return word_mode;
3521 typedef struct _rs6000_cost_data
3523 struct loop *loop_info;
3524 unsigned cost[3];
3525 } rs6000_cost_data;
3527 /* Test for likely overcommitment of vector hardware resources. If a
3528 loop iteration is relatively large, and too large a percentage of
3529 instructions in the loop are vectorized, the cost model may not
3530 adequately reflect delays from unavailable vector resources.
3531 Penalize the loop body cost for this case. */
3533 static void
3534 rs6000_density_test (rs6000_cost_data *data)
3536 const int DENSITY_PCT_THRESHOLD = 85;
3537 const int DENSITY_SIZE_THRESHOLD = 70;
3538 const int DENSITY_PENALTY = 10;
3539 struct loop *loop = data->loop_info;
3540 basic_block *bbs = get_loop_body (loop);
3541 int nbbs = loop->num_nodes;
3542 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3543 int i, density_pct;
3545 for (i = 0; i < nbbs; i++)
3547 basic_block bb = bbs[i];
3548 gimple_stmt_iterator gsi;
3550 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3552 gimple stmt = gsi_stmt (gsi);
3553 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3555 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3556 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3557 not_vec_cost++;
3561 free (bbs);
3562 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3564 if (density_pct > DENSITY_PCT_THRESHOLD
3565 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3567 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3568 if (dump_enabled_p ())
3569 dump_printf_loc (MSG_NOTE, vect_location,
3570 "density %d%%, cost %d exceeds threshold, penalizing "
3571 "loop body cost by %d%%", density_pct,
3572 vec_cost + not_vec_cost, DENSITY_PENALTY);
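/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): the density arithmetic above on sample numbers.  With
   vec_cost = 90 and not_vec_cost = 10, density_pct = 90 > 85 and the total
   size 100 > 70, so the body cost is inflated to 90 * 110 / 100 = 99.  */
#if 0
static int
density_penalized_cost (int vec_cost, int not_vec_cost)
{
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    return vec_cost * (100 + 10) / 100;
  return vec_cost;
}
#endif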
3576 /* Implement targetm.vectorize.init_cost. */
3578 static void *
3579 rs6000_init_cost (struct loop *loop_info)
3581 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3582 data->loop_info = loop_info;
3583 data->cost[vect_prologue] = 0;
3584 data->cost[vect_body] = 0;
3585 data->cost[vect_epilogue] = 0;
3586 return data;
3589 /* Implement targetm.vectorize.add_stmt_cost. */
3591 static unsigned
3592 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3593 struct _stmt_vec_info *stmt_info, int misalign,
3594 enum vect_cost_model_location where)
3596 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3597 unsigned retval = 0;
3599 if (flag_vect_cost_model)
3601 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
3602 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
3603 misalign);
3604 /* Statements in an inner loop relative to the loop being
3605 vectorized are weighted more heavily. The value here is
3606 arbitrary and could potentially be improved with analysis. */
3607 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
3608 count *= 50; /* FIXME. */
3610 retval = (unsigned) (count * stmt_cost);
3611 cost_data->cost[where] += retval;
3614 return retval;
3617 /* Implement targetm.vectorize.finish_cost. */
3619 static void
3620 rs6000_finish_cost (void *data, unsigned *prologue_cost,
3621 unsigned *body_cost, unsigned *epilogue_cost)
3623 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3625 if (cost_data->loop_info)
3626 rs6000_density_test (cost_data);
3628 *prologue_cost = cost_data->cost[vect_prologue];
3629 *body_cost = cost_data->cost[vect_body];
3630 *epilogue_cost = cost_data->cost[vect_epilogue];
3633 /* Implement targetm.vectorize.destroy_cost_data. */
3635 static void
3636 rs6000_destroy_cost_data (void *data)
3638 free (data);
3641 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
3642 library with vectorized intrinsics. */
3644 static tree
3645 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3647 char name[32];
3648 const char *suffix = NULL;
3649 tree fntype, new_fndecl, bdecl = NULL_TREE;
3650 int n_args = 1;
3651 const char *bname;
3652 enum machine_mode el_mode, in_mode;
3653 int n, in_n;
3655 /* Libmass is suitable for unsafe math only, as it does not correctly
3656 support parts of IEEE (such as denormals) at the required precision.
3657 Only support it if we have VSX, so we can use the simd d2 or f4 functions.
3658 XXX: Add variable length support. */
3659 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3660 return NULL_TREE;
3662 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3663 n = TYPE_VECTOR_SUBPARTS (type_out);
3664 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3665 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3666 if (el_mode != in_mode
3667 || n != in_n)
3668 return NULL_TREE;
3670 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3672 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3673 switch (fn)
3675 case BUILT_IN_ATAN2:
3676 case BUILT_IN_HYPOT:
3677 case BUILT_IN_POW:
3678 n_args = 2;
3679 /* fall through */
3681 case BUILT_IN_ACOS:
3682 case BUILT_IN_ACOSH:
3683 case BUILT_IN_ASIN:
3684 case BUILT_IN_ASINH:
3685 case BUILT_IN_ATAN:
3686 case BUILT_IN_ATANH:
3687 case BUILT_IN_CBRT:
3688 case BUILT_IN_COS:
3689 case BUILT_IN_COSH:
3690 case BUILT_IN_ERF:
3691 case BUILT_IN_ERFC:
3692 case BUILT_IN_EXP2:
3693 case BUILT_IN_EXP:
3694 case BUILT_IN_EXPM1:
3695 case BUILT_IN_LGAMMA:
3696 case BUILT_IN_LOG10:
3697 case BUILT_IN_LOG1P:
3698 case BUILT_IN_LOG2:
3699 case BUILT_IN_LOG:
3700 case BUILT_IN_SIN:
3701 case BUILT_IN_SINH:
3702 case BUILT_IN_SQRT:
3703 case BUILT_IN_TAN:
3704 case BUILT_IN_TANH:
3705 bdecl = builtin_decl_implicit (fn);
3706 suffix = "d2"; /* pow -> powd2 */
3707 if (el_mode != DFmode
3708 || n != 2
3709 || !bdecl)
3710 return NULL_TREE;
3711 break;
3713 case BUILT_IN_ATAN2F:
3714 case BUILT_IN_HYPOTF:
3715 case BUILT_IN_POWF:
3716 n_args = 2;
3717 /* fall through */
3719 case BUILT_IN_ACOSF:
3720 case BUILT_IN_ACOSHF:
3721 case BUILT_IN_ASINF:
3722 case BUILT_IN_ASINHF:
3723 case BUILT_IN_ATANF:
3724 case BUILT_IN_ATANHF:
3725 case BUILT_IN_CBRTF:
3726 case BUILT_IN_COSF:
3727 case BUILT_IN_COSHF:
3728 case BUILT_IN_ERFF:
3729 case BUILT_IN_ERFCF:
3730 case BUILT_IN_EXP2F:
3731 case BUILT_IN_EXPF:
3732 case BUILT_IN_EXPM1F:
3733 case BUILT_IN_LGAMMAF:
3734 case BUILT_IN_LOG10F:
3735 case BUILT_IN_LOG1PF:
3736 case BUILT_IN_LOG2F:
3737 case BUILT_IN_LOGF:
3738 case BUILT_IN_SINF:
3739 case BUILT_IN_SINHF:
3740 case BUILT_IN_SQRTF:
3741 case BUILT_IN_TANF:
3742 case BUILT_IN_TANHF:
3743 bdecl = builtin_decl_implicit (fn);
3744 suffix = "4"; /* powf -> powf4 */
3745 if (el_mode != SFmode
3746 || n != 4
3747 || !bdecl)
3748 return NULL_TREE;
3749 break;
3751 default:
3752 return NULL_TREE;
3755 else
3756 return NULL_TREE;
3758 gcc_assert (suffix != NULL);
3759 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
3760 if (!bname)
3761 return NULL_TREE;
3763 strcpy (name, bname + sizeof ("__builtin_") - 1);
3764 strcat (name, suffix);
3766 if (n_args == 1)
3767 fntype = build_function_type_list (type_out, type_in, NULL);
3768 else if (n_args == 2)
3769 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
3770 else
3771 gcc_unreachable ();
3773 /* Build a function declaration for the vectorized function. */
3774 new_fndecl = build_decl (BUILTINS_LOCATION,
3775 FUNCTION_DECL, get_identifier (name), fntype);
3776 TREE_PUBLIC (new_fndecl) = 1;
3777 DECL_EXTERNAL (new_fndecl) = 1;
3778 DECL_IS_NOVOPS (new_fndecl) = 1;
3779 TREE_READONLY (new_fndecl) = 1;
3781 return new_fndecl;
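/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): the name mangling used above, standalone.  OUT must be
   large enough; the real code uses a fixed 32-byte buffer.  */
#if 0
#include <string.h>

/* mass_name ("__builtin_pow",  "d2", buf) -> "powd2"
   mass_name ("__builtin_sinf", "4",  buf) -> "sinf4"  */
static void
mass_name (const char *bname, const char *suffix, char *out)
{
  strcpy (out, bname + sizeof ("__builtin_") - 1);  /* drop the prefix */
  strcat (out, suffix);                             /* append d2 or 4 */
}
#endif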
3784 /* Returns a function decl for a vectorized version of the builtin function
3785 with builtin function code FN and the result vector type TYPE, or NULL_TREE
3786 if it is not available. */
3788 static tree
3789 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3790 tree type_in)
3792 enum machine_mode in_mode, out_mode;
3793 int in_n, out_n;
3795 if (TARGET_DEBUG_BUILTIN)
3796 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
3797 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
3798 GET_MODE_NAME (TYPE_MODE (type_out)),
3799 GET_MODE_NAME (TYPE_MODE (type_in)));
3801 if (TREE_CODE (type_out) != VECTOR_TYPE
3802 || TREE_CODE (type_in) != VECTOR_TYPE
3803 || !TARGET_VECTORIZE_BUILTINS)
3804 return NULL_TREE;
3806 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3807 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3808 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3809 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3811 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3813 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3814 switch (fn)
3816 case BUILT_IN_COPYSIGN:
3817 if (VECTOR_UNIT_VSX_P (V2DFmode)
3818 && out_mode == DFmode && out_n == 2
3819 && in_mode == DFmode && in_n == 2)
3820 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3821 break;
3822 case BUILT_IN_COPYSIGNF:
3823 if (out_mode != SFmode || out_n != 4
3824 || in_mode != SFmode || in_n != 4)
3825 break;
3826 if (VECTOR_UNIT_VSX_P (V4SFmode))
3827 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3828 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3829 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3830 break;
3831 case BUILT_IN_SQRT:
3832 if (VECTOR_UNIT_VSX_P (V2DFmode)
3833 && out_mode == DFmode && out_n == 2
3834 && in_mode == DFmode && in_n == 2)
3835 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3836 break;
3837 case BUILT_IN_SQRTF:
3838 if (VECTOR_UNIT_VSX_P (V4SFmode)
3839 && out_mode == SFmode && out_n == 4
3840 && in_mode == SFmode && in_n == 4)
3841 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3842 break;
3843 case BUILT_IN_CEIL:
3844 if (VECTOR_UNIT_VSX_P (V2DFmode)
3845 && out_mode == DFmode && out_n == 2
3846 && in_mode == DFmode && in_n == 2)
3847 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3848 break;
3849 case BUILT_IN_CEILF:
3850 if (out_mode != SFmode || out_n != 4
3851 || in_mode != SFmode || in_n != 4)
3852 break;
3853 if (VECTOR_UNIT_VSX_P (V4SFmode))
3854 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3855 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3856 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3857 break;
3858 case BUILT_IN_FLOOR:
3859 if (VECTOR_UNIT_VSX_P (V2DFmode)
3860 && out_mode == DFmode && out_n == 2
3861 && in_mode == DFmode && in_n == 2)
3862 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3863 break;
3864 case BUILT_IN_FLOORF:
3865 if (out_mode != SFmode || out_n != 4
3866 || in_mode != SFmode || in_n != 4)
3867 break;
3868 if (VECTOR_UNIT_VSX_P (V4SFmode))
3869 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3870 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3871 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3872 break;
3873 case BUILT_IN_FMA:
3874 if (VECTOR_UNIT_VSX_P (V2DFmode)
3875 && out_mode == DFmode && out_n == 2
3876 && in_mode == DFmode && in_n == 2)
3877 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
3878 break;
3879 case BUILT_IN_FMAF:
3880 if (VECTOR_UNIT_VSX_P (V4SFmode)
3881 && out_mode == SFmode && out_n == 4
3882 && in_mode == SFmode && in_n == 4)
3883 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
3884 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
3885 && out_mode == SFmode && out_n == 4
3886 && in_mode == SFmode && in_n == 4)
3887 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
3888 break;
3889 case BUILT_IN_TRUNC:
3890 if (VECTOR_UNIT_VSX_P (V2DFmode)
3891 && out_mode == DFmode && out_n == 2
3892 && in_mode == DFmode && in_n == 2)
3893 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3894 break;
3895 case BUILT_IN_TRUNCF:
3896 if (out_mode != SFmode || out_n != 4
3897 || in_mode != SFmode || in_n != 4)
3898 break;
3899 if (VECTOR_UNIT_VSX_P (V4SFmode))
3900 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3901 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3902 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3903 break;
3904 case BUILT_IN_NEARBYINT:
3905 if (VECTOR_UNIT_VSX_P (V2DFmode)
3906 && flag_unsafe_math_optimizations
3907 && out_mode == DFmode && out_n == 2
3908 && in_mode == DFmode && in_n == 2)
3909 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3910 break;
3911 case BUILT_IN_NEARBYINTF:
3912 if (VECTOR_UNIT_VSX_P (V4SFmode)
3913 && flag_unsafe_math_optimizations
3914 && out_mode == SFmode && out_n == 4
3915 && in_mode == SFmode && in_n == 4)
3916 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3917 break;
3918 case BUILT_IN_RINT:
3919 if (VECTOR_UNIT_VSX_P (V2DFmode)
3920 && !flag_trapping_math
3921 && out_mode == DFmode && out_n == 2
3922 && in_mode == DFmode && in_n == 2)
3923 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3924 break;
3925 case BUILT_IN_RINTF:
3926 if (VECTOR_UNIT_VSX_P (V4SFmode)
3927 && !flag_trapping_math
3928 && out_mode == SFmode && out_n == 4
3929 && in_mode == SFmode && in_n == 4)
3930 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
3931 break;
3932 default:
3933 break;
3937 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
3939 enum rs6000_builtins fn
3940 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
3941 switch (fn)
3943 case RS6000_BUILTIN_RSQRTF:
3944 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3945 && out_mode == SFmode && out_n == 4
3946 && in_mode == SFmode && in_n == 4)
3947 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
3948 break;
3949 case RS6000_BUILTIN_RSQRT:
3950 if (VECTOR_UNIT_VSX_P (V2DFmode)
3951 && out_mode == DFmode && out_n == 2
3952 && in_mode == DFmode && in_n == 2)
3953 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
3954 break;
3955 case RS6000_BUILTIN_RECIPF:
3956 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3957 && out_mode == SFmode && out_n == 4
3958 && in_mode == SFmode && in_n == 4)
3959 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
3960 break;
3961 case RS6000_BUILTIN_RECIP:
3962 if (VECTOR_UNIT_VSX_P (V2DFmode)
3963 && out_mode == DFmode && out_n == 2
3964 && in_mode == DFmode && in_n == 2)
3965 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
3966 break;
3967 default:
3968 break;
3972 /* Generate calls to libmass if appropriate. */
3973 if (rs6000_veclib_handler)
3974 return rs6000_veclib_handler (fndecl, type_out, type_in);
3976 return NULL_TREE;
3979 /* Default CPU string for rs6000*_file_start functions. */
3980 static const char *rs6000_default_cpu;
3982 /* Do anything needed at the start of the asm file. */
3984 static void
3985 rs6000_file_start (void)
3987 char buffer[80];
3988 const char *start = buffer;
3989 FILE *file = asm_out_file;
3991 rs6000_default_cpu = TARGET_CPU_DEFAULT;
3993 default_file_start ();
3995 if (flag_verbose_asm)
3997 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3999 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4001 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4002 start = "";
4005 if (global_options_set.x_rs6000_cpu_index)
4007 fprintf (file, "%s -mcpu=%s", start,
4008 processor_target_table[rs6000_cpu_index].name);
4009 start = "";
4012 if (global_options_set.x_rs6000_tune_index)
4014 fprintf (file, "%s -mtune=%s", start,
4015 processor_target_table[rs6000_tune_index].name);
4016 start = "";
4019 if (PPC405_ERRATUM77)
4021 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4022 start = "";
4025 #ifdef USING_ELFOS_H
4026 switch (rs6000_sdata)
4028 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4029 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4030 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4031 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4034 if (rs6000_sdata && g_switch_value)
4036 fprintf (file, "%s -G %d", start,
4037 g_switch_value);
4038 start = "";
4040 #endif
4042 if (*start == '\0')
4043 putc ('\n', file);
4046 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
4048 switch_to_section (toc_section);
4049 switch_to_section (text_section);
4054 /* Return nonzero if this function is known to have a null epilogue. */
4057 direct_return (void)
4059 if (reload_completed)
4061 rs6000_stack_t *info = rs6000_stack_info ();
4063 if (info->first_gp_reg_save == 32
4064 && info->first_fp_reg_save == 64
4065 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4066 && ! info->lr_save_p
4067 && ! info->cr_save_p
4068 && info->vrsave_mask == 0
4069 && ! info->push_p)
4070 return 1;
4073 return 0;
4076 /* Return the number of instructions it takes to form a constant in an
4077 integer register. */
4080 num_insns_constant_wide (HOST_WIDE_INT value)
4082 /* signed constant loadable with addi */
4083 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4084 return 1;
4086 /* constant loadable with addis */
4087 else if ((value & 0xffff) == 0
4088 && (value >> 31 == -1 || value >> 31 == 0))
4089 return 1;
4091 #if HOST_BITS_PER_WIDE_INT == 64
4092 else if (TARGET_POWERPC64)
4094 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4095 HOST_WIDE_INT high = value >> 31;
4097 if (high == 0 || high == -1)
4098 return 2;
4100 high >>= 1;
4102 if (low == 0)
4103 return num_insns_constant_wide (high) + 1;
4104 else if (high == 0)
4105 return num_insns_constant_wide (low) + 1;
4106 else
4107 return (num_insns_constant_wide (high)
4108 + num_insns_constant_wide (low) + 1);
4110 #endif
4112 else
4113 return 2;
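/* Illustrative sketch (editorial, not from the original source): sample
   insn counts from the routine above, assuming TARGET_POWERPC64 and a
   64-bit HOST_WIDE_INT.  */
#if 0
#include <assert.h>

static void
num_insns_constant_wide_examples (void)
{
  assert (num_insns_constant_wide (0x7fff) == 1);         /* li */
  assert (num_insns_constant_wide (0x12340000) == 1);     /* lis */
  assert (num_insns_constant_wide (0x12345678) == 2);     /* lis; ori */
  assert (num_insns_constant_wide (0x100000000LL) == 2);  /* li; sldi */
}
#endif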
4117 num_insns_constant (rtx op, enum machine_mode mode)
4119 HOST_WIDE_INT low, high;
4121 switch (GET_CODE (op))
4123 case CONST_INT:
4124 #if HOST_BITS_PER_WIDE_INT == 64
4125 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4126 && mask64_operand (op, mode))
4127 return 2;
4128 else
4129 #endif
4130 return num_insns_constant_wide (INTVAL (op));
4132 case CONST_DOUBLE:
4133 if (mode == SFmode || mode == SDmode)
4135 long l;
4136 REAL_VALUE_TYPE rv;
4138 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4139 if (DECIMAL_FLOAT_MODE_P (mode))
4140 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4141 else
4142 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4143 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4146 if (mode == VOIDmode || mode == DImode)
4148 high = CONST_DOUBLE_HIGH (op);
4149 low = CONST_DOUBLE_LOW (op);
4151 else
4153 long l[2];
4154 REAL_VALUE_TYPE rv;
4156 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4157 if (DECIMAL_FLOAT_MODE_P (mode))
4158 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4159 else
4160 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4161 high = l[WORDS_BIG_ENDIAN == 0];
4162 low = l[WORDS_BIG_ENDIAN != 0];
4165 if (TARGET_32BIT)
4166 return (num_insns_constant_wide (low)
4167 + num_insns_constant_wide (high));
4168 else
4170 if ((high == 0 && low >= 0)
4171 || (high == -1 && low < 0))
4172 return num_insns_constant_wide (low);
4174 else if (mask64_operand (op, mode))
4175 return 2;
4177 else if (low == 0)
4178 return num_insns_constant_wide (high) + 1;
4180 else
4181 return (num_insns_constant_wide (high)
4182 + num_insns_constant_wide (low) + 1);
4185 default:
4186 gcc_unreachable ();
4190 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4191 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4192 corresponding element of the vector, but for V4SFmode and V2SFmode,
4193 the corresponding "float" is interpreted as an SImode integer. */
4195 HOST_WIDE_INT
4196 const_vector_elt_as_int (rtx op, unsigned int elt)
4198 rtx tmp;
4200 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4201 gcc_assert (GET_MODE (op) != V2DImode
4202 && GET_MODE (op) != V2DFmode);
4204 tmp = CONST_VECTOR_ELT (op, elt);
4205 if (GET_MODE (op) == V4SFmode
4206 || GET_MODE (op) == V2SFmode)
4207 tmp = gen_lowpart (SImode, tmp);
4208 return INTVAL (tmp);
4211 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4212 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4213 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4214 all items are set to the same value and contain COPIES replicas of the
4215 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4216 operand and the others are set to the value of the operand's msb. */
4218 static bool
4219 vspltis_constant (rtx op, unsigned step, unsigned copies)
4221 enum machine_mode mode = GET_MODE (op);
4222 enum machine_mode inner = GET_MODE_INNER (mode);
4224 unsigned i;
4225 unsigned nunits;
4226 unsigned bitsize;
4227 unsigned mask;
4229 HOST_WIDE_INT val;
4230 HOST_WIDE_INT splat_val;
4231 HOST_WIDE_INT msb_val;
4233 if (mode == V2DImode || mode == V2DFmode)
4234 return false;
4236 nunits = GET_MODE_NUNITS (mode);
4237 bitsize = GET_MODE_BITSIZE (inner);
4238 mask = GET_MODE_MASK (inner);
4240 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
4241 splat_val = val;
4242 msb_val = val > 0 ? 0 : -1;
4244 /* Construct the value to be splatted, if possible. If not, return 0. */
4245 for (i = 2; i <= copies; i *= 2)
4247 HOST_WIDE_INT small_val;
4248 bitsize /= 2;
4249 small_val = splat_val >> bitsize;
4250 mask >>= bitsize;
4251 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4252 return false;
4253 splat_val = small_val;
4256 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4257 if (EASY_VECTOR_15 (splat_val))
4260 /* Also check if we can splat, and then add the result to itself. Do so if
4261 the value is positive, or if the splat instruction is using OP's mode;
4262 for splat_val < 0, the splat and the add should use the same mode. */
4263 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4264 && (splat_val >= 0 || (step == 1 && copies == 1)))
4267 /* Also check if we are loading up the most significant bit, which can be
4268 done by loading up -1 and shifting the value left by -1. */
4269 else if (EASY_VECTOR_MSB (splat_val, inner))
4272 else
4273 return false;
4275 /* Check if VAL is present in every STEP-th element, and the
4276 other elements are filled with its most significant bit. */
4277 for (i = 0; i < nunits - 1; ++i)
4279 HOST_WIDE_INT desired_val;
4280 if (((BYTES_BIG_ENDIAN ? i + 1 : i) & (step - 1)) == 0)
4281 desired_val = val;
4282 else
4283 desired_val = msb_val;
4285 if (desired_val != const_vector_elt_as_int (op, i))
4286 return false;
4289 return true;
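/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): the halving loop above on a concrete value.  A V8HImode
   constant whose every element is 0x0101 is really "vspltisb 1": with
   bitsize 16 and mask 0xffff, small_val = 0x0101 >> 8 = 1 and
   (1 << 8) | (1 & 0xff) == 0x0101, so the splat value shrinks to 1.  */
#if 0
static int
half_replicates (long splat_val, unsigned bitsize, unsigned mask)
{
  long small_val = splat_val >> (bitsize / 2);
  return splat_val == ((small_val << (bitsize / 2))
                       | (small_val & (mask >> (bitsize / 2))));
}
#endif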
4293 /* Return true if OP is of the given MODE and can be synthesized
4294 with a vspltisb, vspltish or vspltisw. */
4296 bool
4297 easy_altivec_constant (rtx op, enum machine_mode mode)
4299 unsigned step, copies;
4301 if (mode == VOIDmode)
4302 mode = GET_MODE (op);
4303 else if (mode != GET_MODE (op))
4304 return false;
4306 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4307 constants. */
4308 if (mode == V2DFmode)
4309 return zero_constant (op, mode);
4311 if (mode == V2DImode)
4313 /* If the compiler is built 32-bit, the elements may be CONST_DOUBLE
4314 constants, which are not easy. */
4315 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4316 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4317 return false;
4319 if (zero_constant (op, mode))
4320 return true;
4322 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4323 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4324 return true;
4326 return false;
4329 /* Start with a vspltisw. */
4330 step = GET_MODE_NUNITS (mode) / 4;
4331 copies = 1;
4333 if (vspltis_constant (op, step, copies))
4334 return true;
4336 /* Then try with a vspltish. */
4337 if (step == 1)
4338 copies <<= 1;
4339 else
4340 step >>= 1;
4342 if (vspltis_constant (op, step, copies))
4343 return true;
4345 /* And finally a vspltisb. */
4346 if (step == 1)
4347 copies <<= 1;
4348 else
4349 step >>= 1;
4351 if (vspltis_constant (op, step, copies))
4352 return true;
4354 return false;
4357 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4358 result is OP. Abort if it is not possible. */
4361 gen_easy_altivec_constant (rtx op)
4363 enum machine_mode mode = GET_MODE (op);
4364 int nunits = GET_MODE_NUNITS (mode);
4365 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
4366 unsigned step = nunits / 4;
4367 unsigned copies = 1;
4369 /* Start with a vspltisw. */
4370 if (vspltis_constant (op, step, copies))
4371 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
4373 /* Then try with a vspltish. */
4374 if (step == 1)
4375 copies <<= 1;
4376 else
4377 step >>= 1;
4379 if (vspltis_constant (op, step, copies))
4380 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
4382 /* And finally a vspltisb. */
4383 if (step == 1)
4384 copies <<= 1;
4385 else
4386 step >>= 1;
4388 if (vspltis_constant (op, step, copies))
4389 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
4391 gcc_unreachable ();
4394 const char *
4395 output_vec_const_move (rtx *operands)
4397 int cst, cst2;
4398 enum machine_mode mode;
4399 rtx dest, vec;
4401 dest = operands[0];
4402 vec = operands[1];
4403 mode = GET_MODE (dest);
4405 if (TARGET_VSX)
4407 if (zero_constant (vec, mode))
4408 return "xxlxor %x0,%x0,%x0";
4410 if (mode == V2DImode
4411 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4412 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4413 return "vspltisw %0,-1";
4416 if (TARGET_ALTIVEC)
4418 rtx splat_vec;
4419 if (zero_constant (vec, mode))
4420 return "vxor %0,%0,%0";
4422 splat_vec = gen_easy_altivec_constant (vec);
4423 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4424 operands[1] = XEXP (splat_vec, 0);
4425 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4426 return "#";
4428 switch (GET_MODE (splat_vec))
4430 case V4SImode:
4431 return "vspltisw %0,%1";
4433 case V8HImode:
4434 return "vspltish %0,%1";
4436 case V16QImode:
4437 return "vspltisb %0,%1";
4439 default:
4440 gcc_unreachable ();
4444 gcc_assert (TARGET_SPE);
4446 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4447 pattern of V1DI, V4HI, and V2SF.
4449 FIXME: We should probably return # and add post reload
4450 splitters for these, but this way is so easy ;-). */
4451 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4452 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4453 operands[1] = CONST_VECTOR_ELT (vec, 0);
4454 operands[2] = CONST_VECTOR_ELT (vec, 1);
4455 if (cst == cst2)
4456 return "li %0,%1\n\tevmergelo %0,%0,%0";
4457 else
4458 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4461 /* Initialize the paired-float vector TARGET to VALS. */
4463 void
4464 paired_expand_vector_init (rtx target, rtx vals)
4466 enum machine_mode mode = GET_MODE (target);
4467 int n_elts = GET_MODE_NUNITS (mode);
4468 int n_var = 0;
4469 rtx x, new_rtx, tmp, constant_op, op1, op2;
4470 int i;
4472 for (i = 0; i < n_elts; ++i)
4474 x = XVECEXP (vals, 0, i);
4475 if (!(CONST_INT_P (x)
4476 || GET_CODE (x) == CONST_DOUBLE
4477 || GET_CODE (x) == CONST_FIXED))
4478 ++n_var;
4480 if (n_var == 0)
4482 /* Load from constant pool. */
4483 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4484 return;
4487 if (n_var == 2)
4489 /* The vector is initialized only with non-constants. */
4490 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4491 XVECEXP (vals, 0, 1));
4493 emit_move_insn (target, new_rtx);
4494 return;
4497 /* One field is non-constant and the other one is a constant. Load the
4498 constant from the constant pool and use the ps_merge instruction to
4499 construct the whole vector. */
4500 op1 = XVECEXP (vals, 0, 0);
4501 op2 = XVECEXP (vals, 0, 1);
4503 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4505 tmp = gen_reg_rtx (GET_MODE (constant_op));
4506 emit_move_insn (tmp, constant_op);
4508 if (CONSTANT_P (op1))
4509 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4510 else
4511 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4513 emit_move_insn (target, new_rtx);
4516 void
4517 paired_expand_vector_move (rtx operands[])
4519 rtx op0 = operands[0], op1 = operands[1];
4521 emit_move_insn (op0, op1);
4524 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4525 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4526 operands for the relation operation COND. This is a recursive
4527 function. */
4529 static void
4530 paired_emit_vector_compare (enum rtx_code rcode,
4531 rtx dest, rtx op0, rtx op1,
4532 rtx cc_op0, rtx cc_op1)
4534 rtx tmp = gen_reg_rtx (V2SFmode);
4535 rtx tmp1, max, min;
4537 gcc_assert (TARGET_PAIRED_FLOAT);
4538 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4540 switch (rcode)
4542 case LT:
4543 case LTU:
4544 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4545 return;
4546 case GE:
4547 case GEU:
4548 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4549 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4550 return;
4551 case LE:
4552 case LEU:
4553 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4554 return;
4555 case GT:
4556 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4557 return;
4558 case EQ:
4559 tmp1 = gen_reg_rtx (V2SFmode);
4560 max = gen_reg_rtx (V2SFmode);
4561 min = gen_reg_rtx (V2SFmode);
4564 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4565 emit_insn (gen_selv2sf4
4566 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4567 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4568 emit_insn (gen_selv2sf4
4569 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4570 emit_insn (gen_subv2sf3 (tmp1, min, max));
4571 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4572 return;
4573 case NE:
4574 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4575 return;
4576 case UNLE:
4577 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4578 return;
4579 case UNLT:
4580 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4581 return;
4582 case UNGE:
4583 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4584 return;
4585 case UNGT:
4586 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4587 return;
4588 default:
4589 gcc_unreachable ();
4592 return;
4595 /* Emit vector conditional expression.
4596 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4597 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4600 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4601 rtx cond, rtx cc_op0, rtx cc_op1)
4603 enum rtx_code rcode = GET_CODE (cond);
4605 if (!TARGET_PAIRED_FLOAT)
4606 return 0;
4608 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4610 return 1;
4613 /* Initialize vector TARGET to VALS. */
4615 void
4616 rs6000_expand_vector_init (rtx target, rtx vals)
4618 enum machine_mode mode = GET_MODE (target);
4619 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4620 int n_elts = GET_MODE_NUNITS (mode);
4621 int n_var = 0, one_var = -1;
4622 bool all_same = true, all_const_zero = true;
4623 rtx x, mem;
4624 int i;
4626 for (i = 0; i < n_elts; ++i)
4628 x = XVECEXP (vals, 0, i);
4629 if (!(CONST_INT_P (x)
4630 || GET_CODE (x) == CONST_DOUBLE
4631 || GET_CODE (x) == CONST_FIXED))
4632 ++n_var, one_var = i;
4633 else if (x != CONST0_RTX (inner_mode))
4634 all_const_zero = false;
4636 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4637 all_same = false;
4640 if (n_var == 0)
4642 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4643 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4644 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4646 /* Zero register. */
4647 emit_insn (gen_rtx_SET (VOIDmode, target,
4648 gen_rtx_XOR (mode, target, target)));
4649 return;
4651 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4653 /* Splat immediate. */
4654 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4655 return;
4657 else
4659 /* Load from constant pool. */
4660 emit_move_insn (target, const_vec);
4661 return;
4665 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4666 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4668 rtx op0 = XVECEXP (vals, 0, 0);
4669 rtx op1 = XVECEXP (vals, 0, 1);
4670 if (all_same)
4672 if (!MEM_P (op0) && !REG_P (op0))
4673 op0 = force_reg (inner_mode, op0);
4674 if (mode == V2DFmode)
4675 emit_insn (gen_vsx_splat_v2df (target, op0));
4676 else
4677 emit_insn (gen_vsx_splat_v2di (target, op0));
4679 else
4681 op0 = force_reg (inner_mode, op0);
4682 op1 = force_reg (inner_mode, op1);
4683 if (mode == V2DFmode)
4684 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4685 else
4686 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4688 return;
4691 /* With single precision floating point on VSX, we know that internally
4692 single precision is actually represented as a double, so either make 2
4693 V2DF vectors and convert these vectors to single precision, or do one
4694 conversion and splat the result to the other elements. */
4695 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4697 if (all_same)
4699 rtx freg = gen_reg_rtx (V4SFmode);
4700 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4702 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4703 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4705 else
4707 rtx dbl_even = gen_reg_rtx (V2DFmode);
4708 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4709 rtx flt_even = gen_reg_rtx (V4SFmode);
4710 rtx flt_odd = gen_reg_rtx (V4SFmode);
4711 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4712 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4713 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4714 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4716 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4717 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4718 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4719 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4720 rs6000_expand_extract_even (target, flt_even, flt_odd);
4722 return;
4725 /* Store value to stack temp. Load vector element. Splat. However, splat
4726 of 64-bit items is not supported on Altivec. */
4727 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4729 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4730 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4731 XVECEXP (vals, 0, 0));
4732 x = gen_rtx_UNSPEC (VOIDmode,
4733 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4734 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4735 gen_rtvec (2,
4736 gen_rtx_SET (VOIDmode,
4737 target, mem),
4738 x)));
4739 x = gen_rtx_VEC_SELECT (inner_mode, target,
4740 gen_rtx_PARALLEL (VOIDmode,
4741 gen_rtvec (1, const0_rtx)));
4742 emit_insn (gen_rtx_SET (VOIDmode, target,
4743 gen_rtx_VEC_DUPLICATE (mode, x)));
4744 return;
4747 /* One field is non-constant. Load constant then overwrite
4748 varying field. */
4749 if (n_var == 1)
4751 rtx copy = copy_rtx (vals);
4753 /* Load constant part of vector, substitute neighboring value for
4754 varying element. */
4755 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4756 rs6000_expand_vector_init (target, copy);
4758 /* Insert variable. */
4759 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4760 return;
4763 /* Construct the vector in memory one field at a time
4764 and load the whole vector. */
4765 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4766 for (i = 0; i < n_elts; i++)
4767 emit_move_insn (adjust_address_nv (mem, inner_mode,
4768 i * GET_MODE_SIZE (inner_mode)),
4769 XVECEXP (vals, 0, i));
4770 emit_move_insn (target, mem);
4773 /* Set field ELT of TARGET to VAL. */
4775 void
4776 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4778 enum machine_mode mode = GET_MODE (target);
4779 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4780 rtx reg = gen_reg_rtx (mode);
4781 rtx mask, mem, x;
4782 int width = GET_MODE_SIZE (inner_mode);
4783 int i;
4785 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4787 rtx (*set_func) (rtx, rtx, rtx, rtx)
4788 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4789 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4790 return;
4793 /* Load single variable value. */
4794 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4795 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4796 x = gen_rtx_UNSPEC (VOIDmode,
4797 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4798 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4799 gen_rtvec (2,
4800 gen_rtx_SET (VOIDmode,
4801 reg, mem),
4802 x)));
4804 /* Linear sequence. */
4805 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4806 for (i = 0; i < 16; ++i)
4807 XVECEXP (mask, 0, i) = GEN_INT (i);
4809 /* Set permute mask to insert element into target. */
4810 for (i = 0; i < width; ++i)
4811 XVECEXP (mask, 0, elt*width + i)
4812 = GEN_INT (i + 0x10);
4813 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4814 x = gen_rtx_UNSPEC (mode,
4815 gen_rtvec (3, target, reg,
4816 force_reg (V16QImode, x)),
4817 UNSPEC_VPERM);
4818 emit_insn (gen_rtx_SET (VOIDmode, target, x));
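/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): the vperm mask built above, standalone.  For V4SImode and
   elt = 2 it yields { 0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15 }: byte i
   comes from the target except over the inserted element, where selectors
   0x10 and up pull bytes from the new value.  */
#if 0
static void
build_insert_mask (unsigned char mask[16], int elt, int width)
{
  int i;
  for (i = 0; i < 16; i++)
    mask[i] = i;                       /* identity: keep target bytes */
  for (i = 0; i < width; i++)
    mask[elt * width + i] = 0x10 + i;  /* bytes from the new value */
}
#endif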
4821 /* Extract field ELT from VEC into TARGET. */
4823 void
4824 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4826 enum machine_mode mode = GET_MODE (vec);
4827 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4828 rtx mem;
4830 if (VECTOR_MEM_VSX_P (mode))
4832 switch (mode)
4834 default:
4835 break;
4836 case V2DFmode:
4837 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
4838 return;
4839 case V2DImode:
4840 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
4841 return;
4842 case V4SFmode:
4843 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
4844 return;
4848 /* Allocate mode-sized buffer. */
4849 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4851 emit_move_insn (mem, vec);
4853 /* Add offset to field within buffer matching vector element. */
4854 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
4856 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
4859 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
4860 implement ANDing by the mask IN. */
4861 void
4862 build_mask64_2_operands (rtx in, rtx *out)
4864 #if HOST_BITS_PER_WIDE_INT >= 64
4865 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4866 int shift;
4868 gcc_assert (GET_CODE (in) == CONST_INT);
4870 c = INTVAL (in);
4871 if (c & 1)
4873 /* Assume c is initially something like 0x00fff000000fffff. The idea
4874 is to rotate the word so that the middle ^^^^^^ group of zeros
4875 is at the MS end and can be cleared with an rldicl mask. We then
4876 rotate back and clear off the MS ^^ group of zeros with a
4877 second rldicl. */
4878 c = ~c; /* c == 0xff000ffffff00000 */
4879 lsb = c & -c; /* lsb == 0x0000000000100000 */
4880 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4881 c = ~c; /* c == 0x00fff000000fffff */
4882 c &= -lsb; /* c == 0x00fff00000000000 */
4883 lsb = c & -c; /* lsb == 0x0000100000000000 */
4884 c = ~c; /* c == 0xff000fffffffffff */
4885 c &= -lsb; /* c == 0xff00000000000000 */
4886 shift = 0;
4887 while ((lsb >>= 1) != 0)
4888 shift++; /* shift == 44 on exit from loop */
4889 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4890 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4891 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4893 else
4895 /* Assume c is initially something like 0xff000f0000000000. The idea
4896 is to rotate the word so that the ^^^ middle group of zeros
4897 is at the LS end and can be cleared with an rldicr mask. We then
4898 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4899 a second rldicr. */
4900 lsb = c & -c; /* lsb == 0x0000010000000000 */
4901 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4902 c = ~c; /* c == 0x00fff0ffffffffff */
4903 c &= -lsb; /* c == 0x00fff00000000000 */
4904 lsb = c & -c; /* lsb == 0x0000100000000000 */
4905 c = ~c; /* c == 0xff000fffffffffff */
4906 c &= -lsb; /* c == 0xff00000000000000 */
4907 shift = 0;
4908 while ((lsb >>= 1) != 0)
4909 shift++; /* shift == 44 on exit from loop */
4910 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4911 m1 >>= shift; /* m1 == 0x0000000000000fff */
4912 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4915 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4916 masks will be all 1's. We are guaranteed more than one transition. */
4917 out[0] = GEN_INT (64 - shift);
4918 out[1] = GEN_INT (m1);
4919 out[2] = GEN_INT (shift);
4920 out[3] = GEN_INT (m2);
4921 #else
4922 (void)in;
4923 (void)out;
4924 gcc_unreachable ();
4925 #endif
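/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): a standalone replay of the decomposition above on the sample
   mask from the comments; it ends with shift == 44, m1 == 0x000000ffffffffff
   and m2 == 0x00ffffffffffffff.  */
#if 0
static void
mask64_2_example (void)
{
  unsigned long long c = 0x00fff000000fffffULL, lsb, m1, m2;
  int shift = 0;

  c = ~c;                       /* 0xff000ffffff00000 */
  lsb = c & -c;                 /* 0x0000000000100000 */
  m1 = -lsb;                    /* 0xfffffffffff00000 */
  c = ~c;                       /* 0x00fff000000fffff */
  c &= -lsb;                    /* 0x00fff00000000000 */
  lsb = c & -c;                 /* 0x0000100000000000 */
  c = ~c;                       /* 0xff000fffffffffff */
  c &= -lsb;                    /* 0xff00000000000000 */
  while ((lsb >>= 1) != 0)
    shift++;                    /* 44 */
  m1 = ~(m1 << (64 - shift));   /* 0x000000ffffffffff */
  m2 = ~c;                      /* 0x00ffffffffffffff */
}
#endif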
4928 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4930 bool
4931 invalid_e500_subreg (rtx op, enum machine_mode mode)
4933 if (TARGET_E500_DOUBLE)
4935 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4936 subreg:TI and reg:TF. Decimal float modes are like integer
4937 modes (only low part of each register used) for this
4938 purpose. */
4939 if (GET_CODE (op) == SUBREG
4940 && (mode == SImode || mode == DImode || mode == TImode
4941 || mode == DDmode || mode == TDmode)
4942 && REG_P (SUBREG_REG (op))
4943 && (GET_MODE (SUBREG_REG (op)) == DFmode
4944 || GET_MODE (SUBREG_REG (op)) == TFmode))
4945 return true;
4947 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4948 reg:TI. */
4949 if (GET_CODE (op) == SUBREG
4950 && (mode == DFmode || mode == TFmode)
4951 && REG_P (SUBREG_REG (op))
4952 && (GET_MODE (SUBREG_REG (op)) == DImode
4953 || GET_MODE (SUBREG_REG (op)) == TImode
4954 || GET_MODE (SUBREG_REG (op)) == DDmode
4955 || GET_MODE (SUBREG_REG (op)) == TDmode))
4956 return true;
4959 if (TARGET_SPE
4960 && GET_CODE (op) == SUBREG
4961 && mode == SImode
4962 && REG_P (SUBREG_REG (op))
4963 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4964 return true;
4966 return false;
4969 /* AIX increases natural record alignment to doubleword if the first
4970 field is an FP double while the FP fields remain word aligned. */
4972 unsigned int
4973 rs6000_special_round_type_align (tree type, unsigned int computed,
4974 unsigned int specified)
4976 unsigned int align = MAX (computed, specified);
4977 tree field = TYPE_FIELDS (type);
4979 /* Skip all non-field decls. */
4980 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4981 field = DECL_CHAIN (field);
4983 if (field != NULL && field != type)
4985 type = TREE_TYPE (field);
4986 while (TREE_CODE (type) == ARRAY_TYPE)
4987 type = TREE_TYPE (type);
4989 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4990 align = MAX (align, 64);
4993 return align;
4996 /* Darwin increases record alignment to the natural alignment of
4997 the first field. */
4999 unsigned int
5000 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5001 unsigned int specified)
5003 unsigned int align = MAX (computed, specified);
5005 if (TYPE_PACKED (type))
5006 return align;
5008 /* Find the first field, looking down into aggregates. */
5009 do {
5010 tree field = TYPE_FIELDS (type);
5011 /* Skip all non-field decls. */
5012 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5013 field = DECL_CHAIN (field);
5014 if (! field)
5015 break;
5016 /* A packed field does not contribute any extra alignment. */
5017 if (DECL_PACKED (field))
5018 return align;
5019 type = TREE_TYPE (field);
5020 while (TREE_CODE (type) == ARRAY_TYPE)
5021 type = TREE_TYPE (type);
5022 } while (AGGREGATE_TYPE_P (type));
5024 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5025 align = MAX (align, TYPE_ALIGN (type));
5027 return align;
5030 /* Return 1 for an operand in small memory on V.4/eabi. */
5033 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5034 enum machine_mode mode ATTRIBUTE_UNUSED)
5036 #if TARGET_ELF
5037 rtx sym_ref;
5039 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5040 return 0;
5042 if (DEFAULT_ABI != ABI_V4)
5043 return 0;
5045 /* Vector and float memory instructions have a limited offset on the
5046 SPE, so using a vector or float variable directly as an operand is
5047 not useful. */
5048 if (TARGET_SPE
5049 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5050 return 0;
5052 if (GET_CODE (op) == SYMBOL_REF)
5053 sym_ref = op;
5055 else if (GET_CODE (op) != CONST
5056 || GET_CODE (XEXP (op, 0)) != PLUS
5057 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5058 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5059 return 0;
5061 else
5063 rtx sum = XEXP (op, 0);
5064 HOST_WIDE_INT summand;
5066 /* We have to be careful here, because it is the referenced address
5067 that must be 32k from _SDA_BASE_, not just the symbol. */
5068 summand = INTVAL (XEXP (sum, 1));
5069 if (summand < 0 || summand > g_switch_value)
5070 return 0;
5072 sym_ref = XEXP (sum, 0);
5075 return SYMBOL_REF_SMALL_P (sym_ref);
5076 #else
5077 return 0;
5078 #endif
5081 /* Return true if either operand is a general purpose register. */
5083 bool
5084 gpr_or_gpr_p (rtx op0, rtx op1)
5086 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5087 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5090 /* Given an address, return a constant offset term if one exists. */
5092 static rtx
5093 address_offset (rtx op)
5095 if (GET_CODE (op) == PRE_INC
5096 || GET_CODE (op) == PRE_DEC)
5097 op = XEXP (op, 0);
5098 else if (GET_CODE (op) == PRE_MODIFY
5099 || GET_CODE (op) == LO_SUM)
5100 op = XEXP (op, 1);
5102 if (GET_CODE (op) == CONST)
5103 op = XEXP (op, 0);
5105 if (GET_CODE (op) == PLUS)
5106 op = XEXP (op, 1);
5108 if (CONST_INT_P (op))
5109 return op;
5111 return NULL_RTX;
5114 /* Return true if the MEM operand is a memory operand suitable for use
5115 with a (full width, possibly multiple) gpr load/store. On
5116 powerpc64 this means the offset must be divisible by 4.
5117 Implements 'Y' constraint.
5119 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5120 a constraint function we know the operand has satisfied a suitable
5121 memory predicate. Also accept some odd rtl generated by reload
5122 (see rs6000_legitimize_reload_address for various forms). It is
5123 important that reload rtl be accepted by appropriate constraints
5124 but not by the operand predicate.
5126 Offsetting a lo_sum should not be allowed, except where we know by
5127 alignment that a 32k boundary is not crossed, but see the ???
5128 comment in rs6000_legitimize_reload_address. Note that by
5129 "offsetting" here we mean a further offset to access parts of the
5130 MEM. It's fine to have a lo_sum where the inner address is offset
5131 from a sym, since the same sym+offset will appear in the high part
5132 of the address calculation. */
5134 bool
5135 mem_operand_gpr (rtx op, enum machine_mode mode)
5137 unsigned HOST_WIDE_INT offset;
5138 int extra;
5139 rtx addr = XEXP (op, 0);
5141 op = address_offset (addr);
5142 if (op == NULL_RTX)
5143 return true;
5145 offset = INTVAL (op);
5146 if (TARGET_POWERPC64 && (offset & 3) != 0)
5147 return false;
5149 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5150 gcc_assert (extra >= 0);
5152 if (GET_CODE (addr) == LO_SUM)
5153 /* For lo_sum addresses, we must allow any offset except one that
5154 causes a wrap, so test only the low 16 bits. */
5155 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
5157 return offset + 0x8000 < 0x10000u - extra;
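/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): the lo_sum adjustment above is the usual "sign-extend the
   low 16 bits" identity.  E.g. 0x8000 maps to -0x8000 while 0x7fff stays
   0x7fff, so a further positive offset to -0x8000 could wrap past a 32k
   boundary.  */
#if 0
static long
sign_extend_low_16 (unsigned long offset)
{
  return ((offset & 0xffff) ^ 0x8000) - 0x8000;
}
#endif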
5160 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5162 static bool
5163 reg_offset_addressing_ok_p (enum machine_mode mode)
5165 switch (mode)
5167 case V16QImode:
5168 case V8HImode:
5169 case V4SFmode:
5170 case V4SImode:
5171 case V2DFmode:
5172 case V2DImode:
5173 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
5174 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5175 return false;
5176 break;
5178 case V4HImode:
5179 case V2SImode:
5180 case V1DImode:
5181 case V2SFmode:
5182 /* Paired vector modes. Only reg+reg addressing is valid. */
5183 if (TARGET_PAIRED_FLOAT)
5184 return false;
5185 break;
5187 default:
5188 break;
5191 return true;
5194 static bool
5195 virtual_stack_registers_memory_p (rtx op)
5197 int regnum;
5199 if (GET_CODE (op) == REG)
5200 regnum = REGNO (op);
5202 else if (GET_CODE (op) == PLUS
5203 && GET_CODE (XEXP (op, 0)) == REG
5204 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5205 regnum = REGNO (XEXP (op, 0));
5207 else
5208 return false;
5210 return (regnum >= FIRST_VIRTUAL_REGISTER
5211 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5214 /* Return true if memory accesses to OP are known to never straddle
5215 a 32k boundary. */
5217 static bool
5218 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5219 enum machine_mode mode)
5221 tree decl, type;
5222 unsigned HOST_WIDE_INT dsize, dalign;
5224 if (GET_CODE (op) != SYMBOL_REF)
5225 return false;
5227 decl = SYMBOL_REF_DECL (op);
5228 if (!decl)
5230 if (GET_MODE_SIZE (mode) == 0)
5231 return false;
5233 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5234 replacing memory addresses with an anchor plus offset. We
5235 could find the decl by rummaging around in the block->objects
5236 VEC for the given offset but that seems like too much work. */
5237 dalign = 1;
5238 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5239 && SYMBOL_REF_ANCHOR_P (op)
5240 && SYMBOL_REF_BLOCK (op) != NULL)
5242 struct object_block *block = SYMBOL_REF_BLOCK (op);
5243 HOST_WIDE_INT lsb, mask;
5245 /* Given the alignment of the block.. */
5246 dalign = block->alignment;
5247 mask = dalign / BITS_PER_UNIT - 1;
5249 /* ..and the combined offset of the anchor and any offset
5250 to this block object.. */
5251 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5252 lsb = offset & -offset;
5254 /* ..find how many bits of the alignment we know for the
5255 object. */
5256 mask &= lsb - 1;
5257 dalign = mask + 1;
5259 return dalign >= GET_MODE_SIZE (mode);
5262 if (DECL_P (decl))
5264 if (TREE_CODE (decl) == FUNCTION_DECL)
5265 return true;
5267 if (!DECL_SIZE_UNIT (decl))
5268 return false;
5270 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5271 return false;
5273 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5274 if (dsize > 32768)
5275 return false;
5277 dalign = DECL_ALIGN_UNIT (decl);
5278 return dalign >= dsize;
5281 type = TREE_TYPE (decl);
5283 if (TREE_CODE (decl) == STRING_CST)
5284 dsize = TREE_STRING_LENGTH (decl);
5285 else if (TYPE_SIZE_UNIT (type)
5286 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5287 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5288 else
5289 return false;
5290 if (dsize > 32768)
5291 return false;
5293 dalign = TYPE_ALIGN (type);
5294 if (CONSTANT_CLASS_P (decl))
5295 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5296 else
5297 dalign = DATA_ALIGNMENT (decl, dalign);
5298 dalign /= BITS_PER_UNIT;
5299 return dalign >= dsize;
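/* Illustrative sketch (editorial, not from the original source; hypothetical
   helper name): the alignment arithmetic used for section anchors above.
   E.g. a 16-byte-aligned block at byte offset 24 only guarantees 8-byte
   alignment, since the lowest set bit of 24 is 8.  */
#if 0
static unsigned long
known_alignment_at_offset (unsigned long block_align_bytes,
                           unsigned long offset)
{
  unsigned long mask = block_align_bytes - 1;
  unsigned long lsb = offset & -offset;  /* lowest set bit; 0 if offset is 0 */
  return (mask & (lsb - 1)) + 1;
}
#endif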
5302 static bool
5303 constant_pool_expr_p (rtx op)
5305 rtx base, offset;
5307 split_const (op, &base, &offset);
5308 return (GET_CODE (base) == SYMBOL_REF
5309 && CONSTANT_POOL_ADDRESS_P (base)
5310 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5313 static const_rtx tocrel_base, tocrel_offset;
5315 /* Return true if OP is a toc pointer relative address (the output
5316 of create_TOC_reference). If STRICT, do not match high part or
5317 non-split -mcmodel=large/medium toc pointer relative addresses. */
5319 bool
5320 toc_relative_expr_p (const_rtx op, bool strict)
5322 if (!TARGET_TOC)
5323 return false;
5325 if (TARGET_CMODEL != CMODEL_SMALL)
5327 /* Only match the low part. */
5328 if (GET_CODE (op) == LO_SUM
5329 && REG_P (XEXP (op, 0))
5330 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5331 op = XEXP (op, 1);
5332 else if (strict)
5333 return false;
5336 tocrel_base = op;
5337 tocrel_offset = const0_rtx;
5338 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
5340 tocrel_base = XEXP (op, 0);
5341 tocrel_offset = XEXP (op, 1);
5344 return (GET_CODE (tocrel_base) == UNSPEC
5345 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5348 /* Return true if X is a constant pool address, and also for cmodel=medium
5349 if X is a toc-relative address known to be offsettable within MODE. */
5351 bool
5352 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5353 bool strict)
5355 return (toc_relative_expr_p (x, strict)
5356 && (TARGET_CMODEL != CMODEL_MEDIUM
5357 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5358 || mode == QImode
5359 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5360 INTVAL (tocrel_offset), mode)));
5363 static bool
5364 legitimate_small_data_p (enum machine_mode mode, rtx x)
5366 return (DEFAULT_ABI == ABI_V4
5367 && !flag_pic && !TARGET_TOC
5368 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5369 && small_data_operand (x, mode));
5372 /* SPE offset addressing is limited to 5-bits worth of double words. */
5373 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
5375 bool
5376 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5377 bool strict, bool worst_case)
5379 unsigned HOST_WIDE_INT offset;
5380 unsigned int extra;
5382 if (GET_CODE (x) != PLUS)
5383 return false;
5384 if (!REG_P (XEXP (x, 0)))
5385 return false;
5386 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5387 return false;
5388 if (!reg_offset_addressing_ok_p (mode))
5389 return virtual_stack_registers_memory_p (x);
5390 if (legitimate_constant_pool_address_p (x, mode, strict))
5391 return true;
5392 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5393 return false;
5395 offset = INTVAL (XEXP (x, 1));
5396 extra = 0;
5397 switch (mode)
5399 case V4HImode:
5400 case V2SImode:
5401 case V1DImode:
5402 case V2SFmode:
5403 /* SPE vector modes. */
5404 return SPE_CONST_OFFSET_OK (offset);
5406 case DFmode:
5407 case DDmode:
5408 case DImode:
5409 /* On e500v2, we may have:
5411 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
5413 which gets addressed with evldd instructions. */
5414 if (TARGET_E500_DOUBLE)
5415 return SPE_CONST_OFFSET_OK (offset);
5417 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5418 addressing. */
5419 if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
5420 return false;
5422 if (!worst_case)
5423 break;
5424 if (!TARGET_POWERPC64)
5425 extra = 4;
5426 else if (offset & 3)
5427 return false;
5428 break;
5430 case TFmode:
5431 if (TARGET_E500_DOUBLE)
5432 return (SPE_CONST_OFFSET_OK (offset)
5433 && SPE_CONST_OFFSET_OK (offset + 8));
5434 /* fall through */
5436 case TDmode:
5437 case TImode:
5438 extra = 8;
5439 if (!worst_case)
5440 break;
5441 if (!TARGET_POWERPC64)
5442 extra = 12;
5443 else if (offset & 3)
5444 return false;
5445 break;
5447 default:
5448 break;
5451 offset += 0x8000;
5452 return offset < 0x10000 - extra;
5455 bool
5456 legitimate_indexed_address_p (rtx x, int strict)
5458 rtx op0, op1;
5460 if (GET_CODE (x) != PLUS)
5461 return false;
5463 op0 = XEXP (x, 0);
5464 op1 = XEXP (x, 1);
5466 /* Recognize the rtl generated by reload which we know will later be
5467 replaced with proper base and index regs. */
5468 if (!strict
5469 && reload_in_progress
5470 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5471 && REG_P (op1))
5472 return true;
5474 return (REG_P (op0) && REG_P (op1)
5475 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5476 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5477 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5478 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5481 bool
5482 avoiding_indexed_address_p (enum machine_mode mode)
5484 /* Avoid indexed addressing for modes that have non-indexed
5485 load/store instruction forms. */
5486 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5489 bool
5490 legitimate_indirect_address_p (rtx x, int strict)
5492 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5495 bool
5496 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5498 if (!TARGET_MACHO || !flag_pic
5499 || mode != SImode || GET_CODE (x) != MEM)
5500 return false;
5501 x = XEXP (x, 0);
5503 if (GET_CODE (x) != LO_SUM)
5504 return false;
5505 if (GET_CODE (XEXP (x, 0)) != REG)
5506 return false;
5507 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5508 return false;
5509 x = XEXP (x, 1);
5511 return CONSTANT_P (x);
5514 static bool
5515 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5517 if (GET_CODE (x) != LO_SUM)
5518 return false;
5519 if (GET_CODE (XEXP (x, 0)) != REG)
5520 return false;
5521 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5522 return false;
5523 /* Restrict addressing for DI because of our SUBREG hackery. */
5524 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
5525 return false;
5526 x = XEXP (x, 1);
5528 if (TARGET_ELF || TARGET_MACHO)
5530 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5531 return false;
5532 if (TARGET_TOC)
5533 return false;
5534 if (GET_MODE_NUNITS (mode) != 1)
5535 return false;
5536 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5537 && !(/* ??? Assume floating point reg based on mode? */
5538 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5539 && (mode == DFmode || mode == DDmode)))
5540 return false;
5542 return CONSTANT_P (x);
5545 return false;
5549 /* Try machine-dependent ways of modifying an illegitimate address
5550 to be legitimate. If we find one, return the new, valid address.
5551 This is used from only one place: `memory_address' in explow.c.
5553 OLDX is the address as it was before break_out_memory_refs was
5554 called. In some cases it is useful to look at this to decide what
5555 needs to be done.
5557 It is always safe for this function to do nothing. It exists to
5558 recognize opportunities to optimize the output.
5560 On RS/6000, first check for the sum of a register with a constant
5561 integer that is out of range. If so, generate code to add to the
5562 register the constant with its low-order 16 bits masked off, and force
5563 the result into another register (this can be done with `cau').
5564 Then generate an address of REG+(CONST&0xffff), allowing for the
5565 possibility of bit 16 being a one.
5567 Then check for the sum of a register and something not constant, try to
5568 load the other things into a register and return the sum. */
5570 static rtx
5571 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5572 enum machine_mode mode)
5574 unsigned int extra;
5576 if (!reg_offset_addressing_ok_p (mode))
5578 if (virtual_stack_registers_memory_p (x))
5579 return x;
5581 /* In theory we should not be seeing addresses of the form reg+0,
5582 but just in case it is generated, optimize it away. */
5583 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5584 return force_reg (Pmode, XEXP (x, 0));
5586 /* Make sure both operands are registers. */
5587 else if (GET_CODE (x) == PLUS)
5588 return gen_rtx_PLUS (Pmode,
5589 force_reg (Pmode, XEXP (x, 0)),
5590 force_reg (Pmode, XEXP (x, 1)));
5591 else
5592 return force_reg (Pmode, x);
5594 if (GET_CODE (x) == SYMBOL_REF)
5596 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5597 if (model != 0)
5598 return rs6000_legitimize_tls_address (x, model);
5601 extra = 0;
5602 switch (mode)
5604 case TFmode:
5605 case TDmode:
5606 case TImode:
5607 /* As in legitimate_offset_address_p we do not assume
5608 worst-case. The mode here is just a hint as to the registers
5609 used. A TImode is usually in gprs, but may actually be in
5610 fprs. Leave worst-case scenario for reload to handle via
5611 insn constraints. */
5612 extra = 8;
5613 break;
5614 default:
5615 break;
5618 if (GET_CODE (x) == PLUS
5619 && GET_CODE (XEXP (x, 0)) == REG
5620 && GET_CODE (XEXP (x, 1)) == CONST_INT
5621 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5622 >= 0x10000 - extra)
5623 && !(SPE_VECTOR_MODE (mode)
5624 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
5626 HOST_WIDE_INT high_int, low_int;
5627 rtx sum;
5628 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5629 if (low_int >= 0x8000 - extra)
5630 low_int = 0;
5631 high_int = INTVAL (XEXP (x, 1)) - low_int;
5632 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5633 GEN_INT (high_int)), 0);
5634 return plus_constant (Pmode, sum, low_int);
5636 else if (GET_CODE (x) == PLUS
5637 && GET_CODE (XEXP (x, 0)) == REG
5638 && GET_CODE (XEXP (x, 1)) != CONST_INT
5639 && GET_MODE_NUNITS (mode) == 1
5640 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5641 || (/* ??? Assume floating point reg based on mode? */
5642 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5643 && (mode == DFmode || mode == DDmode)))
5644 && !avoiding_indexed_address_p (mode))
5646 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5647 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5649 else if (SPE_VECTOR_MODE (mode)
5650 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
5652 if (mode == DImode)
5653 return x;
5654 /* We accept [reg + reg] and [reg + OFFSET]. */
5656 if (GET_CODE (x) == PLUS)
5658 rtx op1 = XEXP (x, 0);
5659 rtx op2 = XEXP (x, 1);
5660 rtx y;
5662 op1 = force_reg (Pmode, op1);
5664 if (GET_CODE (op2) != REG
5665 && (GET_CODE (op2) != CONST_INT
5666 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5667 || (GET_MODE_SIZE (mode) > 8
5668 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5669 op2 = force_reg (Pmode, op2);
5671 /* We can't always do [reg + reg] for these, because [reg +
5672 reg + offset] is not a legitimate addressing mode. */
5673 y = gen_rtx_PLUS (Pmode, op1, op2);
5675 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5676 return force_reg (Pmode, y);
5677 else
5678 return y;
5681 return force_reg (Pmode, x);
5683 else if ((TARGET_ELF
5684 #if TARGET_MACHO
5685 || !MACHO_DYNAMIC_NO_PIC_P
5686 #endif
5688 && TARGET_32BIT
5689 && TARGET_NO_TOC
5690 && ! flag_pic
5691 && GET_CODE (x) != CONST_INT
5692 && GET_CODE (x) != CONST_DOUBLE
5693 && CONSTANT_P (x)
5694 && GET_MODE_NUNITS (mode) == 1
5695 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5696 || (/* ??? Assume floating point reg based on mode? */
5697 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5698 && (mode == DFmode || mode == DDmode))))
5700 rtx reg = gen_reg_rtx (Pmode);
5701 if (TARGET_ELF)
5702 emit_insn (gen_elf_high (reg, x));
5703 else
5704 emit_insn (gen_macho_high (reg, x));
5705 return gen_rtx_LO_SUM (Pmode, reg, x);
5707 else if (TARGET_TOC
5708 && GET_CODE (x) == SYMBOL_REF
5709 && constant_pool_expr_p (x)
5710 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5711 return create_TOC_reference (x, NULL_RTX);
5712 else
5713 return x;
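/* Worked example of the high/low split above, using an illustrative
   constant; the helper is hypothetical and exists only to show the
   arithmetic.  For (plus (reg) (const_int 0x12348800)) the addis-style
   addition carries the high part while the memory displacement keeps
   the sign-extended low part.  */
static inline void
split_large_offset_example (void)
{
  HOST_WIDE_INT val = 0x12348800;                          /* sample */
  HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;  /* -0x7800 */
  HOST_WIDE_INT high = val - low;                          /* 0x12350000 */
  gcc_assert (high + low == val && low >= -0x8000 && low < 0x8000);
}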
5716 /* Debug version of rs6000_legitimize_address. */
5717 static rtx
5718 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5720 rtx ret;
5721 rtx insns;
5723 start_sequence ();
5724 ret = rs6000_legitimize_address (x, oldx, mode);
5725 insns = get_insns ();
5726 end_sequence ();
5728 if (ret != x)
5730 fprintf (stderr,
5731 "\nrs6000_legitimize_address: mode %s, old code %s, "
5732 "new code %s, modified\n",
5733 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5734 GET_RTX_NAME (GET_CODE (ret)));
5736 fprintf (stderr, "Original address:\n");
5737 debug_rtx (x);
5739 fprintf (stderr, "oldx:\n");
5740 debug_rtx (oldx);
5742 fprintf (stderr, "New address:\n");
5743 debug_rtx (ret);
5745 if (insns)
5747 fprintf (stderr, "Insns added:\n");
5748 debug_rtx_list (insns, 20);
5751 else
5753 fprintf (stderr,
5754 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5755 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5757 debug_rtx (x);
5760 if (insns)
5761 emit_insn (insns);
5763 return ret;
5766 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5767 We need to emit DTP-relative relocations. */
5769 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5770 static void
5771 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5773 switch (size)
5775 case 4:
5776 fputs ("\t.long\t", file);
5777 break;
5778 case 8:
5779 fputs (DOUBLE_INT_ASM_OP, file);
5780 break;
5781 default:
5782 gcc_unreachable ();
5784 output_addr_const (file, x);
5785 fputs ("@dtprel+0x8000", file);
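/* For example (symbol name hypothetical), a size-4 request emits
     .long foo@dtprel+0x8000
   while a size-8 request substitutes the target's 64-bit directive
   (DOUBLE_INT_ASM_OP) for .long.  */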
5788 /* In the name of slightly smaller debug output, and to cater to
5789 general assembler lossage, recognize various UNSPEC sequences
5790 and turn them back into a direct symbol reference. */
5792 static rtx
5793 rs6000_delegitimize_address (rtx orig_x)
5795 rtx x, y, offset;
5797 orig_x = delegitimize_mem_from_attrs (orig_x);
5798 x = orig_x;
5799 if (MEM_P (x))
5800 x = XEXP (x, 0);
5802 y = x;
5803 if (TARGET_CMODEL != CMODEL_SMALL
5804 && GET_CODE (y) == LO_SUM)
5805 y = XEXP (y, 1);
5807 offset = NULL_RTX;
5808 if (GET_CODE (y) == PLUS
5809 && GET_MODE (y) == Pmode
5810 && CONST_INT_P (XEXP (y, 1)))
5812 offset = XEXP (y, 1);
5813 y = XEXP (y, 0);
5816 if (GET_CODE (y) == UNSPEC
5817 && XINT (y, 1) == UNSPEC_TOCREL)
5819 #ifdef ENABLE_CHECKING
5820 if (REG_P (XVECEXP (y, 0, 1))
5821 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
5823 /* All good. */
5825 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
5827 /* Weirdness alert. df_note_compute can replace r2 with a
5828 debug_expr when this unspec is in a debug_insn.
5829 Seen in gcc.dg/pr51957-1.c */
5831 else
5833 debug_rtx (orig_x);
5834 abort ();
5836 #endif
5837 y = XVECEXP (y, 0, 0);
5839 #ifdef HAVE_AS_TLS
5840 /* Do not associate thread-local symbols with the original
5841 constant pool symbol. */
5842 if (TARGET_XCOFF
5843 && GET_CODE (y) == SYMBOL_REF
5844 && CONSTANT_POOL_ADDRESS_P (y)
5845 && SYMBOL_REF_TLS_MODEL (get_pool_constant (y)) >= TLS_MODEL_REAL)
5846 return orig_x;
5847 #endif
5849 if (offset != NULL_RTX)
5850 y = gen_rtx_PLUS (Pmode, y, offset);
5851 if (!MEM_P (orig_x))
5852 return y;
5853 else
5854 return replace_equiv_address_nv (orig_x, y);
5857 if (TARGET_MACHO
5858 && GET_CODE (orig_x) == LO_SUM
5859 && GET_CODE (XEXP (orig_x, 1)) == CONST)
5861 y = XEXP (XEXP (orig_x, 1), 0);
5862 if (GET_CODE (y) == UNSPEC
5863 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5864 return XVECEXP (y, 0, 0);
5867 return orig_x;
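/* Sketch of the common case handled above: a TOC-relative reference
     (mem (plus (unspec [sym, r2] UNSPEC_TOCREL) (const_int N)))
   comes back out as (mem (plus sym (const_int N))), so debug output
   sees the symbol itself rather than the TOC indirection; the exact
   shape also involves a LO_SUM for medium/large code models.  */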
5870 /* Return true if X shouldn't be emitted into the debug info.
5871 The linker doesn't like .toc section references from
5872 .debug_* sections, so reject .toc section symbols. */
5874 static bool
5875 rs6000_const_not_ok_for_debug_p (rtx x)
5877 if (GET_CODE (x) == SYMBOL_REF
5878 && CONSTANT_POOL_ADDRESS_P (x))
5880 rtx c = get_pool_constant (x);
5881 enum machine_mode cmode = get_pool_mode (x);
5882 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
5883 return true;
5886 return false;
5889 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5891 static GTY(()) rtx rs6000_tls_symbol;
5892 static rtx
5893 rs6000_tls_get_addr (void)
5895 if (!rs6000_tls_symbol)
5896 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5898 return rs6000_tls_symbol;
5901 /* Construct the SYMBOL_REF for TLS GOT references. */
5903 static GTY(()) rtx rs6000_got_symbol;
5904 static rtx
5905 rs6000_got_sym (void)
5907 if (!rs6000_got_symbol)
5909 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5910 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5911 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5914 return rs6000_got_symbol;
5917 /* AIX Thread-Local Address support. */
5919 static rtx
5920 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
5922 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
5923 const char *name;
5924 char *tlsname;
5926 name = XSTR (addr, 0);
5927 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
5928 or will be placed in the TLS private data section. */
5929 if (name[strlen (name) - 1] != ']'
5930 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
5931 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
5933 tlsname = XALLOCAVEC (char, strlen (name) + 4);
5934 strcpy (tlsname, name);
5935 strcat (tlsname,
5936 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
5937 tlsaddr = copy_rtx (addr);
5938 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
5940 else
5941 tlsaddr = addr;
5943 /* Place addr into TOC constant pool. */
5944 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
5946 /* Output the TOC entry and create the MEM referencing the value. */
5947 if (constant_pool_expr_p (XEXP (sym, 0))
5948 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
5950 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
5951 mem = gen_const_mem (Pmode, tocref);
5952 set_mem_alias_set (mem, get_TOC_alias_set ());
5954 else
5955 return sym;
5957 /* Use global-dynamic for local-dynamic. */
5958 if (model == TLS_MODEL_GLOBAL_DYNAMIC
5959 || model == TLS_MODEL_LOCAL_DYNAMIC)
5961 /* Create new TOC reference for @m symbol. */
5962 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
5963 tlsname = XALLOCAVEC (char, strlen (name) + 1);
5964 strcpy (tlsname, "*LCM");
5965 strcat (tlsname, name + 3);
5966 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
5967 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
5968 tocref = create_TOC_reference (modaddr, NULL_RTX);
5969 rtx modmem = gen_const_mem (Pmode, tocref);
5970 set_mem_alias_set (modmem, get_TOC_alias_set ());
5972 rtx modreg = gen_reg_rtx (Pmode);
5973 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
5975 tmpreg = gen_reg_rtx (Pmode);
5976 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
5978 dest = gen_reg_rtx (Pmode);
5979 if (TARGET_32BIT)
5980 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
5981 else
5982 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
5983 return dest;
5985 /* Obtain the TLS pointer: 32-bit call or 64-bit GPR 13. */
5986 else if (TARGET_32BIT)
5988 tlsreg = gen_reg_rtx (SImode);
5989 emit_insn (gen_tls_get_tpointer (tlsreg));
5991 else
5992 tlsreg = gen_rtx_REG (DImode, 13);
5994 /* Load the TOC value into temporary register. */
5995 tmpreg = gen_reg_rtx (Pmode);
5996 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
5997 set_unique_reg_note (get_last_insn (), REG_EQUAL,
5998 gen_rtx_MINUS (Pmode, addr, tlsreg));
6000 /* Add TOC symbol value to TLS pointer. */
6001 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
6003 return dest;
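/* Naming sketch for the csect qualification above (symbol names
   hypothetical):
     public "foo"          -> "foo[TL]"  (initialized TLS data)
     bss-initialized "bar" -> "bar[UL]"  (uninitialized TLS data)
   while a name already ending in ']' is left untouched.  */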
6006 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
6007 this (thread-local) address. */
6009 static rtx
6010 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
6012 rtx dest, insn;
6014 if (TARGET_XCOFF)
6015 return rs6000_legitimize_tls_address_aix (addr, model);
6017 dest = gen_reg_rtx (Pmode);
6018 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
6020 rtx tlsreg;
6022 if (TARGET_64BIT)
6024 tlsreg = gen_rtx_REG (Pmode, 13);
6025 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
6027 else
6029 tlsreg = gen_rtx_REG (Pmode, 2);
6030 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
6032 emit_insn (insn);
6034 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
6036 rtx tlsreg, tmp;
6038 tmp = gen_reg_rtx (Pmode);
6039 if (TARGET_64BIT)
6041 tlsreg = gen_rtx_REG (Pmode, 13);
6042 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
6044 else
6046 tlsreg = gen_rtx_REG (Pmode, 2);
6047 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
6049 emit_insn (insn);
6050 if (TARGET_64BIT)
6051 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
6052 else
6053 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
6054 emit_insn (insn);
6056 else
6058 rtx r3, got, tga, tmp1, tmp2, call_insn;
6060 /* We currently use relocations like @got@tlsgd for tls, which
6061 means the linker will handle allocation of tls entries, placing
6062 them in the .got section. So use a pointer to the .got section,
6063 not one to secondary TOC sections used by 64-bit -mminimal-toc,
6064 or to secondary GOT sections used by 32-bit -fPIC. */
6065 if (TARGET_64BIT)
6066 got = gen_rtx_REG (Pmode, 2);
6067 else
6069 if (flag_pic == 1)
6070 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
6071 else
6073 rtx gsym = rs6000_got_sym ();
6074 got = gen_reg_rtx (Pmode);
6075 if (flag_pic == 0)
6076 rs6000_emit_move (got, gsym, Pmode);
6077 else
6079 rtx mem, lab, last;
6081 tmp1 = gen_reg_rtx (Pmode);
6082 tmp2 = gen_reg_rtx (Pmode);
6083 mem = gen_const_mem (Pmode, tmp1);
6084 lab = gen_label_rtx ();
6085 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
6086 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
6087 if (TARGET_LINK_STACK)
6088 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
6089 emit_move_insn (tmp2, mem);
6090 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
6091 set_unique_reg_note (last, REG_EQUAL, gsym);
6096 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
6098 tga = rs6000_tls_get_addr ();
6099 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
6100 1, const0_rtx, Pmode);
6102 r3 = gen_rtx_REG (Pmode, 3);
6103 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6104 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
6105 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6106 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
6107 else if (DEFAULT_ABI == ABI_V4)
6108 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
6109 else
6110 gcc_unreachable ();
6111 call_insn = last_call_insn ();
6112 PATTERN (call_insn) = insn;
6113 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6114 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6115 pic_offset_table_rtx);
6117 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
6119 tga = rs6000_tls_get_addr ();
6120 tmp1 = gen_reg_rtx (Pmode);
6121 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
6122 1, const0_rtx, Pmode);
6124 r3 = gen_rtx_REG (Pmode, 3);
6125 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
6126 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
6127 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
6128 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
6129 else if (DEFAULT_ABI == ABI_V4)
6130 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
6131 else
6132 gcc_unreachable ();
6133 call_insn = last_call_insn ();
6134 PATTERN (call_insn) = insn;
6135 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
6136 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
6137 pic_offset_table_rtx);
6139 if (rs6000_tls_size == 16)
6141 if (TARGET_64BIT)
6142 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
6143 else
6144 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
6146 else if (rs6000_tls_size == 32)
6148 tmp2 = gen_reg_rtx (Pmode);
6149 if (TARGET_64BIT)
6150 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
6151 else
6152 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6153 emit_insn (insn);
6154 if (TARGET_64BIT)
6155 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6156 else
6157 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6159 else
6161 tmp2 = gen_reg_rtx (Pmode);
6162 if (TARGET_64BIT)
6163 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6164 else
6165 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6166 emit_insn (insn);
6167 insn = gen_rtx_SET (Pmode, dest,
6168 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6170 emit_insn (insn);
6172 else
6174 /* IE, or 64-bit offset LE. */
6175 tmp2 = gen_reg_rtx (Pmode);
6176 if (TARGET_64BIT)
6177 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6178 else
6179 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6180 emit_insn (insn);
6181 if (TARGET_64BIT)
6182 insn = gen_tls_tls_64 (dest, tmp2, addr);
6183 else
6184 insn = gen_tls_tls_32 (dest, tmp2, addr);
6185 emit_insn (insn);
6189 return dest;
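/* Rough dispatch map for the models handled above:
     local-exec, -mtls-size=16   one tprel add against r13 (or r2)
     local-exec, -mtls-size=32   tprel_ha / tprel_lo pair
     global-dynamic              call __tls_get_addr on a tlsgd GOT entry
     local-dynamic               call __tls_get_addr, then add a dtprel
                                 offset (16-bit, 32-bit or GOT-loaded)
     initial-exec / 64-bit LE    got_tprel load plus a tls add.  */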
6192 /* Return 1 if X contains a thread-local symbol. */
6194 static bool
6195 rs6000_tls_referenced_p (rtx x)
6197 if (! TARGET_HAVE_TLS)
6198 return false;
6200 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6203 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6205 static bool
6206 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6208 if (GET_CODE (x) == HIGH
6209 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6210 return true;
6212 /* A TLS symbol in the TOC cannot contain a sum. */
6213 if (GET_CODE (x) == CONST
6214 && GET_CODE (XEXP (x, 0)) == PLUS
6215 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6216 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
6217 return true;
6219 /* Do not place an ELF TLS symbol in the constant pool. */
6220 return TARGET_ELF && rs6000_tls_referenced_p (x);
6223 /* Return 1 if *X is a thread-local symbol. This is the same as
6224 rs6000_tls_symbol_ref except for the type of the unused argument. */
6226 static int
6227 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6229 return RS6000_SYMBOL_REF_TLS_P (*x);
6232 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6233 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6234 can be addressed relative to the TOC pointer. */
6236 static bool
6237 use_toc_relative_ref (rtx sym)
6239 return ((constant_pool_expr_p (sym)
6240 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6241 get_pool_mode (sym)))
6242 || (TARGET_CMODEL == CMODEL_MEDIUM
6243 && !CONSTANT_POOL_ADDRESS_P (sym)
6244 && SYMBOL_REF_LOCAL_P (sym)));
6247 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6248 replace the input X, or the original X if no replacement is called for.
6249 The output parameter *WIN is 1 if the calling macro should goto WIN,
6250 0 if it should not.
6252 For RS/6000, we wish to handle large displacements off a base
6253 register by splitting the addend across an addi/addis pair and the mem insn.
6254 This cuts the number of extra insns needed from 3 to 1.
6256 On Darwin, we use this to generate code for floating point constants.
6257 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6258 The Darwin code is inside #if TARGET_MACHO because only then are the
6259 machopic_* functions defined. */
6260 static rtx
6261 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6262 int opnum, int type,
6263 int ind_levels ATTRIBUTE_UNUSED, int *win)
6265 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6267 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6268 DFmode/DImode MEM. */
6269 if (reg_offset_p
6270 && opnum == 1
6271 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6272 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6273 reg_offset_p = false;
6275 /* We must recognize output that we have already generated ourselves. */
6276 if (GET_CODE (x) == PLUS
6277 && GET_CODE (XEXP (x, 0)) == PLUS
6278 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6279 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6280 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6282 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6283 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6284 opnum, (enum reload_type) type);
6285 *win = 1;
6286 return x;
6289 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6290 if (GET_CODE (x) == LO_SUM
6291 && GET_CODE (XEXP (x, 0)) == HIGH)
6293 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6294 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6295 opnum, (enum reload_type) type);
6296 *win = 1;
6297 return x;
6300 #if TARGET_MACHO
6301 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6302 && GET_CODE (x) == LO_SUM
6303 && GET_CODE (XEXP (x, 0)) == PLUS
6304 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6305 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6306 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6307 && machopic_operand_p (XEXP (x, 1)))
6309 /* Result of previous invocation of this function on Darwin
6310 floating point constant. */
6311 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6312 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6313 opnum, (enum reload_type) type);
6314 *win = 1;
6315 return x;
6317 #endif
6319 if (TARGET_CMODEL != CMODEL_SMALL
6320 && reg_offset_p
6321 && small_toc_ref (x, VOIDmode))
6323 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6324 x = gen_rtx_LO_SUM (Pmode, hi, x);
6325 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6326 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6327 opnum, (enum reload_type) type);
6328 *win = 1;
6329 return x;
6332 if (GET_CODE (x) == PLUS
6333 && GET_CODE (XEXP (x, 0)) == REG
6334 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6335 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6336 && GET_CODE (XEXP (x, 1)) == CONST_INT
6337 && reg_offset_p
6338 && !SPE_VECTOR_MODE (mode)
6339 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6340 || mode == DDmode || mode == TDmode
6341 || mode == DImode))
6342 && VECTOR_MEM_NONE_P (mode))
6344 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6345 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6346 HOST_WIDE_INT high
6347 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6349 /* Check for 32-bit overflow. */
6350 if (high + low != val)
6352 *win = 0;
6353 return x;
6356 /* Reload the high part into a base reg; leave the low part
6357 in the mem directly. */
6359 x = gen_rtx_PLUS (GET_MODE (x),
6360 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6361 GEN_INT (high)),
6362 GEN_INT (low));
6364 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6365 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6366 opnum, (enum reload_type) type);
6367 *win = 1;
6368 return x;
6371 if (GET_CODE (x) == SYMBOL_REF
6372 && reg_offset_p
6373 && VECTOR_MEM_NONE_P (mode)
6374 && !SPE_VECTOR_MODE (mode)
6375 #if TARGET_MACHO
6376 && DEFAULT_ABI == ABI_DARWIN
6377 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6378 && machopic_symbol_defined_p (x)
6379 #else
6380 && DEFAULT_ABI == ABI_V4
6381 && !flag_pic
6382 #endif
6383 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6384 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6385 without fprs.
6386 ??? Assume floating point reg based on mode? This assumption is
6387 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6388 where reload ends up doing a DFmode load of a constant from
6389 mem using two gprs. Unfortunately, at this point reload
6390 hasn't yet selected regs so poking around in reload data
6391 won't help and even if we could figure out the regs reliably,
6392 we'd still want to allow this transformation when the mem is
6393 naturally aligned. Since we say the address is good here, we
6394 can't disable offsets from LO_SUMs in mem_operand_gpr.
6395 FIXME: Allow offset from lo_sum for other modes too, when
6396 mem is sufficiently aligned. */
6397 && mode != TFmode
6398 && mode != TDmode
6399 && (mode != DImode || TARGET_POWERPC64)
6400 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6401 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6403 #if TARGET_MACHO
6404 if (flag_pic)
6406 rtx offset = machopic_gen_offset (x);
6407 x = gen_rtx_LO_SUM (GET_MODE (x),
6408 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6409 gen_rtx_HIGH (Pmode, offset)), offset);
6411 else
6412 #endif
6413 x = gen_rtx_LO_SUM (GET_MODE (x),
6414 gen_rtx_HIGH (Pmode, x), x);
6416 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6417 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6418 opnum, (enum reload_type) type);
6419 *win = 1;
6420 return x;
6423 /* Reload an offset address wrapped by an AND that represents the
6424 masking of the lower bits. Strip the outer AND and let reload
6425 convert the offset address into an indirect address. For VSX,
6426 force reload to create the address with an AND in a separate
6427 register, because we can't guarantee an altivec register will
6428 be used. */
6429 if (VECTOR_MEM_ALTIVEC_P (mode)
6430 && GET_CODE (x) == AND
6431 && GET_CODE (XEXP (x, 0)) == PLUS
6432 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6433 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6434 && GET_CODE (XEXP (x, 1)) == CONST_INT
6435 && INTVAL (XEXP (x, 1)) == -16)
6437 x = XEXP (x, 0);
6438 *win = 1;
6439 return x;
6442 if (TARGET_TOC
6443 && reg_offset_p
6444 && GET_CODE (x) == SYMBOL_REF
6445 && use_toc_relative_ref (x))
6447 x = create_TOC_reference (x, NULL_RTX);
6448 if (TARGET_CMODEL != CMODEL_SMALL)
6449 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6450 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6451 opnum, (enum reload_type) type);
6452 *win = 1;
6453 return x;
6455 *win = 0;
6456 return x;
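/* Small sketch of the overflow check in the PLUS case above: the
   high/low reconstruction only round-trips when VAL still fits in
   32 bits after the split.  The helper name is hypothetical.  */
static inline bool
reload_split_fits_example (HOST_WIDE_INT val)
{
  HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  HOST_WIDE_INT high = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  /* E.g. val = 0x7fffffff gives low = -1 and high = -0x80000000, so
     high + low != val and the address is left for reload to fix up.  */
  return high + low == val;
}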
6459 /* Debug version of rs6000_legitimize_reload_address. */
6460 static rtx
6461 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6462 int opnum, int type,
6463 int ind_levels, int *win)
6465 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6466 ind_levels, win);
6467 fprintf (stderr,
6468 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6469 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6470 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6471 debug_rtx (x);
6473 if (x == ret)
6474 fprintf (stderr, "Same address returned\n");
6475 else if (!ret)
6476 fprintf (stderr, "NULL returned\n");
6477 else
6479 fprintf (stderr, "New address:\n");
6480 debug_rtx (ret);
6483 return ret;
6486 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6487 that is a valid memory address for an instruction.
6488 The MODE argument is the machine mode for the MEM expression
6489 that wants to use this address.
6491 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
6492 refers to a constant pool entry of an address (or the sum of it
6493 plus a constant), a short (16-bit signed) constant plus a register,
6494 the sum of two registers, or a register indirect, possibly with an
6495 auto-increment. For DFmode, DDmode and DImode with a constant plus
6496 register, we must ensure that both words are addressable, or that
6497 PowerPC64 is in use with the offset word-aligned.
6499 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6500 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6501 because adjacent memory cells are accessed by adding word-sized offsets
6502 during assembly output. */
6503 static bool
6504 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6506 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6508 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6509 if (VECTOR_MEM_ALTIVEC_P (mode)
6510 && GET_CODE (x) == AND
6511 && GET_CODE (XEXP (x, 1)) == CONST_INT
6512 && INTVAL (XEXP (x, 1)) == -16)
6513 x = XEXP (x, 0);
6515 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
6516 return 0;
6517 if (legitimate_indirect_address_p (x, reg_ok_strict))
6518 return 1;
6519 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6520 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6521 && !SPE_VECTOR_MODE (mode)
6522 && mode != TFmode
6523 && mode != TDmode
6524 /* Restrict addressing for DI because of our SUBREG hackery. */
6525 && !(TARGET_E500_DOUBLE
6526 && (mode == DFmode || mode == DDmode || mode == DImode))
6527 && TARGET_UPDATE
6528 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6529 return 1;
6530 if (virtual_stack_registers_memory_p (x))
6531 return 1;
6532 if (reg_offset_p && legitimate_small_data_p (mode, x))
6533 return 1;
6534 if (reg_offset_p
6535 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6536 return 1;
6537 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
6538 if (! reg_ok_strict
6539 && reg_offset_p
6540 && GET_CODE (x) == PLUS
6541 && GET_CODE (XEXP (x, 0)) == REG
6542 && (XEXP (x, 0) == virtual_stack_vars_rtx
6543 || XEXP (x, 0) == arg_pointer_rtx)
6544 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6545 return 1;
6546 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
6547 return 1;
6548 if (mode != TImode
6549 && mode != TFmode
6550 && mode != TDmode
6551 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6552 || TARGET_POWERPC64
6553 || (mode != DFmode && mode != DDmode)
6554 || (TARGET_E500_DOUBLE && mode != DDmode))
6555 && (TARGET_POWERPC64 || mode != DImode)
6556 && !avoiding_indexed_address_p (mode)
6557 && legitimate_indexed_address_p (x, reg_ok_strict))
6558 return 1;
6559 if (GET_CODE (x) == PRE_MODIFY
6560 && mode != TImode
6561 && mode != TFmode
6562 && mode != TDmode
6563 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6564 || TARGET_POWERPC64
6565 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6566 && (TARGET_POWERPC64 || mode != DImode)
6567 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6568 && !SPE_VECTOR_MODE (mode)
6569 /* Restrict addressing for DI because of our SUBREG hackery. */
6570 && !(TARGET_E500_DOUBLE
6571 && (mode == DFmode || mode == DDmode || mode == DImode))
6572 && TARGET_UPDATE
6573 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6574 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
6575 reg_ok_strict, false)
6576 || (!avoiding_indexed_address_p (mode)
6577 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6578 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6579 return 1;
6580 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6581 return 1;
6582 return 0;
6585 /* Debug version of rs6000_legitimate_address_p. */
6586 static bool
6587 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6588 bool reg_ok_strict)
6590 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6591 fprintf (stderr,
6592 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6593 "strict = %d, code = %s\n",
6594 ret ? "true" : "false",
6595 GET_MODE_NAME (mode),
6596 reg_ok_strict,
6597 GET_RTX_NAME (GET_CODE (x)));
6598 debug_rtx (x);
6600 return ret;
6603 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6605 static bool
6606 rs6000_mode_dependent_address_p (const_rtx addr,
6607 addr_space_t as ATTRIBUTE_UNUSED)
6609 return rs6000_mode_dependent_address_ptr (addr);
6612 /* Return true if ADDR (a legitimate address expression)
6613 has an effect that depends on the machine mode it is used for.
6615 On the RS/6000 this is true of all integral offsets (since AltiVec
6616 and VSX modes don't allow them) and of pre-increment and decrement.
6618 ??? Except that due to conceptual problems in offsettable_address_p
6619 we can't really report the problems of integral offsets. So leave
6620 this assuming that the adjustable offset must be valid for the
6621 sub-words of a TFmode operand, which is what we had before. */
6623 static bool
6624 rs6000_mode_dependent_address (const_rtx addr)
6626 switch (GET_CODE (addr))
6628 case PLUS:
6629 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6630 is considered a legitimate address before reload, so there
6631 are no offset restrictions in that case. Note that this
6632 condition is safe in strict mode because any address involving
6633 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6634 been rejected as illegitimate. */
6635 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6636 && XEXP (addr, 0) != arg_pointer_rtx
6637 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6639 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
6640 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
6642 break;
6644 case LO_SUM:
6645 /* Anything in the constant pool is sufficiently aligned that
6646 all bytes have the same high part address. */
6647 return !legitimate_constant_pool_address_p (addr, QImode, false);
6649 /* Auto-increment cases are now treated generically in recog.c. */
6650 case PRE_MODIFY:
6651 return TARGET_UPDATE;
6653 /* AND is only allowed in AltiVec loads. */
6654 case AND:
6655 return true;
6657 default:
6658 break;
6661 return false;
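/* Concretely, the PLUS case above accepts constant offsets in the
   biased window [-0x8000, 0x7ff7] on 64-bit (or [-0x8000, 0x7ff3] on
   32-bit) as safe for every sub-word of a multi-register access;
   anything outside it is reported as mode-dependent.  */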
6664 /* Debug version of rs6000_mode_dependent_address. */
6665 static bool
6666 rs6000_debug_mode_dependent_address (const_rtx addr)
6668 bool ret = rs6000_mode_dependent_address (addr);
6670 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6671 ret ? "true" : "false");
6672 debug_rtx (addr);
6674 return ret;
6677 /* Implement FIND_BASE_TERM. */
6679 rtx
6680 rs6000_find_base_term (rtx op)
6682 rtx base;
6684 base = op;
6685 if (GET_CODE (base) == CONST)
6686 base = XEXP (base, 0);
6687 if (GET_CODE (base) == PLUS)
6688 base = XEXP (base, 0);
6689 if (GET_CODE (base) == UNSPEC)
6690 switch (XINT (base, 1))
6692 case UNSPEC_TOCREL:
6693 case UNSPEC_MACHOPIC_OFFSET:
6694 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6695 for aliasing purposes. */
6696 return XVECEXP (base, 0, 0);
6699 return op;
6702 /* More elaborate version of recog's offsettable_memref_p predicate
6703 that works around the ??? note of rs6000_mode_dependent_address.
6704 In particular it accepts
6706 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6708 in 32-bit mode, which the recog predicate rejects. */
6710 static bool
6711 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
6713 bool worst_case;
6715 if (!MEM_P (op))
6716 return false;
6718 /* First mimic offsettable_memref_p. */
6719 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
6720 return true;
6722 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6723 the latter predicate knows nothing about the mode of the memory
6724 reference and, therefore, assumes that it is the largest supported
6725 mode (TFmode). As a consequence, legitimate offsettable memory
6726 references are rejected. rs6000_legitimate_offset_address_p contains
6727 the correct logic for the PLUS case of rs6000_mode_dependent_address,
6728 at least with a little bit of help here given that we know the
6729 actual registers used. */
6730 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
6731 || GET_MODE_SIZE (reg_mode) == 4);
6732 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
6733 true, worst_case);
6736 /* Change register usage conditional on target flags. */
6737 static void
6738 rs6000_conditional_register_usage (void)
6740 int i;
6742 if (TARGET_DEBUG_TARGET)
6743 fprintf (stderr, "rs6000_conditional_register_usage called\n");
6745 /* Set MQ register fixed (already call_used) so that it will not be
6746 allocated. */
6747 fixed_regs[64] = 1;
6749 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6750 if (TARGET_64BIT)
6751 fixed_regs[13] = call_used_regs[13]
6752 = call_really_used_regs[13] = 1;
6754 /* Conditionally disable FPRs. */
6755 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6756 for (i = 32; i < 64; i++)
6757 fixed_regs[i] = call_used_regs[i]
6758 = call_really_used_regs[i] = 1;
6760 /* The TOC register is not killed across calls in a way that is
6761 visible to the compiler. */
6762 if (DEFAULT_ABI == ABI_AIX)
6763 call_really_used_regs[2] = 0;
6765 if (DEFAULT_ABI == ABI_V4
6766 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6767 && flag_pic == 2)
6768 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6770 if (DEFAULT_ABI == ABI_V4
6771 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6772 && flag_pic == 1)
6773 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6774 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6775 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6777 if (DEFAULT_ABI == ABI_DARWIN
6778 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6779 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6780 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6781 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6783 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6784 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6785 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6787 if (TARGET_SPE)
6789 global_regs[SPEFSCR_REGNO] = 1;
6790 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6791 registers in prologues and epilogues. We no longer use r14
6792 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6793 pool for link-compatibility with older versions of GCC. Once
6794 "old" code has died out, we can return r14 to the allocation
6795 pool. */
6796 fixed_regs[14]
6797 = call_used_regs[14]
6798 = call_really_used_regs[14] = 1;
6801 if (!TARGET_ALTIVEC && !TARGET_VSX)
6803 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6804 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6805 call_really_used_regs[VRSAVE_REGNO] = 1;
6808 if (TARGET_ALTIVEC || TARGET_VSX)
6809 global_regs[VSCR_REGNO] = 1;
6811 if (TARGET_ALTIVEC_ABI)
6813 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6814 call_used_regs[i] = call_really_used_regs[i] = 1;
6816 /* AIX reserves VR20:31 in non-extended ABI mode. */
6817 if (TARGET_XCOFF)
6818 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6819 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6823 /* Try to output insns to set DEST equal to the constant SOURCE if it
6824 can be done in less than N insns. Do all computations in MODE.
6825 Returns the place where the output has been placed if it can be
6826 done and the insns have been emitted. If it would take more than N
6827 insns, zero is returned and no insns are emitted. */
6829 rtx
6830 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6831 rtx source, int n ATTRIBUTE_UNUSED)
6833 rtx result, insn, set;
6834 HOST_WIDE_INT c0, c1;
6836 switch (mode)
6838 case QImode:
6839 case HImode:
6840 if (dest == NULL)
6841 dest = gen_reg_rtx (mode);
6842 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6843 return dest;
6845 case SImode:
6846 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
6848 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6849 GEN_INT (INTVAL (source)
6850 & (~ (HOST_WIDE_INT) 0xffff))));
6851 emit_insn (gen_rtx_SET (VOIDmode, dest,
6852 gen_rtx_IOR (SImode, copy_rtx (result),
6853 GEN_INT (INTVAL (source) & 0xffff))));
6854 result = dest;
6855 break;
6857 case DImode:
6858 switch (GET_CODE (source))
6860 case CONST_INT:
6861 c0 = INTVAL (source);
6862 c1 = -(c0 < 0);
6863 break;
6865 case CONST_DOUBLE:
6866 #if HOST_BITS_PER_WIDE_INT >= 64
6867 c0 = CONST_DOUBLE_LOW (source);
6868 c1 = -(c0 < 0);
6869 #else
6870 c0 = CONST_DOUBLE_LOW (source);
6871 c1 = CONST_DOUBLE_HIGH (source);
6872 #endif
6873 break;
6875 default:
6876 gcc_unreachable ();
6879 result = rs6000_emit_set_long_const (dest, c0, c1);
6880 break;
6882 default:
6883 gcc_unreachable ();
6886 insn = get_last_insn ();
6887 set = single_set (insn);
6888 if (! CONSTANT_P (SET_SRC (set)))
6889 set_unique_reg_note (insn, REG_EQUAL, source);
6891 return result;
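/* SImode sketch of the two SETs above with an illustrative constant:
   for c = 0x12345678 they correspond to
     lis  rD, 0x1234        # c & ~0xffff
     ori  rD, rD, 0x5678    # c & 0xffff
   so any 32-bit constant costs at most two insns.  */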
6894 /* Having failed to find a 3-insn sequence in rs6000_emit_set_const,
6895 fall back to a straightforward decomposition. We do this to avoid
6896 the exponential run times encountered when looking for longer sequences
6897 with rs6000_emit_set_const. */
6898 static rtx
6899 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6901 if (!TARGET_POWERPC64)
6903 rtx operand1, operand2;
6905 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6906 DImode);
6907 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6908 DImode);
6909 emit_move_insn (operand1, GEN_INT (c1));
6910 emit_move_insn (operand2, GEN_INT (c2));
6912 else
6914 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6916 ud1 = c1 & 0xffff;
6917 ud2 = (c1 & 0xffff0000) >> 16;
6918 #if HOST_BITS_PER_WIDE_INT >= 64
6919 c2 = c1 >> 32;
6920 #endif
6921 ud3 = c2 & 0xffff;
6922 ud4 = (c2 & 0xffff0000) >> 16;
6924 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6925 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6927 if (ud1 & 0x8000)
6928 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6929 else
6930 emit_move_insn (dest, GEN_INT (ud1));
6933 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6934 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6936 if (ud2 & 0x8000)
6937 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6938 - 0x80000000));
6939 else
6940 emit_move_insn (dest, GEN_INT (ud2 << 16));
6941 if (ud1 != 0)
6942 emit_move_insn (copy_rtx (dest),
6943 gen_rtx_IOR (DImode, copy_rtx (dest),
6944 GEN_INT (ud1)));
6946 else if (ud3 == 0 && ud4 == 0)
6948 gcc_assert (ud2 & 0x8000);
6949 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6950 - 0x80000000));
6951 if (ud1 != 0)
6952 emit_move_insn (copy_rtx (dest),
6953 gen_rtx_IOR (DImode, copy_rtx (dest),
6954 GEN_INT (ud1)));
6955 emit_move_insn (copy_rtx (dest),
6956 gen_rtx_ZERO_EXTEND (DImode,
6957 gen_lowpart (SImode,
6958 copy_rtx (dest))));
6960 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6961 || (ud4 == 0 && ! (ud3 & 0x8000)))
6963 if (ud3 & 0x8000)
6964 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6965 - 0x80000000));
6966 else
6967 emit_move_insn (dest, GEN_INT (ud3 << 16));
6969 if (ud2 != 0)
6970 emit_move_insn (copy_rtx (dest),
6971 gen_rtx_IOR (DImode, copy_rtx (dest),
6972 GEN_INT (ud2)));
6973 emit_move_insn (copy_rtx (dest),
6974 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6975 GEN_INT (16)));
6976 if (ud1 != 0)
6977 emit_move_insn (copy_rtx (dest),
6978 gen_rtx_IOR (DImode, copy_rtx (dest),
6979 GEN_INT (ud1)));
6981 else
6983 if (ud4 & 0x8000)
6984 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6985 - 0x80000000));
6986 else
6987 emit_move_insn (dest, GEN_INT (ud4 << 16));
6989 if (ud3 != 0)
6990 emit_move_insn (copy_rtx (dest),
6991 gen_rtx_IOR (DImode, copy_rtx (dest),
6992 GEN_INT (ud3)));
6994 emit_move_insn (copy_rtx (dest),
6995 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6996 GEN_INT (32)));
6997 if (ud2 != 0)
6998 emit_move_insn (copy_rtx (dest),
6999 gen_rtx_IOR (DImode, copy_rtx (dest),
7000 GEN_INT (ud2 << 16)));
7001 if (ud1 != 0)
7002 emit_move_insn (copy_rtx (dest),
7003 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
7006 return dest;
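/* Worst-case sketch for the final branch above, with an illustrative
   constant c = 0x123456789abcdef0 (ud4..ud1 = 0x1234, 0x5678, 0x9abc,
   0xdef0); the emitted sequence is, in asm terms,
     lis   rD, 0x1234
     ori   rD, rD, 0x5678
     sldi  rD, rD, 32
     oris  rD, rD, 0x9abc
     ori   rD, rD, 0xdef0
   i.e. at most five insns for an arbitrary 64-bit constant.  */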
7009 /* Helper for the following function. Get rid of [r+r] memory refs
7010 in cases where indexed addressing won't work (TImode, TFmode, TDmode). */
7012 static void
7013 rs6000_eliminate_indexed_memrefs (rtx operands[2])
7015 if (reload_in_progress)
7016 return;
7018 if (GET_CODE (operands[0]) == MEM
7019 && GET_CODE (XEXP (operands[0], 0)) != REG
7020 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
7021 GET_MODE (operands[0]), false))
7022 operands[0]
7023 = replace_equiv_address (operands[0],
7024 copy_addr_to_reg (XEXP (operands[0], 0)));
7026 if (GET_CODE (operands[1]) == MEM
7027 && GET_CODE (XEXP (operands[1], 0)) != REG
7028 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
7029 GET_MODE (operands[1]), false))
7030 operands[1]
7031 = replace_equiv_address (operands[1],
7032 copy_addr_to_reg (XEXP (operands[1], 0)));
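/* E.g. a TImode access through (mem:TI (plus r9 r10)) has its address
   copied to a fresh pseudo first, leaving (mem:TI (reg tmp)) that the
   multi-word move patterns can handle (register names illustrative).  */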
7035 /* Emit a move from SOURCE to DEST in mode MODE. */
7036 void
7037 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
7039 rtx operands[2];
7040 operands[0] = dest;
7041 operands[1] = source;
7043 if (TARGET_DEBUG_ADDR)
7045 fprintf (stderr,
7046 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
7047 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
7048 GET_MODE_NAME (mode),
7049 reload_in_progress,
7050 reload_completed,
7051 can_create_pseudo_p ());
7052 debug_rtx (dest);
7053 fprintf (stderr, "source:\n");
7054 debug_rtx (source);
7057 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
7058 if (GET_CODE (operands[1]) == CONST_DOUBLE
7059 && ! FLOAT_MODE_P (mode)
7060 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
7062 /* FIXME. This should never happen. */
7063 /* Since it seems that it does, do the safe thing and convert
7064 to a CONST_INT. */
7065 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
7067 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
7068 || FLOAT_MODE_P (mode)
7069 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
7070 || CONST_DOUBLE_LOW (operands[1]) < 0)
7071 && (CONST_DOUBLE_HIGH (operands[1]) != -1
7072 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
7074 /* Check if GCC is setting up a block move that will end up using FP
7075 registers as temporaries. We must make sure this is acceptable. */
7076 if (GET_CODE (operands[0]) == MEM
7077 && GET_CODE (operands[1]) == MEM
7078 && mode == DImode
7079 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
7080 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
7081 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
7082 ? 32 : MEM_ALIGN (operands[0])))
7083 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
7084 ? 32
7085 : MEM_ALIGN (operands[1]))))
7086 && ! MEM_VOLATILE_P (operands [0])
7087 && ! MEM_VOLATILE_P (operands [1]))
7089 emit_move_insn (adjust_address (operands[0], SImode, 0),
7090 adjust_address (operands[1], SImode, 0));
7091 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
7092 adjust_address (copy_rtx (operands[1]), SImode, 4));
7093 return;
7096 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
7097 && !gpc_reg_operand (operands[1], mode))
7098 operands[1] = force_reg (mode, operands[1]);
7100 /* Recognize the case where operands[1] is a reference to thread-local
7101 data and load its address into a register. */
7102 if (rs6000_tls_referenced_p (operands[1]))
7104 enum tls_model model;
7105 rtx tmp = operands[1];
7106 rtx addend = NULL;
7108 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
7110 addend = XEXP (XEXP (tmp, 0), 1);
7111 tmp = XEXP (XEXP (tmp, 0), 0);
7114 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
7115 model = SYMBOL_REF_TLS_MODEL (tmp);
7116 gcc_assert (model != 0);
7118 tmp = rs6000_legitimize_tls_address (tmp, model);
7119 if (addend)
7121 tmp = gen_rtx_PLUS (mode, tmp, addend);
7122 tmp = force_operand (tmp, operands[0]);
7124 operands[1] = tmp;
7127 /* Handle the case where reload calls us with an invalid address. */
7128 if (reload_in_progress && mode == Pmode
7129 && (! general_operand (operands[1], mode)
7130 || ! nonimmediate_operand (operands[0], mode)))
7131 goto emit_set;
7133 /* 128-bit constant floating-point values on Darwin should really be
7134 loaded as two parts. */
7135 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
7136 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
7138 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
7139 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7140 DFmode);
7141 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7142 GET_MODE_SIZE (DFmode)),
7143 simplify_gen_subreg (DFmode, operands[1], mode,
7144 GET_MODE_SIZE (DFmode)),
7145 DFmode);
7146 return;
7149 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7150 cfun->machine->sdmode_stack_slot =
7151 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7153 if (reload_in_progress
7154 && mode == SDmode
7155 && MEM_P (operands[0])
7156 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7157 && REG_P (operands[1]))
7159 if (FP_REGNO_P (REGNO (operands[1])))
7161 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7162 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7163 emit_insn (gen_movsd_store (mem, operands[1]));
7165 else if (INT_REGNO_P (REGNO (operands[1])))
7167 rtx mem = adjust_address_nv (operands[0], mode, 4);
7168 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7169 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7171 else
7172 gcc_unreachable();
7173 return;
7175 if (reload_in_progress
7176 && mode == SDmode
7177 && REG_P (operands[0])
7178 && MEM_P (operands[1])
7179 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7181 if (FP_REGNO_P (REGNO (operands[0])))
7183 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7184 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7185 emit_insn (gen_movsd_load (operands[0], mem));
7187 else if (INT_REGNO_P (REGNO (operands[0])))
7189 rtx mem = adjust_address_nv (operands[1], mode, 4);
7190 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7191 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7193 else
7194 gcc_unreachable();
7195 return;
7198 /* FIXME: In the long term, this switch statement should go away
7199 and be replaced by a sequence of tests based on things like
7200 mode == Pmode. */
7201 switch (mode)
7203 case HImode:
7204 case QImode:
7205 if (CONSTANT_P (operands[1])
7206 && GET_CODE (operands[1]) != CONST_INT)
7207 operands[1] = force_const_mem (mode, operands[1]);
7208 break;
7210 case TFmode:
7211 case TDmode:
7212 rs6000_eliminate_indexed_memrefs (operands);
7213 /* fall through */
7215 case DFmode:
7216 case DDmode:
7217 case SFmode:
7218 case SDmode:
7219 if (CONSTANT_P (operands[1])
7220 && ! easy_fp_constant (operands[1], mode))
7221 operands[1] = force_const_mem (mode, operands[1]);
7222 break;
7224 case V16QImode:
7225 case V8HImode:
7226 case V4SFmode:
7227 case V4SImode:
7228 case V4HImode:
7229 case V2SFmode:
7230 case V2SImode:
7231 case V1DImode:
7232 case V2DFmode:
7233 case V2DImode:
7234 if (CONSTANT_P (operands[1])
7235 && !easy_vector_constant (operands[1], mode))
7236 operands[1] = force_const_mem (mode, operands[1]);
7237 break;
7239 case SImode:
7240 case DImode:
7241 /* Use the default pattern for the address of ELF small data. */
7242 if (TARGET_ELF
7243 && mode == Pmode
7244 && DEFAULT_ABI == ABI_V4
7245 && (GET_CODE (operands[1]) == SYMBOL_REF
7246 || GET_CODE (operands[1]) == CONST)
7247 && small_data_operand (operands[1], mode))
7249 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7250 return;
7253 if (DEFAULT_ABI == ABI_V4
7254 && mode == Pmode && mode == SImode
7255 && flag_pic == 1 && got_operand (operands[1], mode))
7257 emit_insn (gen_movsi_got (operands[0], operands[1]));
7258 return;
7261 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7262 && TARGET_NO_TOC
7263 && ! flag_pic
7264 && mode == Pmode
7265 && CONSTANT_P (operands[1])
7266 && GET_CODE (operands[1]) != HIGH
7267 && GET_CODE (operands[1]) != CONST_INT)
7269 rtx target = (!can_create_pseudo_p ()
7270 ? operands[0]
7271 : gen_reg_rtx (mode));
7273 /* If this is a function address on -mcall-aixdesc,
7274 convert it to the address of the descriptor. */
7275 if (DEFAULT_ABI == ABI_AIX
7276 && GET_CODE (operands[1]) == SYMBOL_REF
7277 && XSTR (operands[1], 0)[0] == '.')
7279 const char *name = XSTR (operands[1], 0);
7280 rtx new_ref;
7281 while (*name == '.')
7282 name++;
7283 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7284 CONSTANT_POOL_ADDRESS_P (new_ref)
7285 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7286 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7287 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7288 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7289 operands[1] = new_ref;
7292 if (DEFAULT_ABI == ABI_DARWIN)
7294 #if TARGET_MACHO
7295 if (MACHO_DYNAMIC_NO_PIC_P)
7297 /* Take care of any required data indirection. */
7298 operands[1] = rs6000_machopic_legitimize_pic_address (
7299 operands[1], mode, operands[0]);
7300 if (operands[0] != operands[1])
7301 emit_insn (gen_rtx_SET (VOIDmode,
7302 operands[0], operands[1]));
7303 return;
7305 #endif
7306 emit_insn (gen_macho_high (target, operands[1]));
7307 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7308 return;
7311 emit_insn (gen_elf_high (target, operands[1]));
7312 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7313 return;
7316 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7317 and we have put it in the TOC, we just need to make a TOC-relative
7318 reference to it. */
7319 if (TARGET_TOC
7320 && GET_CODE (operands[1]) == SYMBOL_REF
7321 && use_toc_relative_ref (operands[1]))
7322 operands[1] = create_TOC_reference (operands[1], operands[0]);
7323 else if (mode == Pmode
7324 && CONSTANT_P (operands[1])
7325 && GET_CODE (operands[1]) != HIGH
7326 && ((GET_CODE (operands[1]) != CONST_INT
7327 && ! easy_fp_constant (operands[1], mode))
7328 || (GET_CODE (operands[1]) == CONST_INT
7329 && (num_insns_constant (operands[1], mode)
7330 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7331 || (GET_CODE (operands[0]) == REG
7332 && FP_REGNO_P (REGNO (operands[0]))))
7333 && !toc_relative_expr_p (operands[1], false)
7334 && (TARGET_CMODEL == CMODEL_SMALL
7335 || can_create_pseudo_p ()
7336 || (REG_P (operands[0])
7337 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7340 #if TARGET_MACHO
7341 /* Darwin uses a special PIC legitimizer. */
7342 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7344 operands[1] =
7345 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7346 operands[0]);
7347 if (operands[0] != operands[1])
7348 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7349 return;
7351 #endif
7353 /* If we are to limit the number of things we put in the TOC and
7354 this is a symbol plus a constant we can add in one insn,
7355 just put the symbol in the TOC and add the constant. Don't do
7356 this if reload is in progress. */
7357 if (GET_CODE (operands[1]) == CONST
7358 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7359 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7360 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7361 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7362 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7363 && ! side_effects_p (operands[0]))
7365 rtx sym =
7366 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7367 rtx other = XEXP (XEXP (operands[1], 0), 1);
7369 sym = force_reg (mode, sym);
7370 emit_insn (gen_add3_insn (operands[0], sym, other));
7371 return;
7374 operands[1] = force_const_mem (mode, operands[1]);
7376 if (TARGET_TOC
7377 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7378 && constant_pool_expr_p (XEXP (operands[1], 0))
7379 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7380 get_pool_constant (XEXP (operands[1], 0)),
7381 get_pool_mode (XEXP (operands[1], 0))))
7383 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7384 operands[0]);
7385 operands[1] = gen_const_mem (mode, tocref);
7386 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7389 break;
7391 case TImode:
7392 rs6000_eliminate_indexed_memrefs (operands);
7393 break;
7395 default:
7396 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7399 /* Above, we may have called force_const_mem which may have returned
7400 an invalid address. If we can, fix this up; otherwise, reload will
7401 have to deal with it. */
7402 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7403 operands[1] = validize_mem (operands[1]);
7405 emit_set:
7406 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7409 /* Return true if a structure, union or array containing FIELD should be
7410 accessed using `BLKmode'.
7412 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7413 entire thing in a DI and use subregs to access the internals.
7414 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7415 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7416 best thing to do is set structs to BLKmode and avoid Severe Tire
7417 Damage.
7419 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
7420 fit into one GPR, whereas DI still needs two.
7422 static bool
7423 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7425 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7426 || (TARGET_E500_DOUBLE && mode == DFmode));
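/* A hypothetical illustration of the anomaly above (the typedef and
   struct names are invented): the struct's only member is an 8-byte
   V2SI vector, which fits in a single GPR, so giving the struct DImode
   would split it across a register pair and produce the problematic
   subregs.  The hook therefore forces BLKmode for it.  */
#if 0
typedef int v2si_t __attribute__ ((vector_size (8)));  /* V2SImode */
struct spe_box { v2si_t v; };  /* accessed in BLKmode, not DImode */
#endif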
7429 /* Nonzero if we can use a floating-point register to pass this arg. */
7430 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7431 (SCALAR_FLOAT_MODE_P (MODE) \
7432 && (CUM)->fregno <= FP_ARG_MAX_REG \
7433 && TARGET_HARD_FLOAT && TARGET_FPRS)
7435 /* Nonzero if we can use an AltiVec register to pass this arg. */
7436 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7437 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7438 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7439 && TARGET_ALTIVEC_ABI \
7440 && (NAMED))
7442 /* Return a nonzero value to indicate that the function value is returned in
7443 memory, just as large structures are always returned. TYPE will be
7444 the data type of the value, and FNTYPE will be the type of the
7445 function doing the returning, or @code{NULL} for libcalls.
7447 The AIX ABI for the RS/6000 specifies that all structures are
7448 returned in memory. The Darwin ABI does the same.
7450 For the Darwin 64 Bit ABI, a function result can be returned in
7451 registers or in memory, depending on the size of the return data
7452 type. If it is returned in registers, the value occupies the same
7453 registers as it would if it were the first and only function
7454 argument. Otherwise, the function places its result in memory at
7455 the location pointed to by GPR3.
7457 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7458 but a draft put them in memory, and GCC used to implement the draft
7459 instead of the final standard. Therefore, aix_struct_return
7460 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7461 compatibility can change DRAFT_V4_STRUCT_RET to override the
7462 default, and -m switches get the final word. See
7463 rs6000_option_override_internal for more details.
7465 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
7466 long double support is enabled. These values are returned in memory.
7468 int_size_in_bytes returns -1 for variable size objects, which go in
7469 memory always. The cast to unsigned makes -1 > 8. */
7471 static bool
7472 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7474 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7475 if (TARGET_MACHO
7476 && rs6000_darwin64_abi
7477 && TREE_CODE (type) == RECORD_TYPE
7478 && int_size_in_bytes (type) > 0)
7480 CUMULATIVE_ARGS valcum;
7481 rtx valret;
7483 valcum.words = 0;
7484 valcum.fregno = FP_ARG_MIN_REG;
7485 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7486 /* Do a trial code generation as if this were going to be passed
7487 as an argument; if any part goes in memory, we return NULL. */
7488 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7489 if (valret)
7490 return false;
7491 /* Otherwise fall through to more conventional ABI rules. */
7494 if (AGGREGATE_TYPE_P (type)
7495 && (aix_struct_return
7496 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7497 return true;
7499 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7500 modes only exist for GCC vector types if -maltivec. */
7501 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7502 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7503 return false;
7505 /* Return synthetic vectors in memory. */
7506 if (TREE_CODE (type) == VECTOR_TYPE
7507 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7509 static bool warned_for_return_big_vectors = false;
7510 if (!warned_for_return_big_vectors)
7512 warning (0, "GCC vector returned by reference: "
7513 "non-standard ABI extension with no compatibility guarantee");
7514 warned_for_return_big_vectors = true;
7516 return true;
7519 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7520 return true;
7522 return false;
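/* Hypothetical examples of the size rule above (struct names are
   invented): under the SVR4 ABI with aix_struct_return clear, an
   8-byte struct comes back in r3/r4, while anything larger -- or any
   aggregate at all when aix_struct_return is set -- goes to memory.  */
#if 0
struct two_ints   { int a, b; };     /* 8 bytes: returned in r3/r4   */
struct three_ints { int a, b, c; };  /* 12 bytes: returned in memory */
#endif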
7525 #ifdef HAVE_AS_GNU_ATTRIBUTE
7526 /* Return TRUE if a call to function FNDECL may be one that
7527 potentially affects the function calling ABI of the object file. */
7529 static bool
7530 call_ABI_of_interest (tree fndecl)
7532 if (cgraph_state == CGRAPH_STATE_EXPANSION)
7534 struct cgraph_node *c_node;
7536 /* Libcalls are always interesting. */
7537 if (fndecl == NULL_TREE)
7538 return true;
7540 /* Any call to an external function is interesting. */
7541 if (DECL_EXTERNAL (fndecl))
7542 return true;
7544 /* Interesting functions that we are emitting in this object file. */
7545 c_node = cgraph_get_node (fndecl);
7546 c_node = cgraph_function_or_thunk_node (c_node, NULL);
7547 return !cgraph_only_called_directly_p (c_node);
7549 return false;
7551 #endif
7553 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7554 for a call to a function whose data type is FNTYPE.
7555 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
7557 For incoming args we set the number of arguments in the prototype large
7558 so we never return a PARALLEL. */
7560 void
7561 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7562 rtx libname ATTRIBUTE_UNUSED, int incoming,
7563 int libcall, int n_named_args,
7564 tree fndecl ATTRIBUTE_UNUSED,
7565 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7567 static CUMULATIVE_ARGS zero_cumulative;
7569 *cum = zero_cumulative;
7570 cum->words = 0;
7571 cum->fregno = FP_ARG_MIN_REG;
7572 cum->vregno = ALTIVEC_ARG_MIN_REG;
7573 cum->prototype = (fntype && prototype_p (fntype));
7574 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7575 ? CALL_LIBCALL : CALL_NORMAL);
7576 cum->sysv_gregno = GP_ARG_MIN_REG;
7577 cum->stdarg = stdarg_p (fntype);
7579 cum->nargs_prototype = 0;
7580 if (incoming || cum->prototype)
7581 cum->nargs_prototype = n_named_args;
7583 /* Check for a longcall attribute. */
7584 if ((!fntype && rs6000_default_long_calls)
7585 || (fntype
7586 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7587 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7588 cum->call_cookie |= CALL_LONG;
7590 if (TARGET_DEBUG_ARG)
7592 fprintf (stderr, "\ninit_cumulative_args:");
7593 if (fntype)
7595 tree ret_type = TREE_TYPE (fntype);
7596 fprintf (stderr, " ret code = %s,",
7597 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7600 if (cum->call_cookie & CALL_LONG)
7601 fprintf (stderr, " longcall,");
7603 fprintf (stderr, " proto = %d, nargs = %d\n",
7604 cum->prototype, cum->nargs_prototype);
7607 #ifdef HAVE_AS_GNU_ATTRIBUTE
7608 if (DEFAULT_ABI == ABI_V4)
7610 cum->escapes = call_ABI_of_interest (fndecl);
7611 if (cum->escapes)
7613 tree return_type;
7615 if (fntype)
7617 return_type = TREE_TYPE (fntype);
7618 return_mode = TYPE_MODE (return_type);
7620 else
7621 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7623 if (return_type != NULL)
7625 if (TREE_CODE (return_type) == RECORD_TYPE
7626 && TYPE_TRANSPARENT_AGGR (return_type))
7628 return_type = TREE_TYPE (first_field (return_type));
7629 return_mode = TYPE_MODE (return_type);
7631 if (AGGREGATE_TYPE_P (return_type)
7632 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7633 <= 8))
7634 rs6000_returns_struct = true;
7636 if (SCALAR_FLOAT_MODE_P (return_mode))
7637 rs6000_passes_float = true;
7638 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7639 || SPE_VECTOR_MODE (return_mode))
7640 rs6000_passes_vector = true;
7643 #endif
7645 if (fntype
7646 && !TARGET_ALTIVEC
7647 && TARGET_ALTIVEC_ABI
7648 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7650 error ("cannot return value in vector register because"
7651 " altivec instructions are disabled, use -maltivec"
7652 " to enable them");
7656 /* Return true if TYPE must be passed on the stack and not in registers. */
7658 static bool
7659 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
7661 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
7662 return must_pass_in_stack_var_size (mode, type);
7663 else
7664 return must_pass_in_stack_var_size_or_pad (mode, type);
7667 /* If defined, a C expression which determines whether, and in which
7668 direction, to pad out an argument with extra space. The value
7669 should be of type `enum direction': either `upward' to pad above
7670 the argument, `downward' to pad below, or `none' to inhibit
7671 padding.
7673 For the AIX ABI structs are always stored left shifted in their
7674 argument slot. */
7676 enum direction
7677 function_arg_padding (enum machine_mode mode, const_tree type)
7679 #ifndef AGGREGATE_PADDING_FIXED
7680 #define AGGREGATE_PADDING_FIXED 0
7681 #endif
7682 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
7683 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
7684 #endif
7686 if (!AGGREGATE_PADDING_FIXED)
7688 /* GCC used to pass structures of the same size as integer types as
7689 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
7690 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
7691 passed padded downward, except that -mstrict-align further
7692 muddied the water in that multi-component structures of 2 and 4
7693 bytes in size were passed padded upward.
7695 The following arranges for best compatibility with previous
7696 versions of gcc, but removes the -mstrict-align dependency. */
7697 if (BYTES_BIG_ENDIAN)
7699 HOST_WIDE_INT size = 0;
7701 if (mode == BLKmode)
7703 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
7704 size = int_size_in_bytes (type);
7706 else
7707 size = GET_MODE_SIZE (mode);
7709 if (size == 1 || size == 2 || size == 4)
7710 return downward;
7712 return upward;
7715 if (AGGREGATES_PAD_UPWARD_ALWAYS)
7717 if (type != 0 && AGGREGATE_TYPE_P (type))
7718 return upward;
7721 /* Fall back to the default. */
7722 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
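/* Hypothetical examples of the big-endian rule above (struct names
   are invented): sizes 1, 2 and 4 pad downward; other sizes pad
   upward.  */
#if 0
struct pad_down { short s; };    /* size 2: padded downward */
struct pad_up   { char c[3]; };  /* size 3: padded upward   */
#endif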
7725 /* If defined, a C expression that gives the alignment boundary, in bits,
7726 of an argument with the specified mode and type. If it is not defined,
7727 PARM_BOUNDARY is used for all arguments.
7729 V.4 wants long longs and doubles to be double word aligned. Just
7730 testing the mode size is a boneheaded way to do this as it means
7731 that other types such as complex int are also double word aligned.
7732 However, we're stuck with this because changing the ABI might break
7733 existing library interfaces.
7735 Doubleword align SPE vectors.
7736 Quadword align Altivec/VSX vectors.
7737 Quadword align large synthetic vector types. */
7739 static unsigned int
7740 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7742 if (DEFAULT_ABI == ABI_V4
7743 && (GET_MODE_SIZE (mode) == 8
7744 || (TARGET_HARD_FLOAT
7745 && TARGET_FPRS
7746 && (mode == TFmode || mode == TDmode))))
7747 return 64;
7748 else if (SPE_VECTOR_MODE (mode)
7749 || (type && TREE_CODE (type) == VECTOR_TYPE
7750 && int_size_in_bytes (type) >= 8
7751 && int_size_in_bytes (type) < 16))
7752 return 64;
7753 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7754 || (type && TREE_CODE (type) == VECTOR_TYPE
7755 && int_size_in_bytes (type) >= 16))
7756 return 128;
7757 else if (TARGET_MACHO
7758 && rs6000_darwin64_abi
7759 && mode == BLKmode
7760 && type && TYPE_ALIGN (type) > 64)
7761 return 128;
7762 else
7763 return PARM_BOUNDARY;
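/* Worked examples under stated assumptions (32-bit V.4; the
   declarations are for illustration only): an 8-byte scalar gets a
   64-bit boundary, a 16-byte vector a 128-bit one, and anything else
   falls back to PARM_BOUNDARY.  */
#if 0
double d;                                  /* 64-bit boundary  */
int __attribute__ ((vector_size (16))) v;  /* 128-bit boundary */
int i;                                     /* PARM_BOUNDARY    */
#endif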
7766 /* For a function parm of MODE and TYPE, return the starting word in
7767 the parameter area. NWORDS of the parameter area are already used. */
7769 static unsigned int
7770 rs6000_parm_start (enum machine_mode mode, const_tree type,
7771 unsigned int nwords)
7773 unsigned int align;
7774 unsigned int parm_offset;
7776 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
7777 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
7778 return nwords + (-(parm_offset + nwords) & align);
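/* A worked example under stated assumptions: 32-bit V.4, so
   parm_offset is 2; one word already used, so nwords is 1; and a
   doubleword-aligned argument, so align is 64/32 - 1 == 1.  Then
   -(2 + 1) & 1 == 1 and the result is 2: one padding word is skipped
   so the argument starts on a doubleword boundary.  */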
7781 /* Compute the size (in words) of a function argument. */
7783 static unsigned long
7784 rs6000_arg_size (enum machine_mode mode, const_tree type)
7786 unsigned long size;
7788 if (mode != BLKmode)
7789 size = GET_MODE_SIZE (mode);
7790 else
7791 size = int_size_in_bytes (type);
7793 if (TARGET_32BIT)
7794 return (size + 3) >> 2;
7795 else
7796 return (size + 7) >> 3;
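/* Worked examples (illustration only): a 12-byte BLKmode struct takes
   (12 + 3) >> 2 == 3 words on a 32-bit target but (12 + 7) >> 3 == 2
   words on a 64-bit one; an SImode int takes one word on either.  */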
7799 /* Use this to flush pending int fields. */
7801 static void
7802 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7803 HOST_WIDE_INT bitpos, int final)
7805 unsigned int startbit, endbit;
7806 int intregs, intoffset;
7807 enum machine_mode mode;
7809 /* Handle the situations where a float is taking up the first half
7810 of the GPR, and the other half is empty (typically due to
7811 alignment restrictions). We can detect this by an 8-byte-aligned
7812 int field, or by seeing that this is the final flush for this
7813 argument. Count the word and continue on. */
7814 if (cum->floats_in_gpr == 1
7815 && (cum->intoffset % 64 == 0
7816 || (cum->intoffset == -1 && final)))
7818 cum->words++;
7819 cum->floats_in_gpr = 0;
7822 if (cum->intoffset == -1)
7823 return;
7825 intoffset = cum->intoffset;
7826 cum->intoffset = -1;
7827 cum->floats_in_gpr = 0;
7829 if (intoffset % BITS_PER_WORD != 0)
7831 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7832 MODE_INT, 0);
7833 if (mode == BLKmode)
7835 /* We couldn't find an appropriate mode, which happens,
7836 e.g., in packed structs when there are 3 bytes to load.
7837 Move intoffset back to the beginning of the word in this
7838 case. */
7839 intoffset = intoffset & -BITS_PER_WORD;
7843 startbit = intoffset & -BITS_PER_WORD;
7844 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7845 intregs = (endbit - startbit) / BITS_PER_WORD;
7846 cum->words += intregs;
7847 /* words should be unsigned. */
7848 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
7850 int pad = (endbit/BITS_PER_WORD) - cum->words;
7851 cum->words += pad;
7855 /* The darwin64 ABI calls for us to recurse down through structs,
7856 looking for elements passed in registers. Unfortunately, we have
7857 to track int register count here also because of misalignments
7858 in powerpc alignment mode. */
7860 static void
7861 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7862 const_tree type,
7863 HOST_WIDE_INT startbitpos)
7865 tree f;
7867 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
7868 if (TREE_CODE (f) == FIELD_DECL)
7870 HOST_WIDE_INT bitpos = startbitpos;
7871 tree ftype = TREE_TYPE (f);
7872 enum machine_mode mode;
7873 if (ftype == error_mark_node)
7874 continue;
7875 mode = TYPE_MODE (ftype);
7877 if (DECL_SIZE (f) != 0
7878 && host_integerp (bit_position (f), 1))
7879 bitpos += int_bit_position (f);
7881 /* ??? FIXME: else assume zero offset. */
7883 if (TREE_CODE (ftype) == RECORD_TYPE)
7884 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7885 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7887 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
7888 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7889 cum->fregno += n_fpregs;
7890 /* Single-precision floats present a special problem for
7891 us, because they are smaller than an 8-byte GPR, and so
7892 the structure-packing rules combined with the standard
7893 varargs behavior mean that we want to pack float/float
7894 and float/int combinations into a single register's
7895 space. This is complicated by the arg advance flushing,
7896 which works on arbitrarily large groups of int-type
7897 fields. */
7898 if (mode == SFmode)
7900 if (cum->floats_in_gpr == 1)
7902 /* Two floats in a word; count the word and reset
7903 the float count. */
7904 cum->words++;
7905 cum->floats_in_gpr = 0;
7907 else if (bitpos % 64 == 0)
7909 /* A float at the beginning of an 8-byte word;
7910 count it and put off adjusting cum->words until
7911 we see if an arg advance flush is going to do it
7912 for us. */
7913 cum->floats_in_gpr++;
7915 else
7917 /* The float is at the end of a word, preceded
7918 by integer fields, so the arg advance flush
7919 just above has already set cum->words and
7920 everything is taken care of. */
7923 else
7924 cum->words += n_fpregs;
7926 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7928 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7929 cum->vregno++;
7930 cum->words += 2;
7932 else if (cum->intoffset == -1)
7933 cum->intoffset = bitpos;
7937 /* Check for an item that needs to be considered specially under the Darwin
7938 64-bit ABI. These are record types where the mode is BLK or the structure is
7939 8 bytes in size. */
7940 static int
7941 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
7943 return rs6000_darwin64_abi
7944 && ((mode == BLKmode
7945 && TREE_CODE (type) == RECORD_TYPE
7946 && int_size_in_bytes (type) > 0)
7947 || (type && TREE_CODE (type) == RECORD_TYPE
7948 && int_size_in_bytes (type) == 8)) ? 1 : 0;
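/* Hypothetical examples (struct names are invented): the first struct
   is caught by the BLKmode test, the second by the exact-8-byte test
   even if it has been given a scalar mode.  */
#if 0
struct blk  { int a, b, c; };  /* 12 bytes, BLKmode: special handling */
struct pair { int a, b; };     /* exactly 8 bytes: special handling   */
#endif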
7951 /* Update the data in CUM to advance over an argument
7952 of mode MODE and data type TYPE.
7953 (TYPE is null for libcalls where that information may not be available.)
7955 Note that for args passed by reference, function_arg will be called
7956 with MODE and TYPE set to that of the pointer to the arg, not the arg
7957 itself. */
7959 static void
7960 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7961 const_tree type, bool named, int depth)
7963 /* Only tick off an argument if we're not recursing. */
7964 if (depth == 0)
7965 cum->nargs_prototype--;
7967 #ifdef HAVE_AS_GNU_ATTRIBUTE
7968 if (DEFAULT_ABI == ABI_V4
7969 && cum->escapes)
7971 if (SCALAR_FLOAT_MODE_P (mode))
7972 rs6000_passes_float = true;
7973 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
7974 rs6000_passes_vector = true;
7975 else if (SPE_VECTOR_MODE (mode)
7976 && !cum->stdarg
7977 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7978 rs6000_passes_vector = true;
7980 #endif
7982 if (TARGET_ALTIVEC_ABI
7983 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7984 || (type && TREE_CODE (type) == VECTOR_TYPE
7985 && int_size_in_bytes (type) == 16)))
7987 bool stack = false;
7989 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7991 cum->vregno++;
7992 if (!TARGET_ALTIVEC)
7993 error ("cannot pass argument in vector register because"
7994 " altivec instructions are disabled, use -maltivec"
7995 " to enable them");
7997 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7998 even if it is going to be passed in a vector register.
7999 Darwin does the same for variable-argument functions. */
8000 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
8001 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
8002 stack = true;
8004 else
8005 stack = true;
8007 if (stack)
8009 int align;
8011 /* Vector parameters must be 16-byte aligned. This places
8012 them at 2 mod 4 in terms of words in 32-bit mode, since
8013 the parameter save area starts at offset 24 from the
8014 stack. In 64-bit mode, they just have to start on an
8015 even word, since the parameter save area is 16-byte
8016 aligned. Space for GPRs is reserved even if the argument
8017 will be passed in memory. */
8018 if (TARGET_32BIT)
8019 align = (2 - cum->words) & 3;
8020 else
8021 align = cum->words & 1;
8022 cum->words += align + rs6000_arg_size (mode, type);
8024 if (TARGET_DEBUG_ARG)
8026 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
8027 cum->words, align);
8028 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
8029 cum->nargs_prototype, cum->prototype,
8030 GET_MODE_NAME (mode));
8034 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
8035 && !cum->stdarg
8036 && cum->sysv_gregno <= GP_ARG_MAX_REG)
8037 cum->sysv_gregno++;
8039 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8041 int size = int_size_in_bytes (type);
8042 /* Variable sized types have size == -1 and are
8043 treated as if consisting entirely of ints.
8044 Pad to 16 byte boundary if needed. */
8045 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8046 && (cum->words % 2) != 0)
8047 cum->words++;
8048 /* For varargs, we can just go up by the size of the struct. */
8049 if (!named)
8050 cum->words += (size + 7) / 8;
8051 else
8053 /* It is tempting to say int register count just goes up by
8054 sizeof(type)/8, but this is wrong in a case such as
8055 { int; double; int; } [powerpc alignment]. We have to
8056 grovel through the fields for these too. */
8057 cum->intoffset = 0;
8058 cum->floats_in_gpr = 0;
8059 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
8060 rs6000_darwin64_record_arg_advance_flush (cum,
8061 size * BITS_PER_UNIT, 1);
8063 if (TARGET_DEBUG_ARG)
8065 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
8066 cum->words, TYPE_ALIGN (type), size);
8067 fprintf (stderr,
8068 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
8069 cum->nargs_prototype, cum->prototype,
8070 GET_MODE_NAME (mode));
8073 else if (DEFAULT_ABI == ABI_V4)
8075 if (TARGET_HARD_FLOAT && TARGET_FPRS
8076 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8077 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8078 || (mode == TFmode && !TARGET_IEEEQUAD)
8079 || mode == SDmode || mode == DDmode || mode == TDmode))
8081 /* _Decimal128 must use an even/odd register pair. This assumes
8082 that the register number is odd when fregno is odd. */
8083 if (mode == TDmode && (cum->fregno % 2) == 1)
8084 cum->fregno++;
8086 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8087 <= FP_ARG_V4_MAX_REG)
8088 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8089 else
8091 cum->fregno = FP_ARG_V4_MAX_REG + 1;
8092 if (mode == DFmode || mode == TFmode
8093 || mode == DDmode || mode == TDmode)
8094 cum->words += cum->words & 1;
8095 cum->words += rs6000_arg_size (mode, type);
8098 else
8100 int n_words = rs6000_arg_size (mode, type);
8101 int gregno = cum->sysv_gregno;
8103 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8104 (r7,r8) or (r9,r10), as is any other 2-word item such
8105 as complex int, due to a historical mistake. */
8106 if (n_words == 2)
8107 gregno += (1 - gregno) & 1;
8109 /* Multi-reg args are not split between registers and stack. */
8110 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8112 /* Long long and SPE vectors are aligned on the stack.
8113 So are other 2 word items such as complex int due to
8114 a historical mistake. */
8115 if (n_words == 2)
8116 cum->words += cum->words & 1;
8117 cum->words += n_words;
8120 /* Note: we continue to accumulate gregno even after arguments have
8121 begun spilling to the stack; this is how expand_builtin_saveregs
8122 learns that spilling has started. */
8123 cum->sysv_gregno = gregno + n_words;
8126 if (TARGET_DEBUG_ARG)
8128 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8129 cum->words, cum->fregno);
8130 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
8131 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
8132 fprintf (stderr, "mode = %4s, named = %d\n",
8133 GET_MODE_NAME (mode), named);
8136 else
8138 int n_words = rs6000_arg_size (mode, type);
8139 int start_words = cum->words;
8140 int align_words = rs6000_parm_start (mode, type, start_words);
8142 cum->words = align_words + n_words;
8144 if (SCALAR_FLOAT_MODE_P (mode)
8145 && TARGET_HARD_FLOAT && TARGET_FPRS)
8147 /* _Decimal128 must be passed in an even/odd float register pair.
8148 This assumes that the register number is odd when fregno is
8149 odd. */
8150 if (mode == TDmode && (cum->fregno % 2) == 1)
8151 cum->fregno++;
8152 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8155 if (TARGET_DEBUG_ARG)
8157 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8158 cum->words, cum->fregno);
8159 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8160 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8161 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8162 named, align_words - start_words, depth);
8167 static void
8168 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8169 const_tree type, bool named)
8171 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8175 static rtx
8176 spe_build_register_parallel (enum machine_mode mode, int gregno)
8178 rtx r1, r3, r5, r7;
8180 switch (mode)
8182 case DFmode:
8183 r1 = gen_rtx_REG (DImode, gregno);
8184 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8185 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8187 case DCmode:
8188 case TFmode:
8189 r1 = gen_rtx_REG (DImode, gregno);
8190 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8191 r3 = gen_rtx_REG (DImode, gregno + 2);
8192 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8193 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8195 case TCmode:
8196 r1 = gen_rtx_REG (DImode, gregno);
8197 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8198 r3 = gen_rtx_REG (DImode, gregno + 2);
8199 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8200 r5 = gen_rtx_REG (DImode, gregno + 4);
8201 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8202 r7 = gen_rtx_REG (DImode, gregno + 6);
8203 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8204 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8206 default:
8207 gcc_unreachable ();
8211 /* Determine where to put a SIMD argument on the SPE. */
8212 static rtx
8213 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8214 const_tree type)
8216 int gregno = cum->sysv_gregno;
8218 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8219 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8220 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8221 || mode == DCmode || mode == TCmode))
8223 int n_words = rs6000_arg_size (mode, type);
8225 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8226 if (mode == DFmode)
8227 gregno += (1 - gregno) & 1;
8229 /* Multi-reg args are not split between registers and stack. */
8230 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8231 return NULL_RTX;
8233 return spe_build_register_parallel (mode, gregno);
8235 if (cum->stdarg)
8237 int n_words = rs6000_arg_size (mode, type);
8239 /* SPE vectors are put in odd registers. */
8240 if (n_words == 2 && (gregno & 1) == 0)
8241 gregno += 1;
8243 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8245 rtx r1, r2;
8246 enum machine_mode m = SImode;
8248 r1 = gen_rtx_REG (m, gregno);
8249 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8250 r2 = gen_rtx_REG (m, gregno + 1);
8251 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8252 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8254 else
8255 return NULL_RTX;
8257 else
8259 if (gregno <= GP_ARG_MAX_REG)
8260 return gen_rtx_REG (mode, gregno);
8261 else
8262 return NULL_RTX;
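/* A worked example (hypothetical signature): for
   "void f (int a, double d)" on e500 v2, a lands in r3 and d is
   bumped to the next odd GPR, so the PARALLEL built above carries a
   single DImode register, r5 -- the r5/r6 pair from the 32-bit ABI's
   point of view.  */
#if 0
void f (int a, double d);  /* a: r3;  d: DImode r5 (ABI pair r5/r6) */
#endif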
8266 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8267 structure between cum->intoffset and bitpos to integer registers. */
8269 static void
8270 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8271 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8273 enum machine_mode mode;
8274 unsigned int regno;
8275 unsigned int startbit, endbit;
8276 int this_regno, intregs, intoffset;
8277 rtx reg;
8279 if (cum->intoffset == -1)
8280 return;
8282 intoffset = cum->intoffset;
8283 cum->intoffset = -1;
8285 /* If this is the trailing part of a word, try to only load that
8286 much into the register. Otherwise load the whole register. Note
8287 that in the latter case we may pick up unwanted bits. It's not a
8288 problem at the moment, but we may wish to revisit it. */
8290 if (intoffset % BITS_PER_WORD != 0)
8292 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8293 MODE_INT, 0);
8294 if (mode == BLKmode)
8296 /* We couldn't find an appropriate mode, which happens,
8297 e.g., in packed structs when there are 3 bytes to load.
8298 Move intoffset back to the beginning of the word in this
8299 case. */
8300 intoffset = intoffset & -BITS_PER_WORD;
8301 mode = word_mode;
8304 else
8305 mode = word_mode;
8307 startbit = intoffset & -BITS_PER_WORD;
8308 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8309 intregs = (endbit - startbit) / BITS_PER_WORD;
8310 this_regno = cum->words + intoffset / BITS_PER_WORD;
8312 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8313 cum->use_stack = 1;
8315 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8316 if (intregs <= 0)
8317 return;
8319 intoffset /= BITS_PER_UNIT;
8322 regno = GP_ARG_MIN_REG + this_regno;
8323 reg = gen_rtx_REG (mode, regno);
8324 rvec[(*k)++] =
8325 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8327 this_regno += 1;
8328 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8329 mode = word_mode;
8330 intregs -= 1;
8332 while (intregs > 0);
8335 /* Recursive workhorse for the following. */
8337 static void
8338 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8339 HOST_WIDE_INT startbitpos, rtx rvec[],
8340 int *k)
8342 tree f;
8344 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8345 if (TREE_CODE (f) == FIELD_DECL)
8347 HOST_WIDE_INT bitpos = startbitpos;
8348 tree ftype = TREE_TYPE (f);
8349 enum machine_mode mode;
8350 if (ftype == error_mark_node)
8351 continue;
8352 mode = TYPE_MODE (ftype);
8354 if (DECL_SIZE (f) != 0
8355 && host_integerp (bit_position (f), 1))
8356 bitpos += int_bit_position (f);
8358 /* ??? FIXME: else assume zero offset. */
8360 if (TREE_CODE (ftype) == RECORD_TYPE)
8361 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8362 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8364 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8365 #if 0
8366 switch (mode)
8368 case SCmode: mode = SFmode; break;
8369 case DCmode: mode = DFmode; break;
8370 case TCmode: mode = TFmode; break;
8371 default: break;
8373 #endif
8374 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8375 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8377 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8378 && (mode == TFmode || mode == TDmode));
8379 /* Long double or _Decimal128 split over regs and memory. */
8380 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8381 cum->use_stack = 1;
8383 rvec[(*k)++]
8384 = gen_rtx_EXPR_LIST (VOIDmode,
8385 gen_rtx_REG (mode, cum->fregno++),
8386 GEN_INT (bitpos / BITS_PER_UNIT));
8387 if (mode == TFmode || mode == TDmode)
8388 cum->fregno++;
8390 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8392 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8393 rvec[(*k)++]
8394 = gen_rtx_EXPR_LIST (VOIDmode,
8395 gen_rtx_REG (mode, cum->vregno++),
8396 GEN_INT (bitpos / BITS_PER_UNIT));
8398 else if (cum->intoffset == -1)
8399 cum->intoffset = bitpos;
8403 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8404 the register(s) to be used for each field and subfield of a struct
8405 being passed by value, along with the offset of where the
8406 register's value may be found in the block. FP fields go in FP
8407 registers, vector fields go in vector registers, and everything
8408 else goes in int registers, packed as in memory.
8410 This code is also used for function return values. RETVAL indicates
8411 whether this is the case.
8413 Much of this is taken from the SPARC V9 port, which has a similar
8414 calling convention. */
8416 static rtx
8417 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8418 bool named, bool retval)
8420 rtx rvec[FIRST_PSEUDO_REGISTER];
8421 int k = 1, kbase = 1;
8422 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8423 /* This is a copy; modifications are not visible to our caller. */
8424 CUMULATIVE_ARGS copy_cum = *orig_cum;
8425 CUMULATIVE_ARGS *cum = &copy_cum;
8427 /* Pad to 16 byte boundary if needed. */
8428 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8429 && (cum->words % 2) != 0)
8430 cum->words++;
8432 cum->intoffset = 0;
8433 cum->use_stack = 0;
8434 cum->named = named;
8436 /* Put entries into rvec[] for individual FP and vector fields, and
8437 for the chunks of memory that go in int regs. Note we start at
8438 element 1; 0 is reserved for an indication of using memory, and
8439 may or may not be filled in below. */
8440 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8441 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8443 /* If any part of the struct went on the stack put all of it there.
8444 This hack is because the generic code for
8445 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8446 parts of the struct are not at the beginning. */
8447 if (cum->use_stack)
8449 if (retval)
8450 return NULL_RTX; /* doesn't go in registers at all */
8451 kbase = 0;
8452 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8454 if (k > 1 || cum->use_stack)
8455 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8456 else
8457 return NULL_RTX;
8460 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8462 static rtx
8463 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8464 int align_words)
8466 int n_units;
8467 int i, k;
8468 rtx rvec[GP_ARG_NUM_REG + 1];
8470 if (align_words >= GP_ARG_NUM_REG)
8471 return NULL_RTX;
8473 n_units = rs6000_arg_size (mode, type);
8475 /* Optimize the simple case where the arg fits in one gpr, except in
8476 the case of BLKmode due to assign_parms assuming that registers are
8477 BITS_PER_WORD wide. */
8478 if (n_units == 0
8479 || (n_units == 1 && mode != BLKmode))
8480 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8482 k = 0;
8483 if (align_words + n_units > GP_ARG_NUM_REG)
8484 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8485 using a magic NULL_RTX component.
8486 This is not strictly correct. Only some of the arg belongs in
8487 memory, not all of it. However, the normal scheme using
8488 function_arg_partial_nregs can result in unusual subregs, e.g.
8489 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8490 store the whole arg to memory is often more efficient than code
8491 to store pieces, and we know that space is available in the right
8492 place for the whole arg. */
8493 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8495 i = 0;
8498 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
8499 rtx off = GEN_INT (i++ * 4);
8500 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8502 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8504 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
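/* A worked example (illustration only): a DImode argument with
   align_words == 2 on a -m32 -mpowerpc64 target spans two parameter
   words, so the loop above emits two SImode pieces, r5 at byte
   offset 0 and r6 at byte offset 4, inside the PARALLEL.  */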
8507 /* Determine where to put an argument to a function.
8508 Value is zero to push the argument on the stack,
8509 or a hard register in which to store the argument.
8511 MODE is the argument's machine mode.
8512 TYPE is the data type of the argument (as a tree).
8513 This is null for libcalls where that information may
8514 not be available.
8515 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8516 the preceding args and about the function being called. It is
8517 not modified in this routine.
8518 NAMED is nonzero if this argument is a named parameter
8519 (otherwise it is an extra parameter matching an ellipsis).
8521 On RS/6000 the first eight words of non-FP are normally in registers
8522 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8523 Under V.4, the first 8 FP args are in registers.
8525 If this is floating-point and no prototype is specified, we use
8526 both an FP and integer register (or possibly FP reg and stack). Library
8527 functions (when CALL_LIBCALL is set) always have the proper types for args,
8528 so we can pass the FP value just in one register. emit_library_function
8529 doesn't support PARALLEL anyway.
8531 Note that for args passed by reference, function_arg will be called
8532 with MODE and TYPE set to that of the pointer to the arg, not the arg
8533 itself. */
8535 static rtx
8536 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8537 const_tree type, bool named)
8539 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8540 enum rs6000_abi abi = DEFAULT_ABI;
8542 /* Return a marker to indicate whether CR1 needs to set or clear the
8543 bit that V.4 uses to say fp args were passed in registers.
8544 Assume that we don't need the marker for software floating point,
8545 or compiler generated library calls. */
8546 if (mode == VOIDmode)
8548 if (abi == ABI_V4
8549 && (cum->call_cookie & CALL_LIBCALL) == 0
8550 && (cum->stdarg
8551 || (cum->nargs_prototype < 0
8552 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8554 /* For the SPE, we need to crxor CR6 always. */
8555 if (TARGET_SPE_ABI)
8556 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8557 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8558 return GEN_INT (cum->call_cookie
8559 | ((cum->fregno == FP_ARG_MIN_REG)
8560 ? CALL_V4_SET_FP_ARGS
8561 : CALL_V4_CLEAR_FP_ARGS));
8564 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8567 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8569 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8570 if (rslt != NULL_RTX)
8571 return rslt;
8572 /* Else fall through to usual handling. */
8575 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8576 if (TARGET_64BIT && ! cum->prototype)
8578 /* Vector parameters get passed in vector register
8579 and also in GPRs or memory, in absence of prototype. */
8580 int align_words;
8581 rtx slot;
8582 align_words = (cum->words + 1) & ~1;
8584 if (align_words >= GP_ARG_NUM_REG)
8586 slot = NULL_RTX;
8588 else
8590 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8592 return gen_rtx_PARALLEL (mode,
8593 gen_rtvec (2,
8594 gen_rtx_EXPR_LIST (VOIDmode,
8595 slot, const0_rtx),
8596 gen_rtx_EXPR_LIST (VOIDmode,
8597 gen_rtx_REG (mode, cum->vregno),
8598 const0_rtx)));
8600 else
8601 return gen_rtx_REG (mode, cum->vregno);
8602 else if (TARGET_ALTIVEC_ABI
8603 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8604 || (type && TREE_CODE (type) == VECTOR_TYPE
8605 && int_size_in_bytes (type) == 16)))
8607 if (named || abi == ABI_V4)
8608 return NULL_RTX;
8609 else
8611 /* Vector parameters to varargs functions under AIX or Darwin
8612 get passed in memory and possibly also in GPRs. */
8613 int align, align_words, n_words;
8614 enum machine_mode part_mode;
8616 /* Vector parameters must be 16-byte aligned. This places them at
8617 2 mod 4 in terms of words in 32-bit mode, since the parameter
8618 save area starts at offset 24 from the stack. In 64-bit mode,
8619 they just have to start on an even word, since the parameter
8620 save area is 16-byte aligned. */
8621 if (TARGET_32BIT)
8622 align = (2 - cum->words) & 3;
8623 else
8624 align = cum->words & 1;
8625 align_words = cum->words + align;
8627 /* Out of registers? Memory, then. */
8628 if (align_words >= GP_ARG_NUM_REG)
8629 return NULL_RTX;
8631 if (TARGET_32BIT && TARGET_POWERPC64)
8632 return rs6000_mixed_function_arg (mode, type, align_words);
8634 /* The vector value goes in GPRs. Only the part of the
8635 value in GPRs is reported here. */
8636 part_mode = mode;
8637 n_words = rs6000_arg_size (mode, type);
8638 if (align_words + n_words > GP_ARG_NUM_REG)
8639 /* Fortunately, there are only two possibilities, the value
8640 is either wholly in GPRs or half in GPRs and half not. */
8641 part_mode = DImode;
8643 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8646 else if (TARGET_SPE_ABI && TARGET_SPE
8647 && (SPE_VECTOR_MODE (mode)
8648 || (TARGET_E500_DOUBLE && (mode == DFmode
8649 || mode == DCmode
8650 || mode == TFmode
8651 || mode == TCmode))))
8652 return rs6000_spe_function_arg (cum, mode, type);
8654 else if (abi == ABI_V4)
8656 if (TARGET_HARD_FLOAT && TARGET_FPRS
8657 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8658 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8659 || (mode == TFmode && !TARGET_IEEEQUAD)
8660 || mode == SDmode || mode == DDmode || mode == TDmode))
8662 /* _Decimal128 must use an even/odd register pair. This assumes
8663 that the register number is odd when fregno is odd. */
8664 if (mode == TDmode && (cum->fregno % 2) == 1)
8665 cum->fregno++;
8667 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8668 <= FP_ARG_V4_MAX_REG)
8669 return gen_rtx_REG (mode, cum->fregno);
8670 else
8671 return NULL_RTX;
8673 else
8675 int n_words = rs6000_arg_size (mode, type);
8676 int gregno = cum->sysv_gregno;
8678 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8679 (r7,r8) or (r9,r10). As does any other 2 word item such
8680 as complex int due to a historical mistake. */
8681 if (n_words == 2)
8682 gregno += (1 - gregno) & 1;
8684 /* Multi-reg args are not split between registers and stack. */
8685 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8686 return NULL_RTX;
8688 if (TARGET_32BIT && TARGET_POWERPC64)
8689 return rs6000_mixed_function_arg (mode, type,
8690 gregno - GP_ARG_MIN_REG);
8691 return gen_rtx_REG (mode, gregno);
8694 else
8696 int align_words = rs6000_parm_start (mode, type, cum->words);
8698 /* _Decimal128 must be passed in an even/odd float register pair.
8699 This assumes that the register number is odd when fregno is odd. */
8700 if (mode == TDmode && (cum->fregno % 2) == 1)
8701 cum->fregno++;
8703 if (USE_FP_FOR_ARG_P (cum, mode, type))
8705 rtx rvec[GP_ARG_NUM_REG + 1];
8706 rtx r;
8707 int k;
8708 bool needs_psave;
8709 enum machine_mode fmode = mode;
8710 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8712 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8714 /* Currently, we only ever need one reg here because complex
8715 doubles are split. */
8716 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8717 && (fmode == TFmode || fmode == TDmode));
8719 /* Long double or _Decimal128 split over regs and memory. */
8720 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8723 /* Do we also need to pass this arg in the parameter save
8724 area? */
8725 needs_psave = (type
8726 && (cum->nargs_prototype <= 0
8727 || (DEFAULT_ABI == ABI_AIX
8728 && TARGET_XL_COMPAT
8729 && align_words >= GP_ARG_NUM_REG)));
8731 if (!needs_psave && mode == fmode)
8732 return gen_rtx_REG (fmode, cum->fregno);
8734 k = 0;
8735 if (needs_psave)
8737 /* Describe the part that goes in gprs or the stack.
8738 This piece must come first, before the fprs. */
8739 if (align_words < GP_ARG_NUM_REG)
8741 unsigned long n_words = rs6000_arg_size (mode, type);
8743 if (align_words + n_words > GP_ARG_NUM_REG
8744 || (TARGET_32BIT && TARGET_POWERPC64))
8746 /* If this is partially on the stack, then we only
8747 include the portion actually in registers here. */
8748 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8749 rtx off;
8750 int i = 0;
8751 if (align_words + n_words > GP_ARG_NUM_REG)
8752 /* Not all of the arg fits in gprs. Say that it
8753 goes in memory too, using a magic NULL_RTX
8754 component. Also see comment in
8755 rs6000_mixed_function_arg for why the normal
8756 function_arg_partial_nregs scheme doesn't work
8757 in this case. */
8758 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
8759 const0_rtx);
8762 r = gen_rtx_REG (rmode,
8763 GP_ARG_MIN_REG + align_words);
8764 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
8765 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8767 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
8769 else
8771 /* The whole arg fits in gprs. */
8772 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8773 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8776 else
8777 /* It's entirely in memory. */
8778 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8781 /* Describe where this piece goes in the fprs. */
8782 r = gen_rtx_REG (fmode, cum->fregno);
8783 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8785 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8787 else if (align_words < GP_ARG_NUM_REG)
8789 if (TARGET_32BIT && TARGET_POWERPC64)
8790 return rs6000_mixed_function_arg (mode, type, align_words);
8792 if (mode == BLKmode)
8793 mode = Pmode;
8795 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8797 else
8798 return NULL_RTX;
8802 /* For an arg passed partly in registers and partly in memory, this is
8803 the number of bytes passed in registers. For args passed entirely in
8804 registers or entirely in memory, zero. When an arg is described by a
8805 PARALLEL, perhaps using more than one register type, this function
8806 returns the number of bytes used by the first element of the PARALLEL. */
8808 static int
8809 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
8810 tree type, bool named)
8812 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8813 int ret = 0;
8814 int align_words;
8816 if (DEFAULT_ABI == ABI_V4)
8817 return 0;
8819 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
8820 && cum->nargs_prototype >= 0)
8821 return 0;
8823 /* In this complicated case we just disable the partial_nregs code. */
8824 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8825 return 0;
8827 align_words = rs6000_parm_start (mode, type, cum->words);
8829 if (USE_FP_FOR_ARG_P (cum, mode, type))
8831 /* If we are passing this arg in the fixed parameter save area
8832 (gprs or memory) as well as fprs, then this function should
8833 return the number of partial bytes passed in the parameter
8834 save area rather than partial bytes passed in fprs. */
8835 if (type
8836 && (cum->nargs_prototype <= 0
8837 || (DEFAULT_ABI == ABI_AIX
8838 && TARGET_XL_COMPAT
8839 && align_words >= GP_ARG_NUM_REG)))
8840 return 0;
8841 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
8842 > FP_ARG_MAX_REG + 1)
8843 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
8844 else if (cum->nargs_prototype >= 0)
8845 return 0;
8848 if (align_words < GP_ARG_NUM_REG
8849 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
8850 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
8852 if (ret != 0 && TARGET_DEBUG_ARG)
8853 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
8855 return ret;
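/* A worked example (illustration only, 32-bit AIX-style ABI): an
   argument of four words starting at align_words == 7 has only word 7
   in a GPR (r10), so the result is (8 - 7) * 4 == 4 bytes passed in
   registers, with the remaining 12 bytes in memory.  */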
8858 /* A C expression that indicates when an argument must be passed by
8859 reference. If nonzero for an argument, a copy of that argument is
8860 made in memory and a pointer to the argument is passed instead of
8861 the argument itself. The pointer is passed in whatever way is
8862 appropriate for passing a pointer to that type.
8864 Under V.4, aggregates and long double are passed by reference.
8866 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
8867 reference unless the AltiVec vector extension ABI is in force.
8869 As an extension to all ABIs, variable sized types are passed by
8870 reference. */
8872 static bool
8873 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
8874 enum machine_mode mode, const_tree type,
8875 bool named ATTRIBUTE_UNUSED)
8877 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
8879 if (TARGET_DEBUG_ARG)
8880 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
8881 return 1;
8884 if (!type)
8885 return 0;
8887 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
8889 if (TARGET_DEBUG_ARG)
8890 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
8891 return 1;
8894 if (int_size_in_bytes (type) < 0)
8896 if (TARGET_DEBUG_ARG)
8897 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8898 return 1;
8901 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8902 modes only exist for GCC vector types if -maltivec. */
8903 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8905 if (TARGET_DEBUG_ARG)
8906 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8907 return 1;
8910 /* Pass synthetic vectors in memory. */
8911 if (TREE_CODE (type) == VECTOR_TYPE
8912 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8914 static bool warned_for_pass_big_vectors = false;
8915 if (TARGET_DEBUG_ARG)
8916 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8917 if (!warned_for_pass_big_vectors)
8919 warning (0, "GCC vector passed by reference: "
8920 "non-standard ABI extension with no compatibility guarantee");
8921 warned_for_pass_big_vectors = true;
8923 return 1;
8926 return 0;
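/* Hypothetical examples of the rules above (the declarations are for
   illustration only):  */
#if 0
struct agg { int a, b; };                    /* V.4: by reference      */
int __attribute__ ((vector_size (16))) v16;  /* 32-bit without AltiVec
						ABI: by reference      */
int __attribute__ ((vector_size (32))) v32;  /* all ABIs: by reference */
#endif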
8929 static void
8930 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8932 int i;
8933 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8935 if (nregs == 0)
8936 return;
8938 for (i = 0; i < nregs; i++)
8940 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
8941 if (reload_completed)
8943 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8944 tem = NULL_RTX;
8945 else
8946 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8947 i * GET_MODE_SIZE (reg_mode));
8949 else
8950 tem = replace_equiv_address (tem, XEXP (tem, 0));
8952 gcc_assert (tem);
8954 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
8958 /* Perform any actions needed for a function that is receiving a
8959 variable number of arguments.
8961 CUM is as above.
8963 MODE and TYPE are the mode and type of the current parameter.
8965 PRETEND_SIZE is a variable that should be set to the amount of stack
8966 that must be pushed by the prolog to pretend that our caller pushed
8969 Normally, this macro will push all remaining incoming registers on the
8970 stack and set PRETEND_SIZE to the length of the registers pushed. */
8972 static void
8973 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
8974 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8975 int no_rtl)
8977 CUMULATIVE_ARGS next_cum;
8978 int reg_size = TARGET_32BIT ? 4 : 8;
8979 rtx save_area = NULL_RTX, mem;
8980 int first_reg_offset;
8981 alias_set_type set;
8983 /* Skip the last named argument. */
8984 next_cum = *get_cumulative_args (cum);
8985 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
8987 if (DEFAULT_ABI == ABI_V4)
8989 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8991 if (! no_rtl)
8993 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8994 HOST_WIDE_INT offset = 0;
8996 /* Try to optimize the size of the varargs save area.
8997 The ABI requires that ap.reg_save_area is doubleword
8998 aligned, but we don't need to allocate space for all
8999 the bytes, only those to which we actually will save
9000 anything. */
9001 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
9002 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
9003 if (TARGET_HARD_FLOAT && TARGET_FPRS
9004 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9005 && cfun->va_list_fpr_size)
9007 if (gpr_reg_num)
9008 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
9009 * UNITS_PER_FP_WORD;
9010 if (cfun->va_list_fpr_size
9011 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9012 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
9013 else
9014 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
9015 * UNITS_PER_FP_WORD;
9017 if (gpr_reg_num)
9019 offset = -((first_reg_offset * reg_size) & ~7);
9020 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
9022 gpr_reg_num = cfun->va_list_gpr_size;
9023 if (reg_size == 4 && (first_reg_offset & 1))
9024 gpr_reg_num++;
9026 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
9028 else if (fpr_size)
9029 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
9030 * UNITS_PER_FP_WORD
9031 - (int) (GP_ARG_NUM_REG * reg_size);
9033 if (gpr_size + fpr_size)
9035 rtx reg_save_area
9036 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
9037 gcc_assert (GET_CODE (reg_save_area) == MEM);
9038 reg_save_area = XEXP (reg_save_area, 0);
9039 if (GET_CODE (reg_save_area) == PLUS)
9041 gcc_assert (XEXP (reg_save_area, 0)
9042 == virtual_stack_vars_rtx);
9043 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
9044 offset += INTVAL (XEXP (reg_save_area, 1));
9046 else
9047 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
9050 cfun->machine->varargs_save_offset = offset;
9051 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
9054 else
9056 first_reg_offset = next_cum.words;
9057 save_area = virtual_incoming_args_rtx;
9059 if (targetm.calls.must_pass_in_stack (mode, type))
9060 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
9063 set = get_varargs_alias_set ();
9064 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
9065 && cfun->va_list_gpr_size)
9067 int nregs = GP_ARG_NUM_REG - first_reg_offset;
9068 int n_gpr;
9070 if (va_list_gpr_counter_field)
9072 /* V4 va_list_gpr_size counts number of registers needed. */
9073 n_gpr = cfun->va_list_gpr_size;
9075 else
9077 /* char * va_list instead counts number of bytes needed. */
9078 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
9080 if (nregs > n_gpr)
9081 nregs = n_gpr;
9083 mem = gen_rtx_MEM (BLKmode,
9084 plus_constant (Pmode, save_area,
9085 first_reg_offset * reg_size));
9086 MEM_NOTRAP_P (mem) = 1;
9087 set_mem_alias_set (mem, set);
9088 set_mem_align (mem, BITS_PER_WORD);
9090 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
9091 nregs);
9094 /* Save FP registers if needed. */
9095 if (DEFAULT_ABI == ABI_V4
9096 && TARGET_HARD_FLOAT && TARGET_FPRS
9097 && ! no_rtl
9098 && next_cum.fregno <= FP_ARG_V4_MAX_REG
9099 && cfun->va_list_fpr_size)
9101 int fregno = next_cum.fregno, nregs;
9102 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
9103 rtx lab = gen_label_rtx ();
9104 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
9105 * UNITS_PER_FP_WORD);
9107 emit_jump_insn
9108 (gen_rtx_SET (VOIDmode,
9109 pc_rtx,
9110 gen_rtx_IF_THEN_ELSE (VOIDmode,
9111 gen_rtx_NE (VOIDmode, cr1,
9112 const0_rtx),
9113 gen_rtx_LABEL_REF (VOIDmode, lab),
9114 pc_rtx)));
9116 for (nregs = 0;
9117 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
9118 fregno++, off += UNITS_PER_FP_WORD, nregs++)
9120 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9121 ? DFmode : SFmode,
9122 plus_constant (Pmode, save_area, off));
9123 MEM_NOTRAP_P (mem) = 1;
9124 set_mem_alias_set (mem, set);
9125 set_mem_align (mem, GET_MODE_ALIGNMENT (
9126 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9127 ? DFmode : SFmode));
9128 emit_move_insn (mem, gen_rtx_REG (
9129 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9130 ? DFmode : SFmode, fregno));
9133 emit_label (lab);
9137 /* Create the va_list data type. */
9139 static tree
9140 rs6000_build_builtin_va_list (void)
9142 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9144 /* For AIX, prefer 'char *' because that's what the system
9145 header files like. */
9146 if (DEFAULT_ABI != ABI_V4)
9147 return build_pointer_type (char_type_node);
9149 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9150 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9151 get_identifier ("__va_list_tag"), record);
9153 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9154 unsigned_char_type_node);
9155 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9156 unsigned_char_type_node);
9157 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9158 every user file. */
9159 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9160 get_identifier ("reserved"), short_unsigned_type_node);
9161 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9162 get_identifier ("overflow_arg_area"),
9163 ptr_type_node);
9164 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9165 get_identifier ("reg_save_area"),
9166 ptr_type_node);
9168 va_list_gpr_counter_field = f_gpr;
9169 va_list_fpr_counter_field = f_fpr;
9171 DECL_FIELD_CONTEXT (f_gpr) = record;
9172 DECL_FIELD_CONTEXT (f_fpr) = record;
9173 DECL_FIELD_CONTEXT (f_res) = record;
9174 DECL_FIELD_CONTEXT (f_ovf) = record;
9175 DECL_FIELD_CONTEXT (f_sav) = record;
9177 TYPE_STUB_DECL (record) = type_decl;
9178 TYPE_NAME (record) = type_decl;
9179 TYPE_FIELDS (record) = f_gpr;
9180 DECL_CHAIN (f_gpr) = f_fpr;
9181 DECL_CHAIN (f_fpr) = f_res;
9182 DECL_CHAIN (f_res) = f_ovf;
9183 DECL_CHAIN (f_ovf) = f_sav;
9185 layout_type (record);
9187 /* The correct type is an array type of one element. */
9188 return build_array_type (record, build_index_type (size_zero_node));
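/* For reference, the record built above corresponds to this C
   declaration (a sketch: the fields mirror the build_decl calls, but
   the typedef spelling here is informal):  */
#if 0
typedef struct __va_list_tag
{
  unsigned char gpr;         /* count of GPR args already used     */
  unsigned char fpr;         /* count of FPR args already used     */
  unsigned short reserved;   /* padding, named to quiet -Wpadded   */
  void *overflow_arg_area;   /* arguments passed on the stack      */
  void *reg_save_area;       /* where the register args were saved */
} __va_list[1];              /* array type of one element          */
#endif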
9191 /* Implement va_start. */
9193 static void
9194 rs6000_va_start (tree valist, rtx nextarg)
9196 HOST_WIDE_INT words, n_gpr, n_fpr;
9197 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9198 tree gpr, fpr, ovf, sav, t;
9200 /* Only SVR4 needs something special. */
9201 if (DEFAULT_ABI != ABI_V4)
9203 std_expand_builtin_va_start (valist, nextarg);
9204 return;
9207 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9208 f_fpr = DECL_CHAIN (f_gpr);
9209 f_res = DECL_CHAIN (f_fpr);
9210 f_ovf = DECL_CHAIN (f_res);
9211 f_sav = DECL_CHAIN (f_ovf);
9213 valist = build_simple_mem_ref (valist);
9214 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9215 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9216 f_fpr, NULL_TREE);
9217 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9218 f_ovf, NULL_TREE);
9219 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9220 f_sav, NULL_TREE);
9222 /* Count number of gp and fp argument registers used. */
9223 words = crtl->args.info.words;
9224 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9225 GP_ARG_NUM_REG);
9226 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9227 FP_ARG_NUM_REG);
9229 if (TARGET_DEBUG_ARG)
9230 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9231 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9232 words, n_gpr, n_fpr);
9234 if (cfun->va_list_gpr_size)
9236 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9237 build_int_cst (NULL_TREE, n_gpr));
9238 TREE_SIDE_EFFECTS (t) = 1;
9239 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9242 if (cfun->va_list_fpr_size)
9244 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9245 build_int_cst (NULL_TREE, n_fpr));
9246 TREE_SIDE_EFFECTS (t) = 1;
9247 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9249 #ifdef HAVE_AS_GNU_ATTRIBUTE
9250 if (call_ABI_of_interest (cfun->decl))
9251 rs6000_passes_float = true;
9252 #endif
9255 /* Find the overflow area. */
9256 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9257 if (words != 0)
9258 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9259 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9260 TREE_SIDE_EFFECTS (t) = 1;
9261 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9263 /* If there were no va_arg invocations, don't set up the register
9264 save area. */
9265 if (!cfun->va_list_gpr_size
9266 && !cfun->va_list_fpr_size
9267 && n_gpr < GP_ARG_NUM_REG
9268 && n_fpr < FP_ARG_V4_MAX_REG)
9269 return;
9271 /* Find the register save area. */
9272 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9273 if (cfun->machine->varargs_save_offset)
9274 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9275 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9276 TREE_SIDE_EFFECTS (t) = 1;
9277 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
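/* Worked example (illustrative): for a V4 function

     void f (int a, double b, ...)

   the named args consume one GP register (a in r3) and one FP register
   (b in f1), so va_start stores gpr = 1 and fpr = 1, points
   overflow_arg_area just past the register-passed words of the incoming
   arg area, and points reg_save_area at the block where the prologue
   dumped r3..r10 and f1..f8.  */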
9280 /* Implement va_arg. */
9282 static tree
9283 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9284 gimple_seq *post_p)
9286 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9287 tree gpr, fpr, ovf, sav, reg, t, u;
9288 int size, rsize, n_reg, sav_ofs, sav_scale;
9289 tree lab_false, lab_over, addr;
9290 int align;
9291 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9292 int regalign = 0;
9293 gimple stmt;
9295 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9297 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9298 return build_va_arg_indirect_ref (t);
9301 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9302 earlier version of gcc, with the property that it always applied alignment
9303 adjustments to the va-args (even for zero-sized types). The cheapest way
9304 to deal with this is to replicate the effect of the part of
9305 std_gimplify_va_arg_expr that carries out the align adjust, for the case
9306 of relevance.
9307 We don't need to check for pass-by-reference because of the test above.
9308 We can return a simplified answer, since we know there's no offset to add. */
9310 if (TARGET_MACHO
9311 && rs6000_darwin64_abi
9312 && integer_zerop (TYPE_SIZE (type)))
9314 unsigned HOST_WIDE_INT align, boundary;
9315 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9316 align = PARM_BOUNDARY / BITS_PER_UNIT;
9317 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9318 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9319 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9320 boundary /= BITS_PER_UNIT;
9321 if (boundary > align)
9323 tree t;
9324 /* This updates arg ptr by the amount that would be necessary
9325 to align the zero-sized (but not zero-alignment) item. */
9326 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9327 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9328 gimplify_and_add (t, pre_p);
9330 t = fold_convert (sizetype, valist_tmp);
9331 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9332 fold_convert (TREE_TYPE (valist),
9333 fold_build2 (BIT_AND_EXPR, sizetype, t,
9334 size_int (-boundary))));
9335 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9336 gimplify_and_add (t, pre_p);
9338 /* Since it is zero-sized there's no increment for the item itself. */
9339 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9340 return build_va_arg_indirect_ref (valist_tmp);
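/* Worked example (illustrative): for a zero-sized type whose alignment
   boundary is 16 and valist_tmp == 0x1004, the two statements above
   compute (0x1004 + 15) & -16 == 0x1010; the pointer is rounded up to
   the boundary, and since the item itself is empty nothing further is
   consumed.  */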
9343 if (DEFAULT_ABI != ABI_V4)
9345 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9347 tree elem_type = TREE_TYPE (type);
9348 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9349 int elem_size = GET_MODE_SIZE (elem_mode);
9351 if (elem_size < UNITS_PER_WORD)
9353 tree real_part, imag_part;
9354 gimple_seq post = NULL;
9356 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9357 &post);
9358 /* Copy the value into a temporary, lest the formal temporary
9359 be reused out from under us. */
9360 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9361 gimple_seq_add_seq (pre_p, post);
9363 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9364 post_p);
9366 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
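/* Illustrative case: on a 64-bit non-V4 target a _Complex float has
   4-byte elements, smaller than the 8-byte word, so the two recursive
   calls above fetch the real and imaginary parts as two separate
   float-sized va_args and rejoin them with a COMPLEX_EXPR.  */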
9370 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9373 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9374 f_fpr = DECL_CHAIN (f_gpr);
9375 f_res = DECL_CHAIN (f_fpr);
9376 f_ovf = DECL_CHAIN (f_res);
9377 f_sav = DECL_CHAIN (f_ovf);
9379 valist = build_va_arg_indirect_ref (valist);
9380 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9381 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9382 f_fpr, NULL_TREE);
9383 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9384 f_ovf, NULL_TREE);
9385 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9386 f_sav, NULL_TREE);
9388 size = int_size_in_bytes (type);
9389 rsize = (size + 3) / 4;
9390 align = 1;
9392 if (TARGET_HARD_FLOAT && TARGET_FPRS
9393 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9394 || (TARGET_DOUBLE_FLOAT
9395 && (TYPE_MODE (type) == DFmode
9396 || TYPE_MODE (type) == TFmode
9397 || TYPE_MODE (type) == SDmode
9398 || TYPE_MODE (type) == DDmode
9399 || TYPE_MODE (type) == TDmode))))
9401 /* FP args go in FP registers, if present. */
9402 reg = fpr;
9403 n_reg = (size + 7) / 8;
9404 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9405 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9406 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9407 align = 8;
9409 else
9411 /* Otherwise into GP registers. */
9412 reg = gpr;
9413 n_reg = rsize;
9414 sav_ofs = 0;
9415 sav_scale = 4;
9416 if (n_reg == 2)
9417 align = 8;
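/* Concrete numbers (illustrative): on 32-bit SVR4 with double-float
   support, sav_ofs is 8 * 4 = 32 bytes, skipping the eight 4-byte GPR
   save slots, and FP values are then indexed in sav_scale = 8 byte
   steps; a GP item instead starts at offset 0 in 4-byte steps, with a
   two-register item such as long long forcing align = 8.  */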
9420 /* Pull the value out of the saved registers.... */
9422 lab_over = NULL;
9423 addr = create_tmp_var (ptr_type_node, "addr");
9425 /* AltiVec vectors never go in registers when -mabi=altivec. */
9426 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9427 align = 16;
9428 else
9430 lab_false = create_artificial_label (input_location);
9431 lab_over = create_artificial_label (input_location);
9433 /* Long long and SPE vectors are aligned in the registers.
9434 As is any other 2-GPR item, such as complex int, due to a
9435 historical mistake. */
9436 u = reg;
9437 if (n_reg == 2 && reg == gpr)
9439 regalign = 1;
9440 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9441 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9442 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9443 unshare_expr (reg), u);
9445 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9446 reg number is 0 for f1, so we want to make it odd. */
9447 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9449 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9450 build_int_cst (TREE_TYPE (reg), 1));
9451 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
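/* Example: a counter of 0 (next register f1) becomes 1, i.e. f2, the
   first valid starting register of an even/odd pair; an already-odd
   counter is left unchanged by the OR.  */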
9454 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9455 t = build2 (GE_EXPR, boolean_type_node, u, t);
9456 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9457 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9458 gimplify_and_add (t, pre_p);
9460 t = sav;
9461 if (sav_ofs)
9462 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9464 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9465 build_int_cst (TREE_TYPE (reg), n_reg));
9466 u = fold_convert (sizetype, u);
9467 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9468 t = fold_build_pointer_plus (t, u);
9470 /* _Decimal32 varargs are located in the second word of the 64-bit
9471 FP register for 32-bit binaries. */
9472 if (!TARGET_POWERPC64
9473 && TARGET_HARD_FLOAT && TARGET_FPRS
9474 && TYPE_MODE (type) == SDmode)
9475 t = fold_build_pointer_plus_hwi (t, size);
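/* Example (illustrative): a _Decimal32 saved from f1 occupies the
   high-addressed half of the 8-byte slot in a 32-bit (big-endian)
   binary, so the address computed above is bumped by size == 4.  */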
9477 gimplify_assign (addr, t, pre_p);
9479 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9481 stmt = gimple_build_label (lab_false);
9482 gimple_seq_add_stmt (pre_p, stmt);
9484 if ((n_reg == 2 && !regalign) || n_reg > 2)
9486 /* Ensure that we don't find any more args in regs.
9487 Alignment has been taken care of for the special cases. */
9488 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9492 /* ... otherwise out of the overflow area. */
9494 /* Care for on-stack alignment if needed. */
9495 t = ovf;
9496 if (align != 1)
9498 t = fold_build_pointer_plus_hwi (t, align - 1);
9499 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9500 build_int_cst (TREE_TYPE (t), -align));
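/* Worked example: with align == 8 and an overflow pointer of 0x100c,
   (0x100c + 7) & -8 == 0x1010, the next suitably aligned stack slot.  */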
9502 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9504 gimplify_assign (unshare_expr (addr), t, pre_p);
9506 t = fold_build_pointer_plus_hwi (t, size);
9507 gimplify_assign (unshare_expr (ovf), t, pre_p);
9509 if (lab_over)
9511 stmt = gimple_build_label (lab_over);
9512 gimple_seq_add_stmt (pre_p, stmt);
9515 if (STRICT_ALIGNMENT
9516 && (TYPE_ALIGN (type)
9517 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9519 /* The value (of type complex double, for example) may not be
9520 aligned in memory in the saved registers, so copy via a
9521 temporary. (This is the same code as used for SPARC.) */
9522 tree tmp = create_tmp_var (type, "va_arg_tmp");
9523 tree dest_addr = build_fold_addr_expr (tmp);
9525 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9526 3, dest_addr, addr, size_int (rsize * 4));
9528 gimplify_and_add (copy, pre_p);
9529 addr = dest_addr;
9532 addr = fold_convert (ptrtype, addr);
9533 return build_va_arg_indirect_ref (addr);
9536 /* Builtins. */
9538 static void
9539 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9541 tree t;
9542 unsigned classify = rs6000_builtin_info[(int)code].attr;
9543 const char *attr_string = "";
9545 gcc_assert (name != NULL);
9546 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
9548 if (rs6000_builtin_decls[(int)code])
9549 fatal_error ("internal error: builtin function %s already processed", name);
9551 rs6000_builtin_decls[(int)code] = t =
9552 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9554 /* Set any special attributes. */
9555 if ((classify & RS6000_BTC_CONST) != 0)
9557 /* const function, function only depends on the inputs. */
9558 TREE_READONLY (t) = 1;
9559 TREE_NOTHROW (t) = 1;
9560 attr_string = ", pure";
9562 else if ((classify & RS6000_BTC_PURE) != 0)
9564 /* pure function, function can read global memory, but does not set any
9565 external state. */
9566 DECL_PURE_P (t) = 1;
9567 TREE_NOTHROW (t) = 1;
9568 attr_string = ", const";
9570 else if ((classify & RS6000_BTC_FP) != 0)
9572 /* Function is a math function. If rounding mode is on, then treat the
9573 function as not reading global memory, but it can have arbitrary side
9574 effects. If it is off, then assume the function is a const function.
9575 This mimics the ATTR_MATHFN_FPROUNDING attribute in
9576 builtin-attrs.def that is used for the math functions. */
9577 TREE_NOTHROW (t) = 1;
9578 if (flag_rounding_math)
9580 DECL_PURE_P (t) = 1;
9581 DECL_IS_NOVOPS (t) = 1;
9582 attr_string = ", fp, pure";
9584 else
9586 TREE_READONLY (t) = 1;
9587 attr_string = ", fp, const";
9590 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9591 gcc_unreachable ();
9593 if (TARGET_DEBUG_BUILTIN)
9594 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9595 (int)code, name, attr_string);
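/* Hypothetical usage sketch (names for illustration only): a const
   binary AltiVec builtin would be registered as

     def_builtin ("__builtin_altivec_vaddubm", v16qi_ftype_v16qi_v16qi,
                  ALTIVEC_BUILTIN_VADDUBM);

   and, its attribute word carrying RS6000_BTC_CONST, the decl comes
   back marked TREE_READONLY and TREE_NOTHROW as above.  */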
9598 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9600 #undef RS6000_BUILTIN_1
9601 #undef RS6000_BUILTIN_2
9602 #undef RS6000_BUILTIN_3
9603 #undef RS6000_BUILTIN_A
9604 #undef RS6000_BUILTIN_D
9605 #undef RS6000_BUILTIN_E
9606 #undef RS6000_BUILTIN_P
9607 #undef RS6000_BUILTIN_Q
9608 #undef RS6000_BUILTIN_S
9609 #undef RS6000_BUILTIN_X
9611 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9612 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9613 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9614 { MASK, ICODE, NAME, ENUM },
9616 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9617 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9618 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9619 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9620 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9621 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9622 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9624 static const struct builtin_description bdesc_3arg[] =
9626 #include "rs6000-builtin.def"
9629 /* DST operations: void foo (void *, const int, const char). */
9631 #undef RS6000_BUILTIN_1
9632 #undef RS6000_BUILTIN_2
9633 #undef RS6000_BUILTIN_3
9634 #undef RS6000_BUILTIN_A
9635 #undef RS6000_BUILTIN_D
9636 #undef RS6000_BUILTIN_E
9637 #undef RS6000_BUILTIN_P
9638 #undef RS6000_BUILTIN_Q
9639 #undef RS6000_BUILTIN_S
9640 #undef RS6000_BUILTIN_X
9642 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9643 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9644 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9645 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9646 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9647 { MASK, ICODE, NAME, ENUM },
9649 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9650 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9651 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9652 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9653 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9655 static const struct builtin_description bdesc_dst[] =
9657 #include "rs6000-builtin.def"
9660 /* Simple binary operations: VECc = foo (VECa, VECb). */
9662 #undef RS6000_BUILTIN_1
9663 #undef RS6000_BUILTIN_2
9664 #undef RS6000_BUILTIN_3
9665 #undef RS6000_BUILTIN_A
9666 #undef RS6000_BUILTIN_D
9667 #undef RS6000_BUILTIN_E
9668 #undef RS6000_BUILTIN_P
9669 #undef RS6000_BUILTIN_Q
9670 #undef RS6000_BUILTIN_S
9671 #undef RS6000_BUILTIN_X
9673 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9674 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9675 { MASK, ICODE, NAME, ENUM },
9677 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9678 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9679 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9680 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9681 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9682 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9683 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9684 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9686 static const struct builtin_description bdesc_2arg[] =
9688 #include "rs6000-builtin.def"
9691 #undef RS6000_BUILTIN_1
9692 #undef RS6000_BUILTIN_2
9693 #undef RS6000_BUILTIN_3
9694 #undef RS6000_BUILTIN_A
9695 #undef RS6000_BUILTIN_D
9696 #undef RS6000_BUILTIN_E
9697 #undef RS6000_BUILTIN_P
9698 #undef RS6000_BUILTIN_Q
9699 #undef RS6000_BUILTIN_S
9700 #undef RS6000_BUILTIN_X
9702 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9703 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9704 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9705 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9706 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9707 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9708 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9709 { MASK, ICODE, NAME, ENUM },
9711 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9712 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9713 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9715 /* AltiVec predicates. */
9717 static const struct builtin_description bdesc_altivec_preds[] =
9719 #include "rs6000-builtin.def"
9722 /* SPE predicates. */
9723 #undef RS6000_BUILTIN_1
9724 #undef RS6000_BUILTIN_2
9725 #undef RS6000_BUILTIN_3
9726 #undef RS6000_BUILTIN_A
9727 #undef RS6000_BUILTIN_D
9728 #undef RS6000_BUILTIN_E
9729 #undef RS6000_BUILTIN_P
9730 #undef RS6000_BUILTIN_Q
9731 #undef RS6000_BUILTIN_S
9732 #undef RS6000_BUILTIN_X
9734 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9735 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9736 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9737 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9738 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9739 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9740 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9741 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9742 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9743 { MASK, ICODE, NAME, ENUM },
9745 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9747 static const struct builtin_description bdesc_spe_predicates[] =
9749 #include "rs6000-builtin.def"
9752 /* SPE evsel predicates. */
9753 #undef RS6000_BUILTIN_1
9754 #undef RS6000_BUILTIN_2
9755 #undef RS6000_BUILTIN_3
9756 #undef RS6000_BUILTIN_A
9757 #undef RS6000_BUILTIN_D
9758 #undef RS6000_BUILTIN_E
9759 #undef RS6000_BUILTIN_P
9760 #undef RS6000_BUILTIN_Q
9761 #undef RS6000_BUILTIN_S
9762 #undef RS6000_BUILTIN_X
9764 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9765 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9766 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9767 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9768 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9769 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
9770 { MASK, ICODE, NAME, ENUM },
9772 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9773 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9774 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9775 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9777 static const struct builtin_description bdesc_spe_evsel[] =
9779 #include "rs6000-builtin.def"
9782 /* PAIRED predicates. */
9783 #undef RS6000_BUILTIN_1
9784 #undef RS6000_BUILTIN_2
9785 #undef RS6000_BUILTIN_3
9786 #undef RS6000_BUILTIN_A
9787 #undef RS6000_BUILTIN_D
9788 #undef RS6000_BUILTIN_E
9789 #undef RS6000_BUILTIN_P
9790 #undef RS6000_BUILTIN_Q
9791 #undef RS6000_BUILTIN_S
9792 #undef RS6000_BUILTIN_X
9794 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9795 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9796 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9797 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9798 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9799 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9800 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9801 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
9802 { MASK, ICODE, NAME, ENUM },
9804 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9805 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9807 static const struct builtin_description bdesc_paired_preds[] =
9809 #include "rs6000-builtin.def"
9812 /* ABS* operations. */
9814 #undef RS6000_BUILTIN_1
9815 #undef RS6000_BUILTIN_2
9816 #undef RS6000_BUILTIN_3
9817 #undef RS6000_BUILTIN_A
9818 #undef RS6000_BUILTIN_D
9819 #undef RS6000_BUILTIN_E
9820 #undef RS6000_BUILTIN_P
9821 #undef RS6000_BUILTIN_Q
9822 #undef RS6000_BUILTIN_S
9823 #undef RS6000_BUILTIN_X
9825 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9826 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9827 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9828 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
9829 { MASK, ICODE, NAME, ENUM },
9831 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9832 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9833 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9834 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9835 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9836 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9838 static const struct builtin_description bdesc_abs[] =
9840 #include "rs6000-builtin.def"
9843 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
9844 foo (VECa). */
9846 #undef RS6000_BUILTIN_1
9847 #undef RS6000_BUILTIN_2
9848 #undef RS6000_BUILTIN_3
9849 #undef RS6000_BUILTIN_A
9850 #undef RS6000_BUILTIN_E
9851 #undef RS6000_BUILTIN_D
9852 #undef RS6000_BUILTIN_P
9853 #undef RS6000_BUILTIN_Q
9854 #undef RS6000_BUILTIN_S
9855 #undef RS6000_BUILTIN_X
9857 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
9858 { MASK, ICODE, NAME, ENUM },
9860 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9861 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9862 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9863 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9864 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9865 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9866 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9867 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9868 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9870 static const struct builtin_description bdesc_1arg[] =
9872 #include "rs6000-builtin.def"
9875 #undef RS6000_BUILTIN_1
9876 #undef RS6000_BUILTIN_2
9877 #undef RS6000_BUILTIN_3
9878 #undef RS6000_BUILTIN_A
9879 #undef RS6000_BUILTIN_D
9880 #undef RS6000_BUILTIN_E
9881 #undef RS6000_BUILTIN_P
9882 #undef RS6000_BUILTIN_Q
9883 #undef RS6000_BUILTIN_S
9884 #undef RS6000_BUILTIN_X
9886 /* Return true if a builtin function is overloaded. */
9887 bool
9888 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
9890 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
9893 /* Expand an expression EXP that calls a builtin without arguments. */
9894 static rtx
9895 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
9897 rtx pat;
9898 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9900 if (icode == CODE_FOR_nothing)
9901 /* Builtin not supported on this processor. */
9902 return 0;
9904 if (target == 0
9905 || GET_MODE (target) != tmode
9906 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9907 target = gen_reg_rtx (tmode);
9909 pat = GEN_FCN (icode) (target);
9910 if (! pat)
9911 return 0;
9912 emit_insn (pat);
9914 return target;
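/* Note (editorial): the target-validation idiom above recurs in every
   expander that follows; the caller-suggested TARGET is reused only if
   it already has the pattern's output mode and satisfies the operand 0
   predicate, and a fresh pseudo is allocated otherwise.  */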
9918 static rtx
9919 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9921 rtx pat;
9922 tree arg0 = CALL_EXPR_ARG (exp, 0);
9923 rtx op0 = expand_normal (arg0);
9924 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9925 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9927 if (icode == CODE_FOR_nothing)
9928 /* Builtin not supported on this processor. */
9929 return 0;
9931 /* If we got invalid arguments bail out before generating bad rtl. */
9932 if (arg0 == error_mark_node)
9933 return const0_rtx;
9935 if (icode == CODE_FOR_altivec_vspltisb
9936 || icode == CODE_FOR_altivec_vspltish
9937 || icode == CODE_FOR_altivec_vspltisw
9938 || icode == CODE_FOR_spe_evsplatfi
9939 || icode == CODE_FOR_spe_evsplati)
9941 /* Only allow 5-bit *signed* literals. */
9942 if (GET_CODE (op0) != CONST_INT
9943 || INTVAL (op0) > 15
9944 || INTVAL (op0) < -16)
9946 error ("argument 1 must be a 5-bit signed literal");
9947 return const0_rtx;
9951 if (target == 0
9952 || GET_MODE (target) != tmode
9953 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9954 target = gen_reg_rtx (tmode);
9956 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9957 op0 = copy_to_mode_reg (mode0, op0);
9959 pat = GEN_FCN (icode) (target, op0);
9960 if (! pat)
9961 return 0;
9962 emit_insn (pat);
9964 return target;
9967 static rtx
9968 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9970 rtx pat, scratch1, scratch2;
9971 tree arg0 = CALL_EXPR_ARG (exp, 0);
9972 rtx op0 = expand_normal (arg0);
9973 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9974 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9976 /* If we have invalid arguments, bail out before generating bad rtl. */
9977 if (arg0 == error_mark_node)
9978 return const0_rtx;
9980 if (target == 0
9981 || GET_MODE (target) != tmode
9982 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9983 target = gen_reg_rtx (tmode);
9985 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9986 op0 = copy_to_mode_reg (mode0, op0);
9988 scratch1 = gen_reg_rtx (mode0);
9989 scratch2 = gen_reg_rtx (mode0);
9991 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9992 if (! pat)
9993 return 0;
9994 emit_insn (pat);
9996 return target;
9999 static rtx
10000 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
10002 rtx pat;
10003 tree arg0 = CALL_EXPR_ARG (exp, 0);
10004 tree arg1 = CALL_EXPR_ARG (exp, 1);
10005 rtx op0 = expand_normal (arg0);
10006 rtx op1 = expand_normal (arg1);
10007 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10008 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10009 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10011 if (icode == CODE_FOR_nothing)
10012 /* Builtin not supported on this processor. */
10013 return 0;
10015 /* If we got invalid arguments bail out before generating bad rtl. */
10016 if (arg0 == error_mark_node || arg1 == error_mark_node)
10017 return const0_rtx;
10019 if (icode == CODE_FOR_altivec_vcfux
10020 || icode == CODE_FOR_altivec_vcfsx
10021 || icode == CODE_FOR_altivec_vctsxs
10022 || icode == CODE_FOR_altivec_vctuxs
10023 || icode == CODE_FOR_altivec_vspltb
10024 || icode == CODE_FOR_altivec_vsplth
10025 || icode == CODE_FOR_altivec_vspltw
10026 || icode == CODE_FOR_spe_evaddiw
10027 || icode == CODE_FOR_spe_evldd
10028 || icode == CODE_FOR_spe_evldh
10029 || icode == CODE_FOR_spe_evldw
10030 || icode == CODE_FOR_spe_evlhhesplat
10031 || icode == CODE_FOR_spe_evlhhossplat
10032 || icode == CODE_FOR_spe_evlhhousplat
10033 || icode == CODE_FOR_spe_evlwhe
10034 || icode == CODE_FOR_spe_evlwhos
10035 || icode == CODE_FOR_spe_evlwhou
10036 || icode == CODE_FOR_spe_evlwhsplat
10037 || icode == CODE_FOR_spe_evlwwsplat
10038 || icode == CODE_FOR_spe_evrlwi
10039 || icode == CODE_FOR_spe_evslwi
10040 || icode == CODE_FOR_spe_evsrwis
10041 || icode == CODE_FOR_spe_evsubifw
10042 || icode == CODE_FOR_spe_evsrwiu)
10044 /* Only allow 5-bit unsigned literals. */
10045 STRIP_NOPS (arg1);
10046 if (TREE_CODE (arg1) != INTEGER_CST
10047 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10049 error ("argument 2 must be a 5-bit unsigned literal");
10050 return const0_rtx;
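/* Example of the check above: __builtin_altivec_vspltb (v, 3) passes,
   whereas a second argument of 32, or any value with bits outside
   0x1f, is rejected with the error just issued.  */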
10054 if (target == 0
10055 || GET_MODE (target) != tmode
10056 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10057 target = gen_reg_rtx (tmode);
10059 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10060 op0 = copy_to_mode_reg (mode0, op0);
10061 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10062 op1 = copy_to_mode_reg (mode1, op1);
10064 pat = GEN_FCN (icode) (target, op0, op1);
10065 if (! pat)
10066 return 0;
10067 emit_insn (pat);
10069 return target;
10072 static rtx
10073 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10075 rtx pat, scratch;
10076 tree cr6_form = CALL_EXPR_ARG (exp, 0);
10077 tree arg0 = CALL_EXPR_ARG (exp, 1);
10078 tree arg1 = CALL_EXPR_ARG (exp, 2);
10079 rtx op0 = expand_normal (arg0);
10080 rtx op1 = expand_normal (arg1);
10081 enum machine_mode tmode = SImode;
10082 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10083 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10084 int cr6_form_int;
10086 if (TREE_CODE (cr6_form) != INTEGER_CST)
10088 error ("argument 1 of __builtin_altivec_predicate must be a constant");
10089 return const0_rtx;
10091 else
10092 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
10094 gcc_assert (mode0 == mode1);
10096 /* If we have invalid arguments, bail out before generating bad rtl. */
10097 if (arg0 == error_mark_node || arg1 == error_mark_node)
10098 return const0_rtx;
10100 if (target == 0
10101 || GET_MODE (target) != tmode
10102 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10103 target = gen_reg_rtx (tmode);
10105 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10106 op0 = copy_to_mode_reg (mode0, op0);
10107 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10108 op1 = copy_to_mode_reg (mode1, op1);
10110 scratch = gen_reg_rtx (mode0);
10112 pat = GEN_FCN (icode) (scratch, op0, op1);
10113 if (! pat)
10114 return 0;
10115 emit_insn (pat);
10117 /* The vec_any* and vec_all* predicates use the same opcodes for two
10118 different operations, but the bits in CR6 will be different
10119 depending on what information we want. So we have to play tricks
10120 with CR6 to get the right bits out.
10122 If you think this is disgusting, look at the specs for the
10123 AltiVec predicates. */
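/* Mapping used below (matching the __CR6_* values in altivec.h):
   form 0 tests the CR6 EQ bit ("all comparisons false", vec_all_ne),
   1 its inverse ("any true", vec_any_eq), 2 the CR6 LT bit ("all
   true", vec_all_eq), and 3 its inverse ("any false", vec_any_ne),
   all over the same vcmpequ* pattern.  */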
10125 switch (cr6_form_int)
10127 case 0:
10128 emit_insn (gen_cr6_test_for_zero (target));
10129 break;
10130 case 1:
10131 emit_insn (gen_cr6_test_for_zero_reverse (target));
10132 break;
10133 case 2:
10134 emit_insn (gen_cr6_test_for_lt (target));
10135 break;
10136 case 3:
10137 emit_insn (gen_cr6_test_for_lt_reverse (target));
10138 break;
10139 default:
10140 error ("argument 1 of __builtin_altivec_predicate is out of range");
10141 break;
10144 return target;
10147 static rtx
10148 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10150 rtx pat, addr;
10151 tree arg0 = CALL_EXPR_ARG (exp, 0);
10152 tree arg1 = CALL_EXPR_ARG (exp, 1);
10153 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10154 enum machine_mode mode0 = Pmode;
10155 enum machine_mode mode1 = Pmode;
10156 rtx op0 = expand_normal (arg0);
10157 rtx op1 = expand_normal (arg1);
10159 if (icode == CODE_FOR_nothing)
10160 /* Builtin not supported on this processor. */
10161 return 0;
10163 /* If we got invalid arguments bail out before generating bad rtl. */
10164 if (arg0 == error_mark_node || arg1 == error_mark_node)
10165 return const0_rtx;
10167 if (target == 0
10168 || GET_MODE (target) != tmode
10169 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10170 target = gen_reg_rtx (tmode);
10172 op1 = copy_to_mode_reg (mode1, op1);
10174 if (op0 == const0_rtx)
10176 addr = gen_rtx_MEM (tmode, op1);
10178 else
10180 op0 = copy_to_mode_reg (mode0, op0);
10181 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10184 pat = GEN_FCN (icode) (target, addr);
10186 if (! pat)
10187 return 0;
10188 emit_insn (pat);
10190 return target;
10193 static rtx
10194 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10196 rtx pat, addr;
10197 tree arg0 = CALL_EXPR_ARG (exp, 0);
10198 tree arg1 = CALL_EXPR_ARG (exp, 1);
10199 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10200 enum machine_mode mode0 = Pmode;
10201 enum machine_mode mode1 = Pmode;
10202 rtx op0 = expand_normal (arg0);
10203 rtx op1 = expand_normal (arg1);
10205 if (icode == CODE_FOR_nothing)
10206 /* Builtin not supported on this processor. */
10207 return 0;
10209 /* If we got invalid arguments bail out before generating bad rtl. */
10210 if (arg0 == error_mark_node || arg1 == error_mark_node)
10211 return const0_rtx;
10213 if (target == 0
10214 || GET_MODE (target) != tmode
10215 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10216 target = gen_reg_rtx (tmode);
10218 op1 = copy_to_mode_reg (mode1, op1);
10220 if (op0 == const0_rtx)
10222 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10224 else
10226 op0 = copy_to_mode_reg (mode0, op0);
10227 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10230 pat = GEN_FCN (icode) (target, addr);
10232 if (! pat)
10233 return 0;
10234 emit_insn (pat);
10236 return target;
10239 static rtx
10240 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10242 tree arg0 = CALL_EXPR_ARG (exp, 0);
10243 tree arg1 = CALL_EXPR_ARG (exp, 1);
10244 tree arg2 = CALL_EXPR_ARG (exp, 2);
10245 rtx op0 = expand_normal (arg0);
10246 rtx op1 = expand_normal (arg1);
10247 rtx op2 = expand_normal (arg2);
10248 rtx pat;
10249 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10250 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10251 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10253 /* Invalid arguments. Bail before doing anything stoopid! */
10254 if (arg0 == error_mark_node
10255 || arg1 == error_mark_node
10256 || arg2 == error_mark_node)
10257 return const0_rtx;
10259 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10260 op0 = copy_to_mode_reg (mode2, op0);
10261 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10262 op1 = copy_to_mode_reg (mode0, op1);
10263 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10264 op2 = copy_to_mode_reg (mode1, op2);
10266 pat = GEN_FCN (icode) (op1, op2, op0);
10267 if (pat)
10268 emit_insn (pat);
10269 return NULL_RTX;
10272 static rtx
10273 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10275 tree arg0 = CALL_EXPR_ARG (exp, 0);
10276 tree arg1 = CALL_EXPR_ARG (exp, 1);
10277 tree arg2 = CALL_EXPR_ARG (exp, 2);
10278 rtx op0 = expand_normal (arg0);
10279 rtx op1 = expand_normal (arg1);
10280 rtx op2 = expand_normal (arg2);
10281 rtx pat, addr;
10282 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10283 enum machine_mode mode1 = Pmode;
10284 enum machine_mode mode2 = Pmode;
10286 /* Invalid arguments. Bail before doing anything stoopid! */
10287 if (arg0 == error_mark_node
10288 || arg1 == error_mark_node
10289 || arg2 == error_mark_node)
10290 return const0_rtx;
10292 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10293 op0 = copy_to_mode_reg (tmode, op0);
10295 op2 = copy_to_mode_reg (mode2, op2);
10297 if (op1 == const0_rtx)
10299 addr = gen_rtx_MEM (tmode, op2);
10301 else
10303 op1 = copy_to_mode_reg (mode1, op1);
10304 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10307 pat = GEN_FCN (icode) (addr, op0);
10308 if (pat)
10309 emit_insn (pat);
10310 return NULL_RTX;
10313 static rtx
10314 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10316 tree arg0 = CALL_EXPR_ARG (exp, 0);
10317 tree arg1 = CALL_EXPR_ARG (exp, 1);
10318 tree arg2 = CALL_EXPR_ARG (exp, 2);
10319 rtx op0 = expand_normal (arg0);
10320 rtx op1 = expand_normal (arg1);
10321 rtx op2 = expand_normal (arg2);
10322 rtx pat, addr;
10323 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10324 enum machine_mode smode = insn_data[icode].operand[1].mode;
10325 enum machine_mode mode1 = Pmode;
10326 enum machine_mode mode2 = Pmode;
10328 /* Invalid arguments. Bail before doing anything stoopid! */
10329 if (arg0 == error_mark_node
10330 || arg1 == error_mark_node
10331 || arg2 == error_mark_node)
10332 return const0_rtx;
10334 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10335 op0 = copy_to_mode_reg (smode, op0);
10337 op2 = copy_to_mode_reg (mode2, op2);
10339 if (op1 == const0_rtx)
10341 addr = gen_rtx_MEM (tmode, op2);
10343 else
10345 op1 = copy_to_mode_reg (mode1, op1);
10346 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10349 pat = GEN_FCN (icode) (addr, op0);
10350 if (pat)
10351 emit_insn (pat);
10352 return NULL_RTX;
10355 static rtx
10356 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10358 rtx pat;
10359 tree arg0 = CALL_EXPR_ARG (exp, 0);
10360 tree arg1 = CALL_EXPR_ARG (exp, 1);
10361 tree arg2 = CALL_EXPR_ARG (exp, 2);
10362 rtx op0 = expand_normal (arg0);
10363 rtx op1 = expand_normal (arg1);
10364 rtx op2 = expand_normal (arg2);
10365 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10366 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10367 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10368 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10370 if (icode == CODE_FOR_nothing)
10371 /* Builtin not supported on this processor. */
10372 return 0;
10374 /* If we got invalid arguments bail out before generating bad rtl. */
10375 if (arg0 == error_mark_node
10376 || arg1 == error_mark_node
10377 || arg2 == error_mark_node)
10378 return const0_rtx;
10380 /* Check and prepare argument depending on the instruction code.
10382 Note that a switch statement instead of the sequence of tests
10383 would be incorrect as many of the CODE_FOR values could be
10384 CODE_FOR_nothing and that would yield multiple alternatives
10385 with identical values. We'd never reach here at runtime in
10386 this case. */
10387 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10388 || icode == CODE_FOR_altivec_vsldoi_v4si
10389 || icode == CODE_FOR_altivec_vsldoi_v8hi
10390 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10392 /* Only allow 4-bit unsigned literals. */
10393 STRIP_NOPS (arg2);
10394 if (TREE_CODE (arg2) != INTEGER_CST
10395 || TREE_INT_CST_LOW (arg2) & ~0xf)
10397 error ("argument 3 must be a 4-bit unsigned literal");
10398 return const0_rtx;
10401 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10402 || icode == CODE_FOR_vsx_xxpermdi_v2di
10403 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10404 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10405 || icode == CODE_FOR_vsx_xxsldwi_v4si
10406 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10407 || icode == CODE_FOR_vsx_xxsldwi_v2di
10408 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10410 /* Only allow 2-bit unsigned literals. */
10411 STRIP_NOPS (arg2);
10412 if (TREE_CODE (arg2) != INTEGER_CST
10413 || TREE_INT_CST_LOW (arg2) & ~0x3)
10415 error ("argument 3 must be a 2-bit unsigned literal");
10416 return const0_rtx;
10419 else if (icode == CODE_FOR_vsx_set_v2df
10420 || icode == CODE_FOR_vsx_set_v2di)
10422 /* Only allow 1-bit unsigned literals. */
10423 STRIP_NOPS (arg2);
10424 if (TREE_CODE (arg2) != INTEGER_CST
10425 || TREE_INT_CST_LOW (arg2) & ~0x1)
10427 error ("argument 3 must be a 1-bit unsigned literal");
10428 return const0_rtx;
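/* Examples of the checks above (illustrative): vec_sld (a, b, 3) maps
   to vsldoi with literal 3 and passes the 4-bit test, while a shift of
   16 is rejected; likewise xxpermdi/xxsldwi accept only 0..3 and
   vsx_set only 0..1.  */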
10432 if (target == 0
10433 || GET_MODE (target) != tmode
10434 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10435 target = gen_reg_rtx (tmode);
10437 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10438 op0 = copy_to_mode_reg (mode0, op0);
10439 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10440 op1 = copy_to_mode_reg (mode1, op1);
10441 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10442 op2 = copy_to_mode_reg (mode2, op2);
10444 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10445 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10446 else
10447 pat = GEN_FCN (icode) (target, op0, op1, op2);
10448 if (! pat)
10449 return 0;
10450 emit_insn (pat);
10452 return target;
10455 /* Expand the lvx builtins. */
10456 static rtx
10457 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10459 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10460 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10461 tree arg0;
10462 enum machine_mode tmode, mode0;
10463 rtx pat, op0;
10464 enum insn_code icode;
10466 switch (fcode)
10468 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10469 icode = CODE_FOR_vector_altivec_load_v16qi;
10470 break;
10471 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10472 icode = CODE_FOR_vector_altivec_load_v8hi;
10473 break;
10474 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10475 icode = CODE_FOR_vector_altivec_load_v4si;
10476 break;
10477 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10478 icode = CODE_FOR_vector_altivec_load_v4sf;
10479 break;
10480 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10481 icode = CODE_FOR_vector_altivec_load_v2df;
10482 break;
10483 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10484 icode = CODE_FOR_vector_altivec_load_v2di;
10485 break;
10486 default:
10487 *expandedp = false;
10488 return NULL_RTX;
10491 *expandedp = true;
10493 arg0 = CALL_EXPR_ARG (exp, 0);
10494 op0 = expand_normal (arg0);
10495 tmode = insn_data[icode].operand[0].mode;
10496 mode0 = insn_data[icode].operand[1].mode;
10498 if (target == 0
10499 || GET_MODE (target) != tmode
10500 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10501 target = gen_reg_rtx (tmode);
10503 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10504 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10506 pat = GEN_FCN (icode) (target, op0);
10507 if (! pat)
10508 return 0;
10509 emit_insn (pat);
10510 return target;
10513 /* Expand the stvx builtins. */
10514 static rtx
10515 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10516 bool *expandedp)
10518 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10519 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10520 tree arg0, arg1;
10521 enum machine_mode mode0, mode1;
10522 rtx pat, op0, op1;
10523 enum insn_code icode;
10525 switch (fcode)
10527 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10528 icode = CODE_FOR_vector_altivec_store_v16qi;
10529 break;
10530 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10531 icode = CODE_FOR_vector_altivec_store_v8hi;
10532 break;
10533 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10534 icode = CODE_FOR_vector_altivec_store_v4si;
10535 break;
10536 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10537 icode = CODE_FOR_vector_altivec_store_v4sf;
10538 break;
10539 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10540 icode = CODE_FOR_vector_altivec_store_v2df;
10541 break;
10542 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10543 icode = CODE_FOR_vector_altivec_store_v2di;
10544 break;
10545 default:
10546 *expandedp = false;
10547 return NULL_RTX;
10550 arg0 = CALL_EXPR_ARG (exp, 0);
10551 arg1 = CALL_EXPR_ARG (exp, 1);
10552 op0 = expand_normal (arg0);
10553 op1 = expand_normal (arg1);
10554 mode0 = insn_data[icode].operand[0].mode;
10555 mode1 = insn_data[icode].operand[1].mode;
10557 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10558 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10559 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10560 op1 = copy_to_mode_reg (mode1, op1);
10562 pat = GEN_FCN (icode) (op0, op1);
10563 if (pat)
10564 emit_insn (pat);
10566 *expandedp = true;
10567 return NULL_RTX;
10570 /* Expand the dst builtins. */
10571 static rtx
10572 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10573 bool *expandedp)
10575 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10576 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10577 tree arg0, arg1, arg2;
10578 enum machine_mode mode0, mode1;
10579 rtx pat, op0, op1, op2;
10580 const struct builtin_description *d;
10581 size_t i;
10583 *expandedp = false;
10585 /* Handle DST variants. */
10586 d = bdesc_dst;
10587 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10588 if (d->code == fcode)
10590 arg0 = CALL_EXPR_ARG (exp, 0);
10591 arg1 = CALL_EXPR_ARG (exp, 1);
10592 arg2 = CALL_EXPR_ARG (exp, 2);
10593 op0 = expand_normal (arg0);
10594 op1 = expand_normal (arg1);
10595 op2 = expand_normal (arg2);
10596 mode0 = insn_data[d->icode].operand[0].mode;
10597 mode1 = insn_data[d->icode].operand[1].mode;
10599 /* Invalid arguments, bail out before generating bad rtl. */
10600 if (arg0 == error_mark_node
10601 || arg1 == error_mark_node
10602 || arg2 == error_mark_node)
10603 return const0_rtx;
10605 *expandedp = true;
10606 STRIP_NOPS (arg2);
10607 if (TREE_CODE (arg2) != INTEGER_CST
10608 || TREE_INT_CST_LOW (arg2) & ~0x3)
10610 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10611 return const0_rtx;
10614 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10615 op0 = copy_to_mode_reg (Pmode, op0);
10616 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10617 op1 = copy_to_mode_reg (mode1, op1);
10619 pat = GEN_FCN (d->icode) (op0, op1, op2);
10620 if (pat != 0)
10621 emit_insn (pat);
10623 return NULL_RTX;
10626 return NULL_RTX;
10629 /* Expand vec_init builtin. */
10630 static rtx
10631 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10633 enum machine_mode tmode = TYPE_MODE (type);
10634 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10635 int i, n_elt = GET_MODE_NUNITS (tmode);
10636 rtvec v = rtvec_alloc (n_elt);
10638 gcc_assert (VECTOR_MODE_P (tmode));
10639 gcc_assert (n_elt == call_expr_nargs (exp));
10641 for (i = 0; i < n_elt; ++i)
10643 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10644 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10647 if (!target || !register_operand (target, tmode))
10648 target = gen_reg_rtx (tmode);
10650 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10651 return target;
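/* Illustrative use (assumed call form): __builtin_vec_init_v4si
   (a, b, c, d) arrives here as ALTIVEC_BUILTIN_VEC_INIT_V4SI with
   n_elt == 4 arguments, which are collected into a PARALLEL and handed
   to rs6000_expand_vector_init.  */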
10654 /* Return the integer constant in ARG. Constrain it to be in the range
10655 of the subparts of VEC_TYPE; issue an error if not. */
10657 static int
10658 get_element_number (tree vec_type, tree arg)
10660 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10662 if (!host_integerp (arg, 1)
10663 || (elt = tree_low_cst (arg, 1), elt > max))
10665 error ("selector must be an integer constant in the range 0..%wi", max);
10666 return 0;
10669 return elt;
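/* Usage sketch: for a vector type with four subparts (e.g. V4SI),
   max is 3, so selectors 0..3 are accepted while an out-of-range
   constant such as 5 draws the error above and falls back to
   element 0.  */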
10672 /* Expand vec_set builtin. */
10673 static rtx
10674 altivec_expand_vec_set_builtin (tree exp)
10676 enum machine_mode tmode, mode1;
10677 tree arg0, arg1, arg2;
10678 int elt;
10679 rtx op0, op1;
10681 arg0 = CALL_EXPR_ARG (exp, 0);
10682 arg1 = CALL_EXPR_ARG (exp, 1);
10683 arg2 = CALL_EXPR_ARG (exp, 2);
10685 tmode = TYPE_MODE (TREE_TYPE (arg0));
10686 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10687 gcc_assert (VECTOR_MODE_P (tmode));
10689 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10690 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10691 elt = get_element_number (TREE_TYPE (arg0), arg2);
10693 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10694 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10696 op0 = force_reg (tmode, op0);
10697 op1 = force_reg (mode1, op1);
10699 rs6000_expand_vector_set (op0, op1, elt);
10701 return op0;
10704 /* Expand vec_ext builtin. */
10705 static rtx
10706 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10708 enum machine_mode tmode, mode0;
10709 tree arg0, arg1;
10710 int elt;
10711 rtx op0;
10713 arg0 = CALL_EXPR_ARG (exp, 0);
10714 arg1 = CALL_EXPR_ARG (exp, 1);
10716 op0 = expand_normal (arg0);
10717 elt = get_element_number (TREE_TYPE (arg0), arg1);
10719 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10720 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10721 gcc_assert (VECTOR_MODE_P (mode0));
10723 op0 = force_reg (mode0, op0);
10725 if (optimize || !target || !register_operand (target, tmode))
10726 target = gen_reg_rtx (tmode);
10728 rs6000_expand_vector_extract (target, op0, elt);
10730 return target;
10733 /* Expand the builtin in EXP and store the result in TARGET. Store
10734 true in *EXPANDEDP if we found a builtin to expand. */
10735 static rtx
10736 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10738 const struct builtin_description *d;
10739 size_t i;
10740 enum insn_code icode;
10741 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10742 tree arg0;
10743 rtx op0, pat;
10744 enum machine_mode tmode, mode0;
10745 enum rs6000_builtins fcode
10746 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10748 if (rs6000_overloaded_builtin_p (fcode))
10750 *expandedp = true;
10751 error ("unresolved overload for Altivec builtin %qF", fndecl);
10753 /* Given it is invalid, just generate a normal call. */
10754 return expand_call (exp, target, false);
10757 target = altivec_expand_ld_builtin (exp, target, expandedp);
10758 if (*expandedp)
10759 return target;
10761 target = altivec_expand_st_builtin (exp, target, expandedp);
10762 if (*expandedp)
10763 return target;
10765 target = altivec_expand_dst_builtin (exp, target, expandedp);
10766 if (*expandedp)
10767 return target;
10769 *expandedp = true;
10771 switch (fcode)
10773 case ALTIVEC_BUILTIN_STVX:
10774 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
10775 case ALTIVEC_BUILTIN_STVEBX:
10776 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10777 case ALTIVEC_BUILTIN_STVEHX:
10778 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10779 case ALTIVEC_BUILTIN_STVEWX:
10780 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10781 case ALTIVEC_BUILTIN_STVXL:
10782 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10784 case ALTIVEC_BUILTIN_STVLX:
10785 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10786 case ALTIVEC_BUILTIN_STVLXL:
10787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10788 case ALTIVEC_BUILTIN_STVRX:
10789 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10790 case ALTIVEC_BUILTIN_STVRXL:
10791 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10793 case VSX_BUILTIN_STXVD2X_V2DF:
10794 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
10795 case VSX_BUILTIN_STXVD2X_V2DI:
10796 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
10797 case VSX_BUILTIN_STXVW4X_V4SF:
10798 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
10799 case VSX_BUILTIN_STXVW4X_V4SI:
10800 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
10801 case VSX_BUILTIN_STXVW4X_V8HI:
10802 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
10803 case VSX_BUILTIN_STXVW4X_V16QI:
10804 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
10806 case ALTIVEC_BUILTIN_MFVSCR:
10807 icode = CODE_FOR_altivec_mfvscr;
10808 tmode = insn_data[icode].operand[0].mode;
10810 if (target == 0
10811 || GET_MODE (target) != tmode
10812 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10813 target = gen_reg_rtx (tmode);
10815 pat = GEN_FCN (icode) (target);
10816 if (! pat)
10817 return 0;
10818 emit_insn (pat);
10819 return target;
10821 case ALTIVEC_BUILTIN_MTVSCR:
10822 icode = CODE_FOR_altivec_mtvscr;
10823 arg0 = CALL_EXPR_ARG (exp, 0);
10824 op0 = expand_normal (arg0);
10825 mode0 = insn_data[icode].operand[0].mode;
10827 /* If we got invalid arguments bail out before generating bad rtl. */
10828 if (arg0 == error_mark_node)
10829 return const0_rtx;
10831 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10832 op0 = copy_to_mode_reg (mode0, op0);
10834 pat = GEN_FCN (icode) (op0);
10835 if (pat)
10836 emit_insn (pat);
10837 return NULL_RTX;
10839 case ALTIVEC_BUILTIN_DSSALL:
10840 emit_insn (gen_altivec_dssall ());
10841 return NULL_RTX;
10843 case ALTIVEC_BUILTIN_DSS:
10844 icode = CODE_FOR_altivec_dss;
10845 arg0 = CALL_EXPR_ARG (exp, 0);
10846 STRIP_NOPS (arg0);
10847 op0 = expand_normal (arg0);
10848 mode0 = insn_data[icode].operand[0].mode;
10850 /* If we got invalid arguments bail out before generating bad rtl. */
10851 if (arg0 == error_mark_node)
10852 return const0_rtx;
10854 if (TREE_CODE (arg0) != INTEGER_CST
10855 || TREE_INT_CST_LOW (arg0) & ~0x3)
10857 error ("argument to dss must be a 2-bit unsigned literal");
10858 return const0_rtx;
10861 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10862 op0 = copy_to_mode_reg (mode0, op0);
10864 emit_insn (gen_altivec_dss (op0));
10865 return NULL_RTX;
10867 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10868 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10869 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10870 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10871 case VSX_BUILTIN_VEC_INIT_V2DF:
10872 case VSX_BUILTIN_VEC_INIT_V2DI:
10873 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10875 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10876 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10877 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10878 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10879 case VSX_BUILTIN_VEC_SET_V2DF:
10880 case VSX_BUILTIN_VEC_SET_V2DI:
10881 return altivec_expand_vec_set_builtin (exp);
10883 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10884 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10885 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10886 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10887 case VSX_BUILTIN_VEC_EXT_V2DF:
10888 case VSX_BUILTIN_VEC_EXT_V2DI:
10889 return altivec_expand_vec_ext_builtin (exp, target);
10891 default:
10892 break;
10893 /* Fall through to the generic table handling below. */
10896 /* Expand abs* operations. */
10897 d = bdesc_abs;
10898 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10899 if (d->code == fcode)
10900 return altivec_expand_abs_builtin (d->icode, exp, target);
10902 /* Expand the AltiVec predicates. */
10903 d = bdesc_altivec_preds;
10904 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
10905 if (d->code == fcode)
10906 return altivec_expand_predicate_builtin (d->icode, exp, target);
10908 /* LV* are funky. We initialize them differently. */
10909 switch (fcode)
10911 case ALTIVEC_BUILTIN_LVSL:
10912 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10913 exp, target, false);
10914 case ALTIVEC_BUILTIN_LVSR:
10915 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10916 exp, target, false);
10917 case ALTIVEC_BUILTIN_LVEBX:
10918 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10919 exp, target, false);
10920 case ALTIVEC_BUILTIN_LVEHX:
10921 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10922 exp, target, false);
10923 case ALTIVEC_BUILTIN_LVEWX:
10924 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10925 exp, target, false);
10926 case ALTIVEC_BUILTIN_LVXL:
10927 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10928 exp, target, false);
10929 case ALTIVEC_BUILTIN_LVX:
10930 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
10931 exp, target, false);
10932 case ALTIVEC_BUILTIN_LVLX:
10933 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10934 exp, target, true);
10935 case ALTIVEC_BUILTIN_LVLXL:
10936 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10937 exp, target, true);
10938 case ALTIVEC_BUILTIN_LVRX:
10939 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10940 exp, target, true);
10941 case ALTIVEC_BUILTIN_LVRXL:
10942 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10943 exp, target, true);
10944 case VSX_BUILTIN_LXVD2X_V2DF:
10945 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
10946 exp, target, false);
10947 case VSX_BUILTIN_LXVD2X_V2DI:
10948 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
10949 exp, target, false);
10950 case VSX_BUILTIN_LXVW4X_V4SF:
10951 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
10952 exp, target, false);
10953 case VSX_BUILTIN_LXVW4X_V4SI:
10954 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
10955 exp, target, false);
10956 case VSX_BUILTIN_LXVW4X_V8HI:
10957 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
10958 exp, target, false);
10959 case VSX_BUILTIN_LXVW4X_V16QI:
10960 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
10961 exp, target, false);
10962 break;
10963 default:
10964 break;
10965 /* Fall through: not an LV* builtin, so report no expansion. */
10968 *expandedp = false;
10969 return NULL_RTX;
10972 /* Expand the builtin in EXP and store the result in TARGET. Store
10973 true in *EXPANDEDP if we found a builtin to expand. */
10974 static rtx
10975 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10977 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10978 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10979 const struct builtin_description *d;
10980 size_t i;
10982 *expandedp = true;
10984 switch (fcode)
10986 case PAIRED_BUILTIN_STX:
10987 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10988 case PAIRED_BUILTIN_LX:
10989 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10990 default:
10991 break;
10992 /* Fall through. */
10995 /* Expand the paired predicates. */
10996 d = bdesc_paired_preds;
10997 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10998 if (d->code == fcode)
10999 return paired_expand_predicate_builtin (d->icode, exp, target);
11001 *expandedp = false;
11002 return NULL_RTX;
11005 /* Binops that need to be initialized manually, but can be expanded
11006 automagically by rs6000_expand_binop_builtin. */
11007 static const struct builtin_description bdesc_2arg_spe[] =
11009 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
11010 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
11011 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
11012 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
11013 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
11014 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
11015 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
11016 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
11017 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
11018 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
11019 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
11020 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
11021 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
11022 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
11023 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
11024 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
11025 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
11026 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
11027 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
11028 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
11029 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
11030 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
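/* Illustrative only (user-level sketch; argument order follows the
   v2si_ftype_pv2si_int and related signatures registered in
   spe_init_builtins below):

       __ev64_opaque__ *p;
       __ev64_opaque__ v = __builtin_spe_evlddx (p, 8);  */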
11033 /* Expand the builtin in EXP and store the result in TARGET. Store
11034 true in *EXPANDEDP if we found a builtin to expand.
11036 This expands the SPE builtins that are not simple unary and binary
11037 operations. */
11038 static rtx
11039 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
11041 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11042 tree arg1, arg0;
11043 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
11044 enum insn_code icode;
11045 enum machine_mode tmode, mode0;
11046 rtx pat, op0;
11047 const struct builtin_description *d;
11048 size_t i;
11050 *expandedp = true;
11052 /* Syntax check for a 5-bit unsigned immediate. */
11053 switch (fcode)
11055 case SPE_BUILTIN_EVSTDD:
11056 case SPE_BUILTIN_EVSTDH:
11057 case SPE_BUILTIN_EVSTDW:
11058 case SPE_BUILTIN_EVSTWHE:
11059 case SPE_BUILTIN_EVSTWHO:
11060 case SPE_BUILTIN_EVSTWWE:
11061 case SPE_BUILTIN_EVSTWWO:
11062 arg1 = CALL_EXPR_ARG (exp, 2);
11063 if (TREE_CODE (arg1) != INTEGER_CST
11064 || TREE_INT_CST_LOW (arg1) & ~0x1f)
11066 error ("argument 2 must be a 5-bit unsigned literal");
11067 return const0_rtx;
11069 break;
11070 default:
11071 break;
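/* Illustrative only: given the 5-bit check above, a call such as

       __builtin_spe_evstdd (v, p, 31);

   is accepted, while passing 32 (or a non-constant expression) as the
   last argument produces the "5-bit unsigned literal" error.  */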
11074 /* The evsplat*i instructions are not quite generic. */
11075 switch (fcode)
11077 case SPE_BUILTIN_EVSPLATFI:
11078 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
11079 exp, target);
11080 case SPE_BUILTIN_EVSPLATI:
11081 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
11082 exp, target);
11083 default:
11084 break;
11087 d = bdesc_2arg_spe;
11088 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
11089 if (d->code == fcode)
11090 return rs6000_expand_binop_builtin (d->icode, exp, target);
11092 d = bdesc_spe_predicates;
11093 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
11094 if (d->code == fcode)
11095 return spe_expand_predicate_builtin (d->icode, exp, target);
11097 d = bdesc_spe_evsel;
11098 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
11099 if (d->code == fcode)
11100 return spe_expand_evsel_builtin (d->icode, exp, target);
11102 switch (fcode)
11104 case SPE_BUILTIN_EVSTDDX:
11105 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
11106 case SPE_BUILTIN_EVSTDHX:
11107 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
11108 case SPE_BUILTIN_EVSTDWX:
11109 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
11110 case SPE_BUILTIN_EVSTWHEX:
11111 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
11112 case SPE_BUILTIN_EVSTWHOX:
11113 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
11114 case SPE_BUILTIN_EVSTWWEX:
11115 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
11116 case SPE_BUILTIN_EVSTWWOX:
11117 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
11118 case SPE_BUILTIN_EVSTDD:
11119 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
11120 case SPE_BUILTIN_EVSTDH:
11121 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
11122 case SPE_BUILTIN_EVSTDW:
11123 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
11124 case SPE_BUILTIN_EVSTWHE:
11125 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
11126 case SPE_BUILTIN_EVSTWHO:
11127 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
11128 case SPE_BUILTIN_EVSTWWE:
11129 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
11130 case SPE_BUILTIN_EVSTWWO:
11131 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
11132 case SPE_BUILTIN_MFSPEFSCR:
11133 icode = CODE_FOR_spe_mfspefscr;
11134 tmode = insn_data[icode].operand[0].mode;
11136 if (target == 0
11137 || GET_MODE (target) != tmode
11138 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11139 target = gen_reg_rtx (tmode);
11141 pat = GEN_FCN (icode) (target);
11142 if (! pat)
11143 return 0;
11144 emit_insn (pat);
11145 return target;
11146 case SPE_BUILTIN_MTSPEFSCR:
11147 icode = CODE_FOR_spe_mtspefscr;
11148 arg0 = CALL_EXPR_ARG (exp, 0);
11149 op0 = expand_normal (arg0);
11150 mode0 = insn_data[icode].operand[0].mode;
11152 if (arg0 == error_mark_node)
11153 return const0_rtx;
11155 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11156 op0 = copy_to_mode_reg (mode0, op0);
11158 pat = GEN_FCN (icode) (op0);
11159 if (pat)
11160 emit_insn (pat);
11161 return NULL_RTX;
11162 default:
11163 break;
11166 *expandedp = false;
11167 return NULL_RTX;
11170 static rtx
11171 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11173 rtx pat, scratch, tmp;
11174 tree form = CALL_EXPR_ARG (exp, 0);
11175 tree arg0 = CALL_EXPR_ARG (exp, 1);
11176 tree arg1 = CALL_EXPR_ARG (exp, 2);
11177 rtx op0 = expand_normal (arg0);
11178 rtx op1 = expand_normal (arg1);
11179 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11180 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11181 int form_int;
11182 enum rtx_code code;
11184 if (TREE_CODE (form) != INTEGER_CST)
11186 error ("argument 1 of __builtin_paired_predicate must be a constant");
11187 return const0_rtx;
11189 else
11190 form_int = TREE_INT_CST_LOW (form);
11192 gcc_assert (mode0 == mode1);
11194 if (arg0 == error_mark_node || arg1 == error_mark_node)
11195 return const0_rtx;
11197 if (target == 0
11198 || GET_MODE (target) != SImode
11199 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
11200 target = gen_reg_rtx (SImode);
11201 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
11202 op0 = copy_to_mode_reg (mode0, op0);
11203 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
11204 op1 = copy_to_mode_reg (mode1, op1);
11206 scratch = gen_reg_rtx (CCFPmode);
11208 pat = GEN_FCN (icode) (scratch, op0, op1);
11209 if (!pat)
11210 return const0_rtx;
11212 emit_insn (pat);
11214 switch (form_int)
11216 /* LT bit. */
11217 case 0:
11218 code = LT;
11219 break;
11220 /* GT bit. */
11221 case 1:
11222 code = GT;
11223 break;
11224 /* EQ bit. */
11225 case 2:
11226 code = EQ;
11227 break;
11228 /* UN bit. */
11229 case 3:
11230 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11231 return target;
11232 default:
11233 error ("argument 1 of __builtin_paired_predicate is out of range");
11234 return const0_rtx;
11237 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11238 emit_move_insn (target, tmp);
11239 return target;
11242 static rtx
11243 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11245 rtx pat, scratch, tmp;
11246 tree form = CALL_EXPR_ARG (exp, 0);
11247 tree arg0 = CALL_EXPR_ARG (exp, 1);
11248 tree arg1 = CALL_EXPR_ARG (exp, 2);
11249 rtx op0 = expand_normal (arg0);
11250 rtx op1 = expand_normal (arg1);
11251 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11252 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11253 int form_int;
11254 enum rtx_code code;
11256 if (TREE_CODE (form) != INTEGER_CST)
11258 error ("argument 1 of __builtin_spe_predicate must be a constant");
11259 return const0_rtx;
11261 else
11262 form_int = TREE_INT_CST_LOW (form);
11264 gcc_assert (mode0 == mode1);
11266 if (arg0 == error_mark_node || arg1 == error_mark_node)
11267 return const0_rtx;
11269 if (target == 0
11270 || GET_MODE (target) != SImode
11271 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
11272 target = gen_reg_rtx (SImode);
11274 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11275 op0 = copy_to_mode_reg (mode0, op0);
11276 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11277 op1 = copy_to_mode_reg (mode1, op1);
11279 scratch = gen_reg_rtx (CCmode);
11281 pat = GEN_FCN (icode) (scratch, op0, op1);
11282 if (! pat)
11283 return const0_rtx;
11284 emit_insn (pat);
11286 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
11287 _lower_. We use one compare, but look in different bits of the
11288 CR for each variant.
11290 There are 2 elements in each SPE simd type (upper/lower). The CR
11291 bits are set as follows:
11293 BIT 0 | BIT 1 | BIT 2 | BIT 3
11294 U | L | (U | L) | (U & L)
11296 So, for an "all" relationship, BIT 3 would be set.
11297 For an "any" relationship, BIT 2 would be set. Etc.
11299 Following traditional nomenclature, these bits map to:
11301 BIT 0 | BIT 1 | BIT 2 | BIT 3
11302 LT | GT | EQ | OV
11304 Later, we will generate rtl to look in the LT/GT/EQ/OV bits.
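/* Illustrative only (user-level sketch; the actual predicate names come
   from bdesc_spe_predicates, and the first argument selects the variant
   decoded by the switch below):

       __ev64_opaque__ a, b;
       int all_gt = __builtin_spe_evcmpgts (0, a, b);   -- form 0, "all"
       int any_gt = __builtin_spe_evcmpgts (1, a, b);   -- form 1, "any"  */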
11307 switch (form_int)
11309 /* All variant. OV bit. */
11310 case 0:
11311 /* We need to get to the OV bit, which is the ORDERED bit. We
11312 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11313 that's ugly and will make validate_condition_mode die.
11314 So let's just use another pattern. */
11315 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11316 return target;
11317 /* Any variant. EQ bit. */
11318 case 1:
11319 code = EQ;
11320 break;
11321 /* Upper variant. LT bit. */
11322 case 2:
11323 code = LT;
11324 break;
11325 /* Lower variant. GT bit. */
11326 case 3:
11327 code = GT;
11328 break;
11329 default:
11330 error ("argument 1 of __builtin_spe_predicate is out of range");
11331 return const0_rtx;
11334 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11335 emit_move_insn (target, tmp);
11337 return target;
11340 /* The evsel builtins look like this:
11342 e = __builtin_spe_evsel_OP (a, b, c, d);
11344 and work like this:
11346 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11347 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
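/* Concretely (illustrative values; the actual builtin names come from
   bdesc_spe_evsel): with a = {1, 5} and b = {4, 2},

       e = __builtin_spe_evsel_gts (a, b, c, d);

   gives e[upper] = d[upper] (1 > 4 is false) and e[lower] = c[lower]
   (5 > 2 is true).  */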
11350 static rtx
11351 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11353 rtx pat, scratch;
11354 tree arg0 = CALL_EXPR_ARG (exp, 0);
11355 tree arg1 = CALL_EXPR_ARG (exp, 1);
11356 tree arg2 = CALL_EXPR_ARG (exp, 2);
11357 tree arg3 = CALL_EXPR_ARG (exp, 3);
11358 rtx op0 = expand_normal (arg0);
11359 rtx op1 = expand_normal (arg1);
11360 rtx op2 = expand_normal (arg2);
11361 rtx op3 = expand_normal (arg3);
11362 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11363 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11365 gcc_assert (mode0 == mode1);
11367 if (arg0 == error_mark_node || arg1 == error_mark_node
11368 || arg2 == error_mark_node || arg3 == error_mark_node)
11369 return const0_rtx;
11371 if (target == 0
11372 || GET_MODE (target) != mode0
11373 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11374 target = gen_reg_rtx (mode0);
11376 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11377 op0 = copy_to_mode_reg (mode0, op0);
11378 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11379 op1 = copy_to_mode_reg (mode0, op1);
11380 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11381 op2 = copy_to_mode_reg (mode0, op2);
11382 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11383 op3 = copy_to_mode_reg (mode0, op3);
11385 /* Generate the compare. */
11386 scratch = gen_reg_rtx (CCmode);
11387 pat = GEN_FCN (icode) (scratch, op0, op1);
11388 if (! pat)
11389 return const0_rtx;
11390 emit_insn (pat);
11392 if (mode0 == V2SImode)
11393 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11394 else
11395 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11397 return target;
11400 /* Raise an error message for a builtin function that is called without the
11401 appropriate target options being set. */
11403 static void
11404 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11406 size_t uns_fncode = (size_t)fncode;
11407 const char *name = rs6000_builtin_info[uns_fncode].name;
11408 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
11410 gcc_assert (name != NULL);
11411 if ((fnmask & RS6000_BTM_CELL) != 0)
11412 error ("Builtin function %s is only valid for the cell processor", name);
11413 else if ((fnmask & RS6000_BTM_VSX) != 0)
11414 error ("Builtin function %s requires the -mvsx option", name);
11415 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11416 error ("Builtin function %s requires the -maltivec option", name);
11417 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11418 error ("Builtin function %s requires the -mpaired option", name);
11419 else if ((fnmask & RS6000_BTM_SPE) != 0)
11420 error ("Builtin function %s requires the -mspe option", name);
11421 else
11422 error ("Builtin function %s is not supported with the current options",
11423 name);
11426 /* Expand an expression EXP that calls a built-in function,
11427 with result going to TARGET if that's convenient
11428 (and in mode MODE if that's convenient).
11429 SUBTARGET may be used as the target for computing one of EXP's operands.
11430 IGNORE is nonzero if the value is to be ignored. */
11432 static rtx
11433 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11434 enum machine_mode mode ATTRIBUTE_UNUSED,
11435 int ignore ATTRIBUTE_UNUSED)
11437 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11438 enum rs6000_builtins fcode
11439 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11440 size_t uns_fcode = (size_t)fcode;
11441 const struct builtin_description *d;
11442 size_t i;
11443 rtx ret;
11444 bool success;
11445 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
11446 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
11448 if (TARGET_DEBUG_BUILTIN)
11450 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11451 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11452 const char *name2 = ((icode != CODE_FOR_nothing)
11453 ? get_insn_name ((int)icode)
11454 : "nothing");
11455 const char *name3;
11457 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11459 default: name3 = "unknown"; break;
11460 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11461 case RS6000_BTC_UNARY: name3 = "unary"; break;
11462 case RS6000_BTC_BINARY: name3 = "binary"; break;
11463 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11464 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11465 case RS6000_BTC_ABS: name3 = "abs"; break;
11466 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11467 case RS6000_BTC_DST: name3 = "dst"; break;
11471 fprintf (stderr,
11472 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11473 (name1) ? name1 : "---", fcode,
11474 (name2) ? name2 : "---", (int)icode,
11475 name3,
11476 func_valid_p ? "" : ", not valid");
11479 if (!func_valid_p)
11481 rs6000_invalid_builtin (fcode);
11483 /* Given it is invalid, just generate a normal call. */
11484 return expand_call (exp, target, ignore);
11487 switch (fcode)
11489 case RS6000_BUILTIN_RECIP:
11490 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11492 case RS6000_BUILTIN_RECIPF:
11493 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11495 case RS6000_BUILTIN_RSQRTF:
11496 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11498 case RS6000_BUILTIN_RSQRT:
11499 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11501 case POWER7_BUILTIN_BPERMD:
11502 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11503 ? CODE_FOR_bpermd_di
11504 : CODE_FOR_bpermd_si), exp, target);
11506 case RS6000_BUILTIN_GET_TB:
11507 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
11508 target);
11510 case RS6000_BUILTIN_MFTB:
11511 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
11512 ? CODE_FOR_rs6000_mftb_di
11513 : CODE_FOR_rs6000_mftb_si),
11514 target);
11516 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11517 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11519 int icode = (int) CODE_FOR_altivec_lvsr;
11520 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11521 enum machine_mode mode = insn_data[icode].operand[1].mode;
11522 tree arg;
11523 rtx op, addr, pat;
11525 gcc_assert (TARGET_ALTIVEC);
11527 arg = CALL_EXPR_ARG (exp, 0);
11528 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11529 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11530 addr = memory_address (mode, op);
11531 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11532 op = addr;
11533 else
11535 /* For the load case we need to negate the address. */
11536 op = gen_reg_rtx (GET_MODE (addr));
11537 emit_insn (gen_rtx_SET (VOIDmode, op,
11538 gen_rtx_NEG (GET_MODE (addr), addr)));
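/* (Note, for illustration: if SH is the low four bits of the address,
   lvsr of the negated address selects bytes SH..SH+15, the same permute
   control vector that lvsl of the address itself would give whenever
   SH != 0; this is the form the vectorizer's realignment sequence
   consumes.)  */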
11540 op = gen_rtx_MEM (mode, op);
11542 if (target == 0
11543 || GET_MODE (target) != tmode
11544 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11545 target = gen_reg_rtx (tmode);
11547 /*pat = gen_altivec_lvsr (target, op);*/
11548 pat = GEN_FCN (icode) (target, op);
11549 if (!pat)
11550 return 0;
11551 emit_insn (pat);
11553 return target;
11556 case ALTIVEC_BUILTIN_VCFUX:
11557 case ALTIVEC_BUILTIN_VCFSX:
11558 case ALTIVEC_BUILTIN_VCTUXS:
11559 case ALTIVEC_BUILTIN_VCTSXS:
11560 /* FIXME: There's got to be a nicer way to handle this case than
11561 constructing a new CALL_EXPR. */
11562 if (call_expr_nargs (exp) == 1)
11564 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11565 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11567 break;
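/* (Illustrative: this makes a one-argument call such as
   __builtin_altivec_vcfsx (v) behave as __builtin_altivec_vcfsx (v, 0),
   i.e. with a scale factor of zero.)  */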
11569 default:
11570 break;
11573 if (TARGET_ALTIVEC)
11575 ret = altivec_expand_builtin (exp, target, &success);
11577 if (success)
11578 return ret;
11580 if (TARGET_SPE)
11582 ret = spe_expand_builtin (exp, target, &success);
11584 if (success)
11585 return ret;
11587 if (TARGET_PAIRED_FLOAT)
11589 ret = paired_expand_builtin (exp, target, &success);
11591 if (success)
11592 return ret;
11595 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11597 /* Handle simple unary operations. */
11598 d = bdesc_1arg;
11599 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11600 if (d->code == fcode)
11601 return rs6000_expand_unop_builtin (d->icode, exp, target);
11603 /* Handle simple binary operations. */
11604 d = bdesc_2arg;
11605 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11606 if (d->code == fcode)
11607 return rs6000_expand_binop_builtin (d->icode, exp, target);
11609 /* Handle simple ternary operations. */
11610 d = bdesc_3arg;
11611 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11612 if (d->code == fcode)
11613 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11615 gcc_unreachable ();
11618 static void
11619 rs6000_init_builtins (void)
11621 tree tdecl;
11622 tree ftype;
11623 enum machine_mode mode;
11625 if (TARGET_DEBUG_BUILTIN)
11626 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11627 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11628 (TARGET_SPE) ? ", spe" : "",
11629 (TARGET_ALTIVEC) ? ", altivec" : "",
11630 (TARGET_VSX) ? ", vsx" : "");
11632 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11633 V2SF_type_node = build_vector_type (float_type_node, 2);
11634 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11635 V2DF_type_node = build_vector_type (double_type_node, 2);
11636 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11637 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11638 V4SF_type_node = build_vector_type (float_type_node, 4);
11639 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11640 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11642 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11643 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11644 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11645 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11647 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11648 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11649 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11650 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11652 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11653 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11654 'vector unsigned short'. */
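/* (Illustrative: because these are distinct type copies, C++ overload
   resolution can distinguish "vector bool char" from "vector unsigned
   char", and an assignment mixing the two is diagnosed rather than
   silently accepted.)  */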
11656 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11657 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11658 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11659 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11660 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11662 long_integer_type_internal_node = long_integer_type_node;
11663 long_unsigned_type_internal_node = long_unsigned_type_node;
11664 long_long_integer_type_internal_node = long_long_integer_type_node;
11665 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11666 intQI_type_internal_node = intQI_type_node;
11667 uintQI_type_internal_node = unsigned_intQI_type_node;
11668 intHI_type_internal_node = intHI_type_node;
11669 uintHI_type_internal_node = unsigned_intHI_type_node;
11670 intSI_type_internal_node = intSI_type_node;
11671 uintSI_type_internal_node = unsigned_intSI_type_node;
11672 intDI_type_internal_node = intDI_type_node;
11673 uintDI_type_internal_node = unsigned_intDI_type_node;
11674 float_type_internal_node = float_type_node;
11675 double_type_internal_node = double_type_node;
11676 void_type_internal_node = void_type_node;
11678 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11679 tree type node. */
11680 builtin_mode_to_type[QImode][0] = integer_type_node;
11681 builtin_mode_to_type[HImode][0] = integer_type_node;
11682 builtin_mode_to_type[SImode][0] = intSI_type_node;
11683 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11684 builtin_mode_to_type[DImode][0] = intDI_type_node;
11685 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11686 builtin_mode_to_type[SFmode][0] = float_type_node;
11687 builtin_mode_to_type[DFmode][0] = double_type_node;
11688 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11689 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11690 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11691 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11692 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11693 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11694 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11695 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11696 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11697 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11698 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11699 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11700 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11702 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11703 TYPE_NAME (bool_char_type_node) = tdecl;
11705 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11706 TYPE_NAME (bool_short_type_node) = tdecl;
11708 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11709 TYPE_NAME (bool_int_type_node) = tdecl;
11711 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11712 TYPE_NAME (pixel_type_node) = tdecl;
11714 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11715 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11716 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11717 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11718 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11720 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11721 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11723 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11724 TYPE_NAME (V16QI_type_node) = tdecl;
11726 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11727 TYPE_NAME (bool_V16QI_type_node) = tdecl;
11729 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
11730 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11732 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
11733 TYPE_NAME (V8HI_type_node) = tdecl;
11735 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
11736 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11738 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
11739 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11741 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
11742 TYPE_NAME (V4SI_type_node) = tdecl;
11744 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
11745 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11747 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
11748 TYPE_NAME (V4SF_type_node) = tdecl;
11750 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
11751 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11753 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
11754 TYPE_NAME (V2DF_type_node) = tdecl;
11756 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
11757 TYPE_NAME (V2DI_type_node) = tdecl;
11759 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
11760 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11762 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
11763 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11765 /* Paired and SPE builtins are only available if you build a compiler with
11766 the appropriate options, so only create those builtins when the
11767 corresponding option is enabled. Create AltiVec and VSX builtins on machines
11768 with at least the general purpose extensions (970 and newer) to allow the
11769 use of the target attribute. */
11770 if (TARGET_PAIRED_FLOAT)
11771 paired_init_builtins ();
11772 if (TARGET_SPE)
11773 spe_init_builtins ();
11774 if (TARGET_EXTRA_BUILTINS)
11775 altivec_init_builtins ();
11776 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
11777 rs6000_common_init_builtins ();
11779 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11780 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
11781 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
11783 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11784 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
11785 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
11787 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
11788 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
11789 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
11791 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11792 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
11793 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
11795 mode = (TARGET_64BIT) ? DImode : SImode;
11796 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11797 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
11798 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
11800 ftype = build_function_type_list (unsigned_intDI_type_node,
11801 NULL_TREE);
11802 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
11804 if (TARGET_64BIT)
11805 ftype = build_function_type_list (unsigned_intDI_type_node,
11806 NULL_TREE);
11807 else
11808 ftype = build_function_type_list (unsigned_intSI_type_node,
11809 NULL_TREE);
11810 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
11812 #if TARGET_XCOFF
11813 /* AIX libm provides clog as __clog. */
11814 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
11815 set_user_assembler_name (tdecl, "__clog");
11816 #endif
11818 #ifdef SUBTARGET_INIT_BUILTINS
11819 SUBTARGET_INIT_BUILTINS;
11820 #endif
11823 /* Returns the rs6000 builtin decl for CODE. */
11825 static tree
11826 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11828 HOST_WIDE_INT fnmask;
11830 if (code >= RS6000_BUILTIN_COUNT)
11831 return error_mark_node;
11833 fnmask = rs6000_builtin_info[code].mask;
11834 if ((fnmask & rs6000_builtin_mask) != fnmask)
11836 rs6000_invalid_builtin ((enum rs6000_builtins)code);
11837 return error_mark_node;
11840 return rs6000_builtin_decls[code];
11843 static void
11844 spe_init_builtins (void)
11846 tree puint_type_node = build_pointer_type (unsigned_type_node);
11847 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11848 const struct builtin_description *d;
11849 size_t i;
11851 tree v2si_ftype_4_v2si
11852 = build_function_type_list (opaque_V2SI_type_node,
11853 opaque_V2SI_type_node,
11854 opaque_V2SI_type_node,
11855 opaque_V2SI_type_node,
11856 opaque_V2SI_type_node,
11857 NULL_TREE);
11859 tree v2sf_ftype_4_v2sf
11860 = build_function_type_list (opaque_V2SF_type_node,
11861 opaque_V2SF_type_node,
11862 opaque_V2SF_type_node,
11863 opaque_V2SF_type_node,
11864 opaque_V2SF_type_node,
11865 NULL_TREE);
11867 tree int_ftype_int_v2si_v2si
11868 = build_function_type_list (integer_type_node,
11869 integer_type_node,
11870 opaque_V2SI_type_node,
11871 opaque_V2SI_type_node,
11872 NULL_TREE);
11874 tree int_ftype_int_v2sf_v2sf
11875 = build_function_type_list (integer_type_node,
11876 integer_type_node,
11877 opaque_V2SF_type_node,
11878 opaque_V2SF_type_node,
11879 NULL_TREE);
11881 tree void_ftype_v2si_puint_int
11882 = build_function_type_list (void_type_node,
11883 opaque_V2SI_type_node,
11884 puint_type_node,
11885 integer_type_node,
11886 NULL_TREE);
11888 tree void_ftype_v2si_puint_char
11889 = build_function_type_list (void_type_node,
11890 opaque_V2SI_type_node,
11891 puint_type_node,
11892 char_type_node,
11893 NULL_TREE);
11895 tree void_ftype_v2si_pv2si_int
11896 = build_function_type_list (void_type_node,
11897 opaque_V2SI_type_node,
11898 opaque_p_V2SI_type_node,
11899 integer_type_node,
11900 NULL_TREE);
11902 tree void_ftype_v2si_pv2si_char
11903 = build_function_type_list (void_type_node,
11904 opaque_V2SI_type_node,
11905 opaque_p_V2SI_type_node,
11906 char_type_node,
11907 NULL_TREE);
11909 tree void_ftype_int
11910 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11912 tree int_ftype_void
11913 = build_function_type_list (integer_type_node, NULL_TREE);
11915 tree v2si_ftype_pv2si_int
11916 = build_function_type_list (opaque_V2SI_type_node,
11917 opaque_p_V2SI_type_node,
11918 integer_type_node,
11919 NULL_TREE);
11921 tree v2si_ftype_puint_int
11922 = build_function_type_list (opaque_V2SI_type_node,
11923 puint_type_node,
11924 integer_type_node,
11925 NULL_TREE);
11927 tree v2si_ftype_pushort_int
11928 = build_function_type_list (opaque_V2SI_type_node,
11929 pushort_type_node,
11930 integer_type_node,
11931 NULL_TREE);
11933 tree v2si_ftype_signed_char
11934 = build_function_type_list (opaque_V2SI_type_node,
11935 signed_char_type_node,
11936 NULL_TREE);
11938 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
11940 /* Initialize irregular SPE builtins. */
11942 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11943 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11944 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11945 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11946 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11947 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11948 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11949 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11950 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11951 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11952 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11953 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11954 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11955 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11956 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11957 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11958 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11959 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11961 /* Loads. */
11962 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11963 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11964 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11965 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11966 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11967 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11968 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11969 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11970 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11971 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11972 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11973 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11974 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11975 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11976 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11977 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11978 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11979 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11980 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11981 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11982 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11983 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11985 /* Predicates. */
11986 d = bdesc_spe_predicates;
11987 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11989 tree type;
11991 switch (insn_data[d->icode].operand[1].mode)
11993 case V2SImode:
11994 type = int_ftype_int_v2si_v2si;
11995 break;
11996 case V2SFmode:
11997 type = int_ftype_int_v2sf_v2sf;
11998 break;
11999 default:
12000 gcc_unreachable ();
12003 def_builtin (d->name, type, d->code);
12006 /* Evsel predicates. */
12007 d = bdesc_spe_evsel;
12008 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
12010 tree type;
12012 switch (insn_data[d->icode].operand[1].mode)
12014 case V2SImode:
12015 type = v2si_ftype_4_v2si;
12016 break;
12017 case V2SFmode:
12018 type = v2sf_ftype_4_v2sf;
12019 break;
12020 default:
12021 gcc_unreachable ();
12024 def_builtin (d->name, type, d->code);
12028 static void
12029 paired_init_builtins (void)
12031 const struct builtin_description *d;
12032 size_t i;
12034 tree int_ftype_int_v2sf_v2sf
12035 = build_function_type_list (integer_type_node,
12036 integer_type_node,
12037 V2SF_type_node,
12038 V2SF_type_node,
12039 NULL_TREE);
12040 tree pcfloat_type_node =
12041 build_pointer_type (build_qualified_type
12042 (float_type_node, TYPE_QUAL_CONST));
12044 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
12045 long_integer_type_node,
12046 pcfloat_type_node,
12047 NULL_TREE);
12048 tree void_ftype_v2sf_long_pcfloat =
12049 build_function_type_list (void_type_node,
12050 V2SF_type_node,
12051 long_integer_type_node,
12052 pcfloat_type_node,
12053 NULL_TREE);
12056 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
12057 PAIRED_BUILTIN_LX);
12060 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
12061 PAIRED_BUILTIN_STX);
12063 /* Predicates. */
12064 d = bdesc_paired_preds;
12065 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
12067 tree type;
12069 if (TARGET_DEBUG_BUILTIN)
12070 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
12071 (int)i, get_insn_name (d->icode), (int)d->icode,
12072 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
12074 switch (insn_data[d->icode].operand[1].mode)
12076 case V2SFmode:
12077 type = int_ftype_int_v2sf_v2sf;
12078 break;
12079 default:
12080 gcc_unreachable ();
12083 def_builtin (d->name, type, d->code);
12087 static void
12088 altivec_init_builtins (void)
12090 const struct builtin_description *d;
12091 size_t i;
12092 tree ftype;
12093 tree decl;
12095 tree pvoid_type_node = build_pointer_type (void_type_node);
12097 tree pcvoid_type_node
12098 = build_pointer_type (build_qualified_type (void_type_node,
12099 TYPE_QUAL_CONST));
12101 tree int_ftype_opaque
12102 = build_function_type_list (integer_type_node,
12103 opaque_V4SI_type_node, NULL_TREE);
12104 tree opaque_ftype_opaque
12105 = build_function_type_list (integer_type_node, NULL_TREE);
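/* (Note: the integer return type above is only a placeholder;
   __builtin_vec_splats and __builtin_vec_promote are overloaded and are
   resolved to a concrete builtin before this type is used for code
   generation.)  */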
12106 tree opaque_ftype_opaque_int
12107 = build_function_type_list (opaque_V4SI_type_node,
12108 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
12109 tree opaque_ftype_opaque_opaque_int
12110 = build_function_type_list (opaque_V4SI_type_node,
12111 opaque_V4SI_type_node, opaque_V4SI_type_node,
12112 integer_type_node, NULL_TREE);
12113 tree int_ftype_int_opaque_opaque
12114 = build_function_type_list (integer_type_node,
12115 integer_type_node, opaque_V4SI_type_node,
12116 opaque_V4SI_type_node, NULL_TREE);
12117 tree int_ftype_int_v4si_v4si
12118 = build_function_type_list (integer_type_node,
12119 integer_type_node, V4SI_type_node,
12120 V4SI_type_node, NULL_TREE);
12121 tree void_ftype_v4si
12122 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
12123 tree v8hi_ftype_void
12124 = build_function_type_list (V8HI_type_node, NULL_TREE);
12125 tree void_ftype_void
12126 = build_function_type_list (void_type_node, NULL_TREE);
12127 tree void_ftype_int
12128 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
12130 tree opaque_ftype_long_pcvoid
12131 = build_function_type_list (opaque_V4SI_type_node,
12132 long_integer_type_node, pcvoid_type_node,
12133 NULL_TREE);
12134 tree v16qi_ftype_long_pcvoid
12135 = build_function_type_list (V16QI_type_node,
12136 long_integer_type_node, pcvoid_type_node,
12137 NULL_TREE);
12138 tree v8hi_ftype_long_pcvoid
12139 = build_function_type_list (V8HI_type_node,
12140 long_integer_type_node, pcvoid_type_node,
12141 NULL_TREE);
12142 tree v4si_ftype_long_pcvoid
12143 = build_function_type_list (V4SI_type_node,
12144 long_integer_type_node, pcvoid_type_node,
12145 NULL_TREE);
12146 tree v4sf_ftype_long_pcvoid
12147 = build_function_type_list (V4SF_type_node,
12148 long_integer_type_node, pcvoid_type_node,
12149 NULL_TREE);
12150 tree v2df_ftype_long_pcvoid
12151 = build_function_type_list (V2DF_type_node,
12152 long_integer_type_node, pcvoid_type_node,
12153 NULL_TREE);
12154 tree v2di_ftype_long_pcvoid
12155 = build_function_type_list (V2DI_type_node,
12156 long_integer_type_node, pcvoid_type_node,
12157 NULL_TREE);
12159 tree void_ftype_opaque_long_pvoid
12160 = build_function_type_list (void_type_node,
12161 opaque_V4SI_type_node, long_integer_type_node,
12162 pvoid_type_node, NULL_TREE);
12163 tree void_ftype_v4si_long_pvoid
12164 = build_function_type_list (void_type_node,
12165 V4SI_type_node, long_integer_type_node,
12166 pvoid_type_node, NULL_TREE);
12167 tree void_ftype_v16qi_long_pvoid
12168 = build_function_type_list (void_type_node,
12169 V16QI_type_node, long_integer_type_node,
12170 pvoid_type_node, NULL_TREE);
12171 tree void_ftype_v8hi_long_pvoid
12172 = build_function_type_list (void_type_node,
12173 V8HI_type_node, long_integer_type_node,
12174 pvoid_type_node, NULL_TREE);
12175 tree void_ftype_v4sf_long_pvoid
12176 = build_function_type_list (void_type_node,
12177 V4SF_type_node, long_integer_type_node,
12178 pvoid_type_node, NULL_TREE);
12179 tree void_ftype_v2df_long_pvoid
12180 = build_function_type_list (void_type_node,
12181 V2DF_type_node, long_integer_type_node,
12182 pvoid_type_node, NULL_TREE);
12183 tree void_ftype_v2di_long_pvoid
12184 = build_function_type_list (void_type_node,
12185 V2DI_type_node, long_integer_type_node,
12186 pvoid_type_node, NULL_TREE);
12187 tree int_ftype_int_v8hi_v8hi
12188 = build_function_type_list (integer_type_node,
12189 integer_type_node, V8HI_type_node,
12190 V8HI_type_node, NULL_TREE);
12191 tree int_ftype_int_v16qi_v16qi
12192 = build_function_type_list (integer_type_node,
12193 integer_type_node, V16QI_type_node,
12194 V16QI_type_node, NULL_TREE);
12195 tree int_ftype_int_v4sf_v4sf
12196 = build_function_type_list (integer_type_node,
12197 integer_type_node, V4SF_type_node,
12198 V4SF_type_node, NULL_TREE);
12199 tree int_ftype_int_v2df_v2df
12200 = build_function_type_list (integer_type_node,
12201 integer_type_node, V2DF_type_node,
12202 V2DF_type_node, NULL_TREE);
12203 tree v4si_ftype_v4si
12204 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12205 tree v8hi_ftype_v8hi
12206 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12207 tree v16qi_ftype_v16qi
12208 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12209 tree v4sf_ftype_v4sf
12210 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12211 tree v2df_ftype_v2df
12212 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12213 tree void_ftype_pcvoid_int_int
12214 = build_function_type_list (void_type_node,
12215 pcvoid_type_node, integer_type_node,
12216 integer_type_node, NULL_TREE);
12218 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12219 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12220 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12221 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12222 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12223 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12224 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12225 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12226 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12227 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12228 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12229 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12230 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12231 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12232 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12233 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12234 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12235 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12236 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12237 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12238 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12239 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12240 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12241 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12242 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12243 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12244 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12245 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12246 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12247 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12249 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12250 VSX_BUILTIN_LXVD2X_V2DF);
12251 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12252 VSX_BUILTIN_LXVD2X_V2DI);
12253 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12254 VSX_BUILTIN_LXVW4X_V4SF);
12255 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12256 VSX_BUILTIN_LXVW4X_V4SI);
12257 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12258 VSX_BUILTIN_LXVW4X_V8HI);
12259 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12260 VSX_BUILTIN_LXVW4X_V16QI);
12261 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12262 VSX_BUILTIN_STXVD2X_V2DF);
12263 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12264 VSX_BUILTIN_STXVD2X_V2DI);
12265 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12266 VSX_BUILTIN_STXVW4X_V4SF);
12267 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12268 VSX_BUILTIN_STXVW4X_V4SI);
12269 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12270 VSX_BUILTIN_STXVW4X_V8HI);
12271 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12272 VSX_BUILTIN_STXVW4X_V16QI);
12273 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12274 VSX_BUILTIN_VEC_LD);
12275 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12276 VSX_BUILTIN_VEC_ST);
12278 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12279 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12280 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12282 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12283 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12284 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12285 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12286 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12287 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12288 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12289 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12290 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12291 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12292 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12293 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
12295 /* Cell builtins. */
12296 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12297 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12298 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12299 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12301 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12302 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12303 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12304 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12306 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12307 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12308 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12309 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12311 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12312 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12313 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12314 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12316 /* Add the DST variants. */
12317 d = bdesc_dst;
12318 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12319 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12321 /* Initialize the predicates. */
12322 d = bdesc_altivec_preds;
12323 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12325 enum machine_mode mode1;
12326 tree type;
12328 if (rs6000_overloaded_builtin_p (d->code))
12329 mode1 = VOIDmode;
12330 else
12331 mode1 = insn_data[d->icode].operand[1].mode;
12333 switch (mode1)
12335 case VOIDmode:
12336 type = int_ftype_int_opaque_opaque;
12337 break;
12338 case V4SImode:
12339 type = int_ftype_int_v4si_v4si;
12340 break;
12341 case V8HImode:
12342 type = int_ftype_int_v8hi_v8hi;
12343 break;
12344 case V16QImode:
12345 type = int_ftype_int_v16qi_v16qi;
12346 break;
12347 case V4SFmode:
12348 type = int_ftype_int_v4sf_v4sf;
12349 break;
12350 case V2DFmode:
12351 type = int_ftype_int_v2df_v2df;
12352 break;
12353 default:
12354 gcc_unreachable ();
12357 def_builtin (d->name, type, d->code);
12360 /* Initialize the abs* operators. */
12361 d = bdesc_abs;
12362 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12364 enum machine_mode mode0;
12365 tree type;
12367 mode0 = insn_data[d->icode].operand[0].mode;
12369 switch (mode0)
12371 case V4SImode:
12372 type = v4si_ftype_v4si;
12373 break;
12374 case V8HImode:
12375 type = v8hi_ftype_v8hi;
12376 break;
12377 case V16QImode:
12378 type = v16qi_ftype_v16qi;
12379 break;
12380 case V4SFmode:
12381 type = v4sf_ftype_v4sf;
12382 break;
12383 case V2DFmode:
12384 type = v2df_ftype_v2df;
12385 break;
12386 default:
12387 gcc_unreachable ();
12390 def_builtin (d->name, type, d->code);
12393 /* Initialize target builtin that implements
12394 targetm.vectorize.builtin_mask_for_load. */
12396 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12397 v16qi_ftype_long_pcvoid,
12398 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12399 BUILT_IN_MD, NULL, NULL_TREE);
12400 TREE_READONLY (decl) = 1;
12401 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12402 altivec_builtin_mask_for_load = decl;
12404 /* Access to the vec_init patterns. */
12405 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12406 integer_type_node, integer_type_node,
12407 integer_type_node, NULL_TREE);
12408 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12410 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12411 short_integer_type_node,
12412 short_integer_type_node,
12413 short_integer_type_node,
12414 short_integer_type_node,
12415 short_integer_type_node,
12416 short_integer_type_node,
12417 short_integer_type_node, NULL_TREE);
12418 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12420 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12421 char_type_node, char_type_node,
12422 char_type_node, char_type_node,
12423 char_type_node, char_type_node,
12424 char_type_node, char_type_node,
12425 char_type_node, char_type_node,
12426 char_type_node, char_type_node,
12427 char_type_node, char_type_node,
12428 char_type_node, NULL_TREE);
12429 def_builtin ("__builtin_vec_init_v16qi", ftype,
12430 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12432 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12433 float_type_node, float_type_node,
12434 float_type_node, NULL_TREE);
12435 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12437 /* VSX builtins. */
12438 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12439 double_type_node, NULL_TREE);
12440 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12442 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12443 intDI_type_node, NULL_TREE);
12444 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12446 /* Access to the vec_set patterns. */
12447 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12448 intSI_type_node,
12449 integer_type_node, NULL_TREE);
12450 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12452 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12453 intHI_type_node,
12454 integer_type_node, NULL_TREE);
12455 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12457 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12458 intQI_type_node,
12459 integer_type_node, NULL_TREE);
12460 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12462 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12463 float_type_node,
12464 integer_type_node, NULL_TREE);
12465 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12467 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12468 double_type_node,
12469 integer_type_node, NULL_TREE);
12470 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12472 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12473 intDI_type_node,
12474 integer_type_node, NULL_TREE);
12475 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12477 /* Access to the vec_extract patterns. */
12478 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12479 integer_type_node, NULL_TREE);
12480 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12482 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12483 integer_type_node, NULL_TREE);
12484 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12486 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12487 integer_type_node, NULL_TREE);
12488 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12490 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12491 integer_type_node, NULL_TREE);
12492 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12494 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12495 integer_type_node, NULL_TREE);
12496 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12498 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12499 integer_type_node, NULL_TREE);
12500 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12503 /* Hash function for builtin functions with up to 3 arguments and a return
12504 type. */
12505 static unsigned
12506 builtin_hash_function (const void *hash_entry)
12508 unsigned ret = 0;
12509 int i;
12510 const struct builtin_hash_struct *bh =
12511 (const struct builtin_hash_struct *) hash_entry;
12513 for (i = 0; i < 4; i++)
12515 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12516 ret = (ret * 2) + bh->uns_p[i];
12519 return ret;
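/* Illustrative note (not in the original source): the loop above is a
   mixed-radix fold of the signature, equivalent to

     ret = 0;
     ret = (ret * MAX_MACHINE_MODE + mode[0]) * 2 + uns_p[0];
     ret = (ret * MAX_MACHINE_MODE + mode[1]) * 2 + uns_p[1];
     ret = (ret * MAX_MACHINE_MODE + mode[2]) * 2 + uns_p[2];
     ret = (ret * MAX_MACHINE_MODE + mode[3]) * 2 + uns_p[3];

   so two signatures that differ in any mode or signedness flag hash to
   different values before reduction modulo the table size.  */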
12522 /* Compare builtin hash entries H1 and H2 for equivalence. */
12523 static int
12524 builtin_hash_eq (const void *h1, const void *h2)
12526 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12527 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12529 return ((p1->mode[0] == p2->mode[0])
12530 && (p1->mode[1] == p2->mode[1])
12531 && (p1->mode[2] == p2->mode[2])
12532 && (p1->mode[3] == p2->mode[3])
12533 && (p1->uns_p[0] == p2->uns_p[0])
12534 && (p1->uns_p[1] == p2->uns_p[1])
12535 && (p1->uns_p[2] == p2->uns_p[2])
12536 && (p1->uns_p[3] == p2->uns_p[3]));
12539 /* Map types for builtin functions with an explicit return type and up to 3
12540    arguments.  Functions with fewer than 3 arguments use VOIDmode as the
12541    mode of the unused argument slots.  */
12542 static tree
12543 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12544 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12545 enum rs6000_builtins builtin, const char *name)
12547 struct builtin_hash_struct h;
12548 struct builtin_hash_struct *h2;
12549 void **found;
12550 int num_args = 3;
12551 int i;
12552 tree ret_type = NULL_TREE;
12553 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12555 /* Create builtin_hash_table. */
12556 if (builtin_hash_table == NULL)
12557 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12558 builtin_hash_eq, NULL);
12560 h.type = NULL_TREE;
12561 h.mode[0] = mode_ret;
12562 h.mode[1] = mode_arg0;
12563 h.mode[2] = mode_arg1;
12564 h.mode[3] = mode_arg2;
12565 h.uns_p[0] = 0;
12566 h.uns_p[1] = 0;
12567 h.uns_p[2] = 0;
12568 h.uns_p[3] = 0;
12570   /* If the builtin produces unsigned results or takes unsigned arguments,
12571      and it is returned as a decl to the vectorizer (such as widening
12572      multiplies or permute), make sure the arguments and return value are
12573      typed correctly.  */
12574 switch (builtin)
12576 /* unsigned 2 argument functions. */
12577 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12578 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12579 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12580 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12581 h.uns_p[0] = 1;
12582 h.uns_p[1] = 1;
12583 h.uns_p[2] = 1;
12584 break;
12586 /* unsigned 3 argument functions. */
12587 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12588 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12589 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12590 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12591 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12592 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12593 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12594 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12595 case VSX_BUILTIN_VPERM_16QI_UNS:
12596 case VSX_BUILTIN_VPERM_8HI_UNS:
12597 case VSX_BUILTIN_VPERM_4SI_UNS:
12598 case VSX_BUILTIN_VPERM_2DI_UNS:
12599 case VSX_BUILTIN_XXSEL_16QI_UNS:
12600 case VSX_BUILTIN_XXSEL_8HI_UNS:
12601 case VSX_BUILTIN_XXSEL_4SI_UNS:
12602 case VSX_BUILTIN_XXSEL_2DI_UNS:
12603 h.uns_p[0] = 1;
12604 h.uns_p[1] = 1;
12605 h.uns_p[2] = 1;
12606 h.uns_p[3] = 1;
12607 break;
12609 /* signed permute functions with unsigned char mask. */
12610 case ALTIVEC_BUILTIN_VPERM_16QI:
12611 case ALTIVEC_BUILTIN_VPERM_8HI:
12612 case ALTIVEC_BUILTIN_VPERM_4SI:
12613 case ALTIVEC_BUILTIN_VPERM_4SF:
12614 case ALTIVEC_BUILTIN_VPERM_2DI:
12615 case ALTIVEC_BUILTIN_VPERM_2DF:
12616 case VSX_BUILTIN_VPERM_16QI:
12617 case VSX_BUILTIN_VPERM_8HI:
12618 case VSX_BUILTIN_VPERM_4SI:
12619 case VSX_BUILTIN_VPERM_4SF:
12620 case VSX_BUILTIN_VPERM_2DI:
12621 case VSX_BUILTIN_VPERM_2DF:
12622 h.uns_p[3] = 1;
12623 break;
12625 /* unsigned args, signed return. */
12626 case VSX_BUILTIN_XVCVUXDDP_UNS:
12627 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12628 h.uns_p[1] = 1;
12629 break;
12631 /* signed args, unsigned return. */
12632 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12633 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12634 h.uns_p[0] = 1;
12635 break;
12637 default:
12638 break;
12641 /* Figure out how many args are present. */
12642 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12643 num_args--;
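  /* For example (illustrative): a unary builtin is passed with mode_arg1
     and mode_arg2 both VOIDmode, so h.mode[3] and h.mode[2] are VOIDmode
     and the loop above trims num_args from 3 down to 1.  */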
12645 if (num_args == 0)
12646 fatal_error ("internal error: builtin function %s had no type", name);
12648 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12649 if (!ret_type && h.uns_p[0])
12650 ret_type = builtin_mode_to_type[h.mode[0]][0];
12652 if (!ret_type)
12653 fatal_error ("internal error: builtin function %s had an unexpected "
12654 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12656 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12657 arg_type[i] = NULL_TREE;
12659 for (i = 0; i < num_args; i++)
12661 int m = (int) h.mode[i+1];
12662 int uns_p = h.uns_p[i+1];
12664 arg_type[i] = builtin_mode_to_type[m][uns_p];
12665 if (!arg_type[i] && uns_p)
12666 arg_type[i] = builtin_mode_to_type[m][0];
12668 if (!arg_type[i])
12669 fatal_error ("internal error: builtin function %s, argument %d "
12670 "had unexpected argument type %s", name, i,
12671 GET_MODE_NAME (m));
12674 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12675 if (*found == NULL)
12677 h2 = ggc_alloc_builtin_hash_struct ();
12678 *h2 = h;
12679 *found = (void *)h2;
12681 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12682 arg_type[2], NULL_TREE);
12685 return ((struct builtin_hash_struct *)(*found))->type;
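/* Minimal usage sketch (illustrative; SOME_BUILTIN_CODE is a placeholder,
   not a real enumerator).  Two queries with the same mode/signedness
   signature land in the same hash slot, so the FUNCTION_TYPE node is
   built once and shared:

     tree t1 = builtin_function_type (V4SImode, V4SImode, V4SImode,
                                      VOIDmode, SOME_BUILTIN_CODE, "ex");
     tree t2 = builtin_function_type (V4SImode, V4SImode, V4SImode,
                                      VOIDmode, SOME_BUILTIN_CODE, "ex");
     gcc_assert (t1 == t2);  */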
12688 static void
12689 rs6000_common_init_builtins (void)
12691 const struct builtin_description *d;
12692 size_t i;
12694 tree opaque_ftype_opaque = NULL_TREE;
12695 tree opaque_ftype_opaque_opaque = NULL_TREE;
12696 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12697 tree v2si_ftype_qi = NULL_TREE;
12698 tree v2si_ftype_v2si_qi = NULL_TREE;
12699 tree v2si_ftype_int_qi = NULL_TREE;
12700 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
12702 if (!TARGET_PAIRED_FLOAT)
12704 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12705 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12708   /* Paired and SPE builtins are only available if the compiler was built
12709      with the appropriate options, so only create those builtins in that
12710      case.  Create Altivec and VSX builtins on machines with at least the
12711      general purpose extensions (970 and newer) to allow the use of the
12712      target attribute.  */
12714 if (TARGET_EXTRA_BUILTINS)
12715 builtin_mask |= RS6000_BTM_COMMON;
12717 /* Add the ternary operators. */
12718 d = bdesc_3arg;
12719 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12721 tree type;
12722 HOST_WIDE_INT mask = d->mask;
12724 if ((mask & builtin_mask) != mask)
12726 if (TARGET_DEBUG_BUILTIN)
12727 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
12728 continue;
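	  /* The test above is the usual "requires a feature we lack" check:
	     e.g. (illustrative) if d->mask asks for both RS6000_BTM_ALTIVEC
	     and RS6000_BTM_VSX but builtin_mask only contains the former,
	     the AND drops the VSX bit, (mask & builtin_mask) != mask holds,
	     and the builtin is skipped.  */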
12731 if (rs6000_overloaded_builtin_p (d->code))
12733 if (! (type = opaque_ftype_opaque_opaque_opaque))
12734 type = opaque_ftype_opaque_opaque_opaque
12735 = build_function_type_list (opaque_V4SI_type_node,
12736 opaque_V4SI_type_node,
12737 opaque_V4SI_type_node,
12738 opaque_V4SI_type_node,
12739 NULL_TREE);
12741 else
12743 enum insn_code icode = d->icode;
12744 if (d->name == 0 || icode == CODE_FOR_nothing)
12745 continue;
12747 type = builtin_function_type (insn_data[icode].operand[0].mode,
12748 insn_data[icode].operand[1].mode,
12749 insn_data[icode].operand[2].mode,
12750 insn_data[icode].operand[3].mode,
12751 d->code, d->name);
12754 def_builtin (d->name, type, d->code);
12757 /* Add the binary operators. */
12758 d = bdesc_2arg;
12759 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12761 enum machine_mode mode0, mode1, mode2;
12762 tree type;
12763 HOST_WIDE_INT mask = d->mask;
12765 if ((mask & builtin_mask) != mask)
12767 if (TARGET_DEBUG_BUILTIN)
12768 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
12769 continue;
12772 if (rs6000_overloaded_builtin_p (d->code))
12774 if (! (type = opaque_ftype_opaque_opaque))
12775 type = opaque_ftype_opaque_opaque
12776 = build_function_type_list (opaque_V4SI_type_node,
12777 opaque_V4SI_type_node,
12778 opaque_V4SI_type_node,
12779 NULL_TREE);
12781 else
12783 enum insn_code icode = d->icode;
12784 if (d->name == 0 || icode == CODE_FOR_nothing)
12785 continue;
12787 mode0 = insn_data[icode].operand[0].mode;
12788 mode1 = insn_data[icode].operand[1].mode;
12789 mode2 = insn_data[icode].operand[2].mode;
12791 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12793 if (! (type = v2si_ftype_v2si_qi))
12794 type = v2si_ftype_v2si_qi
12795 = build_function_type_list (opaque_V2SI_type_node,
12796 opaque_V2SI_type_node,
12797 char_type_node,
12798 NULL_TREE);
12801 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12802 && mode2 == QImode)
12804 if (! (type = v2si_ftype_int_qi))
12805 type = v2si_ftype_int_qi
12806 = build_function_type_list (opaque_V2SI_type_node,
12807 integer_type_node,
12808 char_type_node,
12809 NULL_TREE);
12812 else
12813 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12814 d->code, d->name);
12817 def_builtin (d->name, type, d->code);
12820 /* Add the simple unary operators. */
12821 d = bdesc_1arg;
12822 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12824 enum machine_mode mode0, mode1;
12825 tree type;
12826 HOST_WIDE_INT mask = d->mask;
12828 if ((mask & builtin_mask) != mask)
12830 if (TARGET_DEBUG_BUILTIN)
12831 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
12832 continue;
12835 if (rs6000_overloaded_builtin_p (d->code))
12837 if (! (type = opaque_ftype_opaque))
12838 type = opaque_ftype_opaque
12839 = build_function_type_list (opaque_V4SI_type_node,
12840 opaque_V4SI_type_node,
12841 NULL_TREE);
12843 else
12845 enum insn_code icode = d->icode;
12846 if (d->name == 0 || icode == CODE_FOR_nothing)
12847 continue;
12849 mode0 = insn_data[icode].operand[0].mode;
12850 mode1 = insn_data[icode].operand[1].mode;
12852 if (mode0 == V2SImode && mode1 == QImode)
12854 if (! (type = v2si_ftype_qi))
12855 type = v2si_ftype_qi
12856 = build_function_type_list (opaque_V2SI_type_node,
12857 char_type_node,
12858 NULL_TREE);
12861 else
12862 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12863 d->code, d->name);
12866 def_builtin (d->name, type, d->code);
12870 static void
12871 rs6000_init_libfuncs (void)
12873 if (!TARGET_IEEEQUAD)
12874 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12875 if (!TARGET_XL_COMPAT)
12877 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12878 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12879 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12880 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12882 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12884 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12885 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12886 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12887 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12888 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12889 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12890 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12892 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12893 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12894 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12895 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12896 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12897 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12898 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12899 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12902 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12903 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12905 else
12907 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12908 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12909 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12910 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12912 else
12914 /* 32-bit SVR4 quad floating point routines. */
12916 set_optab_libfunc (add_optab, TFmode, "_q_add");
12917 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12918 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12919 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12920 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12921 if (TARGET_PPC_GPOPT)
12922 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12924 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12925 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12926 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12927 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12928 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12929 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12931 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12932 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12933 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12934 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12935 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12936 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12937 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12938 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
12943 /* Expand a block clear operation, and return 1 if successful. Return 0
12944 if we should let the compiler generate normal code.
12946 operands[0] is the destination
12947 operands[1] is the length
12948 operands[3] is the alignment */
12951 expand_block_clear (rtx operands[])
12953 rtx orig_dest = operands[0];
12954 rtx bytes_rtx = operands[1];
12955 rtx align_rtx = operands[3];
12956 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12957 HOST_WIDE_INT align;
12958 HOST_WIDE_INT bytes;
12959 int offset;
12960 int clear_bytes;
12961 int clear_step;
12963   /* If this is not a fixed size clear, just call memset.  */
12964 if (! constp)
12965 return 0;
12967 /* This must be a fixed size alignment */
12968 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12969 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12971 /* Anything to clear? */
12972 bytes = INTVAL (bytes_rtx);
12973 if (bytes <= 0)
12974 return 1;
12976   /* Fall back to the memset library call beyond a certain size, to avoid
12977      huge code bloat.  When optimizing for size, avoid any significant code
12978      bloat: calling memset costs about 4 instructions, so allow one
12979      instruction to load zero and three to do the clearing.  */
12980 if (TARGET_ALTIVEC && align >= 128)
12981 clear_step = 16;
12982 else if (TARGET_POWERPC64 && align >= 32)
12983 clear_step = 8;
12984 else if (TARGET_SPE && align >= 64)
12985 clear_step = 8;
12986 else
12987 clear_step = 4;
12989 if (optimize_size && bytes > 3 * clear_step)
12990 return 0;
12991 if (! optimize_size && bytes > 8 * clear_step)
12992 return 0;
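  /* Worked example (illustrative): with AltiVec and 128-bit alignment,
     clear_step is 16, so up to 8 * 16 == 128 bytes are cleared inline when
     optimizing for speed but only 3 * 16 == 48 bytes under -Os; a 100-byte
     clear is inlined in the first case and becomes a memset call in the
     second.  */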
12994 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12996 enum machine_mode mode = BLKmode;
12997 rtx dest;
12999 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
13001 clear_bytes = 16;
13002 mode = V4SImode;
13004 else if (bytes >= 8 && TARGET_SPE && align >= 64)
13006 clear_bytes = 8;
13007 mode = V2SImode;
13009 else if (bytes >= 8 && TARGET_POWERPC64
13010 /* 64-bit loads and stores require word-aligned
13011 displacements. */
13012 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13014 clear_bytes = 8;
13015 mode = DImode;
13017 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13018 	{			/* clear 4 bytes */
13019 clear_bytes = 4;
13020 mode = SImode;
13022 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13023 	{			/* clear 2 bytes */
13024 clear_bytes = 2;
13025 mode = HImode;
13027       else /* clear 1 byte at a time */
13029 clear_bytes = 1;
13030 mode = QImode;
13033 dest = adjust_address (orig_dest, mode, offset);
13035 emit_move_insn (dest, CONST0_RTX (mode));
13038 return 1;
13042 /* Expand a block move operation, and return 1 if successful. Return 0
13043 if we should let the compiler generate normal code.
13045 operands[0] is the destination
13046 operands[1] is the source
13047 operands[2] is the length
13048 operands[3] is the alignment */
13050 #define MAX_MOVE_REG 4
13053 expand_block_move (rtx operands[])
13055 rtx orig_dest = operands[0];
13056 rtx orig_src = operands[1];
13057 rtx bytes_rtx = operands[2];
13058 rtx align_rtx = operands[3];
13059 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
13060 int align;
13061 int bytes;
13062 int offset;
13063 int move_bytes;
13064 rtx stores[MAX_MOVE_REG];
13065 int num_reg = 0;
13067 /* If this is not a fixed size move, just call memcpy */
13068 if (! constp)
13069 return 0;
13071 /* This must be a fixed size alignment */
13072 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
13073 align = INTVAL (align_rtx) * BITS_PER_UNIT;
13075 /* Anything to move? */
13076 bytes = INTVAL (bytes_rtx);
13077 if (bytes <= 0)
13078 return 1;
13080 if (bytes > rs6000_block_move_inline_limit)
13081 return 0;
13083 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
13085 union {
13086 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
13087 rtx (*mov) (rtx, rtx);
13088 } gen_func;
13089 enum machine_mode mode = BLKmode;
13090 rtx src, dest;
13092 /* Altivec first, since it will be faster than a string move
13093 when it applies, and usually not significantly larger. */
13094 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
13096 move_bytes = 16;
13097 mode = V4SImode;
13098 gen_func.mov = gen_movv4si;
13100 else if (TARGET_SPE && bytes >= 8 && align >= 64)
13102 move_bytes = 8;
13103 mode = V2SImode;
13104 gen_func.mov = gen_movv2si;
13106 else if (TARGET_STRING
13107 && bytes > 24 /* move up to 32 bytes at a time */
13108 && ! fixed_regs[5]
13109 && ! fixed_regs[6]
13110 && ! fixed_regs[7]
13111 && ! fixed_regs[8]
13112 && ! fixed_regs[9]
13113 && ! fixed_regs[10]
13114 && ! fixed_regs[11]
13115 && ! fixed_regs[12])
13117 move_bytes = (bytes > 32) ? 32 : bytes;
13118 gen_func.movmemsi = gen_movmemsi_8reg;
13120 else if (TARGET_STRING
13121 && bytes > 16 /* move up to 24 bytes at a time */
13122 && ! fixed_regs[5]
13123 && ! fixed_regs[6]
13124 && ! fixed_regs[7]
13125 && ! fixed_regs[8]
13126 && ! fixed_regs[9]
13127 && ! fixed_regs[10])
13129 move_bytes = (bytes > 24) ? 24 : bytes;
13130 gen_func.movmemsi = gen_movmemsi_6reg;
13132 else if (TARGET_STRING
13133 && bytes > 8 /* move up to 16 bytes at a time */
13134 && ! fixed_regs[5]
13135 && ! fixed_regs[6]
13136 && ! fixed_regs[7]
13137 && ! fixed_regs[8])
13139 move_bytes = (bytes > 16) ? 16 : bytes;
13140 gen_func.movmemsi = gen_movmemsi_4reg;
13142 else if (bytes >= 8 && TARGET_POWERPC64
13143 /* 64-bit loads and stores require word-aligned
13144 displacements. */
13145 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13147 move_bytes = 8;
13148 mode = DImode;
13149 gen_func.mov = gen_movdi;
13151 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13152 { /* move up to 8 bytes at a time */
13153 move_bytes = (bytes > 8) ? 8 : bytes;
13154 gen_func.movmemsi = gen_movmemsi_2reg;
13156 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13157 { /* move 4 bytes */
13158 move_bytes = 4;
13159 mode = SImode;
13160 gen_func.mov = gen_movsi;
13162 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13163 { /* move 2 bytes */
13164 move_bytes = 2;
13165 mode = HImode;
13166 gen_func.mov = gen_movhi;
13168 else if (TARGET_STRING && bytes > 1)
13169 { /* move up to 4 bytes at a time */
13170 move_bytes = (bytes > 4) ? 4 : bytes;
13171 gen_func.movmemsi = gen_movmemsi_1reg;
13173 else /* move 1 byte at a time */
13175 move_bytes = 1;
13176 mode = QImode;
13177 gen_func.mov = gen_movqi;
13180 src = adjust_address (orig_src, mode, offset);
13181 dest = adjust_address (orig_dest, mode, offset);
13183 if (mode != BLKmode)
13185 rtx tmp_reg = gen_reg_rtx (mode);
13187 emit_insn ((*gen_func.mov) (tmp_reg, src));
13188 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13191 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13193 int i;
13194 for (i = 0; i < num_reg; i++)
13195 emit_insn (stores[i]);
13196 num_reg = 0;
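      /* Batching up to MAX_MOVE_REG stores and flushing them after their
	 loads lets the loads issue back to back, hiding their latency.
	 E.g. (illustrative) a 16-byte SImode copy emits four lwz insns
	 followed by four stw insns rather than alternating load/store
	 pairs.  */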
13199 if (mode == BLKmode)
13201 /* Move the address into scratch registers. The movmemsi
13202 patterns require zero offset. */
13203 if (!REG_P (XEXP (src, 0)))
13205 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13206 src = replace_equiv_address (src, src_reg);
13208 set_mem_size (src, move_bytes);
13210 if (!REG_P (XEXP (dest, 0)))
13212 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13213 dest = replace_equiv_address (dest, dest_reg);
13215 set_mem_size (dest, move_bytes);
13217 emit_insn ((*gen_func.movmemsi) (dest, src,
13218 GEN_INT (move_bytes & 31),
13219 align_rtx));
13223 return 1;
13227 /* Return a string to perform a load_multiple operation.
13228 operands[0] is the vector.
13229 operands[1] is the source address.
13230 operands[2] is the first destination register. */
13232 const char *
13233 rs6000_output_load_multiple (rtx operands[3])
13235 /* We have to handle the case where the pseudo used to contain the address
13236 is assigned to one of the output registers. */
13237 int i, j;
13238 int words = XVECLEN (operands[0], 0);
13239 rtx xop[10];
13241 if (XVECLEN (operands[0], 0) == 1)
13242 return "lwz %2,0(%1)";
13244 for (i = 0; i < words; i++)
13245 if (refers_to_regno_p (REGNO (operands[2]) + i,
13246 REGNO (operands[2]) + i + 1, operands[1], 0))
13248 if (i == words-1)
13250 xop[0] = GEN_INT (4 * (words-1));
13251 xop[1] = operands[1];
13252 xop[2] = operands[2];
13253 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13254 return "";
13256 else if (i == 0)
13258 xop[0] = GEN_INT (4 * (words-1));
13259 xop[1] = operands[1];
13260 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13261 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13262 return "";
13264 else
13266 for (j = 0; j < words; j++)
13267 if (j != i)
13269 xop[0] = GEN_INT (j * 4);
13270 xop[1] = operands[1];
13271 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13272 output_asm_insn ("lwz %2,%0(%1)", xop);
13274 xop[0] = GEN_INT (i * 4);
13275 xop[1] = operands[1];
13276 output_asm_insn ("lwz %1,%0(%1)", xop);
13277 return "";
13281 return "lswi %2,%1,%N0";
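/* Illustrative example of the conflict handling above: for a three-word
   load_multiple into r5..r7 with the address in r6 (i == 1, neither the
   first nor the last destination), the fallback loop emits
   "lwz r5,0(r6)" and "lwz r7,8(r6)" first, then "lwz r6,4(r6)" last, so
   the word that clobbers the base register is loaded after the others.  */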
13285 /* A validation routine: say whether CODE, a condition code, and MODE
13286 match. The other alternatives either don't make sense or should
13287 never be generated. */
13289 void
13290 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13292 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13293 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13294 && GET_MODE_CLASS (mode) == MODE_CC);
13296 /* These don't make sense. */
13297 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13298 || mode != CCUNSmode);
13300 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13301 || mode == CCUNSmode);
13303 gcc_assert (mode == CCFPmode
13304 || (code != ORDERED && code != UNORDERED
13305 && code != UNEQ && code != LTGT
13306 && code != UNGT && code != UNLT
13307 && code != UNGE && code != UNLE));
13309 /* These should never be generated except for
13310 flag_finite_math_only. */
13311 gcc_assert (mode != CCFPmode
13312 || flag_finite_math_only
13313 || (code != LE && code != GE
13314 && code != UNEQ && code != LTGT
13315 && code != UNGT && code != UNLT));
13317 /* These are invalid; the information is not there. */
13318 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
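  /* For instance (illustrative): a signed GT in CCUNSmode trips the first
     assertion -- an unsigned comparison result cannot answer a signed
     question -- while GTU in CCUNSmode and GT in plain CCmode both pass.  */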
13322 /* Return 1 if ANDOP is a mask that sets no bits outside the mask required
13323    to convert the result of a rotate insn into a shift left insn of
13324    SHIFTOP bits.  Both are known to be SImode CONST_INT.  */
13327 includes_lshift_p (rtx shiftop, rtx andop)
13329 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13331 shift_mask <<= INTVAL (shiftop);
13333 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
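/* Example for includes_lshift_p (illustrative): SHIFTOP == 8 makes the low
   32 bits of shift_mask equal 0xffffff00, so ANDOP == 0x0000ff00 returns 1
   (no bits survive outside the mask) while ANDOP == 0x0000ff01 returns 0
   because bit 0 lies outside shift_mask.  */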
13336 /* Similar, but for right shift. */
13339 includes_rshift_p (rtx shiftop, rtx andop)
13341 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13343 shift_mask >>= INTVAL (shiftop);
13345 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13348 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13349 to perform a left shift. It must have exactly SHIFTOP least
13350 significant 0's, then one or more 1's, then zero or more 0's. */
13353 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13355 if (GET_CODE (andop) == CONST_INT)
13357 HOST_WIDE_INT c, lsb, shift_mask;
13359 c = INTVAL (andop);
13360 if (c == 0 || c == ~0)
13361 return 0;
13363 shift_mask = ~0;
13364 shift_mask <<= INTVAL (shiftop);
13366 /* Find the least significant one bit. */
13367 lsb = c & -c;
13369 /* It must coincide with the LSB of the shift mask. */
13370 if (-lsb != shift_mask)
13371 return 0;
13373 /* Invert to look for the next transition (if any). */
13374 c = ~c;
13376 /* Remove the low group of ones (originally low group of zeros). */
13377 c &= -lsb;
13379 /* Again find the lsb, and check we have all 1's above. */
13380 lsb = c & -c;
13381 return c == -lsb;
13383 else if (GET_CODE (andop) == CONST_DOUBLE
13384 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13386 HOST_WIDE_INT low, high, lsb;
13387 HOST_WIDE_INT shift_mask_low, shift_mask_high;
13389 low = CONST_DOUBLE_LOW (andop);
13390 if (HOST_BITS_PER_WIDE_INT < 64)
13391 high = CONST_DOUBLE_HIGH (andop);
13393 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
13394 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
13395 return 0;
13397 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13399 shift_mask_high = ~0;
13400 if (INTVAL (shiftop) > 32)
13401 shift_mask_high <<= INTVAL (shiftop) - 32;
13403 lsb = high & -high;
13405 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
13406 return 0;
13408 high = ~high;
13409 high &= -lsb;
13411 lsb = high & -high;
13412 return high == -lsb;
13415 shift_mask_low = ~0;
13416 shift_mask_low <<= INTVAL (shiftop);
13418 lsb = low & -low;
13420 if (-lsb != shift_mask_low)
13421 return 0;
13423 if (HOST_BITS_PER_WIDE_INT < 64)
13424 high = ~high;
13425 low = ~low;
13426 low &= -lsb;
13428 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13430 lsb = high & -high;
13431 return high == -lsb;
13434 lsb = low & -low;
13435 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
13437 else
13438 return 0;
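/* Worked example for includes_rldic_lshift_p (illustrative), with
   SHIFTOP == 8: for ANDOP == 0x0000ff00 the least significant one bit is
   0x100, -lsb equals the shift mask, and after inverting and stripping the
   low run of ones the remaining ones are again contiguous, so the function
   returns 1.  For ANDOP == 0x0001fe00 the lowest set bit is 0x200, -lsb no
   longer matches the shift mask, and the function returns 0.  */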
13441 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13442 to perform a left shift. It must have SHIFTOP or more least
13443 significant 0's, with the remainder of the word 1's. */
13446 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13448 if (GET_CODE (andop) == CONST_INT)
13450 HOST_WIDE_INT c, lsb, shift_mask;
13452 shift_mask = ~0;
13453 shift_mask <<= INTVAL (shiftop);
13454 c = INTVAL (andop);
13456 /* Find the least significant one bit. */
13457 lsb = c & -c;
13459 /* It must be covered by the shift mask.
13460 This test also rejects c == 0. */
13461 if ((lsb & shift_mask) == 0)
13462 return 0;
13464 /* Check we have all 1's above the transition, and reject all 1's. */
13465 return c == -lsb && lsb != 1;
13467 else if (GET_CODE (andop) == CONST_DOUBLE
13468 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13470 HOST_WIDE_INT low, lsb, shift_mask_low;
13472 low = CONST_DOUBLE_LOW (andop);
13474 if (HOST_BITS_PER_WIDE_INT < 64)
13476 HOST_WIDE_INT high, shift_mask_high;
13478 high = CONST_DOUBLE_HIGH (andop);
13480 if (low == 0)
13482 shift_mask_high = ~0;
13483 if (INTVAL (shiftop) > 32)
13484 shift_mask_high <<= INTVAL (shiftop) - 32;
13486 lsb = high & -high;
13488 if ((lsb & shift_mask_high) == 0)
13489 return 0;
13491 return high == -lsb;
13493 if (high != ~0)
13494 return 0;
13497 shift_mask_low = ~0;
13498 shift_mask_low <<= INTVAL (shiftop);
13500 lsb = low & -low;
13502 if ((lsb & shift_mask_low) == 0)
13503 return 0;
13505 return low == -lsb && lsb != 1;
13507 else
13508 return 0;
13511 /* Return 1 if the operands will generate valid arguments for an rlwimi
13512    insert with right shift in 64-bit mode.  The mask may not start on the
13513    first bit or stop on the last bit because the wrap-around effects of
13514    the instruction do not correspond to the semantics of the RTL insn.  */
13517 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13519 if (INTVAL (startop) > 32
13520 && INTVAL (startop) < 64
13521 && INTVAL (sizeop) > 1
13522 && INTVAL (sizeop) + INTVAL (startop) < 64
13523 && INTVAL (shiftop) > 0
13524 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13525 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13526 return 1;
13528 return 0;
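/* For instance (illustrative), insvdi_rshift_rlwimi_p returns 1 for
   sizeop == 8, startop == 40, shiftop == 16: every test passes (40 > 32,
   8 + 40 < 64, 8 + 16 < 32, and 64 - 16 >= 8), so this
   insert-with-right-shift can be expressed as a single rlwimi.  */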
13531 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
13532 for lfq and stfq insns iff the registers are hard registers. */
13535 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13537 /* We might have been passed a SUBREG. */
13538 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13539 return 0;
13541   /* We might have been passed non-floating-point registers.  */
13542 if (!FP_REGNO_P (REGNO (reg1))
13543 || !FP_REGNO_P (REGNO (reg2)))
13544 return 0;
13546 return (REGNO (reg1) == REGNO (reg2) - 1);
13549 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13550 addr1 and addr2 must be in consecutive memory locations
13551 (addr2 == addr1 + 8). */
13554 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13556 rtx addr1, addr2;
13557 unsigned int reg1, reg2;
13558 int offset1, offset2;
13560 /* The mems cannot be volatile. */
13561 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13562 return 0;
13564 addr1 = XEXP (mem1, 0);
13565 addr2 = XEXP (mem2, 0);
13567 /* Extract an offset (if used) from the first addr. */
13568 if (GET_CODE (addr1) == PLUS)
13570 /* If not a REG, return zero. */
13571 if (GET_CODE (XEXP (addr1, 0)) != REG)
13572 return 0;
13573 else
13575 reg1 = REGNO (XEXP (addr1, 0));
13576 /* The offset must be constant! */
13577 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13578 return 0;
13579 offset1 = INTVAL (XEXP (addr1, 1));
13582 else if (GET_CODE (addr1) != REG)
13583 return 0;
13584 else
13586 reg1 = REGNO (addr1);
13587 /* This was a simple (mem (reg)) expression. Offset is 0. */
13588 offset1 = 0;
13591 /* And now for the second addr. */
13592 if (GET_CODE (addr2) == PLUS)
13594 /* If not a REG, return zero. */
13595 if (GET_CODE (XEXP (addr2, 0)) != REG)
13596 return 0;
13597 else
13599 reg2 = REGNO (XEXP (addr2, 0));
13600 /* The offset must be constant. */
13601 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13602 return 0;
13603 offset2 = INTVAL (XEXP (addr2, 1));
13606 else if (GET_CODE (addr2) != REG)
13607 return 0;
13608 else
13610 reg2 = REGNO (addr2);
13611 /* This was a simple (mem (reg)) expression. Offset is 0. */
13612 offset2 = 0;
13615 /* Both of these must have the same base register. */
13616 if (reg1 != reg2)
13617 return 0;
13619 /* The offset for the second addr must be 8 more than the first addr. */
13620 if (offset2 != offset1 + 8)
13621 return 0;
13623 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13624 instructions. */
13625 return 1;
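/* Example for mems_ok_for_quad_peep (illustrative):
   mem1 == (mem (plus (reg 9) (const_int 16))) and
   mem2 == (mem (plus (reg 9) (const_int 24))) qualify -- same base
   register and offset2 == offset1 + 8 -- so the peephole may combine the
   pair into a single quad-word access.  */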
13630 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13632 static bool eliminated = false;
13633 rtx ret;
13635 if (mode != SDmode)
13636 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13637 else
13639 rtx mem = cfun->machine->sdmode_stack_slot;
13640 gcc_assert (mem != NULL_RTX);
13642 if (!eliminated)
13644 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13645 cfun->machine->sdmode_stack_slot = mem;
13646 eliminated = true;
13648 ret = mem;
13651 if (TARGET_DEBUG_ADDR)
13653 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13654 GET_MODE_NAME (mode));
13655 if (!ret)
13656 fprintf (stderr, "\tNULL_RTX\n");
13657 else
13658 debug_rtx (ret);
13661 return ret;
13664 static tree
13665 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13667 /* Don't walk into types. */
13668 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13670 *walk_subtrees = 0;
13671 return NULL_TREE;
13674 switch (TREE_CODE (*tp))
13676 case VAR_DECL:
13677 case PARM_DECL:
13678 case FIELD_DECL:
13679 case RESULT_DECL:
13680 case SSA_NAME:
13681 case REAL_CST:
13682 case MEM_REF:
13683 case VIEW_CONVERT_EXPR:
13684 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13685 return *tp;
13686 break;
13687 default:
13688 break;
13691 return NULL_TREE;
13694 enum reload_reg_type {
13695 GPR_REGISTER_TYPE,
13696 VECTOR_REGISTER_TYPE,
13697 OTHER_REGISTER_TYPE
13700 static enum reload_reg_type
13701 rs6000_reload_register_type (enum reg_class rclass)
13703 switch (rclass)
13705 case GENERAL_REGS:
13706 case BASE_REGS:
13707 return GPR_REGISTER_TYPE;
13709 case FLOAT_REGS:
13710 case ALTIVEC_REGS:
13711 case VSX_REGS:
13712 return VECTOR_REGISTER_TYPE;
13714 default:
13715 return OTHER_REGISTER_TYPE;
13719 /* Inform reload about cases where moving X with a mode MODE to a register in
13720 RCLASS requires an extra scratch or immediate register. Return the class
13721 needed for the immediate register.
13723 For VSX and Altivec, we may need a register to convert sp+offset into
13724 reg+sp.
13726 For misaligned 64-bit gpr loads and stores we need a register to
13727 convert an offset address to indirect. */
13729 static reg_class_t
13730 rs6000_secondary_reload (bool in_p,
13731 rtx x,
13732 reg_class_t rclass_i,
13733 enum machine_mode mode,
13734 secondary_reload_info *sri)
13736 enum reg_class rclass = (enum reg_class) rclass_i;
13737 reg_class_t ret = ALL_REGS;
13738 enum insn_code icode;
13739 bool default_p = false;
13741 sri->icode = CODE_FOR_nothing;
13743 /* Convert vector loads and stores into gprs to use an additional base
13744 register. */
13745 icode = rs6000_vector_reload[mode][in_p != false];
13746 if (icode != CODE_FOR_nothing)
13748 ret = NO_REGS;
13749 sri->icode = CODE_FOR_nothing;
13750 sri->extra_cost = 0;
13752 if (GET_CODE (x) == MEM)
13754 rtx addr = XEXP (x, 0);
13756 	  /* Loads to and stores from gprs can do reg+offset, and wouldn't need
13757 	     an extra register in that case, but they would need one if the
13758 	     addressing were reg+reg or (reg+reg)&(-16).  */
13759 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13761 if (!legitimate_indirect_address_p (addr, false)
13762 && !rs6000_legitimate_offset_address_p (TImode, addr,
13763 false, true))
13765 sri->icode = icode;
13766 	      /* Account for splitting the loads, and for converting the
13767 		 address from reg+reg to reg.  */
13768 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13769 + ((GET_CODE (addr) == AND) ? 1 : 0));
13772 /* Loads to and stores from vector registers can only do reg+reg
13773 addressing. Altivec registers can also do (reg+reg)&(-16). */
13774 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13775 || rclass == FLOAT_REGS || rclass == NO_REGS)
13777 if (!VECTOR_MEM_ALTIVEC_P (mode)
13778 && GET_CODE (addr) == AND
13779 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13780 && INTVAL (XEXP (addr, 1)) == -16
13781 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13782 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13784 sri->icode = icode;
13785 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13786 ? 2 : 1);
13788 else if (!legitimate_indirect_address_p (addr, false)
13789 && (rclass == NO_REGS
13790 || !legitimate_indexed_address_p (addr, false)))
13792 sri->icode = icode;
13793 sri->extra_cost = 1;
13795 else
13796 icode = CODE_FOR_nothing;
13798       /* Any other loads, including those to pseudo registers which haven't
13799 	 been assigned to a hard register yet, default to requiring a scratch
13800 	 register.  */
13801 else
13803 sri->icode = icode;
13804 sri->extra_cost = 2;
13807 else if (REG_P (x))
13809 int regno = true_regnum (x);
13811 icode = CODE_FOR_nothing;
13812 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13813 default_p = true;
13814 else
13816 enum reg_class xclass = REGNO_REG_CLASS (regno);
13817 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13818 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13820 /* If memory is needed, use default_secondary_reload to create the
13821 stack slot. */
13822 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13823 default_p = true;
13824 else
13825 ret = NO_REGS;
13828 else
13829 default_p = true;
13831 else if (TARGET_POWERPC64
13832 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13833 && MEM_P (x)
13834 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
13836 rtx addr = XEXP (x, 0);
13837 rtx off = address_offset (addr);
13839 if (off != NULL_RTX)
13841 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13842 unsigned HOST_WIDE_INT offset = INTVAL (off);
13844 /* We need a secondary reload when our legitimate_address_p
13845 says the address is good (as otherwise the entire address
13846 will be reloaded), and the offset is not a multiple of
13847 four or we have an address wrap. Address wrap will only
13848 occur for LO_SUMs since legitimate_offset_address_p
13849 rejects addresses for 16-byte mems that will wrap. */
13850 if (GET_CODE (addr) == LO_SUM
13851 ? (1 /* legitimate_address_p allows any offset for lo_sum */
13852 && ((offset & 3) != 0
13853 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
13854 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
13855 && (offset & 3) != 0))
13857 if (in_p)
13858 sri->icode = CODE_FOR_reload_di_load;
13859 else
13860 sri->icode = CODE_FOR_reload_di_store;
13861 sri->extra_cost = 2;
13862 ret = NO_REGS;
13864 else
13865 default_p = true;
13867 else
13868 default_p = true;
13870 else if (!TARGET_POWERPC64
13871 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13872 && MEM_P (x)
13873 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
13875 rtx addr = XEXP (x, 0);
13876 rtx off = address_offset (addr);
13878 if (off != NULL_RTX)
13880 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13881 unsigned HOST_WIDE_INT offset = INTVAL (off);
13883 /* We need a secondary reload when our legitimate_address_p
13884 says the address is good (as otherwise the entire address
13885 will be reloaded), and we have a wrap.
13887 legitimate_lo_sum_address_p allows LO_SUM addresses to
13888 have any offset so test for wrap in the low 16 bits.
13890 legitimate_offset_address_p checks for the range
13891 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
13892 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
13893 [0x7ff4,0x7fff] respectively, so test for the
13894 intersection of these ranges, [0x7ffc,0x7fff] and
13895 [0x7ff4,0x7ff7] respectively.
13897 Note that the address we see here may have been
13898 manipulated by legitimize_reload_address. */
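	  /* Numeric check (illustrative): with UNITS_PER_WORD == 4 and an
	     8-byte access, extra == 4, so the PLUS arm below accepts
	     exactly offsets 0x7ffc through 0x7fff -- the second word would
	     need offset + 4, which overflows the 16-bit displacement.
	     Since offset is unsigned, smaller offsets wrap around and fail
	     the "<" test.  */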
13899 if (GET_CODE (addr) == LO_SUM
13900 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
13901 : offset - (0x8000 - extra) < UNITS_PER_WORD)
13903 if (in_p)
13904 sri->icode = CODE_FOR_reload_si_load;
13905 else
13906 sri->icode = CODE_FOR_reload_si_store;
13907 sri->extra_cost = 2;
13908 ret = NO_REGS;
13910 else
13911 default_p = true;
13913 else
13914 default_p = true;
13916 else
13917 default_p = true;
13919 if (default_p)
13920 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13922 gcc_assert (ret != ALL_REGS);
13924 if (TARGET_DEBUG_ADDR)
13926 fprintf (stderr,
13927 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13928 "mode = %s",
13929 reg_class_names[ret],
13930 in_p ? "true" : "false",
13931 reg_class_names[rclass],
13932 GET_MODE_NAME (mode));
13934 if (default_p)
13935 fprintf (stderr, ", default secondary reload");
13937 if (sri->icode != CODE_FOR_nothing)
13938 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13939 insn_data[sri->icode].name, sri->extra_cost);
13940 else
13941 fprintf (stderr, "\n");
13943 debug_rtx (x);
13946 return ret;
13949 /* Fix up reload addresses for Altivec or VSX loads/stores, changing
13950    SP+offset addressing into SP+reg addressing.  */
13952 void
13953 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13955 int regno = true_regnum (reg);
13956 enum machine_mode mode = GET_MODE (reg);
13957 enum reg_class rclass;
13958 rtx addr;
13959 rtx and_op2 = NULL_RTX;
13960 rtx addr_op1;
13961 rtx addr_op2;
13962 rtx scratch_or_premodify = scratch;
13963 rtx and_rtx;
13964 rtx cc_clobber;
13966 if (TARGET_DEBUG_ADDR)
13968 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13969 store_p ? "store" : "load");
13970 fprintf (stderr, "reg:\n");
13971 debug_rtx (reg);
13972 fprintf (stderr, "mem:\n");
13973 debug_rtx (mem);
13974 fprintf (stderr, "scratch:\n");
13975 debug_rtx (scratch);
13978 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13979 gcc_assert (GET_CODE (mem) == MEM);
13980 rclass = REGNO_REG_CLASS (regno);
13981 addr = XEXP (mem, 0);
13983 switch (rclass)
13985       /* GPRs can handle reg + small constant; all other addresses need to
13986 	 use the scratch register.  */
13987 case GENERAL_REGS:
13988 case BASE_REGS:
13989 if (GET_CODE (addr) == AND)
13991 and_op2 = XEXP (addr, 1);
13992 addr = XEXP (addr, 0);
13995 if (GET_CODE (addr) == PRE_MODIFY)
13997 scratch_or_premodify = XEXP (addr, 0);
13998 gcc_assert (REG_P (scratch_or_premodify));
13999 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
14000 addr = XEXP (addr, 1);
14003 if (GET_CODE (addr) == PLUS
14004 && (and_op2 != NULL_RTX
14005 || !rs6000_legitimate_offset_address_p (TImode, addr,
14006 false, true)))
14008 addr_op1 = XEXP (addr, 0);
14009 addr_op2 = XEXP (addr, 1);
14010 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
14012 if (!REG_P (addr_op2)
14013 && (GET_CODE (addr_op2) != CONST_INT
14014 || !satisfies_constraint_I (addr_op2)))
14016 if (TARGET_DEBUG_ADDR)
14018 fprintf (stderr,
14019 "\nMove plus addr to register %s, mode = %s: ",
14020 rs6000_reg_names[REGNO (scratch)],
14021 GET_MODE_NAME (mode));
14022 debug_rtx (addr_op2);
14024 rs6000_emit_move (scratch, addr_op2, Pmode);
14025 addr_op2 = scratch;
14028 emit_insn (gen_rtx_SET (VOIDmode,
14029 scratch_or_premodify,
14030 gen_rtx_PLUS (Pmode,
14031 addr_op1,
14032 addr_op2)));
14034 addr = scratch_or_premodify;
14035 scratch_or_premodify = scratch;
14037 else if (!legitimate_indirect_address_p (addr, false)
14038 && !rs6000_legitimate_offset_address_p (TImode, addr,
14039 false, true))
14041 if (TARGET_DEBUG_ADDR)
14043 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
14044 rs6000_reg_names[REGNO (scratch_or_premodify)],
14045 GET_MODE_NAME (mode));
14046 debug_rtx (addr);
14048 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14049 addr = scratch_or_premodify;
14050 scratch_or_premodify = scratch;
14052 break;
14054 /* Float/Altivec registers can only handle reg+reg addressing. Move
14055 other addresses into a scratch register. */
14056 case FLOAT_REGS:
14057 case VSX_REGS:
14058 case ALTIVEC_REGS:
14060 /* With float regs, we need to handle the AND ourselves, since we can't
14061 use the Altivec instruction with an implicit AND -16. Allow scalar
14062 loads to float registers to use reg+offset even if VSX. */
14063 if (GET_CODE (addr) == AND
14064 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
14065 || GET_CODE (XEXP (addr, 1)) != CONST_INT
14066 || INTVAL (XEXP (addr, 1)) != -16
14067 || !VECTOR_MEM_ALTIVEC_P (mode)))
14069 and_op2 = XEXP (addr, 1);
14070 addr = XEXP (addr, 0);
14073 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
14074 as the address later. */
14075 if (GET_CODE (addr) == PRE_MODIFY
14076 && (!VECTOR_MEM_VSX_P (mode)
14077 || and_op2 != NULL_RTX
14078 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
14080 scratch_or_premodify = XEXP (addr, 0);
14081 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
14082 false));
14083 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
14084 addr = XEXP (addr, 1);
14087 if (legitimate_indirect_address_p (addr, false) /* reg */
14088 || legitimate_indexed_address_p (addr, false) /* reg+reg */
14089 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
14090 || (GET_CODE (addr) == AND /* Altivec memory */
14091 && GET_CODE (XEXP (addr, 1)) == CONST_INT
14092 && INTVAL (XEXP (addr, 1)) == -16
14093 && VECTOR_MEM_ALTIVEC_P (mode))
14094 || (rclass == FLOAT_REGS /* legacy float mem */
14095 && GET_MODE_SIZE (mode) == 8
14096 && and_op2 == NULL_RTX
14097 && scratch_or_premodify == scratch
14098 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
14101 else if (GET_CODE (addr) == PLUS)
14103 addr_op1 = XEXP (addr, 0);
14104 addr_op2 = XEXP (addr, 1);
14105 gcc_assert (REG_P (addr_op1));
14107 if (TARGET_DEBUG_ADDR)
14109 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
14110 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14111 debug_rtx (addr_op2);
14113 rs6000_emit_move (scratch, addr_op2, Pmode);
14114 emit_insn (gen_rtx_SET (VOIDmode,
14115 scratch_or_premodify,
14116 gen_rtx_PLUS (Pmode,
14117 addr_op1,
14118 scratch)));
14119 addr = scratch_or_premodify;
14120 scratch_or_premodify = scratch;
14123 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
14124 || GET_CODE (addr) == CONST_INT || REG_P (addr))
14126 if (TARGET_DEBUG_ADDR)
14128 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
14129 rs6000_reg_names[REGNO (scratch_or_premodify)],
14130 GET_MODE_NAME (mode));
14131 debug_rtx (addr);
14134 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14135 addr = scratch_or_premodify;
14136 scratch_or_premodify = scratch;
14139 else
14140 gcc_unreachable ();
14142 break;
14144 default:
14145 gcc_unreachable ();
14148   /* If the original address involved a pre-modify for which we couldn't use
14149      the VSX memory instruction with update, and we haven't already taken
14150      care of it, store the address in the pre-modify register and use that
14151      as the address.  */
14152 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
14154 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
14155 addr = scratch_or_premodify;
14158 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
14159 memory instruction, recreate the AND now, including the clobber which is
14160 generated by the general ANDSI3/ANDDI3 patterns for the
14161 andi. instruction. */
14162 if (and_op2 != NULL_RTX)
14164 if (! legitimate_indirect_address_p (addr, false))
14166 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
14167 addr = scratch;
14170 if (TARGET_DEBUG_ADDR)
14172 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
14173 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14174 debug_rtx (and_op2);
14177 and_rtx = gen_rtx_SET (VOIDmode,
14178 scratch,
14179 gen_rtx_AND (Pmode,
14180 addr,
14181 and_op2));
14183 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
14184 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14185 gen_rtvec (2, and_rtx, cc_clobber)));
14186 addr = scratch;
14189 /* Adjust the address if it changed. */
14190 if (addr != XEXP (mem, 0))
14192 mem = change_address (mem, mode, addr);
14193 if (TARGET_DEBUG_ADDR)
14194 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14197 /* Now create the move. */
14198 if (store_p)
14199 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14200 else
14201 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14203 return;
14206 /* Convert reloads involving 64-bit gprs and misaligned offset
14207 addressing, or multiple 32-bit gprs and offsets that are too large,
14208 to use indirect addressing. */
14210 void
14211 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
14213 int regno = true_regnum (reg);
14214 enum reg_class rclass;
14215 rtx addr;
14216 rtx scratch_or_premodify = scratch;
14218 if (TARGET_DEBUG_ADDR)
14220 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
14221 store_p ? "store" : "load");
14222 fprintf (stderr, "reg:\n");
14223 debug_rtx (reg);
14224 fprintf (stderr, "mem:\n");
14225 debug_rtx (mem);
14226 fprintf (stderr, "scratch:\n");
14227 debug_rtx (scratch);
14230 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
14231 gcc_assert (GET_CODE (mem) == MEM);
14232 rclass = REGNO_REG_CLASS (regno);
14233 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
14234 addr = XEXP (mem, 0);
14236 if (GET_CODE (addr) == PRE_MODIFY)
14238 scratch_or_premodify = XEXP (addr, 0);
14239 gcc_assert (REG_P (scratch_or_premodify));
14240 addr = XEXP (addr, 1);
14242 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
14244 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14246 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
14248 /* Now create the move. */
14249 if (store_p)
14250 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14251 else
14252 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14254 return;
14257 /* Allocate a 64-bit stack slot to be used for copying SDmode
14258 values through if this function has any SDmode references. */
14260 static void
14261 rs6000_alloc_sdmode_stack_slot (void)
14263 tree t;
14264 basic_block bb;
14265 gimple_stmt_iterator gsi;
14267 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
14269 FOR_EACH_BB (bb)
14270 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
14272 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
14273 if (ret)
14275 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14276 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14277 SDmode, 0);
14278 return;
14282 /* Check for any SDmode parameters of the function. */
14283 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
14285 if (TREE_TYPE (t) == error_mark_node)
14286 continue;
14288 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
14289 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
14291 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14292 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14293 SDmode, 0);
14294 return;
14299 static void
14300 rs6000_instantiate_decls (void)
14302 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
14303 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
14306 /* Given an rtx X being reloaded into a reg required to be
14307 in class CLASS, return the class of reg to actually use.
14308 In general this is just CLASS; but on some machines
14309 in some cases it is preferable to use a more restrictive class.
14311 On the RS/6000, we have to return NO_REGS when we want to reload a
14312 floating-point CONST_DOUBLE to force it to be copied to memory.
14314 We also don't want to reload integer values into floating-point
14315 registers if we can at all help it. In fact, this can
14316 cause reload to die, if it tries to generate a reload of CTR
14317 into a FP register and discovers it doesn't have the memory location
14318 required.
14320 ??? Would it be a good idea to have reload do the converse, that is
14321 try to reload floating modes into FP registers if possible?
14324 static enum reg_class
14325 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
14327 enum machine_mode mode = GET_MODE (x);
14329 if (VECTOR_UNIT_VSX_P (mode)
14330 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
14331 return rclass;
14333 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
14334 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
14335 && easy_vector_constant (x, mode))
14336 return ALTIVEC_REGS;
14338 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
14339 return NO_REGS;
14341 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
14342 return GENERAL_REGS;
14344 /* For VSX, prefer the traditional registers for 64-bit values because we can
14345 use the non-VSX loads. Prefer the Altivec registers if Altivec is
14346 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
14347      prefer Altivec loads.  */
14348 if (rclass == VSX_REGS)
14350 if (GET_MODE_SIZE (mode) <= 8)
14351 return FLOAT_REGS;
14353 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
14354 return ALTIVEC_REGS;
14356 return rclass;
14359 return rclass;
14362 /* Debug version of rs6000_preferred_reload_class. */
14363 static enum reg_class
14364 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
14366 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
14368 fprintf (stderr,
14369 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
14370 "mode = %s, x:\n",
14371 reg_class_names[ret], reg_class_names[rclass],
14372 GET_MODE_NAME (GET_MODE (x)));
14373 debug_rtx (x);
14375 return ret;
14378 /* If we are copying between FP or AltiVec registers and anything else, we need
14379 a memory location. The exception is when we are targeting ppc64 and the
14380 move to/from fpr to gpr instructions are available. Also, under VSX, you
14381 can copy vector registers from the FP register set to the Altivec register
14382 set and vice versa. */
14384 static bool
14385 rs6000_secondary_memory_needed (enum reg_class class1,
14386 enum reg_class class2,
14387 enum machine_mode mode)
14389 if (class1 == class2)
14390 return false;
14392 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14393 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14394 between these classes. But we need memory for other things that can go in
14395 FLOAT_REGS like SFmode. */
14396 if (TARGET_VSX
14397 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14398 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14399 || class1 == FLOAT_REGS))
14400 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14401 && class2 != FLOAT_REGS);
14403 if (class1 == VSX_REGS || class2 == VSX_REGS)
14404 return true;
14406 if (class1 == FLOAT_REGS
14407 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14408 || ((mode != DFmode)
14409 && (mode != DDmode)
14410 && (mode != DImode))))
14411 return true;
14413 if (class2 == FLOAT_REGS
14414 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14415 || ((mode != DFmode)
14416 && (mode != DDmode)
14417 && (mode != DImode))))
14418 return true;
14420 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14421 return true;
14423 return false;
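/* For example (illustrative): rs6000_secondary_memory_needed returns true
   for a DFmode copy between FLOAT_REGS and GENERAL_REGS -- forcing the
   copy through a stack slot -- unless -mmfpgpr on a 64-bit target supplies
   the direct fpr/gpr move instructions.  */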
14426 /* Debug version of rs6000_secondary_memory_needed. */
14427 static bool
14428 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14429 enum reg_class class2,
14430 enum machine_mode mode)
14432 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14434 fprintf (stderr,
14435 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14436 "class2 = %s, mode = %s\n",
14437 ret ? "true" : "false", reg_class_names[class1],
14438 reg_class_names[class2], GET_MODE_NAME (mode));
14440 return ret;
14443 /* Return the register class of a scratch register needed to copy IN into
14444 or out of a register in RCLASS in MODE. If it can be done directly,
14445 NO_REGS is returned. */
14447 static enum reg_class
14448 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14449 rtx in)
14451 int regno;
14453 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14454 #if TARGET_MACHO
14455 && MACHOPIC_INDIRECT
14456 #endif
14459 /* We cannot copy a symbolic operand directly into anything
14460 other than BASE_REGS for TARGET_ELF. So indicate that a
14461 register from BASE_REGS is needed as an intermediate
14462 register.
14464 On Darwin, pic addresses require a load from memory, which
14465 needs a base register. */
14466 if (rclass != BASE_REGS
14467 && (GET_CODE (in) == SYMBOL_REF
14468 || GET_CODE (in) == HIGH
14469 || GET_CODE (in) == LABEL_REF
14470 || GET_CODE (in) == CONST))
14471 return BASE_REGS;
14474 if (GET_CODE (in) == REG)
14476 regno = REGNO (in);
14477 if (regno >= FIRST_PSEUDO_REGISTER)
14479 regno = true_regnum (in);
14480 if (regno >= FIRST_PSEUDO_REGISTER)
14481 regno = -1;
14484 else if (GET_CODE (in) == SUBREG)
14486 regno = true_regnum (in);
14487 if (regno >= FIRST_PSEUDO_REGISTER)
14488 regno = -1;
14490 else
14491 regno = -1;
14493 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14494 into anything. */
14495 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14496 || (regno >= 0 && INT_REGNO_P (regno)))
14497 return NO_REGS;
14499 /* Constants, memory, and FP registers can go into FP registers. */
14500 if ((regno == -1 || FP_REGNO_P (regno))
14501 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14502 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14504 /* Memory, and FP/altivec registers can go into fp/altivec registers under
14505 VSX. */
14506 if (TARGET_VSX
14507 && (regno == -1 || VSX_REGNO_P (regno))
14508 && VSX_REG_CLASS_P (rclass))
14509 return NO_REGS;
14511 /* Memory, and AltiVec registers can go into AltiVec registers. */
14512 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14513 && rclass == ALTIVEC_REGS)
14514 return NO_REGS;
14516 /* We can copy among the CR registers. */
14517 if ((rclass == CR_REGS || rclass == CR0_REGS)
14518 && regno >= 0 && CR_REGNO_P (regno))
14519 return NO_REGS;
14521 /* Otherwise, we need GENERAL_REGS. */
14522 return GENERAL_REGS;
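/* Hypothetical example (editor's illustration): on ELF, reloading a
   SYMBOL_REF into, say, CTR_REGS hits the first test above and returns
   BASE_REGS, so reload materializes the address in a base register first
   and only then moves it to the count register.  */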
14525 /* Debug version of rs6000_secondary_reload_class. */
14526 static enum reg_class
14527 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14528 enum machine_mode mode, rtx in)
14530 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14531 fprintf (stderr,
14532 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14533 "mode = %s, input rtx:\n",
14534 reg_class_names[ret], reg_class_names[rclass],
14535 GET_MODE_NAME (mode));
14536 debug_rtx (in);
14538 return ret;
14541 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14543 static bool
14544 rs6000_cannot_change_mode_class (enum machine_mode from,
14545 enum machine_mode to,
14546 enum reg_class rclass)
14548 unsigned from_size = GET_MODE_SIZE (from);
14549 unsigned to_size = GET_MODE_SIZE (to);
14551 if (from_size != to_size)
14553 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14554 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14555 && reg_classes_intersect_p (xclass, rclass));
14558 if (TARGET_E500_DOUBLE
14559 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14560 || (((to) == TFmode) + ((from) == TFmode)) == 1
14561 || (((to) == DDmode) + ((from) == DDmode)) == 1
14562 || (((to) == TDmode) + ((from) == TDmode)) == 1
14563 || (((to) == DImode) + ((from) == DImode)) == 1))
14564 return true;
14566 /* Since the VSX register set includes traditional floating point registers
14567 and altivec registers, just check for the size being different instead of
14568 trying to check whether the modes are vector modes. Otherwise it won't
14569 allow, say, DF and DI to change classes. */
14570 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14571 return (from_size != 8 && from_size != 16);
14573 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14574 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14575 return true;
14577 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14578 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14579 return true;
14581 return false;
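/* Worked example (editor's note): under VSX a (subreg:DI (reg:DF ...)) in
   VSX_REGS is allowed -- both modes are 8 bytes wide, so neither the
   mismatched-size test nor the 8/16-byte VSX rule above fires -- whereas
   re-viewing the same register in SFmode (4 bytes) is rejected.  */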
14584 /* Debug version of rs6000_cannot_change_mode_class. */
14585 static bool
14586 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14587 enum machine_mode to,
14588 enum reg_class rclass)
14590 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14592 fprintf (stderr,
14593 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14594 "to = %s, rclass = %s\n",
14595 ret ? "true" : "false",
14596 GET_MODE_NAME (from), GET_MODE_NAME (to),
14597 reg_class_names[rclass]);
14599 return ret;
14602 /* Given a comparison operation, return the bit number in CCR to test. We
14603 know this is a valid comparison.
14605 SCC_P is 1 if this is for an scc. That means that %D will have been
14606 used instead of %C, so the bits will be in different places.
14608 Return -1 if OP isn't a valid comparison for some reason. */
14610 int
14611 ccr_bit (rtx op, int scc_p)
14613 enum rtx_code code = GET_CODE (op);
14614 enum machine_mode cc_mode;
14615 int cc_regnum;
14616 int base_bit;
14617 rtx reg;
14619 if (!COMPARISON_P (op))
14620 return -1;
14622 reg = XEXP (op, 0);
14624 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14626 cc_mode = GET_MODE (reg);
14627 cc_regnum = REGNO (reg);
14628 base_bit = 4 * (cc_regnum - CR0_REGNO);
14630 validate_condition_mode (code, cc_mode);
14632 /* When generating a sCOND operation, only positive conditions are
14633 allowed. */
14634 gcc_assert (!scc_p
14635 || code == EQ || code == GT || code == LT || code == UNORDERED
14636 || code == GTU || code == LTU);
14638 switch (code)
14640 case NE:
14641 return scc_p ? base_bit + 3 : base_bit + 2;
14642 case EQ:
14643 return base_bit + 2;
14644 case GT: case GTU: case UNLE:
14645 return base_bit + 1;
14646 case LT: case LTU: case UNGE:
14647 return base_bit;
14648 case ORDERED: case UNORDERED:
14649 return base_bit + 3;
14651 case GE: case GEU:
14652 /* If scc, we will have done a cror to put the bit in the
14653 unordered position. So test that bit. For integer, this is ! LT
14654 unless this is an scc insn. */
14655 return scc_p ? base_bit + 3 : base_bit;
14657 case LE: case LEU:
14658 return scc_p ? base_bit + 3 : base_bit + 1;
14660 default:
14661 gcc_unreachable ();
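/* Editor's sketch of the CR bit layout assumed by ccr_bit (illustrative
   only, guarded out of the build): each 4-bit CR field holds LT, GT, EQ
   and SO/UN at offsets 0..3 from base_bit, so e.g. a compare living in
   CR6 has base_bit 24 and its EQ test reads bit 26.  */
#if 0
static int
cr_eq_bit_example (int cr_field)        /* CR field number, 0..7.  */
{
  return 4 * cr_field + 2;              /* CR6: 4*6 + 2 == 26.  */
}
#endif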
14665 /* Return the GOT register. */
14667 rtx
14668 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14670 /* The second flow pass currently (June 1999) can't update
14671 regs_ever_live without disturbing other parts of the compiler, so
14672 update it here to make the prolog/epilogue code happy. */
14673 if (!can_create_pseudo_p ()
14674 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14675 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14677 crtl->uses_pic_offset_table = 1;
14679 return pic_offset_table_rtx;
14682 static rs6000_stack_t stack_info;
14684 /* Function to init struct machine_function.
14685 This will be called, via a pointer variable,
14686 from push_function_context. */
14688 static struct machine_function *
14689 rs6000_init_machine_status (void)
14691 stack_info.reload_completed = 0;
14692 return ggc_alloc_cleared_machine_function ();
14695 /* These macros test for integers and extract the low-order bits. */
14696 #define INT_P(X) \
14697 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
14698 && GET_MODE (X) == VOIDmode)
14700 #define INT_LOWPART(X) \
14701 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
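/* Editor's note: on a 32-bit host a 64-bit integer constant is represented
   as a VOIDmode CONST_DOUBLE, which is why INT_P accepts both rtx codes
   and INT_LOWPART falls back to CONST_DOUBLE_LOW for the low word.  */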
14703 int
14704 extract_MB (rtx op)
14706 int i;
14707 unsigned long val = INT_LOWPART (op);
14709 /* If the high bit is zero, the value is the first 1 bit we find
14710 from the left. */
14711 if ((val & 0x80000000) == 0)
14713 gcc_assert (val & 0xffffffff);
14715 i = 1;
14716 while (((val <<= 1) & 0x80000000) == 0)
14717 ++i;
14718 return i;
14721 /* If the high bit is set and the low bit is not, or the mask is all
14722 1's, the value is zero. */
14723 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14724 return 0;
14726 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14727 from the right. */
14728 i = 31;
14729 while (((val >>= 1) & 1) != 0)
14730 --i;
14732 return i;
14735 int
14736 extract_ME (rtx op)
14738 int i;
14739 unsigned long val = INT_LOWPART (op);
14741 /* If the low bit is zero, the value is the first 1 bit we find from
14742 the right. */
14743 if ((val & 1) == 0)
14745 gcc_assert (val & 0xffffffff);
14747 i = 30;
14748 while (((val >>= 1) & 1) == 0)
14749 --i;
14751 return i;
14754 /* If the low bit is set and the high bit is not, or the mask is all
14755 1's, the value is 31. */
14756 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14757 return 31;
14759 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14760 from the left. */
14761 i = 0;
14762 while (((val <<= 1) & 0x80000000) != 0)
14763 ++i;
14765 return i;
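/* Worked example (editor's illustration): for the wrap-around rlwinm mask
   0xf000000f -- bits 0..3 and 28..31 set, numbering the most significant
   bit as 0 -- extract_MB scans from the right for the first clear bit and
   returns MB = 28, while extract_ME scans from the left and returns
   ME = 3: the mask runs from bit 28 around to bit 3.  */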
14768 /* Locate some local-dynamic symbol still in use by this function
14769 so that we can print its name in some tls_ld pattern. */
14771 static const char *
14772 rs6000_get_some_local_dynamic_name (void)
14774 rtx insn;
14776 if (cfun->machine->some_ld_name)
14777 return cfun->machine->some_ld_name;
14779 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
14780 if (INSN_P (insn)
14781 && for_each_rtx (&PATTERN (insn),
14782 rs6000_get_some_local_dynamic_name_1, 0))
14783 return cfun->machine->some_ld_name;
14785 gcc_unreachable ();
14788 /* Helper function for rs6000_get_some_local_dynamic_name. */
14790 static int
14791 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14793 rtx x = *px;
14795 if (GET_CODE (x) == SYMBOL_REF)
14797 const char *str = XSTR (x, 0);
14798 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14800 cfun->machine->some_ld_name = str;
14801 return 1;
14805 return 0;
14808 /* Write out a function code label. */
14810 void
14811 rs6000_output_function_entry (FILE *file, const char *fname)
14813 if (fname[0] != '.')
14815 switch (DEFAULT_ABI)
14817 default:
14818 gcc_unreachable ();
14820 case ABI_AIX:
14821 if (DOT_SYMBOLS)
14822 putc ('.', file);
14823 else
14824 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14825 break;
14827 case ABI_V4:
14828 case ABI_DARWIN:
14829 break;
14833 RS6000_OUTPUT_BASENAME (file, fname);
14836 /* Print an operand. Recognize special options, documented below. */
14838 #if TARGET_ELF
14839 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14840 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14841 #else
14842 #define SMALL_DATA_RELOC "sda21"
14843 #define SMALL_DATA_REG 0
14844 #endif
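/* Editor's illustration (assumed assembler output): with these macros a
   small-data reference prints as "var@sda21(0)" under the EABI and as
   "var@sdarel(13)" otherwise, r13 being the small-data base register in
   the latter case.  */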
14846 void
14847 print_operand (FILE *file, rtx x, int code)
14849 int i;
14850 unsigned HOST_WIDE_INT uval;
14852 switch (code)
14854 /* %a is output_address. */
14856 case 'b':
14857 /* If constant, low-order 16 bits of constant, unsigned.
14858 Otherwise, write normally. */
14859 if (INT_P (x))
14860 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14861 else
14862 print_operand (file, x, 0);
14863 return;
14865 case 'B':
14866 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14867 for 64-bit mask direction. */
14868 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14869 return;
14871 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14872 output_operand. */
14874 case 'D':
14875 /* Like 'J' but get to the GT bit only. */
14876 gcc_assert (REG_P (x));
14878 /* Bit 1 is GT bit. */
14879 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14881 /* Add one for shift count in rlinm for scc. */
14882 fprintf (file, "%d", i + 1);
14883 return;
14885 case 'E':
14886 /* X is a CR register. Print the number of the EQ bit of the CR. */
14887 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14888 output_operand_lossage ("invalid %%E value");
14889 else
14890 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14891 return;
14893 case 'f':
14894 /* X is a CR register. Print the shift count needed to move it
14895 to the high-order four bits. */
14896 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14897 output_operand_lossage ("invalid %%f value");
14898 else
14899 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14900 return;
14902 case 'F':
14903 /* Similar, but print the count for the rotate in the opposite
14904 direction. */
14905 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14906 output_operand_lossage ("invalid %%F value");
14907 else
14908 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14909 return;
14911 case 'G':
14912 /* X is a constant integer. If it is negative, print "m",
14913 otherwise print "z". This is to make an aze or ame insn. */
14914 if (GET_CODE (x) != CONST_INT)
14915 output_operand_lossage ("invalid %%G value");
14916 else if (INTVAL (x) >= 0)
14917 putc ('z', file);
14918 else
14919 putc ('m', file);
14920 return;
14922 case 'h':
14923 /* If constant, output low-order five bits. Otherwise, write
14924 normally. */
14925 if (INT_P (x))
14926 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14927 else
14928 print_operand (file, x, 0);
14929 return;
14931 case 'H':
14932 /* If constant, output low-order six bits. Otherwise, write
14933 normally. */
14934 if (INT_P (x))
14935 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14936 else
14937 print_operand (file, x, 0);
14938 return;
14940 case 'I':
14941 /* Print `i' if this is a constant, else nothing. */
14942 if (INT_P (x))
14943 putc ('i', file);
14944 return;
14946 case 'j':
14947 /* Write the bit number in CCR for jump. */
14948 i = ccr_bit (x, 0);
14949 if (i == -1)
14950 output_operand_lossage ("invalid %%j code");
14951 else
14952 fprintf (file, "%d", i);
14953 return;
14955 case 'J':
14956 /* Similar, but add one for shift count in rlinm for scc and pass
14957 scc flag to `ccr_bit'. */
14958 i = ccr_bit (x, 1);
14959 if (i == -1)
14960 output_operand_lossage ("invalid %%J code");
14961 else
14962 /* If we want bit 31, write a shift count of zero, not 32. */
14963 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14964 return;
14966 case 'k':
14967 /* X must be a constant. Write the 1's complement of the
14968 constant. */
14969 if (! INT_P (x))
14970 output_operand_lossage ("invalid %%k value");
14971 else
14972 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14973 return;
14975 case 'K':
14976 /* X must be a symbolic constant on ELF. Write an
14977 expression suitable for an 'addi' that adds in the low 16
14978 bits of the MEM. */
14979 if (GET_CODE (x) == CONST)
14981 if (GET_CODE (XEXP (x, 0)) != PLUS
14982 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14983 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14984 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14985 output_operand_lossage ("invalid %%K value");
14987 print_operand_address (file, x);
14988 fputs ("@l", file);
14989 return;
14991 /* %l is output_asm_label. */
14993 case 'L':
14994 /* Write second word of DImode or DFmode reference. Works on register
14995 or non-indexed memory only. */
14996 if (REG_P (x))
14997 fputs (reg_names[REGNO (x) + 1], file);
14998 else if (MEM_P (x))
15000 /* Handle possible auto-increment. Since it is pre-increment and
15001 we have already done it, we can just use an offset of word. */
15002 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15003 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15004 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
15005 UNITS_PER_WORD));
15006 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15007 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
15008 UNITS_PER_WORD));
15009 else
15010 output_address (XEXP (adjust_address_nv (x, SImode,
15011 UNITS_PER_WORD),
15012 0));
15014 if (small_data_operand (x, GET_MODE (x)))
15015 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15016 reg_names[SMALL_DATA_REG]);
15018 return;
15020 case 'm':
15021 /* MB value for a mask operand. */
15022 if (! mask_operand (x, SImode))
15023 output_operand_lossage ("invalid %%m value");
15025 fprintf (file, "%d", extract_MB (x));
15026 return;
15028 case 'M':
15029 /* ME value for a mask operand. */
15030 if (! mask_operand (x, SImode))
15031 output_operand_lossage ("invalid %%M value");
15033 fprintf (file, "%d", extract_ME (x));
15034 return;
15036 /* %n outputs the negative of its operand. */
15038 case 'N':
15039 /* Write the number of elements in the vector times 4. */
15040 if (GET_CODE (x) != PARALLEL)
15041 output_operand_lossage ("invalid %%N value");
15042 else
15043 fprintf (file, "%d", XVECLEN (x, 0) * 4);
15044 return;
15046 case 'O':
15047 /* Similar, but subtract 1 first. */
15048 if (GET_CODE (x) != PARALLEL)
15049 output_operand_lossage ("invalid %%O value");
15050 else
15051 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
15052 return;
15054 case 'p':
15055 /* X is a CONST_INT that is a power of two. Output the logarithm. */
15056 if (! INT_P (x)
15057 || INT_LOWPART (x) < 0
15058 || (i = exact_log2 (INT_LOWPART (x))) < 0)
15059 output_operand_lossage ("invalid %%p value");
15060 else
15061 fprintf (file, "%d", i);
15062 return;
15064 case 'P':
15065 /* The operand must be an indirect memory reference. The result
15066 is the register name. */
15067 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
15068 || REGNO (XEXP (x, 0)) >= 32)
15069 output_operand_lossage ("invalid %%P value");
15070 else
15071 fputs (reg_names[REGNO (XEXP (x, 0))], file);
15072 return;
15074 case 'q':
15075 /* This outputs the logical code corresponding to a boolean
15076 expression. The expression may have one or both operands
15077 negated (if one, only the first one). For condition register
15078 logical operations, it will also treat the negated
15079 CR codes as NOTs, but not handle NOTs of them. */
15081 const char *const *t = 0;
15082 const char *s;
15083 enum rtx_code code = GET_CODE (x);
15084 static const char * const tbl[3][3] = {
15085 { "and", "andc", "nor" },
15086 { "or", "orc", "nand" },
15087 { "xor", "eqv", "xor" } };
15089 if (code == AND)
15090 t = tbl[0];
15091 else if (code == IOR)
15092 t = tbl[1];
15093 else if (code == XOR)
15094 t = tbl[2];
15095 else
15096 output_operand_lossage ("invalid %%q value");
15098 if (GET_CODE (XEXP (x, 0)) != NOT)
15099 s = t[0];
15100 else
15102 if (GET_CODE (XEXP (x, 1)) == NOT)
15103 s = t[2];
15104 else
15105 s = t[1];
15108 fputs (s, file);
15110 return;
15112 case 'Q':
15113 if (! TARGET_MFCRF)
15114 return;
15115 fputc (',', file);
15116 /* FALLTHRU */
15118 case 'R':
15119 /* X is a CR register. Print the mask for `mtcrf'. */
15120 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
15121 output_operand_lossage ("invalid %%R value");
15122 else
15123 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
15124 return;
15126 case 's':
15127 /* Low 5 bits of 32 - value */
15128 if (! INT_P (x))
15129 output_operand_lossage ("invalid %%s value");
15130 else
15131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
15132 return;
15134 case 'S':
15135 /* PowerPC64 mask position. All 0's is excluded.
15136 CONST_INT 32-bit mask is considered sign-extended so any
15137 transition must occur within the CONST_INT, not on the boundary. */
15138 if (! mask64_operand (x, DImode))
15139 output_operand_lossage ("invalid %%S value");
15141 uval = INT_LOWPART (x);
15143 if (uval & 1) /* Clear Left */
15145 #if HOST_BITS_PER_WIDE_INT > 64
15146 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
15147 #endif
15148 i = 64;
15150 else /* Clear Right */
15152 uval = ~uval;
15153 #if HOST_BITS_PER_WIDE_INT > 64
15154 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
15155 #endif
15156 i = 63;
15158 while (uval != 0)
15159 --i, uval >>= 1;
15160 gcc_assert (i >= 0);
15161 fprintf (file, "%d", i);
15162 return;
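/* Worked example (editor's note): for the "clear left" mask
   0x00000000ffffffff the low bit is set, so the loop above counts down
   from 64 while shifting the mask away and prints 32 -- the number of
   high-order bits the rldicl-style mask clears.  */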
15164 case 't':
15165 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
15166 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
15168 /* Bit 3 is OV bit. */
15169 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
15171 /* If we want bit 31, write a shift count of zero, not 32. */
15172 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15173 return;
15175 case 'T':
15176 /* Print the symbolic name of a branch target register. */
15177 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
15178 && REGNO (x) != CTR_REGNO))
15179 output_operand_lossage ("invalid %%T value");
15180 else if (REGNO (x) == LR_REGNO)
15181 fputs ("lr", file);
15182 else
15183 fputs ("ctr", file);
15184 return;
15186 case 'u':
15187 /* High-order 16 bits of constant for use in unsigned operand. */
15188 if (! INT_P (x))
15189 output_operand_lossage ("invalid %%u value");
15190 else
15191 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15192 (INT_LOWPART (x) >> 16) & 0xffff);
15193 return;
15195 case 'v':
15196 /* High-order 16 bits of constant for use in signed operand. */
15197 if (! INT_P (x))
15198 output_operand_lossage ("invalid %%v value");
15199 else
15200 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15201 (INT_LOWPART (x) >> 16) & 0xffff);
15202 return;
15204 case 'U':
15205 /* Print `u' if this has an auto-increment or auto-decrement. */
15206 if (MEM_P (x)
15207 && (GET_CODE (XEXP (x, 0)) == PRE_INC
15208 || GET_CODE (XEXP (x, 0)) == PRE_DEC
15209 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
15210 putc ('u', file);
15211 return;
15213 case 'V':
15214 /* Print the trap code for this operand. */
15215 switch (GET_CODE (x))
15217 case EQ:
15218 fputs ("eq", file); /* 4 */
15219 break;
15220 case NE:
15221 fputs ("ne", file); /* 24 */
15222 break;
15223 case LT:
15224 fputs ("lt", file); /* 16 */
15225 break;
15226 case LE:
15227 fputs ("le", file); /* 20 */
15228 break;
15229 case GT:
15230 fputs ("gt", file); /* 8 */
15231 break;
15232 case GE:
15233 fputs ("ge", file); /* 12 */
15234 break;
15235 case LTU:
15236 fputs ("llt", file); /* 2 */
15237 break;
15238 case LEU:
15239 fputs ("lle", file); /* 6 */
15240 break;
15241 case GTU:
15242 fputs ("lgt", file); /* 1 */
15243 break;
15244 case GEU:
15245 fputs ("lge", file); /* 5 */
15246 break;
15247 default:
15248 gcc_unreachable ();
15250 break;
15252 case 'w':
15253 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
15254 normally. */
15255 if (INT_P (x))
15256 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
15257 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
15258 else
15259 print_operand (file, x, 0);
15260 return;
15262 case 'W':
15263 /* MB value for a PowerPC64 rldic operand. */
15264 i = clz_hwi (GET_CODE (x) == CONST_INT
15265 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
15267 #if HOST_BITS_PER_WIDE_INT == 32
15268 if (GET_CODE (x) == CONST_INT && i > 0)
15269 i += 32; /* zero-extend high-part was all 0's */
15270 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
15271 i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
15272 #endif
15274 fprintf (file, "%d", i);
15275 return;
15277 case 'x':
15278 /* X is a FPR or Altivec register used in a VSX context. */
15279 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
15280 output_operand_lossage ("invalid %%x value");
15281 else
15283 int reg = REGNO (x);
15284 int vsx_reg = (FP_REGNO_P (reg)
15285 ? reg - 32
15286 : reg - FIRST_ALTIVEC_REGNO + 32);
15288 #ifdef TARGET_REGNAMES
15289 if (TARGET_REGNAMES)
15290 fprintf (file, "%%vs%d", vsx_reg);
15291 else
15292 #endif
15293 fprintf (file, "%d", vsx_reg);
15295 return;
15297 case 'X':
15298 if (MEM_P (x)
15299 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
15300 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
15301 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
15302 putc ('x', file);
15303 return;
15305 case 'Y':
15306 /* Like 'L', for third word of TImode. */
15307 if (REG_P (x))
15308 fputs (reg_names[REGNO (x) + 2], file);
15309 else if (MEM_P (x))
15311 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15312 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15313 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15314 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15315 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15316 else
15317 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
15318 if (small_data_operand (x, GET_MODE (x)))
15319 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15320 reg_names[SMALL_DATA_REG]);
15322 return;
15324 case 'z':
15325 /* X is a SYMBOL_REF. Write out the name preceded by a
15326 period and without any trailing data in brackets. Used for function
15327 names. If we are configured for System V (or the embedded ABI) on
15328 the PowerPC, do not emit the period, since those systems do not use
15329 TOCs and the like. */
15330 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15332 /* Mark the decl as referenced so that cgraph will output the
15333 function. */
15334 if (SYMBOL_REF_DECL (x))
15335 mark_decl_referenced (SYMBOL_REF_DECL (x));
15337 /* For macho, check to see if we need a stub. */
15338 if (TARGET_MACHO)
15340 const char *name = XSTR (x, 0);
15341 #if TARGET_MACHO
15342 if (darwin_emit_branch_islands
15343 && MACHOPIC_INDIRECT
15344 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
15345 name = machopic_indirection_name (x, /*stub_p=*/true);
15346 #endif
15347 assemble_name (file, name);
15349 else if (!DOT_SYMBOLS)
15350 assemble_name (file, XSTR (x, 0));
15351 else
15352 rs6000_output_function_entry (file, XSTR (x, 0));
15353 return;
15355 case 'Z':
15356 /* Like 'L', for last word of TImode. */
15357 if (REG_P (x))
15358 fputs (reg_names[REGNO (x) + 3], file);
15359 else if (MEM_P (x))
15361 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15362 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15363 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15364 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15365 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15366 else
15367 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15368 if (small_data_operand (x, GET_MODE (x)))
15369 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15370 reg_names[SMALL_DATA_REG]);
15372 return;
15374 /* Print AltiVec or SPE memory operand. */
15375 case 'y':
15377 rtx tmp;
15379 gcc_assert (MEM_P (x));
15381 tmp = XEXP (x, 0);
15383 /* Ugly hack because %y is overloaded. */
15384 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15385 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15386 || GET_MODE (x) == TFmode
15387 || GET_MODE (x) == TImode))
15389 /* Handle [reg]. */
15390 if (REG_P (tmp))
15392 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15393 break;
15395 /* Handle [reg+UIMM]. */
15396 else if (GET_CODE (tmp) == PLUS
15397 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15399 int offset;
15401 gcc_assert (REG_P (XEXP (tmp, 0)));
15403 offset = INTVAL (XEXP (tmp, 1));
15404 fprintf (file, "%d(%s)", offset, reg_names[REGNO (XEXP (tmp, 0))]);
15405 break;
15408 /* Fall through. Must be [reg+reg]. */
15410 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15411 && GET_CODE (tmp) == AND
15412 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15413 && INTVAL (XEXP (tmp, 1)) == -16)
15414 tmp = XEXP (tmp, 0);
15415 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15416 && GET_CODE (tmp) == PRE_MODIFY)
15417 tmp = XEXP (tmp, 1);
15418 if (REG_P (tmp))
15419 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15420 else
15422 if (GET_CODE (tmp) != PLUS
15423 || !REG_P (XEXP (tmp, 0))
15424 || !REG_P (XEXP (tmp, 1)))
15426 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15427 break;
15430 if (REGNO (XEXP (tmp, 0)) == 0)
15431 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15432 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15433 else
15434 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15435 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15437 break;
15440 case 0:
15441 if (REG_P (x))
15442 fprintf (file, "%s", reg_names[REGNO (x)]);
15443 else if (MEM_P (x))
15445 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15446 know the width from the mode. */
15447 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15448 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15449 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15450 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15451 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15452 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15453 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15454 output_address (XEXP (XEXP (x, 0), 1));
15455 else
15456 output_address (XEXP (x, 0));
15458 else
15460 if (toc_relative_expr_p (x, false))
15461 /* This hack along with a corresponding hack in
15462 rs6000_output_addr_const_extra arranges to output addends
15463 where the assembler expects to find them. E.g.
15464 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15465 without this hack would be output as "x@toc+4". We
15466 want "x+4@toc". */
15467 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15468 else
15469 output_addr_const (file, x);
15471 return;
15473 case '&':
15474 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15475 return;
15477 default:
15478 output_operand_lossage ("invalid %%xn code");
15482 /* Print the address of an operand. */
15484 void
15485 print_operand_address (FILE *file, rtx x)
15487 if (REG_P (x))
15488 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15489 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15490 || GET_CODE (x) == LABEL_REF)
15492 output_addr_const (file, x);
15493 if (small_data_operand (x, GET_MODE (x)))
15494 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15495 reg_names[SMALL_DATA_REG]);
15496 else
15497 gcc_assert (!TARGET_TOC);
15499 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15500 && REG_P (XEXP (x, 1)))
15502 if (REGNO (XEXP (x, 0)) == 0)
15503 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15504 reg_names[ REGNO (XEXP (x, 0)) ]);
15505 else
15506 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15507 reg_names[ REGNO (XEXP (x, 1)) ]);
15509 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15510 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15511 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15512 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15513 #if TARGET_MACHO
15514 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15515 && CONSTANT_P (XEXP (x, 1)))
15517 fprintf (file, "lo16(");
15518 output_addr_const (file, XEXP (x, 1));
15519 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15521 #endif
15522 #if TARGET_ELF
15523 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15524 && CONSTANT_P (XEXP (x, 1)))
15526 output_addr_const (file, XEXP (x, 1));
15527 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15529 #endif
15530 else if (toc_relative_expr_p (x, false))
15532 /* This hack along with a corresponding hack in
15533 rs6000_output_addr_const_extra arranges to output addends
15534 where the assembler expects to find them. E.g.
15535 (lo_sum (reg 9)
15536 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15537 without this hack would be output as "x@toc+8@l(9)". We
15538 want "x+8@toc@l(9)". */
15539 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15540 if (GET_CODE (x) == LO_SUM)
15541 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15542 else
15543 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15545 else
15546 gcc_unreachable ();
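/* Editor's summary of the address forms printed above (illustrative):
   "0(9)" for a plain register, "8(3)" for reg+constant, "9,10" for
   reg+reg with r0 forced into the second slot, "var@sda21(0)" for small
   data, "lo16(sym)(9)" on Darwin and "sym@l(9)" on ELF for LO_SUM, and
   "sym@toc(2)"-style output for TOC-relative references.  */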
15549 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15551 static bool
15552 rs6000_output_addr_const_extra (FILE *file, rtx x)
15554 if (GET_CODE (x) == UNSPEC)
15555 switch (XINT (x, 1))
15557 case UNSPEC_TOCREL:
15558 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15559 && REG_P (XVECEXP (x, 0, 1))
15560 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15561 output_addr_const (file, XVECEXP (x, 0, 0));
15562 if (x == tocrel_base && tocrel_offset != const0_rtx)
15564 if (INTVAL (tocrel_offset) >= 0)
15565 fprintf (file, "+");
15566 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15568 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15570 putc ('-', file);
15571 assemble_name (file, toc_label_name);
15573 else if (TARGET_ELF)
15574 fputs ("@toc", file);
15575 return true;
15577 #if TARGET_MACHO
15578 case UNSPEC_MACHOPIC_OFFSET:
15579 output_addr_const (file, XVECEXP (x, 0, 0));
15580 putc ('-', file);
15581 machopic_output_function_base_name (file);
15582 return true;
15583 #endif
15585 return false;
15588 /* Target hook for assembling integer objects. The PowerPC version has
15589 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15590 is defined. It also needs to handle DI-mode objects on 64-bit
15591 targets. */
15593 static bool
15594 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15596 #ifdef RELOCATABLE_NEEDS_FIXUP
15597 /* Special handling for SI values. */
15598 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15600 static int recurse = 0;
15602 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15603 the .fixup section. Since the TOC section is already relocated, we
15604 don't need to mark it here. We used to skip the text section, but it
15605 should never be valid for relocated addresses to be placed in the text
15606 section. */
15607 if (TARGET_RELOCATABLE
15608 && in_section != toc_section
15609 && !recurse
15610 && GET_CODE (x) != CONST_INT
15611 && GET_CODE (x) != CONST_DOUBLE
15612 && CONSTANT_P (x))
15614 char buf[256];
15616 recurse = 1;
15617 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15618 fixuplabelno++;
15619 ASM_OUTPUT_LABEL (asm_out_file, buf);
15620 fprintf (asm_out_file, "\t.long\t(");
15621 output_addr_const (asm_out_file, x);
15622 fprintf (asm_out_file, ")@fixup\n");
15623 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15624 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15625 fprintf (asm_out_file, "\t.long\t");
15626 assemble_name (asm_out_file, buf);
15627 fprintf (asm_out_file, "\n\t.previous\n");
15628 recurse = 0;
15629 return true;
15631 /* Remove initial .'s to turn a -mcall-aixdesc function
15632 address into the address of the descriptor, not the function
15633 itself. */
15634 else if (GET_CODE (x) == SYMBOL_REF
15635 && XSTR (x, 0)[0] == '.'
15636 && DEFAULT_ABI == ABI_AIX)
15638 const char *name = XSTR (x, 0);
15639 while (*name == '.')
15640 name++;
15642 fprintf (asm_out_file, "\t.long\t%s\n", name);
15643 return true;
15646 #endif /* RELOCATABLE_NEEDS_FIXUP */
15647 return default_assemble_integer (x, size, aligned_p);
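/* Editor's illustration of the -mrelocatable fixup emitted above, with an
   assumed symbol and label number:

       .LCP1:
               .long (sym)@fixup
               .section ".fixup","aw"
               .align 2
               .long .LCP1
               .previous

   The word itself is tagged @fixup and its address is recorded in the
   .fixup section for the startup relocation code to patch.  */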
15650 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15651 /* Emit an assembler directive to set symbol visibility for DECL to
15652 VISIBILITY_TYPE. */
15654 static void
15655 rs6000_assemble_visibility (tree decl, int vis)
15657 if (TARGET_XCOFF)
15658 return;
15660 /* Functions need to have their entry point symbol visibility set as
15661 well as their descriptor symbol visibility. */
15662 if (DEFAULT_ABI == ABI_AIX
15663 && DOT_SYMBOLS
15664 && TREE_CODE (decl) == FUNCTION_DECL)
15666 static const char * const visibility_types[] = {
15667 NULL, "internal", "hidden", "protected"
15670 const char *name, *type;
15672 name = ((* targetm.strip_name_encoding)
15673 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15674 type = visibility_types[vis];
15676 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15677 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15679 else
15680 default_assemble_visibility (decl, vis);
15682 #endif
15684 enum rtx_code
15685 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15687 /* Reversal of FP compares takes care -- an ordered compare
15688 becomes an unordered compare and vice versa. */
15689 if (mode == CCFPmode
15690 && (!flag_finite_math_only
15691 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15692 || code == UNEQ || code == LTGT))
15693 return reverse_condition_maybe_unordered (code);
15694 else
15695 return reverse_condition (code);
15698 /* Generate a compare for CODE. Return a brand-new rtx that
15699 represents the result of the compare. */
15701 static rtx
15702 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15704 enum machine_mode comp_mode;
15705 rtx compare_result;
15706 enum rtx_code code = GET_CODE (cmp);
15707 rtx op0 = XEXP (cmp, 0);
15708 rtx op1 = XEXP (cmp, 1);
15710 if (FLOAT_MODE_P (mode))
15711 comp_mode = CCFPmode;
15712 else if (code == GTU || code == LTU
15713 || code == GEU || code == LEU)
15714 comp_mode = CCUNSmode;
15715 else if ((code == EQ || code == NE)
15716 && unsigned_reg_p (op0)
15717 && (unsigned_reg_p (op1)
15718 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
15719 /* These are unsigned values; perhaps there will be a later
15720 ordering compare that can be shared with this one. */
15721 comp_mode = CCUNSmode;
15722 else
15723 comp_mode = CCmode;
15725 /* If we have an unsigned compare, make sure we don't have a signed value as
15726 an immediate. */
15727 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
15728 && INTVAL (op1) < 0)
15730 op0 = copy_rtx_if_shared (op0);
15731 op1 = force_reg (GET_MODE (op0), op1);
15732 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
15735 /* First, the compare. */
15736 compare_result = gen_reg_rtx (comp_mode);
15738 /* E500 FP compare instructions on the GPRs. Yuck! */
15739 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15740 && FLOAT_MODE_P (mode))
15742 rtx cmp, or_result, compare_result2;
15743 enum machine_mode op_mode = GET_MODE (op0);
15745 if (op_mode == VOIDmode)
15746 op_mode = GET_MODE (op1);
15748 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15749 This explains the following mess. */
15751 switch (code)
15753 case EQ: case UNEQ: case NE: case LTGT:
15754 switch (op_mode)
15756 case SFmode:
15757 cmp = (flag_finite_math_only && !flag_trapping_math)
15758 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15759 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15760 break;
15762 case DFmode:
15763 cmp = (flag_finite_math_only && !flag_trapping_math)
15764 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15765 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15766 break;
15768 case TFmode:
15769 cmp = (flag_finite_math_only && !flag_trapping_math)
15770 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15771 : gen_cmptfeq_gpr (compare_result, op0, op1);
15772 break;
15774 default:
15775 gcc_unreachable ();
15777 break;
15779 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15780 switch (op_mode)
15782 case SFmode:
15783 cmp = (flag_finite_math_only && !flag_trapping_math)
15784 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15785 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15786 break;
15788 case DFmode:
15789 cmp = (flag_finite_math_only && !flag_trapping_math)
15790 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15791 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15792 break;
15794 case TFmode:
15795 cmp = (flag_finite_math_only && !flag_trapping_math)
15796 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15797 : gen_cmptfgt_gpr (compare_result, op0, op1);
15798 break;
15800 default:
15801 gcc_unreachable ();
15803 break;
15805 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15806 switch (op_mode)
15808 case SFmode:
15809 cmp = (flag_finite_math_only && !flag_trapping_math)
15810 ? gen_tstsflt_gpr (compare_result, op0, op1)
15811 : gen_cmpsflt_gpr (compare_result, op0, op1);
15812 break;
15814 case DFmode:
15815 cmp = (flag_finite_math_only && !flag_trapping_math)
15816 ? gen_tstdflt_gpr (compare_result, op0, op1)
15817 : gen_cmpdflt_gpr (compare_result, op0, op1);
15818 break;
15820 case TFmode:
15821 cmp = (flag_finite_math_only && !flag_trapping_math)
15822 ? gen_tsttflt_gpr (compare_result, op0, op1)
15823 : gen_cmptflt_gpr (compare_result, op0, op1);
15824 break;
15826 default:
15827 gcc_unreachable ();
15829 break;
15830 default:
15831 gcc_unreachable ();
15834 /* Synthesize LE and GE from LT/GT || EQ. */
15835 if (code == LE || code == GE || code == LEU || code == GEU)
15837 emit_insn (cmp);
15839 switch (code)
15841 case LE: code = LT; break;
15842 case GE: code = GT; break;
15843 case LEU: code = LT; break;
15844 case GEU: code = GT; break;
15845 default: gcc_unreachable ();
15848 compare_result2 = gen_reg_rtx (CCFPmode);
15850 /* Do the EQ. */
15851 switch (op_mode)
15853 case SFmode:
15854 cmp = (flag_finite_math_only && !flag_trapping_math)
15855 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15856 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15857 break;
15859 case DFmode:
15860 cmp = (flag_finite_math_only && !flag_trapping_math)
15861 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15862 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15863 break;
15865 case TFmode:
15866 cmp = (flag_finite_math_only && !flag_trapping_math)
15867 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15868 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15869 break;
15871 default:
15872 gcc_unreachable ();
15874 emit_insn (cmp);
15876 /* OR them together. */
15877 or_result = gen_reg_rtx (CCFPmode);
15878 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15879 compare_result2);
15880 compare_result = or_result;
15881 code = EQ;
15883 else
15885 if (code == NE || code == LTGT)
15886 code = NE;
15887 else
15888 code = EQ;
15891 emit_insn (cmp);
15893 else
15895 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15896 CLOBBERs to match cmptf_internal2 pattern. */
15897 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15898 && GET_MODE (op0) == TFmode
15899 && !TARGET_IEEEQUAD
15900 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15901 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15902 gen_rtvec (10,
15903 gen_rtx_SET (VOIDmode,
15904 compare_result,
15905 gen_rtx_COMPARE (comp_mode, op0, op1)),
15906 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15907 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15908 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15909 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15910 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15911 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15912 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15913 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15914 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
15915 else if (GET_CODE (op1) == UNSPEC
15916 && XINT (op1, 1) == UNSPEC_SP_TEST)
15918 rtx op1b = XVECEXP (op1, 0, 0);
15919 comp_mode = CCEQmode;
15920 compare_result = gen_reg_rtx (CCEQmode);
15921 if (TARGET_64BIT)
15922 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15923 else
15924 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15926 else
15927 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15928 gen_rtx_COMPARE (comp_mode, op0, op1)));
15931 /* Some kinds of FP comparisons need an OR operation;
15932 under flag_finite_math_only we don't bother. */
15933 if (FLOAT_MODE_P (mode)
15934 && !flag_finite_math_only
15935 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15936 && (code == LE || code == GE
15937 || code == UNEQ || code == LTGT
15938 || code == UNGT || code == UNLT))
15940 enum rtx_code or1, or2;
15941 rtx or1_rtx, or2_rtx, compare2_rtx;
15942 rtx or_result = gen_reg_rtx (CCEQmode);
15944 switch (code)
15946 case LE: or1 = LT; or2 = EQ; break;
15947 case GE: or1 = GT; or2 = EQ; break;
15948 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15949 case LTGT: or1 = LT; or2 = GT; break;
15950 case UNGT: or1 = UNORDERED; or2 = GT; break;
15951 case UNLT: or1 = UNORDERED; or2 = LT; break;
15952 default: gcc_unreachable ();
15954 validate_condition_mode (or1, comp_mode);
15955 validate_condition_mode (or2, comp_mode);
15956 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15957 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15958 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15959 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15960 const_true_rtx);
15961 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15963 compare_result = or_result;
15964 code = EQ;
15967 validate_condition_mode (code, GET_MODE (compare_result));
15969 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
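/* Worked example (editor's note): a floating-point LE is split by the
   table above into LT and EQ tests on the same CR field; the two bits
   are combined into a CCEQ register with a cror-style IOR, and the
   caller is handed back a plain EQ test of that register.  GE, UNEQ,
   LTGT, UNGT and UNLT are handled the same way.  */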
15973 /* Emit the RTL for an sISEL pattern. */
15975 void
15976 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
15978 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
15981 void
15982 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15984 rtx condition_rtx;
15985 enum machine_mode op_mode;
15986 enum rtx_code cond_code;
15987 rtx result = operands[0];
15989 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15991 rs6000_emit_sISEL (mode, operands);
15992 return;
15995 condition_rtx = rs6000_generate_compare (operands[1], mode);
15996 cond_code = GET_CODE (condition_rtx);
15998 if (FLOAT_MODE_P (mode)
15999 && !TARGET_FPRS && TARGET_HARD_FLOAT)
16001 rtx t;
16003 PUT_MODE (condition_rtx, SImode);
16004 t = XEXP (condition_rtx, 0);
16006 gcc_assert (cond_code == NE || cond_code == EQ);
16008 if (cond_code == NE)
16009 emit_insn (gen_e500_flip_gt_bit (t, t));
16011 emit_insn (gen_move_from_CR_gt_bit (result, t));
16012 return;
16015 if (cond_code == NE
16016 || cond_code == GE || cond_code == LE
16017 || cond_code == GEU || cond_code == LEU
16018 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
16020 rtx not_result = gen_reg_rtx (CCEQmode);
16021 rtx not_op, rev_cond_rtx;
16022 enum machine_mode cc_mode;
16024 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
16026 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
16027 SImode, XEXP (condition_rtx, 0), const0_rtx);
16028 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
16029 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
16030 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
16033 op_mode = GET_MODE (XEXP (operands[1], 0));
16034 if (op_mode == VOIDmode)
16035 op_mode = GET_MODE (XEXP (operands[1], 1));
16037 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
16039 PUT_MODE (condition_rtx, DImode);
16040 convert_move (result, condition_rtx, 0);
16042 else
16044 PUT_MODE (condition_rtx, SImode);
16045 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
16049 /* Emit a branch of kind CODE to location LOC. */
16051 void
16052 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
16054 rtx condition_rtx, loc_ref;
16056 condition_rtx = rs6000_generate_compare (operands[0], mode);
16057 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
16058 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
16059 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
16060 loc_ref, pc_rtx)));
16063 /* Return the string to output a conditional branch to LABEL, which is
16064 the operand template of the label, or NULL if the branch is really a
16065 conditional return.
16067 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
16068 condition code register and its mode specifies what kind of
16069 comparison we made.
16071 REVERSED is nonzero if we should reverse the sense of the comparison.
16073 INSN is the insn. */
16075 char *
16076 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
16078 static char string[64];
16079 enum rtx_code code = GET_CODE (op);
16080 rtx cc_reg = XEXP (op, 0);
16081 enum machine_mode mode = GET_MODE (cc_reg);
16082 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
16083 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
16084 int really_reversed = reversed ^ need_longbranch;
16085 char *s = string;
16086 const char *ccode;
16087 const char *pred;
16088 rtx note;
16090 validate_condition_mode (code, mode);
16092 /* Work out which way this really branches. We could use
16093 reverse_condition_maybe_unordered here always, but distinguishing
16094 the ordered case makes the resulting assembler clearer. */
16095 if (really_reversed)
16097 /* Reversal of FP compares takes care -- an ordered compare
16098 becomes an unordered compare and vice versa. */
16099 if (mode == CCFPmode)
16100 code = reverse_condition_maybe_unordered (code);
16101 else
16102 code = reverse_condition (code);
16105 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
16107 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
16108 to the GT bit. */
16109 switch (code)
16111 case EQ:
16112 /* Opposite of GT. */
16113 code = GT;
16114 break;
16116 case NE:
16117 code = UNLE;
16118 break;
16120 default:
16121 gcc_unreachable ();
16125 switch (code)
16127 /* Not all of these are actually distinct opcodes, but
16128 we distinguish them for clarity of the resulting assembler. */
16129 case NE: case LTGT:
16130 ccode = "ne"; break;
16131 case EQ: case UNEQ:
16132 ccode = "eq"; break;
16133 case GE: case GEU:
16134 ccode = "ge"; break;
16135 case GT: case GTU: case UNGT:
16136 ccode = "gt"; break;
16137 case LE: case LEU:
16138 ccode = "le"; break;
16139 case LT: case LTU: case UNLT:
16140 ccode = "lt"; break;
16141 case UNORDERED: ccode = "un"; break;
16142 case ORDERED: ccode = "nu"; break;
16143 case UNGE: ccode = "nl"; break;
16144 case UNLE: ccode = "ng"; break;
16145 default:
16146 gcc_unreachable ();
16149 /* Maybe we have a guess as to how likely the branch is. */
16150 pred = "";
16151 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
16152 if (note != NULL_RTX)
16154 /* PROB is the difference from 50%. */
16155 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
16157 /* Only hint for highly probable/improbable branches on newer
16158 cpus as static prediction overrides processor dynamic
16159 prediction. For older cpus we may as well always hint, but
16160 assume not taken for branches that are very close to 50% as a
16161 mispredicted taken branch is more expensive than a
16162 mispredicted not-taken branch. */
16163 if (rs6000_always_hint
16164 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
16165 && br_prob_note_reliable_p (note)))
16167 if (abs (prob) > REG_BR_PROB_BASE / 20
16168 && ((prob > 0) ^ need_longbranch))
16169 pred = "+";
16170 else
16171 pred = "-";
16175 if (label == NULL)
16176 s += sprintf (s, "b%slr%s ", ccode, pred);
16177 else
16178 s += sprintf (s, "b%s%s ", ccode, pred);
16180 /* We need to escape any '%' characters in the reg_names string.
16181 Assume they'd only be the first character.... */
16182 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
16183 *s++ = '%';
16184 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
16186 if (label != NULL)
16188 /* If the branch distance was too far, we may have to use an
16189 unconditional branch to go the distance. */
16190 if (need_longbranch)
16191 s += sprintf (s, ",$+8\n\tb %s", label);
16192 else
16193 s += sprintf (s, ",%s", label);
16196 return string;
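/* Editor's illustration (assumed operands): with the condition in cr6 and
   a reliable prediction note this yields e.g. "beq+ 6,.L5"; when the
   target is out of range the sense is inverted and a long branch is
   emitted instead: "bne- 6,$+8" followed by "b .L5".  */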
16199 /* Return the string to flip the GT bit on a CR. */
16200 char *
16201 output_e500_flip_gt_bit (rtx dst, rtx src)
16203 static char string[64];
16204 int a, b;
16206 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
16207 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
16209 /* GT bit. */
16210 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
16211 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
16213 sprintf (string, "crnot %d,%d", a, b);
16214 return string;
16217 /* Return insn for VSX or Altivec comparisons. */
16219 static rtx
16220 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
16222 rtx mask;
16223 enum machine_mode mode = GET_MODE (op0);
16225 switch (code)
16227 default:
16228 break;
16230 case GE:
16231 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16232 return NULL_RTX;
16234 case EQ:
16235 case GT:
16236 case GTU:
16237 case ORDERED:
16238 case UNORDERED:
16239 case UNEQ:
16240 case LTGT:
16241 mask = gen_reg_rtx (mode);
16242 emit_insn (gen_rtx_SET (VOIDmode,
16243 mask,
16244 gen_rtx_fmt_ee (code, mode, op0, op1)));
16245 return mask;
16248 return NULL_RTX;
16251 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
16252 DMODE is the expected destination mode. This is a recursive function. */
16254 static rtx
16255 rs6000_emit_vector_compare (enum rtx_code rcode,
16256 rtx op0, rtx op1,
16257 enum machine_mode dmode)
16259 rtx mask;
16260 bool swap_operands = false;
16261 bool try_again = false;
16263 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
16264 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
16266 /* See if the comparison works as is. */
16267 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16268 if (mask)
16269 return mask;
16271 switch (rcode)
16273 case LT:
16274 rcode = GT;
16275 swap_operands = true;
16276 try_again = true;
16277 break;
16278 case LTU:
16279 rcode = GTU;
16280 swap_operands = true;
16281 try_again = true;
16282 break;
16283 case NE:
16284 case UNLE:
16285 case UNLT:
16286 case UNGE:
16287 case UNGT:
16288 /* Invert condition and try again.
16289 e.g., A != B becomes ~(A==B). */
16291 enum rtx_code rev_code;
16292 enum insn_code nor_code;
16293 rtx mask2;
16295 rev_code = reverse_condition_maybe_unordered (rcode);
16296 if (rev_code == UNKNOWN)
16297 return NULL_RTX;
16299 nor_code = optab_handler (one_cmpl_optab, dmode);
16300 if (nor_code == CODE_FOR_nothing)
16301 return NULL_RTX;
16303 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
16304 if (!mask2)
16305 return NULL_RTX;
16307 mask = gen_reg_rtx (dmode);
16308 emit_insn (GEN_FCN (nor_code) (mask, mask2));
16309 return mask;
16311 break;
16312 case GE:
16313 case GEU:
16314 case LE:
16315 case LEU:
16316 /* Try GT/GTU/LT/LTU OR EQ */
16318 rtx c_rtx, eq_rtx;
16319 enum insn_code ior_code;
16320 enum rtx_code new_code;
16322 switch (rcode)
16324 case GE:
16325 new_code = GT;
16326 break;
16328 case GEU:
16329 new_code = GTU;
16330 break;
16332 case LE:
16333 new_code = LT;
16334 break;
16336 case LEU:
16337 new_code = LTU;
16338 break;
16340 default:
16341 gcc_unreachable ();
16344 ior_code = optab_handler (ior_optab, dmode);
16345 if (ior_code == CODE_FOR_nothing)
16346 return NULL_RTX;
16348 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
16349 if (!c_rtx)
16350 return NULL_RTX;
16352 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
16353 if (!eq_rtx)
16354 return NULL_RTX;
16356 mask = gen_reg_rtx (dmode);
16357 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
16358 return mask;
16360 break;
16361 default:
16362 return NULL_RTX;
16365 if (try_again)
16367 if (swap_operands)
16369 rtx tmp;
16370 tmp = op0;
16371 op0 = op1;
16372 op1 = tmp;
16375 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16376 if (mask)
16377 return mask;
16380 /* You only get two chances. */
16381 return NULL_RTX;
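/* Worked example (editor's note): a vector LE first recurses via the
   GT/GTU/LT/LTU-or-EQ case above; the LT leg has no direct instruction
   either, so it is retried as GT with the operands swapped, and the two
   masks are then combined with the vector IOR optab.  An unordered
   request such as UNLE instead reverses to GT and inverts the mask with
   the one's-complement optab.  */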
16384 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16385 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16386 operands for the relation operation COND. */
16388 int
16389 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16390 rtx cond, rtx cc_op0, rtx cc_op1)
16392 enum machine_mode dest_mode = GET_MODE (dest);
16393 enum machine_mode mask_mode = GET_MODE (cc_op0);
16394 enum rtx_code rcode = GET_CODE (cond);
16395 enum machine_mode cc_mode = CCmode;
16396 rtx mask;
16397 rtx cond2;
16398 rtx tmp;
16399 bool invert_move = false;
16401 if (VECTOR_UNIT_NONE_P (dest_mode))
16402 return 0;
16404 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16405 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16407 switch (rcode)
16409 /* Swap operands if we can, and otherwise fall back to doing the
16410 operation as specified and using a NOR to invert the test. */
16411 case NE:
16412 case UNLE:
16413 case UNLT:
16414 case UNGE:
16415 case UNGT:
16416 /* Invert condition and try again.
16417 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16418 invert_move = true;
16419 rcode = reverse_condition_maybe_unordered (rcode);
16420 if (rcode == UNKNOWN)
16421 return 0;
16422 break;
16424 /* Mark unsigned tests with CCUNSmode. */
16425 case GTU:
16426 case GEU:
16427 case LTU:
16428 case LEU:
16429 cc_mode = CCUNSmode;
16430 break;
16432 default:
16433 break;
16436 /* Get the vector mask for the given relational operations. */
16437 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16439 if (!mask)
16440 return 0;
16442 if (invert_move)
16444 tmp = op_true;
16445 op_true = op_false;
16446 op_false = tmp;
16449 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16450 CONST0_RTX (dest_mode));
16451 emit_insn (gen_rtx_SET (VOIDmode,
16452 dest,
16453 gen_rtx_IF_THEN_ELSE (dest_mode,
16454 cond2,
16455 op_true,
16456 op_false)));
16457 return 1;
16460 /* Emit a conditional move: move TRUE_COND to DEST if OP, applied to
16461 the operands of the last comparison, is nonzero/true, FALSE_COND if it
16462 is zero/false. Return 0 if the hardware has no such operation. */
16464 int
16465 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16467 enum rtx_code code = GET_CODE (op);
16468 rtx op0 = XEXP (op, 0);
16469 rtx op1 = XEXP (op, 1);
16470 REAL_VALUE_TYPE c1;
16471 enum machine_mode compare_mode = GET_MODE (op0);
16472 enum machine_mode result_mode = GET_MODE (dest);
16473 rtx temp;
16474 bool is_against_zero;
16476 /* These modes should always match. */
16477 if (GET_MODE (op1) != compare_mode
16478 /* In the isel case, however, we can use a compare immediate, so
16479 op1 may be a small constant. */
16480 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16481 return 0;
16482 if (GET_MODE (true_cond) != result_mode)
16483 return 0;
16484 if (GET_MODE (false_cond) != result_mode)
16485 return 0;
16487 /* Don't allow using floating point comparisons for integer results for
16488 now. */
16489 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
16490 return 0;
16492 /* First, work out if the hardware can do this at all, or
16493 if it's too slow.... */
16494 if (!FLOAT_MODE_P (compare_mode))
16496 if (TARGET_ISEL)
16497 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16498 return 0;
16500 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16501 && SCALAR_FLOAT_MODE_P (compare_mode))
16502 return 0;
16504 is_against_zero = op1 == CONST0_RTX (compare_mode);
16506 /* A floating-point subtract might overflow, underflow, or produce
16507 an inexact result, thus changing the floating-point flags, so it
16508 can't be generated if we care about that. It's safe if one side
16509 of the construct is zero, since then no subtract will be
16510 generated. */
16511 if (SCALAR_FLOAT_MODE_P (compare_mode)
16512 && flag_trapping_math && ! is_against_zero)
16513 return 0;
16515 /* Eliminate half of the comparisons by switching operands, this
16516 makes the remaining code simpler. */
16517 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16518 || code == LTGT || code == LT || code == UNLE)
16520 code = reverse_condition_maybe_unordered (code);
16521 temp = true_cond;
16522 true_cond = false_cond;
16523 false_cond = temp;
16526 /* UNEQ and LTGT take four instructions for a comparison with zero,
16527 so it'll probably be faster to use a branch here too. */
16528 if (code == UNEQ && HONOR_NANS (compare_mode))
16529 return 0;
16531 if (GET_CODE (op1) == CONST_DOUBLE)
16532 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16534 /* We're going to try to implement comparisons by performing
16535 a subtract, then comparing against zero. Unfortunately,
16536 Inf - Inf is NaN, which is not zero, so if we don't
16537 know that the operand is finite and the comparison
16538 would treat EQ differently from UNORDERED, we can't do it. */
16539 if (HONOR_INFINITIES (compare_mode)
16540 && code != GT && code != UNGE
16541 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16542 /* Constructs of the form (a OP b ? a : b) are safe. */
16543 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16544 || (! rtx_equal_p (op0, true_cond)
16545 && ! rtx_equal_p (op1, true_cond))))
16546 return 0;
16548 /* At this point we know we can use fsel. */
16550 /* Reduce the comparison to a comparison against zero. */
16551 if (! is_against_zero)
16553 temp = gen_reg_rtx (compare_mode);
16554 emit_insn (gen_rtx_SET (VOIDmode, temp,
16555 gen_rtx_MINUS (compare_mode, op0, op1)));
16556 op0 = temp;
16557 op1 = CONST0_RTX (compare_mode);
16560 /* If we don't care about NaNs we can reduce some of the comparisons
16561 down to faster ones. */
16562 if (! HONOR_NANS (compare_mode))
16563 switch (code)
16565 case GT:
16566 code = LE;
16567 temp = true_cond;
16568 true_cond = false_cond;
16569 false_cond = temp;
16570 break;
16571 case UNGE:
16572 code = GE;
16573 break;
16574 case UNEQ:
16575 code = EQ;
16576 break;
16577 default:
16578 break;
16581 /* Now, reduce everything down to a GE. */
16582 switch (code)
16584 case GE:
16585 break;
16587 case LE:
16588 temp = gen_reg_rtx (compare_mode);
16589 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16590 op0 = temp;
16591 break;
16593 case ORDERED:
16594 temp = gen_reg_rtx (compare_mode);
16595 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16596 op0 = temp;
16597 break;
16599 case EQ:
16600 temp = gen_reg_rtx (compare_mode);
16601 emit_insn (gen_rtx_SET (VOIDmode, temp,
16602 gen_rtx_NEG (compare_mode,
16603 gen_rtx_ABS (compare_mode, op0))));
16604 op0 = temp;
16605 break;
16607 case UNGE:
16608 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16609 temp = gen_reg_rtx (result_mode);
16610 emit_insn (gen_rtx_SET (VOIDmode, temp,
16611 gen_rtx_IF_THEN_ELSE (result_mode,
16612 gen_rtx_GE (VOIDmode,
16613 op0, op1),
16614 true_cond, false_cond)));
16615 false_cond = true_cond;
16616 true_cond = temp;
16618 temp = gen_reg_rtx (compare_mode);
16619 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16620 op0 = temp;
16621 break;
16623 case GT:
16624 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16625 temp = gen_reg_rtx (result_mode);
16626 emit_insn (gen_rtx_SET (VOIDmode, temp,
16627 gen_rtx_IF_THEN_ELSE (result_mode,
16628 gen_rtx_GE (VOIDmode,
16629 op0, op1),
16630 true_cond, false_cond)));
16631 true_cond = false_cond;
16632 false_cond = temp;
16634 temp = gen_reg_rtx (compare_mode);
16635 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16636 op0 = temp;
16637 break;
16639 default:
16640 gcc_unreachable ();
16643 emit_insn (gen_rtx_SET (VOIDmode, dest,
16644 gen_rtx_IF_THEN_ELSE (result_mode,
16645 gen_rtx_GE (VOIDmode,
16646 op0, op1),
16647 true_cond, false_cond)));
16648 return 1;
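/* A worked sketch of the fsel path (names illustrative), assuming the
   safety checks above pass (e.g. finite math and -fno-trapping-math):
   for

       dest = (a >= b) ? t : f

   the comparison is first reduced to a comparison against zero and a
   single floating select is emitted:

       fsub fT,fA,fB          ; temp = a - b
       fsel fD,fT,fTRUE,fFALSE ; dest = (temp >= 0.0) ? t : f  */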
16651 /* Same as above, but for ints (isel). */
16653 static int
16654 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16656 rtx condition_rtx, cr;
16657 enum machine_mode mode = GET_MODE (dest);
16658 enum rtx_code cond_code;
16659 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
16660 bool signedp;
16662 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16663 return 0;
16665 /* We still have to do the compare, because isel doesn't do a
16666 compare; it just looks at the CRx bits set by a previous compare
16667 instruction. */
16668 condition_rtx = rs6000_generate_compare (op, mode);
16669 cond_code = GET_CODE (condition_rtx);
16670 cr = XEXP (condition_rtx, 0);
16671 signedp = GET_MODE (cr) == CCmode;
16673 isel_func = (mode == SImode
16674 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
16675 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
16677 switch (cond_code)
16679 case LT: case GT: case LTU: case GTU: case EQ:
16680 /* isel handles these directly. */
16681 break;
16683 default:
16684 /* We need to swap the sense of the comparison. */
16686 rtx t = true_cond;
16687 true_cond = false_cond;
16688 false_cond = t;
16689 PUT_CODE (condition_rtx, reverse_condition (cond_code));
16691 break;
16694 false_cond = force_reg (mode, false_cond);
16695 if (true_cond != const0_rtx)
16696 true_cond = force_reg (mode, true_cond);
16698 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
16700 return 1;
16703 const char *
16704 output_isel (rtx *operands)
16706 enum rtx_code code;
16708 code = GET_CODE (operands[1]);
16710 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16712 gcc_assert (GET_CODE (operands[2]) == REG
16713 && GET_CODE (operands[3]) == REG);
16714 PUT_CODE (operands[1], reverse_condition (code));
16715 return "isel %0,%3,%2,%j1";
16718 return "isel %0,%2,%3,%j1";
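/* Example (sketch): for a source condition of GE on cr0, GE is not one
   of the single CR bits isel can test, so the code above rewrites it
   as LT and swaps the two data operands, emitting something like

       isel r3,r5,r4,0        ; r3 = cr0.lt ? r5 : r4

   which is equivalent to r3 = cr0.ge ? r4 : r5.  */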
16721 void
16722 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16724 enum machine_mode mode = GET_MODE (op0);
16725 enum rtx_code c;
16726 rtx target;
16728 /* VSX/altivec have direct min/max insns. */
16729 if ((code == SMAX || code == SMIN)
16730 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16731 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
16733 emit_insn (gen_rtx_SET (VOIDmode,
16734 dest,
16735 gen_rtx_fmt_ee (code, mode, op0, op1)));
16736 return;
16739 if (code == SMAX || code == SMIN)
16740 c = GE;
16741 else
16742 c = GEU;
16744 if (code == SMAX || code == UMAX)
16745 target = emit_conditional_move (dest, c, op0, op1, mode,
16746 op0, op1, mode, 0);
16747 else
16748 target = emit_conditional_move (dest, c, op0, op1, mode,
16749 op1, op0, mode, 0);
16750 gcc_assert (target);
16751 if (target != dest)
16752 emit_move_insn (dest, target);
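/* Sketch of what the fallback path produces for scalar DFmode SMAX
   (names illustrative):

       dest = (op0 >= op1) ? op0 : op1

   which rs6000_emit_cmove in turn open-codes with fsub/fsel; the
   vector forms taken in the direct branch above instead match single
   instructions such as xvmaxdp on VSX.  */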
16755 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16756 COND is true. Mark the jump as unlikely to be taken. */
16758 static void
16759 emit_unlikely_jump (rtx cond, rtx label)
16761 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16762 rtx x;
16764 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16765 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16766 add_reg_note (x, REG_BR_PROB, very_unlikely);
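/* With the usual REG_BR_PROB_BASE of 10000, the note above works out
   to 99, i.e. the jump is predicted taken roughly 1% of the time,
   which is what we want for the store-conditional retry loops below.  */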
16769 /* A subroutine of the atomic operation splitters. Emit a load-locked
16770 instruction in MODE. */
16772 static void
16773 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16775 rtx (*fn) (rtx, rtx) = NULL;
16777 switch (mode)
16779 case SImode:
16780 fn = gen_load_lockedsi;
16781 break;
16782 case DImode:
16783 fn = gen_load_lockeddi;
16784 break;
16785 default:
16786 gcc_unreachable ();
16788 emit_insn (fn (reg, mem));
16791 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16792 instruction in MODE. */
16794 static void
16795 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16797 rtx (*fn) (rtx, rtx, rtx) = NULL;
16799 switch (mode)
16801 case SImode:
16802 fn = gen_store_conditionalsi;
16803 break;
16804 case DImode:
16805 fn = gen_store_conditionaldi;
16806 break;
16807 default:
16808 gcc_unreachable ();
16811 /* Emit sync before stwcx. to address PPC405 Erratum. */
16812 if (PPC405_ERRATUM77)
16813 emit_insn (gen_hwsync ());
16815 emit_insn (fn (res, mem, val));
16818 /* Expand barriers before and after a load_locked/store_cond sequence. */
16820 static rtx
16821 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
16823 rtx addr = XEXP (mem, 0);
16824 int strict_p = (reload_in_progress || reload_completed);
16826 if (!legitimate_indirect_address_p (addr, strict_p)
16827 && !legitimate_indexed_address_p (addr, strict_p))
16829 addr = force_reg (Pmode, addr);
16830 mem = replace_equiv_address_nv (mem, addr);
16833 switch (model)
16835 case MEMMODEL_RELAXED:
16836 case MEMMODEL_CONSUME:
16837 case MEMMODEL_ACQUIRE:
16838 break;
16839 case MEMMODEL_RELEASE:
16840 case MEMMODEL_ACQ_REL:
16841 emit_insn (gen_lwsync ());
16842 break;
16843 case MEMMODEL_SEQ_CST:
16844 emit_insn (gen_hwsync ());
16845 break;
16846 default:
16847 gcc_unreachable ();
16849 return mem;
16852 static void
16853 rs6000_post_atomic_barrier (enum memmodel model)
16855 switch (model)
16857 case MEMMODEL_RELAXED:
16858 case MEMMODEL_CONSUME:
16859 case MEMMODEL_RELEASE:
16860 break;
16861 case MEMMODEL_ACQUIRE:
16862 case MEMMODEL_ACQ_REL:
16863 case MEMMODEL_SEQ_CST:
16864 emit_insn (gen_isync ());
16865 break;
16866 default:
16867 gcc_unreachable ();
16871 /* A subroutine of the various atomic expanders. For sub-word operations,
16872 we must adjust things to operate on SImode. Given the original MEM,
16873 return a new aligned memory. Also build and return the quantities by
16874 which to shift and mask. */
16876 static rtx
16877 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
16879 rtx addr, align, shift, mask, mem;
16880 HOST_WIDE_INT shift_mask;
16881 enum machine_mode mode = GET_MODE (orig_mem);
16883 /* For smaller modes, we have to implement this via SImode. */
16884 shift_mask = (mode == QImode ? 0x18 : 0x10);
16886 addr = XEXP (orig_mem, 0);
16887 addr = force_reg (GET_MODE (addr), addr);
16889 /* Aligned memory containing subword. Generate a new memory. We
16890 do not want any of the existing MEM_ATTR data, as we're now
16891 accessing memory outside the original object. */
16892 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
16893 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16894 mem = gen_rtx_MEM (SImode, align);
16895 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
16896 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
16897 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
16899 /* Shift amount for subword relative to aligned word. */
16900 shift = gen_reg_rtx (SImode);
16901 addr = gen_lowpart (SImode, addr);
16902 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
16903 if (WORDS_BIG_ENDIAN)
16904 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
16905 shift, 1, OPTAB_LIB_WIDEN);
16906 *pshift = shift;
16908 /* Mask for insertion. */
16909 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
16910 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
16911 *pmask = mask;
16913 return mem;
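/* Worked example (sketch): a HImode access at byte offset 2 within its
   word on a big-endian target.  ADDR & -4 selects the containing word;
   the rlwinm computes (addr * 8) & 0x10 = 16, and the big-endian XOR
   with 0x10 flips that to 0, since bytes 2-3 are the least significant
   half of the word.  The returned insertion mask is then 0xffff << 0.  */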
16916 /* A subroutine of the various atomic expanders. For sub-word operands,
16917 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
16919 static rtx
16920 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
16922 rtx x;
16924 x = gen_reg_rtx (SImode);
16925 emit_insn (gen_rtx_SET (VOIDmode, x,
16926 gen_rtx_AND (SImode,
16927 gen_rtx_NOT (SImode, mask),
16928 oldval)));
16930 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
16932 return x;
16935 /* A subroutine of the various atomic expanders. For sub-word operands,
16936 extract WIDE to NARROW via SHIFT. */
16938 static void
16939 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
16941 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
16942 wide, 1, OPTAB_LIB_WIDEN);
16943 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
16946 /* Expand an atomic compare and swap operation. */
16948 void
16949 rs6000_expand_atomic_compare_and_swap (rtx operands[])
16951 rtx boolval, retval, mem, oldval, newval, cond;
16952 rtx label1, label2, x, mask, shift;
16953 enum machine_mode mode;
16954 enum memmodel mod_s, mod_f;
16955 bool is_weak;
16957 boolval = operands[0];
16958 retval = operands[1];
16959 mem = operands[2];
16960 oldval = operands[3];
16961 newval = operands[4];
16962 is_weak = (INTVAL (operands[5]) != 0);
16963 mod_s = (enum memmodel) INTVAL (operands[6]);
16964 mod_f = (enum memmodel) INTVAL (operands[7]);
16965 mode = GET_MODE (mem);
16967 mask = shift = NULL_RTX;
16968 if (mode == QImode || mode == HImode)
16970 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16972 /* Shift and mask OLDVAL into position within the word. */
16973 oldval = convert_modes (SImode, mode, oldval, 1);
16974 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
16975 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16977 /* Shift and mask NEWVAL into position within the word. */
16978 newval = convert_modes (SImode, mode, newval, 1);
16979 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
16980 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16982 /* Prepare to adjust the return value. */
16983 retval = gen_reg_rtx (SImode);
16984 mode = SImode;
16986 else if (reg_overlap_mentioned_p (retval, oldval))
16987 oldval = copy_to_reg (oldval);
16989 mem = rs6000_pre_atomic_barrier (mem, mod_s);
16991 label1 = NULL_RTX;
16992 if (!is_weak)
16994 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16995 emit_label (XEXP (label1, 0));
16997 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16999 emit_load_locked (mode, retval, mem);
17001 x = retval;
17002 if (mask)
17004 x = expand_simple_binop (SImode, AND, retval, mask,
17005 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17008 cond = gen_reg_rtx (CCmode);
17009 x = gen_rtx_COMPARE (CCmode, x, oldval);
17010 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
17012 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17013 emit_unlikely_jump (x, label2);
17015 x = newval;
17016 if (mask)
17017 x = rs6000_mask_atomic_subword (retval, newval, mask);
17019 emit_store_conditional (mode, cond, mem, x);
17021 if (!is_weak)
17023 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17024 emit_unlikely_jump (x, label1);
17027 if (mod_f != MEMMODEL_RELAXED)
17028 emit_label (XEXP (label2, 0));
17030 rs6000_post_atomic_barrier (mod_s);
17032 if (mod_f == MEMMODEL_RELAXED)
17033 emit_label (XEXP (label2, 0));
17035 if (shift)
17036 rs6000_finish_atomic_subword (operands[1], retval, shift);
17038 /* In all cases, CR0 contains EQ on success, and NE on failure. */
17039 x = gen_rtx_EQ (SImode, cond, const0_rtx);
17040 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
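/* For reference, a sketch of the sequence emitted above for a strong
   SImode compare-and-swap with MEMMODEL_SEQ_CST (register names
   illustrative):

        hwsync
     1: lwarx   rRET,0,rMEM
        cmpw    cr0,rRET,rOLD
        bne-    cr0,2f
        stwcx.  rNEW,0,rMEM
        bne-    cr0,1b
     2: isync

   with BOOLVAL then set from CR0.EQ as above.  */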
17043 /* Expand an atomic exchange operation. */
17045 void
17046 rs6000_expand_atomic_exchange (rtx operands[])
17048 rtx retval, mem, val, cond;
17049 enum machine_mode mode;
17050 enum memmodel model;
17051 rtx label, x, mask, shift;
17053 retval = operands[0];
17054 mem = operands[1];
17055 val = operands[2];
17056 model = (enum memmodel) INTVAL (operands[3]);
17057 mode = GET_MODE (mem);
17059 mask = shift = NULL_RTX;
17060 if (mode == QImode || mode == HImode)
17062 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
17064 /* Shift and mask VAL into position within the word. */
17065 val = convert_modes (SImode, mode, val, 1);
17066 val = expand_simple_binop (SImode, ASHIFT, val, shift,
17067 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17069 /* Prepare to adjust the return value. */
17070 retval = gen_reg_rtx (SImode);
17071 mode = SImode;
17074 mem = rs6000_pre_atomic_barrier (mem, model);
17076 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
17077 emit_label (XEXP (label, 0));
17079 emit_load_locked (mode, retval, mem);
17081 x = val;
17082 if (mask)
17083 x = rs6000_mask_atomic_subword (retval, val, mask);
17085 cond = gen_reg_rtx (CCmode);
17086 emit_store_conditional (mode, cond, mem, x);
17088 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17089 emit_unlikely_jump (x, label);
17091 rs6000_post_atomic_barrier (model);
17093 if (shift)
17094 rs6000_finish_atomic_subword (operands[0], retval, shift);
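/* Sketch of the SImode expansion for a relaxed exchange (names
   illustrative):

     1: lwarx   rRET,0,rMEM
        stwcx.  rVAL,0,rMEM
        bne-    cr0,1b

   leaving the previous memory contents in rRET.  */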
17097 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
17098 to perform. MEM is the memory on which to operate. VAL is the second
17099 operand of the binary operator. BEFORE and AFTER are optional locations to
17100 return the value of MEM either before or after the operation. MODEL_RTX
17101 is a CONST_INT containing the memory model to use. */
17103 void
17104 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
17105 rtx orig_before, rtx orig_after, rtx model_rtx)
17107 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
17108 enum machine_mode mode = GET_MODE (mem);
17109 rtx label, x, cond, mask, shift;
17110 rtx before = orig_before, after = orig_after;
17112 mask = shift = NULL_RTX;
17113 if (mode == QImode || mode == HImode)
17115 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
17117 /* Shift and mask VAL into position within the word. */
17118 val = convert_modes (SImode, mode, val, 1);
17119 val = expand_simple_binop (SImode, ASHIFT, val, shift,
17120 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17122 switch (code)
17124 case IOR:
17125 case XOR:
17126 /* We've already zero-extended VAL. That is sufficient to
17127 make certain that it does not affect other bits. */
17128 mask = NULL;
17129 break;
17131 case AND:
17132 /* If we make certain that all of the other bits in VAL are
17133 set, that will be sufficient to not affect other bits. */
17134 x = gen_rtx_NOT (SImode, mask);
17135 x = gen_rtx_IOR (SImode, x, val);
17136 emit_insn (gen_rtx_SET (VOIDmode, val, x));
17137 mask = NULL;
17138 break;
17140 case NOT:
17141 case PLUS:
17142 case MINUS:
17143 /* These will all affect bits outside the field and need
17144 adjustment via MASK within the loop. */
17145 break;
17147 default:
17148 gcc_unreachable ();
17151 /* Prepare to adjust the return value. */
17152 before = gen_reg_rtx (SImode);
17153 if (after)
17154 after = gen_reg_rtx (SImode);
17155 mode = SImode;
17158 mem = rs6000_pre_atomic_barrier (mem, model);
17160 label = gen_label_rtx ();
17161 emit_label (label);
17162 label = gen_rtx_LABEL_REF (VOIDmode, label);
17164 if (before == NULL_RTX)
17165 before = gen_reg_rtx (mode);
17167 emit_load_locked (mode, before, mem);
17169 if (code == NOT)
17171 x = expand_simple_binop (mode, AND, before, val,
17172 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17173 after = expand_simple_unop (mode, NOT, x, after, 1);
17175 else
17177 after = expand_simple_binop (mode, code, before, val,
17178 after, 1, OPTAB_LIB_WIDEN);
17181 x = after;
17182 if (mask)
17184 x = expand_simple_binop (SImode, AND, after, mask,
17185 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17186 x = rs6000_mask_atomic_subword (before, x, mask);
17189 cond = gen_reg_rtx (CCmode);
17190 emit_store_conditional (mode, cond, mem, x);
17192 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17193 emit_unlikely_jump (x, label);
17195 rs6000_post_atomic_barrier (model);
17197 if (shift)
17199 if (orig_before)
17200 rs6000_finish_atomic_subword (orig_before, before, shift);
17201 if (orig_after)
17202 rs6000_finish_atomic_subword (orig_after, after, shift);
17204 else if (orig_after && after != orig_after)
17205 emit_move_insn (orig_after, after);
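/* Sketch: a word-sized fetch-and-add with MEMMODEL_ACQUIRE expands to
   roughly (names illustrative)

     1: lwarx   rBEF,0,rMEM
        add     rAFT,rBEF,rVAL
        stwcx.  rAFT,0,rMEM
        bne-    cr0,1b
        isync

   with ORIG_BEFORE/ORIG_AFTER, when present, receiving rBEF/rAFT.  */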
17208 /* Emit instructions to move SRC to DST. Called by splitters for
17209 multi-register moves. It will emit at most one instruction for
17210 each register that is accessed; that is, it won't emit li/lis pairs
17211 (or equivalent for 64-bit code). One of SRC or DST must be a hard
17212 register. */
17214 void
17215 rs6000_split_multireg_move (rtx dst, rtx src)
17217 /* The register number of the first register being moved. */
17218 int reg;
17219 /* The mode that is to be moved. */
17220 enum machine_mode mode;
17221 /* The mode that the move is being done in, and its size. */
17222 enum machine_mode reg_mode;
17223 int reg_mode_size;
17224 /* The number of registers that will be moved. */
17225 int nregs;
17227 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
17228 mode = GET_MODE (dst);
17229 nregs = hard_regno_nregs[reg][mode];
17230 if (FP_REGNO_P (reg))
17231 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
17232 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
17233 else if (ALTIVEC_REGNO_P (reg))
17234 reg_mode = V16QImode;
17235 else if (TARGET_E500_DOUBLE && mode == TFmode)
17236 reg_mode = DFmode;
17237 else
17238 reg_mode = word_mode;
17239 reg_mode_size = GET_MODE_SIZE (reg_mode);
17241 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
17243 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
17245 /* Move register range backwards, if we might have destructive
17246 overlap. */
17247 int i;
17248 for (i = nregs - 1; i >= 0; i--)
17249 emit_insn (gen_rtx_SET (VOIDmode,
17250 simplify_gen_subreg (reg_mode, dst, mode,
17251 i * reg_mode_size),
17252 simplify_gen_subreg (reg_mode, src, mode,
17253 i * reg_mode_size)));
17255 else
17257 int i;
17258 int j = -1;
17259 bool used_update = false;
17260 rtx restore_basereg = NULL_RTX;
17262 if (MEM_P (src) && INT_REGNO_P (reg))
17264 rtx breg;
17266 if (GET_CODE (XEXP (src, 0)) == PRE_INC
17267 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
17269 rtx delta_rtx;
17270 breg = XEXP (XEXP (src, 0), 0);
17271 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
17272 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
17273 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
17274 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17275 src = replace_equiv_address (src, breg);
17277 else if (! rs6000_offsettable_memref_p (src, reg_mode))
17279 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
17281 rtx basereg = XEXP (XEXP (src, 0), 0);
17282 if (TARGET_UPDATE)
17284 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
17285 emit_insn (gen_rtx_SET (VOIDmode, ndst,
17286 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
17287 used_update = true;
17289 else
17290 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17291 XEXP (XEXP (src, 0), 1)));
17292 src = replace_equiv_address (src, basereg);
17294 else
17296 rtx basereg = gen_rtx_REG (Pmode, reg);
17297 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
17298 src = replace_equiv_address (src, basereg);
17302 breg = XEXP (src, 0);
17303 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
17304 breg = XEXP (breg, 0);
17306 /* If the base register we are using to address memory is
17307 also a destination reg, then change that register last. */
17308 if (REG_P (breg)
17309 && REGNO (breg) >= REGNO (dst)
17310 && REGNO (breg) < REGNO (dst) + nregs)
17311 j = REGNO (breg) - REGNO (dst);
17313 else if (MEM_P (dst) && INT_REGNO_P (reg))
17315 rtx breg;
17317 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
17318 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
17320 rtx delta_rtx;
17321 breg = XEXP (XEXP (dst, 0), 0);
17322 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
17323 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
17324 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
17326 /* We have to update the breg before doing the store.
17327 Use store with update, if available. */
17329 if (TARGET_UPDATE)
17331 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17332 emit_insn (TARGET_32BIT
17333 ? (TARGET_POWERPC64
17334 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
17335 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
17336 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
17337 used_update = true;
17339 else
17340 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17341 dst = replace_equiv_address (dst, breg);
17343 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
17344 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
17346 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
17348 rtx basereg = XEXP (XEXP (dst, 0), 0);
17349 if (TARGET_UPDATE)
17351 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17352 emit_insn (gen_rtx_SET (VOIDmode,
17353 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
17354 used_update = true;
17356 else
17357 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17358 XEXP (XEXP (dst, 0), 1)));
17359 dst = replace_equiv_address (dst, basereg);
17361 else
17363 rtx basereg = XEXP (XEXP (dst, 0), 0);
17364 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
17365 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
17366 && REG_P (basereg)
17367 && REG_P (offsetreg)
17368 && REGNO (basereg) != REGNO (offsetreg));
17369 if (REGNO (basereg) == 0)
17371 rtx tmp = offsetreg;
17372 offsetreg = basereg;
17373 basereg = tmp;
17375 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
17376 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
17377 dst = replace_equiv_address (dst, basereg);
17380 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
17381 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
17384 for (i = 0; i < nregs; i++)
17386 /* Calculate index to next subword. */
17387 ++j;
17388 if (j == nregs)
17389 j = 0;
17391 /* If the compiler already emitted the move of the first word by
17392 a store with update, there is no need to do anything. */
17393 if (j == 0 && used_update)
17394 continue;
17396 emit_insn (gen_rtx_SET (VOIDmode,
17397 simplify_gen_subreg (reg_mode, dst, mode,
17398 j * reg_mode_size),
17399 simplify_gen_subreg (reg_mode, src, mode,
17400 j * reg_mode_size)));
17402 if (restore_basereg != NULL_RTX)
17403 emit_insn (restore_basereg);
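/* Example of the overlap handling above (sketch): splitting a DImode
   register move from r3:r4 into r4:r5 on 32-bit must copy the
   high-numbered half first,

       mr r5,r4
       mr r4,r3

   since REGNO (src) < REGNO (dst) and a forward copy would clobber r4
   before it is read.  */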
17408 /* This page contains routines that are used to determine what the
17409 function prologue and epilogue code will do and write them out. */
17411 static inline bool
17412 save_reg_p (int r)
17414 return !call_used_regs[r] && df_regs_ever_live_p (r);
17417 /* Return the first fixed-point register that is required to be
17418 saved. 32 if none. */
17420 int
17421 first_reg_to_save (void)
17423 int first_reg;
17425 /* Find lowest numbered live register. */
17426 for (first_reg = 13; first_reg <= 31; first_reg++)
17427 if (save_reg_p (first_reg))
17428 break;
17430 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17431 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17432 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17433 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17434 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17435 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17437 #if TARGET_MACHO
17438 if (flag_pic
17439 && crtl->uses_pic_offset_table
17440 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17441 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17442 #endif
17444 return first_reg;
17447 /* Similar, for FP regs. */
17449 int
17450 first_fp_reg_to_save (void)
17452 int first_reg;
17454 /* Find lowest numbered live register. */
17455 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17456 if (save_reg_p (first_reg))
17457 break;
17459 return first_reg;
17462 /* Similar, for AltiVec regs. */
17464 static int
17465 first_altivec_reg_to_save (void)
17467 int i;
17469 /* Stack frame remains as is unless we are in AltiVec ABI. */
17470 if (! TARGET_ALTIVEC_ABI)
17471 return LAST_ALTIVEC_REGNO + 1;
17473 /* On Darwin, the unwind routines are compiled without
17474 TARGET_ALTIVEC, and use save_world to save/restore the
17475 altivec registers when necessary. */
17476 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17477 && ! TARGET_ALTIVEC)
17478 return FIRST_ALTIVEC_REGNO + 20;
17480 /* Find lowest numbered live register. */
17481 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17482 if (save_reg_p (i))
17483 break;
17485 return i;
17488 /* Return a 32-bit mask of the AltiVec registers we need to set in
17489 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
17490 the 32-bit word is 0. */
17492 static unsigned int
17493 compute_vrsave_mask (void)
17495 unsigned int i, mask = 0;
17497 /* On Darwin, the unwind routines are compiled without
17498 TARGET_ALTIVEC, and use save_world to save/restore the
17499 call-saved altivec registers when necessary. */
17500 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17501 && ! TARGET_ALTIVEC)
17502 mask |= 0xFFF;
17504 /* First, find out if we use _any_ altivec registers. */
17505 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17506 if (df_regs_ever_live_p (i))
17507 mask |= ALTIVEC_REG_BIT (i);
17509 if (mask == 0)
17510 return mask;
17512 /* Next, remove the argument registers from the set. These must
17513 be in the VRSAVE mask set by the caller, so we don't need to add
17514 them in again. More importantly, the mask we compute here is
17515 used to generate CLOBBERs in the set_vrsave insn, and we do not
17516 wish the argument registers to die. */
17517 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17518 mask &= ~ALTIVEC_REG_BIT (i);
17520 /* Similarly, remove the return value from the set. */
17522 bool yes = false;
17523 diddle_return_value (is_altivec_return_reg, &yes);
17524 if (yes)
17525 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17528 return mask;
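/* Worked example: if only the call-saved vectors v20-v31 are live (as
   on the Darwin save_world path above), the computed mask is
   0x00000fff; v0 would correspond to the most significant bit.  */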
17531 /* For a very restricted set of circumstances, we can cut down the
17532 size of prologues/epilogues by calling our own save/restore-the-world
17533 routines. */
17535 static void
17536 compute_save_world_info (rs6000_stack_t *info_ptr)
17538 info_ptr->world_save_p = 1;
17539 info_ptr->world_save_p
17540 = (WORLD_SAVE_P (info_ptr)
17541 && DEFAULT_ABI == ABI_DARWIN
17542 && !cfun->has_nonlocal_label
17543 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17544 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17545 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17546 && info_ptr->cr_save_p);
17548 /* This will not work in conjunction with sibcalls. Make sure there
17549 are none. (This check is expensive, but seldom executed.) */
17550 if (WORLD_SAVE_P (info_ptr))
17552 rtx insn;
17553 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17554 if ( GET_CODE (insn) == CALL_INSN
17555 && SIBLING_CALL_P (insn))
17557 info_ptr->world_save_p = 0;
17558 break;
17562 if (WORLD_SAVE_P (info_ptr))
17564 /* Even if we're not touching VRsave, make sure there's room on the
17565 stack for it, if it looks like we're calling SAVE_WORLD, which
17566 will attempt to save it. */
17567 info_ptr->vrsave_size = 4;
17569 /* If we are going to save the world, we need to save the link register too. */
17570 info_ptr->lr_save_p = 1;
17572 /* "Save" the VRsave register too if we're saving the world. */
17573 if (info_ptr->vrsave_mask == 0)
17574 info_ptr->vrsave_mask = compute_vrsave_mask ();
17576 /* Because the Darwin register save/restore routines only handle
17577 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17578 check. */
17579 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17580 && (info_ptr->first_altivec_reg_save
17581 >= FIRST_SAVED_ALTIVEC_REGNO));
17583 return;
17587 static void
17588 is_altivec_return_reg (rtx reg, void *xyes)
17590 bool *yes = (bool *) xyes;
17591 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17592 *yes = true;
17596 /* Look for user-defined global regs in the range FIRST to LAST-1.
17597 We should not restore these, and so cannot use lmw or out-of-line
17598 restore functions if there are any. We also can't save them
17599 (well, emit frame notes for them), because frame unwinding during
17600 exception handling will restore saved registers. */
17602 static bool
17603 global_regs_p (unsigned first, unsigned last)
17605 while (first < last)
17606 if (global_regs[first++])
17607 return true;
17608 return false;
17611 /* Determine the strategy for saving/restoring registers. */
17613 enum {
17614 SAVRES_MULTIPLE = 0x1,
17615 SAVE_INLINE_FPRS = 0x2,
17616 SAVE_INLINE_GPRS = 0x4,
17617 REST_INLINE_FPRS = 0x8,
17618 REST_INLINE_GPRS = 0x10,
17619 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17620 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17621 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17622 SAVE_INLINE_VRS = 0x100,
17623 REST_INLINE_VRS = 0x200
17626 static int
17627 rs6000_savres_strategy (rs6000_stack_t *info,
17628 bool using_static_chain_p)
17630 int strategy = 0;
17631 bool lr_save_p;
17633 if (TARGET_MULTIPLE
17634 && !TARGET_POWERPC64
17635 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17636 && info->first_gp_reg_save < 31
17637 && !global_regs_p (info->first_gp_reg_save, 32))
17638 strategy |= SAVRES_MULTIPLE;
17640 if (crtl->calls_eh_return
17641 || cfun->machine->ra_need_lr)
17642 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17643 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17644 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17646 if (info->first_fp_reg_save == 64
17647 /* The out-of-line FP routines use double-precision stores;
17648 we can't use those routines if we don't have such stores. */
17649 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17650 || global_regs_p (info->first_fp_reg_save, 64))
17651 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17653 if (info->first_gp_reg_save == 32
17654 || (!(strategy & SAVRES_MULTIPLE)
17655 && global_regs_p (info->first_gp_reg_save, 32)))
17656 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17658 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
17659 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
17660 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17662 /* Define cutoff for using out-of-line functions to save registers. */
17663 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
17665 if (!optimize_size)
17667 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17668 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17669 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17671 else
17673 /* Prefer out-of-line restore if it will exit. */
17674 if (info->first_fp_reg_save > 61)
17675 strategy |= SAVE_INLINE_FPRS;
17676 if (info->first_gp_reg_save > 29)
17678 if (info->first_fp_reg_save == 64)
17679 strategy |= SAVE_INLINE_GPRS;
17680 else
17681 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17683 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
17684 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17687 else if (DEFAULT_ABI == ABI_DARWIN)
17689 if (info->first_fp_reg_save > 60)
17690 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17691 if (info->first_gp_reg_save > 29)
17692 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17693 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17695 else
17697 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
17698 if (info->first_fp_reg_save > 61)
17699 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17700 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17701 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17704 /* Don't bother to try to save things out-of-line if r11 is occupied
17705 by the static chain. It would require too much fiddling and the
17706 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
17707 pointer on Darwin, and AIX uses r1 or r12. */
17708 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
17709 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
17710 | SAVE_INLINE_GPRS
17711 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17713 /* We can only use the out-of-line routines to restore if we've
17714 saved all the registers from first_fp_reg_save in the prologue.
17715 Otherwise, we risk loading garbage. */
17716 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
17718 int i;
17720 for (i = info->first_fp_reg_save; i < 64; i++)
17721 if (!save_reg_p (i))
17723 strategy |= REST_INLINE_FPRS;
17724 break;
17728 /* If we are going to use store multiple, then don't even bother
17729 with the out-of-line routines, since the store-multiple
17730 instruction will always be smaller. */
17731 if ((strategy & SAVRES_MULTIPLE))
17732 strategy |= SAVE_INLINE_GPRS;
17734 /* info->lr_save_p isn't yet set if the only reason lr needs to be
17735 saved is an out-of-line save or restore. Set up the value for
17736 the next test (excluding out-of-line gpr restore). */
17737 lr_save_p = (info->lr_save_p
17738 || !(strategy & SAVE_INLINE_GPRS)
17739 || !(strategy & SAVE_INLINE_FPRS)
17740 || !(strategy & SAVE_INLINE_VRS)
17741 || !(strategy & REST_INLINE_FPRS)
17742 || !(strategy & REST_INLINE_VRS));
17744 /* The situation is more complicated with load multiple. We'd
17745 prefer to use the out-of-line routines for restores, since the
17746 "exit" out-of-line routines can handle the restore of LR and the
17747 frame teardown. However, it doesn't make sense to use the
17748 out-of-line routine if that is the only reason we'd need to save
17749 LR, and we can't use the "exit" out-of-line gpr restore if we
17750 have saved some FPRs; in those cases it is advantageous to use
17751 load multiple when available. */
17752 if ((strategy & SAVRES_MULTIPLE)
17753 && (!lr_save_p
17754 || info->first_fp_reg_save != 64))
17755 strategy |= REST_INLINE_GPRS;
17757 /* Saving CR interferes with the exit routines used on the SPE, so
17758 just punt here. */
17759 if (TARGET_SPE_ABI
17760 && info->spe_64bit_regs_used
17761 && info->cr_save_p)
17762 strategy |= REST_INLINE_GPRS;
17764 /* We can only use load multiple or the out-of-line routines to
17765 restore if we've used store multiple or out-of-line routines
17766 in the prologue, i.e. if we've saved all the registers from
17767 first_gp_reg_save. Otherwise, we risk loading garbage. */
17768 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
17769 == SAVE_INLINE_GPRS)
17771 int i;
17773 for (i = info->first_gp_reg_save; i < 32; i++)
17774 if (!save_reg_p (i))
17776 strategy |= REST_INLINE_GPRS;
17777 break;
17781 if (TARGET_ELF && TARGET_64BIT)
17783 if (!(strategy & SAVE_INLINE_FPRS))
17784 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17785 else if (!(strategy & SAVE_INLINE_GPRS)
17786 && info->first_fp_reg_save == 64)
17787 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
17789 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
17790 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
17792 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
17793 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17795 return strategy;
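/* One illustrative case (not exhaustive): a 32-bit SVR4 function
   compiled with -Os and TARGET_MULTIPLE that saves r28-r31 and no
   FPRs gets SAVRES_MULTIPLE, and therefore SAVE_INLINE_GPRS as well,
   so its prologue uses a single stmw rather than a call to an
   out-of-line save routine.  */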
17798 /* Calculate the stack information for the current function. This is
17799 complicated by having two separate calling sequences, the AIX calling
17800 sequence and the V.4 calling sequence.
17802 AIX (and Darwin/Mac OS X) stack frames look like:
17803 32-bit 64-bit
17804 SP----> +---------------------------------------+
17805 | back chain to caller | 0 0
17806 +---------------------------------------+
17807 | saved CR | 4 8 (8-11)
17808 +---------------------------------------+
17809 | saved LR | 8 16
17810 +---------------------------------------+
17811 | reserved for compilers | 12 24
17812 +---------------------------------------+
17813 | reserved for binders | 16 32
17814 +---------------------------------------+
17815 | saved TOC pointer | 20 40
17816 +---------------------------------------+
17817 | Parameter save area (P) | 24 48
17818 +---------------------------------------+
17819 | Alloca space (A) | 24+P etc.
17820 +---------------------------------------+
17821 | Local variable space (L) | 24+P+A
17822 +---------------------------------------+
17823 | Float/int conversion temporary (X) | 24+P+A+L
17824 +---------------------------------------+
17825 | Save area for AltiVec registers (W) | 24+P+A+L+X
17826 +---------------------------------------+
17827 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17828 +---------------------------------------+
17829 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17830 +---------------------------------------+
17831 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17832 +---------------------------------------+
17833 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17834 +---------------------------------------+
17835 old SP->| back chain to caller's caller |
17836 +---------------------------------------+
17838 The required alignment for AIX configurations is two words (i.e., 8
17839 or 16 bytes).
17842 V.4 stack frames look like:
17844 SP----> +---------------------------------------+
17845 | back chain to caller | 0
17846 +---------------------------------------+
17847 | caller's saved LR | 4
17848 +---------------------------------------+
17849 | Parameter save area (P) | 8
17850 +---------------------------------------+
17851 | Alloca space (A) | 8+P
17852 +---------------------------------------+
17853 | Varargs save area (V) | 8+P+A
17854 +---------------------------------------+
17855 | Local variable space (L) | 8+P+A+V
17856 +---------------------------------------+
17857 | Float/int conversion temporary (X) | 8+P+A+V+L
17858 +---------------------------------------+
17859 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17860 +---------------------------------------+
17861 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17862 +---------------------------------------+
17863 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17864 +---------------------------------------+
17865 | SPE: area for 64-bit GP registers |
17866 +---------------------------------------+
17867 | SPE alignment padding |
17868 +---------------------------------------+
17869 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17870 +---------------------------------------+
17871 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17872 +---------------------------------------+
17873 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17874 +---------------------------------------+
17875 old SP->| back chain to caller's caller |
17876 +---------------------------------------+
17878 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17879 given. (But note below and in sysv4.h that we require only 8 and
17880 may round up the size of our stack frame anyway. The historical
17881 reason is early versions of powerpc-linux which didn't properly
17882 align the stack at program startup. A happy side-effect is that
17883 -mno-eabi libraries can be used with -meabi programs.)
17885 The EABI configuration defaults to the V.4 layout. However,
17886 the stack alignment requirements may differ. If -mno-eabi is not
17887 given, the required stack alignment is 8 bytes; if -mno-eabi is
17888 given, the required alignment is 16 bytes. (But see V.4 comment
17889 above.) */
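/* As a worked example of the bookkeeping in rs6000_stack_info below
   (a sketch: 32-bit V.4, no AltiVec or SPE), a function saving f30-f31,
   r30-r31 and CR gets

       fp_size = 8 * (64 - 62) = 16   fp_save_offset = -16
       gp_size = 4 * (32 - 30) = 8    gp_save_offset = -24
       cr_size = 4                    cr_save_offset = -28

   measured down from the incoming stack pointer, with save_size then
   rounded up to the 8-byte save alignment, giving 32.  */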
17891 #ifndef ABI_STACK_BOUNDARY
17892 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17893 #endif
17895 static rs6000_stack_t *
17896 rs6000_stack_info (void)
17898 rs6000_stack_t *info_ptr = &stack_info;
17899 int reg_size = TARGET_32BIT ? 4 : 8;
17900 int ehrd_size;
17901 int save_align;
17902 int first_gp;
17903 HOST_WIDE_INT non_fixed_size;
17904 bool using_static_chain_p;
17906 if (reload_completed && info_ptr->reload_completed)
17907 return info_ptr;
17909 memset (info_ptr, 0, sizeof (*info_ptr));
17910 info_ptr->reload_completed = reload_completed;
17912 if (TARGET_SPE)
17914 /* Cache value so we don't rescan instruction chain over and over. */
17915 if (cfun->machine->insn_chain_scanned_p == 0)
17916 cfun->machine->insn_chain_scanned_p
17917 = spe_func_has_64bit_regs_p () + 1;
17918 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17921 /* Select which calling sequence. */
17922 info_ptr->abi = DEFAULT_ABI;
17924 /* Calculate which registers need to be saved & save area size. */
17925 info_ptr->first_gp_reg_save = first_reg_to_save ();
17926 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17927 even if it currently looks like we won't. Reload may need it to
17928 get at a constant; if so, it will have already created a constant
17929 pool entry for it. */
17930 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17931 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17932 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17933 && crtl->uses_const_pool
17934 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17935 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17936 else
17937 first_gp = info_ptr->first_gp_reg_save;
17939 info_ptr->gp_size = reg_size * (32 - first_gp);
17941 /* For the SPE, we have an additional upper 32-bits on each GPR.
17942 Ideally we should save the entire 64-bits only when the upper
17943 half is used in SIMD instructions. Since we only record
17944 registers live (not the size they are used in), this proves
17945 difficult because we'd have to traverse the instruction chain at
17946 the right time, taking reload into account. This is a real pain,
17947 so we opt to always save the GPRs in 64 bits if even one register
17948 gets used in 64 bits. Otherwise, all the registers in the frame
17949 get saved in 32-bits.
17951 So, when we save all GPRs (except the SP) in 64 bits, the
17952 traditional GP save area will be empty. */
17953 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17954 info_ptr->gp_size = 0;
17956 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17957 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17959 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17960 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17961 - info_ptr->first_altivec_reg_save);
17963 /* Does this function call anything? */
17964 info_ptr->calls_p = (! crtl->is_leaf
17965 || cfun->machine->ra_needs_full_frame);
17967 /* Determine if we need to save the condition code registers. */
17968 if (df_regs_ever_live_p (CR2_REGNO)
17969 || df_regs_ever_live_p (CR3_REGNO)
17970 || df_regs_ever_live_p (CR4_REGNO))
17972 info_ptr->cr_save_p = 1;
17973 if (DEFAULT_ABI == ABI_V4)
17974 info_ptr->cr_size = reg_size;
17977 /* If the current function calls __builtin_eh_return, then we need
17978 to allocate stack space for registers that will hold data for
17979 the exception handler. */
17980 if (crtl->calls_eh_return)
17982 unsigned int i;
17983 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17984 continue;
17986 /* SPE saves EH registers in 64-bits. */
17987 ehrd_size = i * (TARGET_SPE_ABI
17988 && info_ptr->spe_64bit_regs_used != 0
17989 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17991 else
17992 ehrd_size = 0;
17994 /* Determine various sizes. */
17995 info_ptr->reg_size = reg_size;
17996 info_ptr->fixed_size = RS6000_SAVE_AREA;
17997 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17998 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17999 TARGET_ALTIVEC ? 16 : 8);
18000 if (FRAME_GROWS_DOWNWARD)
18001 info_ptr->vars_size
18002 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
18003 + info_ptr->parm_size,
18004 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
18005 - (info_ptr->fixed_size + info_ptr->vars_size
18006 + info_ptr->parm_size);
18008 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
18009 info_ptr->spe_gp_size = 8 * (32 - first_gp);
18010 else
18011 info_ptr->spe_gp_size = 0;
18013 if (TARGET_ALTIVEC_ABI)
18014 info_ptr->vrsave_mask = compute_vrsave_mask ();
18015 else
18016 info_ptr->vrsave_mask = 0;
18018 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
18019 info_ptr->vrsave_size = 4;
18020 else
18021 info_ptr->vrsave_size = 0;
18023 compute_save_world_info (info_ptr);
18025 /* Calculate the offsets. */
18026 switch (DEFAULT_ABI)
18028 case ABI_NONE:
18029 default:
18030 gcc_unreachable ();
18032 case ABI_AIX:
18033 case ABI_DARWIN:
18034 info_ptr->fp_save_offset = - info_ptr->fp_size;
18035 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
18037 if (TARGET_ALTIVEC_ABI)
18039 info_ptr->vrsave_save_offset
18040 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
18042 /* Align stack so vector save area is on a quadword boundary.
18043 The padding goes above the vectors. */
18044 if (info_ptr->altivec_size != 0)
18045 info_ptr->altivec_padding_size
18046 = info_ptr->vrsave_save_offset & 0xF;
18047 else
18048 info_ptr->altivec_padding_size = 0;
18050 info_ptr->altivec_save_offset
18051 = info_ptr->vrsave_save_offset
18052 - info_ptr->altivec_padding_size
18053 - info_ptr->altivec_size;
18054 gcc_assert (info_ptr->altivec_size == 0
18055 || info_ptr->altivec_save_offset % 16 == 0);
18057 /* Adjust for AltiVec case. */
18058 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
18060 else
18061 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
18062 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
18063 info_ptr->lr_save_offset = 2*reg_size;
18064 break;
18066 case ABI_V4:
18067 info_ptr->fp_save_offset = - info_ptr->fp_size;
18068 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
18069 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
18071 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
18073 /* Align stack so SPE GPR save area is aligned on a
18074 double-word boundary. */
18075 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
18076 info_ptr->spe_padding_size
18077 = 8 - (-info_ptr->cr_save_offset % 8);
18078 else
18079 info_ptr->spe_padding_size = 0;
18081 info_ptr->spe_gp_save_offset
18082 = info_ptr->cr_save_offset
18083 - info_ptr->spe_padding_size
18084 - info_ptr->spe_gp_size;
18086 /* Adjust for SPE case. */
18087 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
18089 else if (TARGET_ALTIVEC_ABI)
18091 info_ptr->vrsave_save_offset
18092 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
18094 /* Align stack so vector save area is on a quadword boundary. */
18095 if (info_ptr->altivec_size != 0)
18096 info_ptr->altivec_padding_size
18097 = 16 - (-info_ptr->vrsave_save_offset % 16);
18098 else
18099 info_ptr->altivec_padding_size = 0;
18101 info_ptr->altivec_save_offset
18102 = info_ptr->vrsave_save_offset
18103 - info_ptr->altivec_padding_size
18104 - info_ptr->altivec_size;
18106 /* Adjust for AltiVec case. */
18107 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
18109 else
18110 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
18111 info_ptr->ehrd_offset -= ehrd_size;
18112 info_ptr->lr_save_offset = reg_size;
18113 break;
18116 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
18117 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
18118 + info_ptr->gp_size
18119 + info_ptr->altivec_size
18120 + info_ptr->altivec_padding_size
18121 + info_ptr->spe_gp_size
18122 + info_ptr->spe_padding_size
18123 + ehrd_size
18124 + info_ptr->cr_size
18125 + info_ptr->vrsave_size,
18126 save_align);
18128 non_fixed_size = (info_ptr->vars_size
18129 + info_ptr->parm_size
18130 + info_ptr->save_size);
18132 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
18133 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
18135 /* Determine if we need to save the link register. */
18136 if (info_ptr->calls_p
18137 || (DEFAULT_ABI == ABI_AIX
18138 && crtl->profile
18139 && !TARGET_PROFILE_KERNEL)
18140 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
18141 #ifdef TARGET_RELOCATABLE
18142 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
18143 #endif
18144 || rs6000_ra_ever_killed ())
18145 info_ptr->lr_save_p = 1;
18147 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
18148 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
18149 && call_used_regs[STATIC_CHAIN_REGNUM]);
18150 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
18151 using_static_chain_p);
18153 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
18154 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
18155 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
18156 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
18157 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
18158 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
18159 info_ptr->lr_save_p = 1;
18161 if (info_ptr->lr_save_p)
18162 df_set_regs_ever_live (LR_REGNO, true);
18164 /* Determine if we need to allocate any stack frame:
18166 For AIX we need to push the stack if a frame pointer is needed
18167 (because the stack might be dynamically adjusted), if we are
18168 debugging, if we make calls, or if the sum of fp_save, gp_save,
18169 and local variables is more than the space needed to save all
18170 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
18171 + 18*8 = 288 (GPR13 reserved).
18173 For V.4 we don't have the stack cushion that AIX uses, but assume
18174 that the debugger can handle stackless frames. */
18176 if (info_ptr->calls_p)
18177 info_ptr->push_p = 1;
18179 else if (DEFAULT_ABI == ABI_V4)
18180 info_ptr->push_p = non_fixed_size != 0;
18182 else if (frame_pointer_needed)
18183 info_ptr->push_p = 1;
18185 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
18186 info_ptr->push_p = 1;
18188 else
18189 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
18191 /* Zero offsets if we're not saving those registers. */
18192 if (info_ptr->fp_size == 0)
18193 info_ptr->fp_save_offset = 0;
18195 if (info_ptr->gp_size == 0)
18196 info_ptr->gp_save_offset = 0;
18198 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
18199 info_ptr->altivec_save_offset = 0;
18201 /* Zero VRSAVE offset if not saved and restored. */
18202 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
18203 info_ptr->vrsave_save_offset = 0;
18205 if (! TARGET_SPE_ABI
18206 || info_ptr->spe_64bit_regs_used == 0
18207 || info_ptr->spe_gp_size == 0)
18208 info_ptr->spe_gp_save_offset = 0;
18210 if (! info_ptr->lr_save_p)
18211 info_ptr->lr_save_offset = 0;
18213 if (! info_ptr->cr_save_p)
18214 info_ptr->cr_save_offset = 0;
18216 return info_ptr;
18219 /* Return true if the current function uses any GPRs in 64-bit SIMD
18220 mode. */
18222 static bool
18223 spe_func_has_64bit_regs_p (void)
18225 rtx insns, insn;
18227 /* Functions that save and restore all the call-saved registers will
18228 need to save/restore the registers in 64-bits. */
18229 if (crtl->calls_eh_return
18230 || cfun->calls_setjmp
18231 || crtl->has_nonlocal_goto)
18232 return true;
18234 insns = get_insns ();
18236 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
18238 if (INSN_P (insn))
18240 rtx i;
18242 /* FIXME: This should be implemented with attributes...
18244 (set_attr "spe64" "true") ... then,
18245 if (get_spe64(insn)) return true;
18247 It's the only reliable way to do the stuff below. */
18249 i = PATTERN (insn);
18250 if (GET_CODE (i) == SET)
18252 enum machine_mode mode = GET_MODE (SET_SRC (i));
18254 if (SPE_VECTOR_MODE (mode))
18255 return true;
18256 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
18257 return true;
18262 return false;
18265 static void
18266 debug_stack_info (rs6000_stack_t *info)
18268 const char *abi_string;
18270 if (! info)
18271 info = rs6000_stack_info ();
18273 fprintf (stderr, "\nStack information for function %s:\n",
18274 ((current_function_decl && DECL_NAME (current_function_decl))
18275 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
18276 : "<unknown>"));
18278 switch (info->abi)
18280 default: abi_string = "Unknown"; break;
18281 case ABI_NONE: abi_string = "NONE"; break;
18282 case ABI_AIX: abi_string = "AIX"; break;
18283 case ABI_DARWIN: abi_string = "Darwin"; break;
18284 case ABI_V4: abi_string = "V.4"; break;
18287 fprintf (stderr, "\tABI = %5s\n", abi_string);
18289 if (TARGET_ALTIVEC_ABI)
18290 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
18292 if (TARGET_SPE_ABI)
18293 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
18295 if (info->first_gp_reg_save != 32)
18296 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
18298 if (info->first_fp_reg_save != 64)
18299 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
18301 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
18302 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
18303 info->first_altivec_reg_save);
18305 if (info->lr_save_p)
18306 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
18308 if (info->cr_save_p)
18309 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
18311 if (info->vrsave_mask)
18312 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
18314 if (info->push_p)
18315 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
18317 if (info->calls_p)
18318 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
18320 if (info->gp_save_offset)
18321 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
18323 if (info->fp_save_offset)
18324 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
18326 if (info->altivec_save_offset)
18327 fprintf (stderr, "\taltivec_save_offset = %5d\n",
18328 info->altivec_save_offset);
18330 if (info->spe_gp_save_offset)
18331 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
18332 info->spe_gp_save_offset);
18334 if (info->vrsave_save_offset)
18335 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
18336 info->vrsave_save_offset);
18338 if (info->lr_save_offset)
18339 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
18341 if (info->cr_save_offset)
18342 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
18344 if (info->varargs_save_offset)
18345 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
18347 if (info->total_size)
18348 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18349 info->total_size);
18351 if (info->vars_size)
18352 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18353 info->vars_size);
18355 if (info->parm_size)
18356 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
18358 if (info->fixed_size)
18359 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
18361 if (info->gp_size)
18362 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
18364 if (info->spe_gp_size)
18365 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
18367 if (info->fp_size)
18368 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
18370 if (info->altivec_size)
18371 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
18373 if (info->vrsave_size)
18374 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
18376 if (info->altivec_padding_size)
18377 fprintf (stderr, "\taltivec_padding_size= %5d\n",
18378 info->altivec_padding_size);
18380 if (info->spe_padding_size)
18381 fprintf (stderr, "\tspe_padding_size = %5d\n",
18382 info->spe_padding_size);
18384 if (info->cr_size)
18385 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
18387 if (info->save_size)
18388 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18390 if (info->reg_size != 4)
18391 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18393 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18395 fprintf (stderr, "\n");
18398 rtx
18399 rs6000_return_addr (int count, rtx frame)
18401 /* Currently we don't optimize very well between prologue and body
18402 code, and for PIC code the generated code can actually be quite bad,
18403 so don't try to be too clever here. */
18404 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18406 cfun->machine->ra_needs_full_frame = 1;
18408 return
18409 gen_rtx_MEM
18410 (Pmode,
18411 memory_address
18412 (Pmode,
18413 plus_constant (Pmode,
18414 copy_to_reg
18415 (gen_rtx_MEM (Pmode,
18416 memory_address (Pmode, frame))),
18417 RETURN_ADDRESS_OFFSET)));
18420 cfun->machine->ra_need_lr = 1;
18421 return get_hard_reg_initial_val (Pmode, LR_REGNO);
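/* To illustrate the two paths above: for COUNT == 0 without PIC the
   return address is just the value LR had on entry, obtained via
   get_hard_reg_initial_val.  Otherwise we chase the back-chain: load
   the caller's SP from 0(FRAME), then load the saved LR at
   RETURN_ADDRESS_OFFSET from that, which is why ra_needs_full_frame
   is set on that path.  */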
18424 /* Say whether a function is a candidate for sibcall handling or not. */
18426 static bool
18427 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18429 tree fntype;
18431 if (decl)
18432 fntype = TREE_TYPE (decl);
18433 else
18434 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18436 /* We can't do it if the called function has more vector parameters
18437 than the current function; there's nowhere to put the VRsave code. */
18438 if (TARGET_ALTIVEC_ABI
18439 && TARGET_ALTIVEC_VRSAVE
18440 && !(decl && decl == current_function_decl))
18442 function_args_iterator args_iter;
18443 tree type;
18444 int nvreg = 0;
18446 /* Functions with vector parameters are required to have a
18447 prototype, so the argument type info must be available
18448 here. */
18449 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18450 if (TREE_CODE (type) == VECTOR_TYPE
18451 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18452 nvreg++;
18454 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18455 if (TREE_CODE (type) == VECTOR_TYPE
18456 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18457 nvreg--;
18459 if (nvreg > 0)
18460 return false;
18463 /* Under the AIX ABI we can't allow calls to non-local functions,
18464 because the callee may have a different TOC pointer from the
18465 caller and there's no way to ensure we restore the TOC when we
18466 return. With the secure-plt SYSV ABI we can't make non-local
18467 calls when -fpic/PIC because the plt call stubs use r30. */
18468 if (DEFAULT_ABI == ABI_DARWIN
18469 || (DEFAULT_ABI == ABI_AIX
18470 && decl
18471 && !DECL_EXTERNAL (decl)
18472 && (*targetm.binds_local_p) (decl))
18473 || (DEFAULT_ABI == ABI_V4
18474 && (!TARGET_SECURE_PLT
18475 || !flag_pic
18476 || (decl
18477 && (*targetm.binds_local_p) (decl)))))
18479 tree attr_list = TYPE_ATTRIBUTES (fntype);
18481 if (!lookup_attribute ("longcall", attr_list)
18482 || lookup_attribute ("shortcall", attr_list))
18483 return true;
18486 return false;
18489 /* Return NULL if INSN is valid within a low-overhead loop.
18490 Otherwise return a string describing why doloop cannot be applied.
18491 PowerPC uses the COUNT register for branch on table instructions. */
18493 static const char *
18494 rs6000_invalid_within_doloop (const_rtx insn)
18496 if (CALL_P (insn))
18497 return "Function call in the loop.";
18499 if (JUMP_P (insn)
18500 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
18501 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
18502 return "Computed branch in the loop.";
18504 return NULL;
18507 static int
18508 rs6000_ra_ever_killed (void)
18510 rtx top;
18511 rtx reg;
18512 rtx insn;
18514 if (cfun->is_thunk)
18515 return 0;
18517 if (cfun->machine->lr_save_state)
18518 return cfun->machine->lr_save_state - 1;
18520 /* regs_ever_live has LR marked as used if any sibcalls are present,
18521 but this should not force saving and restoring in the
18522 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18523 clobbers LR, so that is inappropriate. */
18525 /* Also, the prologue can generate a store into LR that
18526 doesn't really count, like this:
18528 move LR->R0
18529 bcl to set PIC register
18530 move LR->R31
18531 move R0->LR
18533 When we're called from the epilogue, we need to avoid counting
18534 this as a store. */
18536 push_topmost_sequence ();
18537 top = get_insns ();
18538 pop_topmost_sequence ();
18539 reg = gen_rtx_REG (Pmode, LR_REGNO);
18541 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18543 if (INSN_P (insn))
18545 if (CALL_P (insn))
18547 if (!SIBLING_CALL_P (insn))
18548 return 1;
18550 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18551 return 1;
18552 else if (set_of (reg, insn) != NULL_RTX
18553 && !prologue_epilogue_contains (insn))
18554 return 1;
18557 return 0;
18560 /* Emit instructions needed to load the TOC register.
18561 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
18562 there is a constant pool, or for SVR4 -fpic. */
18564 void
18565 rs6000_emit_load_toc_table (int fromprolog)
18567 rtx dest;
18568 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18570 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18572 char buf[30];
18573 rtx lab, tmp1, tmp2, got;
18575 lab = gen_label_rtx ();
18576 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18577 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18578 if (flag_pic == 2)
18579 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18580 else
18581 got = rs6000_got_sym ();
18582 tmp1 = tmp2 = dest;
18583 if (!fromprolog)
18585 tmp1 = gen_reg_rtx (Pmode);
18586 tmp2 = gen_reg_rtx (Pmode);
18588 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18589 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18590 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18591 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18593 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18595 emit_insn (gen_load_toc_v4_pic_si ());
18596 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18598 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18600 char buf[30];
18601 rtx temp0 = (fromprolog
18602 ? gen_rtx_REG (Pmode, 0)
18603 : gen_reg_rtx (Pmode));
18605 if (fromprolog)
18607 rtx symF, symL;
18609 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18610 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18612 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18613 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18615 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18616 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18617 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18619 else
18621 rtx tocsym, lab;
18623 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18624 lab = gen_label_rtx ();
18625 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18626 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18627 if (TARGET_LINK_STACK)
18628 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18629 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18631 emit_insn (gen_addsi3 (dest, temp0, dest));
18633 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18635 /* This is for AIX code running in non-PIC ELF32. */
18636 char buf[30];
18637 rtx realsym;
18638 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18639 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18641 emit_insn (gen_elf_high (dest, realsym));
18642 emit_insn (gen_elf_low (dest, dest, realsym));
18644 else
18646 gcc_assert (DEFAULT_ABI == ABI_AIX);
18648 if (TARGET_32BIT)
18649 emit_insn (gen_load_toc_aix_si (dest));
18650 else
18651 emit_insn (gen_load_toc_aix_di (dest));
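/* A rough sketch of the secure-plt SVR4 PIC sequence emitted above
   (label names illustrative only, r30 being the PIC register):

       bcl 20,31,.L1               # load_toc_v4_PIC_1 puts &.L1 into LR
   .L1: mflr 30
       addis 30,30,(got-.L1)@ha    # load_toc_v4_PIC_3b
       addi 30,30,(got-.L1)@l      # load_toc_v4_PIC_3c

   i.e. the GOT/TOC pointer is materialized PC-relative from the bcl
   return address.  */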
18655 /* Emit instructions to restore the link register after determining where
18656 its value has been stored. */
18658 void
18659 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18661 rs6000_stack_t *info = rs6000_stack_info ();
18662 rtx operands[2];
18664 operands[0] = source;
18665 operands[1] = scratch;
18667 if (info->lr_save_p)
18669 rtx frame_rtx = stack_pointer_rtx;
18670 HOST_WIDE_INT sp_offset = 0;
18671 rtx tmp;
18673 if (frame_pointer_needed
18674 || cfun->calls_alloca
18675 || info->total_size > 32767)
18677 tmp = gen_frame_mem (Pmode, frame_rtx);
18678 emit_move_insn (operands[1], tmp);
18679 frame_rtx = operands[1];
18681 else if (info->push_p)
18682 sp_offset = info->total_size;
18684 tmp = plus_constant (Pmode, frame_rtx,
18685 info->lr_save_offset + sp_offset);
18686 tmp = gen_frame_mem (Pmode, tmp);
18687 emit_move_insn (tmp, operands[0]);
18689 else
18690 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
18692 /* Freeze lr_save_p. We've just emitted rtl that depends on the
18693 state of lr_save_p so any change from here on would be a bug. In
18694 particular, stop rs6000_ra_ever_killed from considering the SET
18695 of lr we may have added just above. */
18696 cfun->machine->lr_save_state = info->lr_save_p + 1;
18699 static GTY(()) alias_set_type set = -1;
18701 alias_set_type
18702 get_TOC_alias_set (void)
18704 if (set == -1)
18705 set = new_alias_set ();
18706 return set;
18709 /* This returns nonzero if the current function uses the TOC. This is
18710 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
18711 is generated by the ABI_V4 load_toc_* patterns. */
18712 #if TARGET_ELF
18713 static int
18714 uses_TOC (void)
18716 rtx insn;
18718 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18719 if (INSN_P (insn))
18721 rtx pat = PATTERN (insn);
18722 int i;
18724 if (GET_CODE (pat) == PARALLEL)
18725 for (i = 0; i < XVECLEN (pat, 0); i++)
18727 rtx sub = XVECEXP (pat, 0, i);
18728 if (GET_CODE (sub) == USE)
18730 sub = XEXP (sub, 0);
18731 if (GET_CODE (sub) == UNSPEC
18732 && XINT (sub, 1) == UNSPEC_TOC)
18733 return 1;
18737 return 0;
18739 #endif
18741 rtx
18742 create_TOC_reference (rtx symbol, rtx largetoc_reg)
18744 rtx tocrel, tocreg, hi;
18746 if (TARGET_DEBUG_ADDR)
18748 if (GET_CODE (symbol) == SYMBOL_REF)
18749 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
18750 XSTR (symbol, 0));
18751 else
18753 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
18754 GET_RTX_NAME (GET_CODE (symbol)));
18755 debug_rtx (symbol);
18759 if (!can_create_pseudo_p ())
18760 df_set_regs_ever_live (TOC_REGISTER, true);
18762 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
18763 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
18764 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
18765 return tocrel;
18767 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
18768 if (largetoc_reg != NULL)
18770 emit_move_insn (largetoc_reg, hi);
18771 hi = largetoc_reg;
18773 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
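/* With -mcmodel=small (or before reload) the UNSPEC_TOCREL above is
   used directly as a single TOC-relative reference off r2.  For the
   medium/large code models the HIGH/LO_SUM pair typically ends up as
   an addis off r2 followed by a d-form access, roughly:

       addis 9,2,sym@toc@ha
       ld 3,sym@toc@l(9)

   with r9 standing in for LARGETOC_REG when one is supplied.  */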
18776 /* Issue assembly directives that create a reference to the given DWARF
18777 FRAME_TABLE_LABEL from the current function section. */
18778 void
18779 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
18781 fprintf (asm_out_file, "\t.ref %s\n",
18782 (* targetm.strip_name_encoding) (frame_table_label));
18785 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18786 and the change to the stack pointer. */
18788 static void
18789 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
18791 rtvec p;
18792 int i;
18793 rtx regs[3];
18795 i = 0;
18796 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18797 if (hard_frame_needed)
18798 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
18799 if (!(REGNO (fp) == STACK_POINTER_REGNUM
18800 || (hard_frame_needed
18801 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
18802 regs[i++] = fp;
18804 p = rtvec_alloc (i);
18805 while (--i >= 0)
18807 rtx mem = gen_frame_mem (BLKmode, regs[i]);
18808 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
18811 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
18814 /* Emit the correct code for allocating stack space, as insns.
18815 If COPY_REG, leave a copy of the old stack pointer (plus COPY_OFF) in it.
18816 The generated code may use hard register 0 as a temporary. */
18818 static void
18819 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
18821 rtx insn;
18822 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18823 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18824 rtx todec = gen_int_mode (-size, Pmode);
18825 rtx par, set, mem;
18827 if (INTVAL (todec) != -size)
18829 warning (0, "stack frame too large");
18830 emit_insn (gen_trap ());
18831 return;
18834 if (crtl->limit_stack)
18836 if (REG_P (stack_limit_rtx)
18837 && REGNO (stack_limit_rtx) > 1
18838 && REGNO (stack_limit_rtx) <= 31)
18840 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18841 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18842 const0_rtx));
18844 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18845 && TARGET_32BIT
18846 && DEFAULT_ABI == ABI_V4)
18848 rtx toload = gen_rtx_CONST (VOIDmode,
18849 gen_rtx_PLUS (Pmode,
18850 stack_limit_rtx,
18851 GEN_INT (size)));
18853 emit_insn (gen_elf_high (tmp_reg, toload));
18854 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18855 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18856 const0_rtx));
18858 else
18859 warning (0, "stack limit expression is not supported");
18862 if (copy_reg)
18864 if (copy_off != 0)
18865 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
18866 else
18867 emit_move_insn (copy_reg, stack_reg);
18870 if (size > 32767)
18872 /* Need a note here so that try_split doesn't get confused. */
18873 if (get_last_insn () == NULL_RTX)
18874 emit_note (NOTE_INSN_DELETED);
18875 insn = emit_move_insn (tmp_reg, todec);
18876 try_split (PATTERN (insn), insn, 0);
18877 todec = tmp_reg;
18880 insn = emit_insn (TARGET_32BIT
18881 ? gen_movsi_update_stack (stack_reg, stack_reg,
18882 todec, stack_reg)
18883 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18884 todec, stack_reg));
18885 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18886 it now and set the alias set/attributes. The above gen_*_update
18887 calls will generate a PARALLEL with the MEM set being the first
18888 operation. */
18889 par = PATTERN (insn);
18890 gcc_assert (GET_CODE (par) == PARALLEL);
18891 set = XVECEXP (par, 0, 0);
18892 gcc_assert (GET_CODE (set) == SET);
18893 mem = SET_DEST (set);
18894 gcc_assert (MEM_P (mem));
18895 MEM_NOTRAP_P (mem) = 1;
18896 set_mem_alias_set (mem, get_frame_alias_set ());
18898 RTX_FRAME_RELATED_P (insn) = 1;
18899 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18900 gen_rtx_SET (VOIDmode, stack_reg,
18901 gen_rtx_PLUS (Pmode, stack_reg,
18902 GEN_INT (-size))));
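/* The update insn emitted above corresponds to a single
   "stwu r1,-SIZE(r1)" (32-bit) or "stdu r1,-SIZE(r1)" instruction,
   or the indexed stwux/stdux form once SIZE no longer fits in 16 bits;
   either way the stack pointer is decremented and the back-chain word
   stored in one operation, so the frame chain stays valid at all
   times.  */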
18905 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
18907 #if PROBE_INTERVAL > 32768
18908 #error Cannot use indexed addressing mode for stack probing
18909 #endif
18911 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
18912 inclusive. These are offsets from the current stack pointer. */
18914 static void
18915 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
18917 /* See if we have a constant small number of probes to generate. If so,
18918 that's the easy case. */
18919 if (first + size <= 32768)
18921 HOST_WIDE_INT i;
18923 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
18924 it exceeds SIZE. If only one probe is needed, this will not
18925 generate any code. Then probe at FIRST + SIZE. */
18926 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
18927 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18928 -(first + i)));
18930 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18931 -(first + size)));
18934 /* Otherwise, do the same as above, but in a loop. Note that we must be
18935 extra careful with variables wrapping around because we might be at
18936 the very top (or the very bottom) of the address space and we have
18937 to be able to handle this case properly; in particular, we use an
18938 equality test for the loop condition. */
18939 else
18941 HOST_WIDE_INT rounded_size;
18942 rtx r12 = gen_rtx_REG (Pmode, 12);
18943 rtx r0 = gen_rtx_REG (Pmode, 0);
18945 /* Sanity check for the addressing mode we're going to use. */
18946 gcc_assert (first <= 32768);
18948 /* Step 1: round SIZE to the previous multiple of the interval. */
18950 rounded_size = size & -PROBE_INTERVAL;
18953 /* Step 2: compute initial and final value of the loop counter. */
18955 /* TEST_ADDR = SP + FIRST. */
18956 emit_insn (gen_rtx_SET (VOIDmode, r12,
18957 plus_constant (Pmode, stack_pointer_rtx,
18958 -first)));
18960 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
18961 if (rounded_size > 32768)
18963 emit_move_insn (r0, GEN_INT (-rounded_size));
18964 emit_insn (gen_rtx_SET (VOIDmode, r0,
18965 gen_rtx_PLUS (Pmode, r12, r0)));
18967 else
18968 emit_insn (gen_rtx_SET (VOIDmode, r0,
18969 plus_constant (Pmode, r12, -rounded_size)));
18972 /* Step 3: the loop
18974 while (TEST_ADDR != LAST_ADDR)
18976 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
18977 probe at TEST_ADDR
18980 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
18981 until it is equal to ROUNDED_SIZE. */
18983 if (TARGET_64BIT)
18984 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
18985 else
18986 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
18989 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
18990 that SIZE is equal to ROUNDED_SIZE. */
18992 if (size != rounded_size)
18993 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
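/* Worked example, assuming the usual PROBE_INTERVAL of 4096: with
   FIRST = 16384 and SIZE = 10000 the small-count path above probes at
   sp-20480, sp-24576 and finally sp-26384 (= FIRST + SIZE).  */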
18997 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
18998 absolute addresses. */
19000 const char *
19001 output_probe_stack_range (rtx reg1, rtx reg2)
19003 static int labelno = 0;
19004 char loop_lab[32], end_lab[32];
19005 rtx xops[2];
19007 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
19008 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
19010 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
19012 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
19013 xops[0] = reg1;
19014 xops[1] = reg2;
19015 if (TARGET_64BIT)
19016 output_asm_insn ("cmpd 0,%0,%1", xops);
19017 else
19018 output_asm_insn ("cmpw 0,%0,%1", xops);
19020 fputs ("\tbeq 0,", asm_out_file);
19021 assemble_name_raw (asm_out_file, end_lab);
19022 fputc ('\n', asm_out_file);
19024 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
19025 xops[1] = GEN_INT (-PROBE_INTERVAL);
19026 output_asm_insn ("addi %0,%0,%1", xops);
19028 /* Probe at TEST_ADDR and branch. */
19029 xops[1] = gen_rtx_REG (Pmode, 0);
19030 output_asm_insn ("stw %1,0(%0)", xops);
19031 fprintf (asm_out_file, "\tb ");
19032 assemble_name_raw (asm_out_file, loop_lab);
19033 fputc ('\n', asm_out_file);
19035 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
19037 return "";
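/* The emitted loop looks roughly like this (64-bit case, assuming the
   usual 4096-byte probe interval; label numbers illustrative):

   .LPSRL0:
       cmpd 0,%reg1,%reg2
       beq 0,.LPSRE0
       addi %reg1,%reg1,-4096
       stw 0,0(%reg1)
       b .LPSRL0
   .LPSRE0:
*/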
19040 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
19041 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
19042 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
19043 deduce these equivalences by itself so it wasn't necessary to hold
19044 its hand so much. Don't be tempted to always supply d2_f_d_e with
19045 the actual cfa register, i.e. r31 when we are using a hard frame
19046 pointer. That fails when saving regs off r1, and sched moves the
19047 r31 setup past the reg saves. */
19049 static rtx
19050 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
19051 rtx reg2, rtx rreg)
19053 rtx real, temp;
19055 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
19057 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
19058 int i;
19060 gcc_checking_assert (val == 0);
19061 real = PATTERN (insn);
19062 if (GET_CODE (real) == PARALLEL)
19063 for (i = 0; i < XVECLEN (real, 0); i++)
19064 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
19066 rtx set = XVECEXP (real, 0, i);
19068 RTX_FRAME_RELATED_P (set) = 1;
19070 RTX_FRAME_RELATED_P (insn) = 1;
19071 return insn;
19074 /* copy_rtx will not make unique copies of registers, so we need to
19075 ensure we don't have unwanted sharing here. */
19076 if (reg == reg2)
19077 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
19079 if (reg == rreg)
19080 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
19082 real = copy_rtx (PATTERN (insn));
19084 if (reg2 != NULL_RTX)
19085 real = replace_rtx (real, reg2, rreg);
19087 if (REGNO (reg) == STACK_POINTER_REGNUM)
19088 gcc_checking_assert (val == 0);
19089 else
19090 real = replace_rtx (real, reg,
19091 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
19092 STACK_POINTER_REGNUM),
19093 GEN_INT (val)));
19095 /* We expect that 'real' is either a SET or a PARALLEL containing
19096 SETs (and possibly other stuff). In a PARALLEL, all the SETs
19097 are important so they all have to be marked RTX_FRAME_RELATED_P. */
19099 if (GET_CODE (real) == SET)
19101 rtx set = real;
19103 temp = simplify_rtx (SET_SRC (set));
19104 if (temp)
19105 SET_SRC (set) = temp;
19106 temp = simplify_rtx (SET_DEST (set));
19107 if (temp)
19108 SET_DEST (set) = temp;
19109 if (GET_CODE (SET_DEST (set)) == MEM)
19111 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
19112 if (temp)
19113 XEXP (SET_DEST (set), 0) = temp;
19116 else
19118 int i;
19120 gcc_assert (GET_CODE (real) == PARALLEL);
19121 for (i = 0; i < XVECLEN (real, 0); i++)
19122 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
19124 rtx set = XVECEXP (real, 0, i);
19126 temp = simplify_rtx (SET_SRC (set));
19127 if (temp)
19128 SET_SRC (set) = temp;
19129 temp = simplify_rtx (SET_DEST (set));
19130 if (temp)
19131 SET_DEST (set) = temp;
19132 if (GET_CODE (SET_DEST (set)) == MEM)
19134 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
19135 if (temp)
19136 XEXP (SET_DEST (set), 0) = temp;
19138 RTX_FRAME_RELATED_P (set) = 1;
19142 RTX_FRAME_RELATED_P (insn) = 1;
19143 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
19145 return insn;
19148 /* Returns an insn that has a vrsave set operation with the
19149 appropriate CLOBBERs. */
19151 static rtx
19152 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
19154 int nclobs, i;
19155 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
19156 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
19158 clobs[0]
19159 = gen_rtx_SET (VOIDmode,
19160 vrsave,
19161 gen_rtx_UNSPEC_VOLATILE (SImode,
19162 gen_rtvec (2, reg, vrsave),
19163 UNSPECV_SET_VRSAVE));
19165 nclobs = 1;
19167 /* We need to clobber the registers in the mask so the scheduler
19168 does not move sets to VRSAVE before sets of AltiVec registers.
19170 However, if the function receives nonlocal gotos, reload will set
19171 all call saved registers live. We will end up with:
19173 (set (reg 999) (mem))
19174 (parallel [ (set (reg vrsave) (unspec blah))
19175 (clobber (reg 999))])
19177 The clobber will cause the store into reg 999 to be dead, and
19178 flow will attempt to delete an epilogue insn. In this case, we
19179 need an unspec use/set of the register. */
19181 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19182 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19184 if (!epiloguep || call_used_regs [i])
19185 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
19186 gen_rtx_REG (V4SImode, i));
19187 else
19189 rtx reg = gen_rtx_REG (V4SImode, i);
19191 clobs[nclobs++]
19192 = gen_rtx_SET (VOIDmode,
19193 reg,
19194 gen_rtx_UNSPEC (V4SImode,
19195 gen_rtvec (1, reg), 27));
19199 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
19201 for (i = 0; i < nclobs; ++i)
19202 XVECEXP (insn, 0, i) = clobs[i];
19204 return insn;
19207 static rtx
19208 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
19210 rtx addr, mem;
19212 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
19213 mem = gen_frame_mem (GET_MODE (reg), addr);
19214 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
19217 static rtx
19218 gen_frame_load (rtx reg, rtx frame_reg, int offset)
19220 return gen_frame_set (reg, frame_reg, offset, false);
19223 static rtx
19224 gen_frame_store (rtx reg, rtx frame_reg, int offset)
19226 return gen_frame_set (reg, frame_reg, offset, true);
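/* For reference, gen_frame_store (REG, FRAME_REG, OFFSET) builds
   (set (mem (plus FRAME_REG (const_int OFFSET))) REG) and
   gen_frame_load builds the mirror image, so a save and its matching
   restore share one helper.  */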
19229 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
19230 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
19232 static rtx
19233 emit_frame_save (rtx frame_reg, enum machine_mode mode,
19234 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
19236 rtx reg, insn;
19238 /* Some cases that need register indexed addressing. */
19239 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
19240 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
19241 || (TARGET_E500_DOUBLE && mode == DFmode)
19242 || (TARGET_SPE_ABI
19243 && SPE_VECTOR_MODE (mode)
19244 && !SPE_CONST_OFFSET_OK (offset))));
19246 reg = gen_rtx_REG (mode, regno);
19247 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
19248 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
19249 NULL_RTX, NULL_RTX);
19252 /* Emit an offset memory reference suitable for a frame store, while
19253 converting to a valid addressing mode. */
19255 static rtx
19256 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
19258 rtx int_rtx, offset_rtx;
19260 int_rtx = GEN_INT (offset);
19262 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
19263 || (TARGET_E500_DOUBLE && mode == DFmode))
19265 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
19266 emit_move_insn (offset_rtx, int_rtx);
19268 else
19269 offset_rtx = int_rtx;
19271 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
19274 #ifndef TARGET_FIX_AND_CONTINUE
19275 #define TARGET_FIX_AND_CONTINUE 0
19276 #endif
19278 /* The lowest register these routines save is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
19279 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
19280 #define LAST_SAVRES_REGISTER 31
19281 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
19283 enum {
19284 SAVRES_LR = 0x1,
19285 SAVRES_SAVE = 0x2,
19286 SAVRES_REG = 0x0c,
19287 SAVRES_GPR = 0,
19288 SAVRES_FPR = 4,
19289 SAVRES_VR = 8
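/* A SEL value thus packs three fields: bit 0 says LR is handled, bit 1
   selects save vs. restore, and bits 2-3 give the register class.  For
   example, SAVRES_SAVE | SAVRES_FPR | SAVRES_LR = 0x2 | 0x4 | 0x1 = 0x7
   names an FPR save routine that also saves LR.  */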
19292 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
19294 /* Temporary holding space for an out-of-line register save/restore
19295 routine name. */
19296 static char savres_routine_name[30];
19298 /* Return the name for an out-of-line register save/restore routine.
19299 SEL selects the register class, save vs. restore, and LR handling. */
19301 static char *
19302 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
19304 const char *prefix = "";
19305 const char *suffix = "";
19307 /* Different targets are supposed to define
19308 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
19309 routine name could be defined with:
19311 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
19313 This is a nice idea in theory, but in reality, things are
19314 complicated in several ways:
19316 - ELF targets have save/restore routines for GPRs.
19318 - SPE targets use different prefixes for 32/64-bit registers, and
19319 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
19321 - PPC64 ELF targets have routines for save/restore of GPRs that
19322 differ in what they do with the link register, so having a set
19323 prefix doesn't work. (We only use one of the save routines at
19324 the moment, though.)
19326 - PPC32 elf targets have "exit" versions of the restore routines
19327 that restore the link register and can save some extra space.
19328 These require an extra suffix. (There are also "tail" versions
19329 of the restore routines and "GOT" versions of the save routines,
19330 but we don't generate those at present. Same problems apply,
19331 though.)
19333 We deal with all this by synthesizing our own prefix/suffix and
19334 using that for the simple sprintf call shown above. */
19335 if (TARGET_SPE)
19337 /* No floating point saves on the SPE. */
19338 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
19340 if ((sel & SAVRES_SAVE))
19341 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
19342 else
19343 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
19345 if ((sel & SAVRES_LR))
19346 suffix = "_x";
19348 else if (DEFAULT_ABI == ABI_V4)
19350 if (TARGET_64BIT)
19351 goto aix_names;
19353 if ((sel & SAVRES_REG) == SAVRES_GPR)
19354 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
19355 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19356 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
19357 else if ((sel & SAVRES_REG) == SAVRES_VR)
19358 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19359 else
19360 abort ();
19362 if ((sel & SAVRES_LR))
19363 suffix = "_x";
19365 else if (DEFAULT_ABI == ABI_AIX)
19367 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
19368 /* No out-of-line save/restore routines for GPRs on AIX. */
19369 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
19370 #endif
19372 aix_names:
19373 if ((sel & SAVRES_REG) == SAVRES_GPR)
19374 prefix = ((sel & SAVRES_SAVE)
19375 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
19376 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
19377 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19379 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
19380 if ((sel & SAVRES_LR))
19381 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
19382 else
19383 #endif
19385 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
19386 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
19389 else if ((sel & SAVRES_REG) == SAVRES_VR)
19390 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19391 else
19392 abort ();
19395 if (DEFAULT_ABI == ABI_DARWIN)
19397 /* The Darwin approach is (slightly) different, in order to be
19398 compatible with code generated by the system toolchain. There is a
19399 single symbol for the start of save sequence, and the code here
19400 embeds an offset into that code on the basis of the first register
19401 to be saved. */
19402 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
19403 if ((sel & SAVRES_REG) == SAVRES_GPR)
19404 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
19405 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
19406 (regno - 13) * 4, prefix, regno);
19407 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19408 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
19409 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
19410 else if ((sel & SAVRES_REG) == SAVRES_VR)
19411 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
19412 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
19413 else
19414 abort ();
19416 else
19417 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
19419 return savres_routine_name;
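/* Example results of the sprintf above: a 32-bit SVR4 GPR save
   starting at r28 with the LR variant yields "_savegpr_28_x", while
   the AIX/PPC64 GPR save that also stores LR yields "_savegpr0_28".  */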
19422 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
19423 SEL selects the register class, save vs. restore, and LR handling. */
19425 static rtx
19426 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
19428 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
19429 ? info->first_gp_reg_save
19430 : (sel & SAVRES_REG) == SAVRES_FPR
19431 ? info->first_fp_reg_save - 32
19432 : (sel & SAVRES_REG) == SAVRES_VR
19433 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
19434 : -1);
19435 rtx sym;
19436 int select = sel;
19438 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
19439 versions of the gpr routines. */
19440 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
19441 && info->spe_64bit_regs_used)
19442 select ^= SAVRES_FPR ^ SAVRES_GPR;
19444 /* Don't generate bogus routine names. */
19445 gcc_assert (FIRST_SAVRES_REGISTER <= regno
19446 && regno <= LAST_SAVRES_REGISTER
19447 && select >= 0 && select <= 12);
19449 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
19451 if (sym == NULL)
19453 char *name;
19455 name = rs6000_savres_routine_name (info, regno, sel);
19457 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
19458 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
19459 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
19462 return sym;
19465 /* Emit a sequence of insns, including a stack tie if needed, for
19466 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
19467 reset the stack pointer, but move the base of the frame into
19468 reg UPDT_REGNO for use by out-of-line register restore routines. */
19470 static rtx
19471 rs6000_emit_stack_reset (rs6000_stack_t *info,
19472 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
19473 unsigned updt_regno)
19475 rtx updt_reg_rtx;
19477 /* This blockage is needed so that sched doesn't decide to move
19478 the sp change before the register restores. */
19479 if (DEFAULT_ABI == ABI_V4
19480 || (TARGET_SPE_ABI
19481 && info->spe_64bit_regs_used != 0
19482 && info->first_gp_reg_save != 32))
19483 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
19485 /* If we are restoring registers out-of-line, we will be using the
19486 "exit" variants of the restore routines, which will reset the
19487 stack for us. But we do need to point updt_reg into the
19488 right place for those routines. */
19489 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
19491 if (frame_off != 0)
19492 return emit_insn (gen_add3_insn (updt_reg_rtx,
19493 frame_reg_rtx, GEN_INT (frame_off)));
19494 else if (REGNO (frame_reg_rtx) != updt_regno)
19495 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
19497 return NULL_RTX;
19500 /* Return the register number used as a pointer by out-of-line
19501 save/restore functions. */
19503 static inline unsigned
19504 ptr_regno_for_savres (int sel)
19506 if (DEFAULT_ABI == ABI_AIX)
19507 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
19508 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
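/* In other words: AIX uses r1 for the FPR routines and for any routine
   that handles LR, and r12 otherwise; Darwin uses r1 for its FPR
   routines and r11 otherwise; V.4 always uses r11.  */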
19511 /* Construct a parallel rtx describing the effect of a call to an
19512 out-of-line register save/restore routine, and emit the insn
19513 or jump_insn as appropriate. */
19515 static rtx
19516 rs6000_emit_savres_rtx (rs6000_stack_t *info,
19517 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
19518 enum machine_mode reg_mode, int sel)
19520 int i;
19521 int offset, start_reg, end_reg, n_regs, use_reg;
19522 int reg_size = GET_MODE_SIZE (reg_mode);
19523 rtx sym;
19524 rtvec p;
19525 rtx par, insn;
19527 offset = 0;
19528 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19529 ? info->first_gp_reg_save
19530 : (sel & SAVRES_REG) == SAVRES_FPR
19531 ? info->first_fp_reg_save
19532 : (sel & SAVRES_REG) == SAVRES_VR
19533 ? info->first_altivec_reg_save
19534 : -1);
19535 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19536 ? 32
19537 : (sel & SAVRES_REG) == SAVRES_FPR
19538 ? 64
19539 : (sel & SAVRES_REG) == SAVRES_VR
19540 ? LAST_ALTIVEC_REGNO + 1
19541 : -1);
19542 n_regs = end_reg - start_reg;
19543 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
19544 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
19545 + n_regs);
19547 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19548 RTVEC_ELT (p, offset++) = ret_rtx;
19550 RTVEC_ELT (p, offset++)
19551 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
19553 sym = rs6000_savres_routine_sym (info, sel);
19554 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
19556 use_reg = ptr_regno_for_savres (sel);
19557 if ((sel & SAVRES_REG) == SAVRES_VR)
19559 /* Vector regs are saved/restored using [reg+reg] addressing. */
19560 RTVEC_ELT (p, offset++)
19561 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19562 RTVEC_ELT (p, offset++)
19563 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
19565 else
19566 RTVEC_ELT (p, offset++)
19567 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19569 for (i = 0; i < end_reg - start_reg; i++)
19570 RTVEC_ELT (p, i + offset)
19571 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
19572 frame_reg_rtx, save_area_offset + reg_size * i,
19573 (sel & SAVRES_SAVE) != 0);
19575 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19576 RTVEC_ELT (p, i + offset)
19577 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
19579 par = gen_rtx_PARALLEL (VOIDmode, p);
19581 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19583 insn = emit_jump_insn (par);
19584 JUMP_LABEL (insn) = ret_rtx;
19586 else
19587 insn = emit_insn (par);
19588 return insn;
19591 /* Determine whether GP register REG is really used. */
19593 static bool
19594 rs6000_reg_live_or_pic_offset_p (int reg)
19596 /* If the function calls eh_return, treat as used all the registers that
19597 would otherwise be checked for liveness. This is required for the PIC offset
19598 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
19599 register allocation purposes in this case. */
19601 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
19602 && (!call_used_regs[reg]
19603 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19604 && !TARGET_SINGLE_PIC_BASE
19605 && TARGET_TOC && TARGET_MINIMAL_TOC)))
19606 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19607 && !TARGET_SINGLE_PIC_BASE
19608 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19609 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
19612 /* Emit function prologue as insns. */
19614 void
19615 rs6000_emit_prologue (void)
19617 rs6000_stack_t *info = rs6000_stack_info ();
19618 enum machine_mode reg_mode = Pmode;
19619 int reg_size = TARGET_32BIT ? 4 : 8;
19620 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19621 rtx frame_reg_rtx = sp_reg_rtx;
19622 unsigned int cr_save_regno;
19623 rtx cr_save_rtx = NULL_RTX;
19624 rtx insn;
19625 int strategy;
19626 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19627 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19628 && call_used_regs[STATIC_CHAIN_REGNUM]);
19629 /* Offset to top of frame for frame_reg and sp respectively. */
19630 HOST_WIDE_INT frame_off = 0;
19631 HOST_WIDE_INT sp_off = 0;
19633 #ifdef ENABLE_CHECKING
19634 /* Track and check usage of r0, r11, r12. */
19635 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
19636 #define START_USE(R) do \
19638 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19639 reg_inuse |= 1 << (R); \
19640 } while (0)
19641 #define END_USE(R) do \
19643 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19644 reg_inuse &= ~(1 << (R)); \
19645 } while (0)
19646 #define NOT_INUSE(R) do \
19648 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19649 } while (0)
19650 #else
19651 #define START_USE(R) do {} while (0)
19652 #define END_USE(R) do {} while (0)
19653 #define NOT_INUSE(R) do {} while (0)
19654 #endif
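/* Under ENABLE_CHECKING the macros above keep a bitmask of which of
   r0, r11 and r12 the prologue currently has in use, so any
   overlapping use asserts immediately; in release builds they compile
   to nothing.  */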
19656 if (flag_stack_usage_info)
19657 current_function_static_stack_size = info->total_size;
19659 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
19660 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
19662 if (TARGET_FIX_AND_CONTINUE)
19664 /* gdb on darwin arranges to forward a function from the old
19665 address by modifying the first 5 instructions of the function
19666 to branch to the overriding function. This is necessary to
19667 permit function pointers that point to the old function to
19668 actually forward to the new function. */
19669 emit_insn (gen_nop ());
19670 emit_insn (gen_nop ());
19671 emit_insn (gen_nop ());
19672 emit_insn (gen_nop ());
19673 emit_insn (gen_nop ());
19676 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
19678 reg_mode = V2SImode;
19679 reg_size = 8;
19682 /* Handle world saves specially here. */
19683 if (WORLD_SAVE_P (info))
19685 int i, j, sz;
19686 rtx treg;
19687 rtvec p;
19688 rtx reg0;
19690 /* save_world expects lr in r0. */
19691 reg0 = gen_rtx_REG (Pmode, 0);
19692 if (info->lr_save_p)
19694 insn = emit_move_insn (reg0,
19695 gen_rtx_REG (Pmode, LR_REGNO));
19696 RTX_FRAME_RELATED_P (insn) = 1;
19699 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
19700 assumptions about the offsets of various bits of the stack
19701 frame. */
19702 gcc_assert (info->gp_save_offset == -220
19703 && info->fp_save_offset == -144
19704 && info->lr_save_offset == 8
19705 && info->cr_save_offset == 4
19706 && info->push_p
19707 && info->lr_save_p
19708 && (!crtl->calls_eh_return
19709 || info->ehrd_offset == -432)
19710 && info->vrsave_save_offset == -224
19711 && info->altivec_save_offset == -416);
19713 treg = gen_rtx_REG (SImode, 11);
19714 emit_move_insn (treg, GEN_INT (-info->total_size));
19716 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
19717 in R11. It also clobbers R12, so beware! */
19720 /* Preserve CR2 for save_world prologues. */
19720 sz = 5;
19721 sz += 32 - info->first_gp_reg_save;
19722 sz += 64 - info->first_fp_reg_save;
19723 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
19724 p = rtvec_alloc (sz);
19725 j = 0;
19726 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
19727 gen_rtx_REG (SImode,
19728 LR_REGNO));
19729 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
19730 gen_rtx_SYMBOL_REF (Pmode,
19731 "*save_world"));
19732 /* We do floats first so that the instruction pattern matches
19733 properly. */
19734 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19735 RTVEC_ELT (p, j++)
19736 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19737 ? DFmode : SFmode,
19738 info->first_fp_reg_save + i),
19739 frame_reg_rtx,
19740 info->fp_save_offset + frame_off + 8 * i);
19741 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
19742 RTVEC_ELT (p, j++)
19743 = gen_frame_store (gen_rtx_REG (V4SImode,
19744 info->first_altivec_reg_save + i),
19745 frame_reg_rtx,
19746 info->altivec_save_offset + frame_off + 16 * i);
19747 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19748 RTVEC_ELT (p, j++)
19749 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19750 frame_reg_rtx,
19751 info->gp_save_offset + frame_off + reg_size * i);
19753 /* CR register traditionally saved as CR2. */
19754 RTVEC_ELT (p, j++)
19755 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
19756 frame_reg_rtx, info->cr_save_offset + frame_off);
19757 /* Record the save of R0, which holds the old LR. */
19758 if (info->lr_save_p)
19759 RTVEC_ELT (p, j++)
19760 = gen_frame_store (reg0,
19761 frame_reg_rtx, info->lr_save_offset + frame_off);
19762 /* Explain what happens to the stack pointer. */
19764 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
19765 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
19768 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19769 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19770 treg, GEN_INT (-info->total_size));
19771 sp_off = frame_off = info->total_size;
19774 strategy = info->savres_strategy;
19776 /* For V.4, update stack before we do any saving and set back pointer. */
19777 if (! WORLD_SAVE_P (info)
19778 && info->push_p
19779 && (DEFAULT_ABI == ABI_V4
19780 || crtl->calls_eh_return))
19782 bool need_r11 = (TARGET_SPE
19783 ? (!(strategy & SAVE_INLINE_GPRS)
19784 && info->spe_64bit_regs_used == 0)
19785 : (!(strategy & SAVE_INLINE_FPRS)
19786 || !(strategy & SAVE_INLINE_GPRS)
19787 || !(strategy & SAVE_INLINE_VRS)));
19788 int ptr_regno = -1;
19789 rtx ptr_reg = NULL_RTX;
19790 int ptr_off = 0;
19792 if (info->total_size < 32767)
19793 frame_off = info->total_size;
19794 else if (need_r11)
19795 ptr_regno = 11;
19796 else if (info->cr_save_p
19797 || info->lr_save_p
19798 || info->first_fp_reg_save < 64
19799 || info->first_gp_reg_save < 32
19800 || info->altivec_size != 0
19801 || info->vrsave_mask != 0
19802 || crtl->calls_eh_return)
19803 ptr_regno = 12;
19804 else
19806 /* The prologue won't be saving any regs so there is no need
19807 to set up a frame register to access any frame save area.
19808 We also won't be using frame_off anywhere below, but set
19809 the correct value anyway to protect against future
19810 changes to this function. */
19811 frame_off = info->total_size;
19813 if (ptr_regno != -1)
19815 /* Set up the frame offset to that needed by the first
19816 out-of-line save function. */
19817 START_USE (ptr_regno);
19818 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19819 frame_reg_rtx = ptr_reg;
19820 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
19821 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
19822 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
19823 ptr_off = info->gp_save_offset + info->gp_size;
19824 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
19825 ptr_off = info->altivec_save_offset + info->altivec_size;
19826 frame_off = -ptr_off;
19828 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
19829 sp_off = info->total_size;
19830 if (frame_reg_rtx != sp_reg_rtx)
19831 rs6000_emit_stack_tie (frame_reg_rtx, false);
19834 /* If we use the link register, get it into r0. */
19835 if (!WORLD_SAVE_P (info) && info->lr_save_p)
19837 rtx addr, reg, mem;
19839 reg = gen_rtx_REG (Pmode, 0);
19840 START_USE (0);
19841 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
19842 RTX_FRAME_RELATED_P (insn) = 1;
19844 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
19845 | SAVE_NOINLINE_FPRS_SAVES_LR)))
19847 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19848 GEN_INT (info->lr_save_offset + frame_off));
19849 mem = gen_rtx_MEM (Pmode, addr);
19850 /* This should not be of rs6000_sr_alias_set, because of
19851 __builtin_return_address. */
19853 insn = emit_move_insn (mem, reg);
19854 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19855 NULL_RTX, NULL_RTX);
19856 END_USE (0);
19860 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
19861 r12 will be needed by the out-of-line gpr save. */
19862 cr_save_regno = (DEFAULT_ABI == ABI_AIX
19863 && !(strategy & (SAVE_INLINE_GPRS
19864 | SAVE_NOINLINE_GPRS_SAVES_LR))
19865 ? 11 : 12);
19866 if (!WORLD_SAVE_P (info)
19867 && info->cr_save_p
19868 && REGNO (frame_reg_rtx) != cr_save_regno
19869 && !(using_static_chain_p && cr_save_regno == 11))
19871 rtx set;
19873 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
19874 START_USE (cr_save_regno);
19875 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19876 RTX_FRAME_RELATED_P (insn) = 1;
19877 /* Now, there's no way that dwarf2out_frame_debug_expr is going
19878 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
19879 But that's OK. All we have to do is specify that _one_ condition
19880 code register is saved in this stack slot. The thrower's epilogue
19881 will then restore all the call-saved registers.
19882 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
19883 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
19884 gen_rtx_REG (SImode, CR2_REGNO));
19885 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19888 /* Do any required saving of FPRs. If only one or two to save, do
19889 it ourselves. Otherwise, call an out-of-line save function. */
19890 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
19892 int i;
19893 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19894 if (save_reg_p (info->first_fp_reg_save + i))
19895 emit_frame_save (frame_reg_rtx,
19896 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19897 ? DFmode : SFmode),
19898 info->first_fp_reg_save + i,
19899 info->fp_save_offset + frame_off + 8 * i,
19900 sp_off - frame_off);
19902 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
19904 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
19905 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
19906 unsigned ptr_regno = ptr_regno_for_savres (sel);
19907 rtx ptr_reg = frame_reg_rtx;
19909 if (REGNO (frame_reg_rtx) == ptr_regno)
19910 gcc_checking_assert (frame_off == 0);
19911 else
19913 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19914 NOT_INUSE (ptr_regno);
19915 emit_insn (gen_add3_insn (ptr_reg,
19916 frame_reg_rtx, GEN_INT (frame_off)));
19918 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19919 info->fp_save_offset,
19920 info->lr_save_offset,
19921 DFmode, sel);
19922 rs6000_frame_related (insn, ptr_reg, sp_off,
19923 NULL_RTX, NULL_RTX);
19924 if (lr)
19925 END_USE (0);
19928 /* Save GPRs. This is done as a PARALLEL if we are using
19929 the store-multiple instructions. */
19930 if (!WORLD_SAVE_P (info)
19931 && TARGET_SPE_ABI
19932 && info->spe_64bit_regs_used != 0
19933 && info->first_gp_reg_save != 32)
19935 int i;
19936 rtx spe_save_area_ptr;
19937 HOST_WIDE_INT save_off;
19938 int ool_adjust = 0;
19940 /* Determine whether we can address all of the registers that need
19941 to be saved with an offset from frame_reg_rtx that fits in
19942 the small const field for SPE memory instructions. */
19943 int spe_regs_addressable
19944 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
19945 + reg_size * (32 - info->first_gp_reg_save - 1))
19946 && (strategy & SAVE_INLINE_GPRS));
19948 if (spe_regs_addressable)
19950 spe_save_area_ptr = frame_reg_rtx;
19951 save_off = frame_off;
19953 else
19955 /* Make r11 point to the start of the SPE save area. We need
19956 to be careful here if r11 is holding the static chain. If
19957 it is, then temporarily save it in r0. */
19958 HOST_WIDE_INT offset;
19960 if (!(strategy & SAVE_INLINE_GPRS))
19961 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
19962 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
19963 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
19964 save_off = frame_off - offset;
19966 if (using_static_chain_p)
19968 rtx r0 = gen_rtx_REG (Pmode, 0);
19970 START_USE (0);
19971 gcc_assert (info->first_gp_reg_save > 11);
19973 emit_move_insn (r0, spe_save_area_ptr);
19975 else if (REGNO (frame_reg_rtx) != 11)
19976 START_USE (11);
19978 emit_insn (gen_addsi3 (spe_save_area_ptr,
19979 frame_reg_rtx, GEN_INT (offset)));
19980 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
19981 frame_off = -info->spe_gp_save_offset + ool_adjust;
19984 if ((strategy & SAVE_INLINE_GPRS))
19986 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19987 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19988 emit_frame_save (spe_save_area_ptr, reg_mode,
19989 info->first_gp_reg_save + i,
19990 (info->spe_gp_save_offset + save_off
19991 + reg_size * i),
19992 sp_off - save_off);
19994 else
19996 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
19997 info->spe_gp_save_offset + save_off,
19998 0, reg_mode,
19999 SAVRES_SAVE | SAVRES_GPR);
20001 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
20002 NULL_RTX, NULL_RTX);
20005 /* Move the static chain pointer back. */
20006 if (!spe_regs_addressable)
20008 if (using_static_chain_p)
20010 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
20011 END_USE (0);
20013 else if (REGNO (frame_reg_rtx) != 11)
20014 END_USE (11);
20017 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
20019 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
20020 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
20021 unsigned ptr_regno = ptr_regno_for_savres (sel);
20022 rtx ptr_reg = frame_reg_rtx;
20023 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
20024 int end_save = info->gp_save_offset + info->gp_size;
20025 int ptr_off;
20027 if (!ptr_set_up)
20028 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20030 /* Need to adjust r11 (r12) if we saved any FPRs. */
20031 if (end_save + frame_off != 0)
20033 rtx offset = GEN_INT (end_save + frame_off);
20035 if (ptr_set_up)
20036 frame_off = -end_save;
20037 else
20038 NOT_INUSE (ptr_regno);
20039 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20041 else if (!ptr_set_up)
20043 NOT_INUSE (ptr_regno);
20044 emit_move_insn (ptr_reg, frame_reg_rtx);
20046 ptr_off = -end_save;
20047 insn = rs6000_emit_savres_rtx (info, ptr_reg,
20048 info->gp_save_offset + ptr_off,
20049 info->lr_save_offset + ptr_off,
20050 reg_mode, sel);
20051 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
20052 NULL_RTX, NULL_RTX);
20053 if (lr)
20054 END_USE (0);
20056 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
20058 rtvec p;
20059 int i;
20060 p = rtvec_alloc (32 - info->first_gp_reg_save);
20061 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20062 RTVEC_ELT (p, i)
20063 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
20064 frame_reg_rtx,
20065 info->gp_save_offset + frame_off + reg_size * i);
20066 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20067 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20068 NULL_RTX, NULL_RTX);
20070 else if (!WORLD_SAVE_P (info))
20072 int i;
20073 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20074 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
20075 emit_frame_save (frame_reg_rtx, reg_mode,
20076 info->first_gp_reg_save + i,
20077 info->gp_save_offset + frame_off + reg_size * i,
20078 sp_off - frame_off);
20081 if (crtl->calls_eh_return)
20083 unsigned int i;
20084 rtvec p;
20086 for (i = 0; ; ++i)
20088 unsigned int regno = EH_RETURN_DATA_REGNO (i);
20089 if (regno == INVALID_REGNUM)
20090 break;
20093 p = rtvec_alloc (i);
20095 for (i = 0; ; ++i)
20097 unsigned int regno = EH_RETURN_DATA_REGNO (i);
20098 if (regno == INVALID_REGNUM)
20099 break;
20101 insn
20102 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
20103 sp_reg_rtx,
20104 info->ehrd_offset + sp_off + reg_size * (int) i);
20105 RTVEC_ELT (p, i) = insn;
20106 RTX_FRAME_RELATED_P (insn) = 1;
20109 insn = emit_insn (gen_blockage ());
20110 RTX_FRAME_RELATED_P (insn) = 1;
20111 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
20114 /* In the AIX ABI we need to make sure r2 is really saved. */
20115 if (TARGET_AIX && crtl->calls_eh_return)
20117 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
20118 rtx save_insn, join_insn, note;
20119 long toc_restore_insn;
20121 tmp_reg = gen_rtx_REG (Pmode, 11);
20122 tmp_reg_si = gen_rtx_REG (SImode, 11);
20123 if (using_static_chain_p)
20125 START_USE (0);
20126 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
20128 else
20129 START_USE (11);
20130 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
20131 /* Peek at instruction to which this function returns. If it's
20132 restoring r2, then we know we've already saved r2. We can't
20133 unconditionally save r2 because the value we have will already
20134 be updated if we arrived at this function via a plt call or
20135 toc adjusting stub. */
20136 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
20137 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
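/* An aside on these magic numbers: they appear to be the encodings of
   the TOC reload emitted after a call -- 0x80410014 is "lwz r2,20(r1)"
   and 0xE8410028 is "ld r2,40(r1)".  The fetched word is matched in two
   16-bit halves (xor with the high half, then compare against the low
   half) because an SImode immediate only carries 16 bits.  */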
20138 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
20139 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
20140 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
20141 validate_condition_mode (EQ, CCUNSmode);
20142 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
20143 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
20144 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
20145 toc_save_done = gen_label_rtx ();
20146 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
20147 gen_rtx_EQ (VOIDmode, compare_result,
20148 const0_rtx),
20149 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
20150 pc_rtx);
20151 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
20152 JUMP_LABEL (jump) = toc_save_done;
20153 LABEL_NUSES (toc_save_done) += 1;
20155 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
20156 TOC_REGNUM, frame_off + 5 * reg_size,
20157 sp_off - frame_off);
20159 emit_label (toc_save_done);
20161 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
20162 have a CFG that has different saves along different paths.
20163 Move the note to a dummy blockage insn, which describes that
20164 R2 is unconditionally saved after the label. */
20165 /* ??? An alternate representation might be a special insn pattern
20166 containing both the branch and the store.  That might give the
20167 code that minimizes the number of DW_CFA_advance opcodes more
20168 freedom in placing the annotations. */
20169 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
20170 if (note)
20171 remove_note (save_insn, note);
20172 else
20173 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
20174 copy_rtx (PATTERN (save_insn)), NULL_RTX);
20175 RTX_FRAME_RELATED_P (save_insn) = 0;
20177 join_insn = emit_insn (gen_blockage ());
20178 REG_NOTES (join_insn) = note;
20179 RTX_FRAME_RELATED_P (join_insn) = 1;
20181 if (using_static_chain_p)
20183 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
20184 END_USE (0);
20186 else
20187 END_USE (11);
20190 /* Save CR if we use any that must be preserved. */
20191 if (!WORLD_SAVE_P (info) && info->cr_save_p)
20193 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20194 GEN_INT (info->cr_save_offset + frame_off));
20195 rtx mem = gen_frame_mem (SImode, addr);
20196 /* See the large comment above about why CR2_REGNO is used. */
20197 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
20199 /* If we didn't copy cr before, do so now using r0. */
20200 if (cr_save_rtx == NULL_RTX)
20202 rtx set;
20204 START_USE (0);
20205 cr_save_rtx = gen_rtx_REG (SImode, 0);
20206 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20207 RTX_FRAME_RELATED_P (insn) = 1;
20208 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
20209 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20211 insn = emit_move_insn (mem, cr_save_rtx);
20212 END_USE (REGNO (cr_save_rtx));
20214 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20215 NULL_RTX, NULL_RTX);
20218 /* Update stack and set back pointer unless this is V.4,
20219 for which it was done previously. */
20220 if (!WORLD_SAVE_P (info) && info->push_p
20221 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
20223 rtx ptr_reg = NULL;
20224 int ptr_off = 0;
20226 /* If saving altivec regs we need to be able to address all save
20227 locations using a 16-bit offset. */
20228 if ((strategy & SAVE_INLINE_VRS) == 0
20229 || (info->altivec_size != 0
20230 && (info->altivec_save_offset + info->altivec_size - 16
20231 + info->total_size - frame_off) > 32767)
20232 || (info->vrsave_size != 0
20233 && (info->vrsave_save_offset
20234 + info->total_size - frame_off) > 32767))
20236 int sel = SAVRES_SAVE | SAVRES_VR;
20237 unsigned ptr_regno = ptr_regno_for_savres (sel);
20239 if (using_static_chain_p
20240 && ptr_regno == STATIC_CHAIN_REGNUM)
20241 ptr_regno = 12;
20242 if (REGNO (frame_reg_rtx) != ptr_regno)
20243 START_USE (ptr_regno);
20244 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20245 frame_reg_rtx = ptr_reg;
20246 ptr_off = info->altivec_save_offset + info->altivec_size;
20247 frame_off = -ptr_off;
20249 else if (REGNO (frame_reg_rtx) == 1)
20250 frame_off = info->total_size;
20251 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20252 sp_off = info->total_size;
20253 if (frame_reg_rtx != sp_reg_rtx)
20254 rs6000_emit_stack_tie (frame_reg_rtx, false);
20257 /* Set frame pointer, if needed. */
20258 if (frame_pointer_needed)
20260 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
20261 sp_reg_rtx);
20262 RTX_FRAME_RELATED_P (insn) = 1;
20265 /* Save AltiVec registers if needed. Save here because the red zone does
20266 not always include AltiVec registers. */
20267 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20268 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
20270 int end_save = info->altivec_save_offset + info->altivec_size;
20271 int ptr_off;
20272 /* Oddly, the vector save/restore functions point r0 at the end
20273 of the save area, then use r11 or r12 to load offsets for
20274 [reg+reg] addressing. */
20275 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20276 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
20277 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20279 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20280 NOT_INUSE (0);
20281 if (end_save + frame_off != 0)
20283 rtx offset = GEN_INT (end_save + frame_off);
20285 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20287 else
20288 emit_move_insn (ptr_reg, frame_reg_rtx);
20290 ptr_off = -end_save;
20291 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20292 info->altivec_save_offset + ptr_off,
20293 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
20294 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
20295 NULL_RTX, NULL_RTX);
20296 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20298 /* The oddity mentioned above clobbered our frame reg. */
20299 emit_move_insn (frame_reg_rtx, ptr_reg);
20300 frame_off = ptr_off;
20303 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20304 && info->altivec_size != 0)
20306 int i;
20308 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20309 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20311 rtx areg, savereg, mem;
20312 int offset;
20314 offset = (info->altivec_save_offset + frame_off
20315 + 16 * (i - info->first_altivec_reg_save));
20317 savereg = gen_rtx_REG (V4SImode, i);
20319 NOT_INUSE (0);
20320 areg = gen_rtx_REG (Pmode, 0);
20321 emit_move_insn (areg, GEN_INT (offset));
20323 /* AltiVec addressing mode is [reg+reg]. */
20324 mem = gen_frame_mem (V4SImode,
20325 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
20327 insn = emit_move_insn (mem, savereg);
20329 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20330 areg, GEN_INT (offset));
20334 /* VRSAVE is a bit vector representing which AltiVec registers
20335 are used. The OS uses this to determine which vector
20336 registers to save on a context switch. We need to save
20337 VRSAVE on the stack frame, add whatever AltiVec registers we
20338 used in this function, and do the corresponding magic in the
20339 epilogue. */
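/* Illustrative note, assuming the usual ALTIVEC_REG_BIT definition
   (0x80000000 >> (REGNO - FIRST_ALTIVEC_REGNO), i.e. v0 is the most
   significant bit): a function touching only v20 and v21 would have
   vrsave_mask == 0x00000c00.  */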
20341 if (!WORLD_SAVE_P (info)
20342 && TARGET_ALTIVEC
20343 && TARGET_ALTIVEC_VRSAVE
20344 && info->vrsave_mask != 0)
20346 rtx reg, vrsave;
20347 int offset;
20348 int save_regno;
20350 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
20351 be using r12 as frame_reg_rtx and r11 as the static chain
20352 pointer for nested functions. */
20353 save_regno = 12;
20354 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
20355 save_regno = 11;
20356 else if (REGNO (frame_reg_rtx) == 12)
20358 save_regno = 11;
20359 if (using_static_chain_p)
20360 save_regno = 0;
20363 NOT_INUSE (save_regno);
20364 reg = gen_rtx_REG (SImode, save_regno);
20365 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20366 if (TARGET_MACHO)
20367 emit_insn (gen_get_vrsave_internal (reg));
20368 else
20369 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
20371 /* Save VRSAVE. */
20372 offset = info->vrsave_save_offset + frame_off;
20373 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
20375 /* Include the registers in the mask. */
20376 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
20378 insn = emit_insn (generate_set_vrsave (reg, info, 0));
20381 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
20382 if (!TARGET_SINGLE_PIC_BASE
20383 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
20384 || (DEFAULT_ABI == ABI_V4
20385 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
20386 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
20388 /* If emit_load_toc_table will use the link register, we need to save
20389 it. We use R12 for this purpose because emit_load_toc_table
20390 can use register 0. This allows us to use a plain 'blr' to return
20391 from the procedure more often. */
20392 int save_LR_around_toc_setup = (TARGET_ELF
20393 && DEFAULT_ABI != ABI_AIX
20394 && flag_pic
20395 && ! info->lr_save_p
20396 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
20397 if (save_LR_around_toc_setup)
20399 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20400 rtx tmp = gen_rtx_REG (Pmode, 12);
20402 insn = emit_move_insn (tmp, lr);
20403 RTX_FRAME_RELATED_P (insn) = 1;
20405 rs6000_emit_load_toc_table (TRUE);
20407 insn = emit_move_insn (lr, tmp);
20408 add_reg_note (insn, REG_CFA_RESTORE, lr);
20409 RTX_FRAME_RELATED_P (insn) = 1;
20411 else
20412 rs6000_emit_load_toc_table (TRUE);
20415 #if TARGET_MACHO
20416 if (!TARGET_SINGLE_PIC_BASE
20417 && DEFAULT_ABI == ABI_DARWIN
20418 && flag_pic && crtl->uses_pic_offset_table)
20420 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20421 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
20423 /* Save and restore LR locally around this call (in R0). */
20424 if (!info->lr_save_p)
20425 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
20427 emit_insn (gen_load_macho_picbase (src));
20429 emit_move_insn (gen_rtx_REG (Pmode,
20430 RS6000_PIC_OFFSET_TABLE_REGNUM),
20431 lr);
20433 if (!info->lr_save_p)
20434 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
20436 #endif
20438 /* If we need to, save the TOC register after doing the stack setup.
20439 Do not emit eh frame info for this save. The unwinder wants info,
20440 conceptually attached to instructions in this function, about
20441 register values in the caller of this function. This R2 may have
20442 already been changed from the value in the caller.
20443 We don't attempt to write accurate DWARF EH frame info for R2
20444 because code emitted by gcc for a (non-pointer) function call
20445 doesn't save and restore R2. Instead, R2 is managed out-of-line
20446 by a linker generated plt call stub when the function resides in
20447 a shared library. This behaviour is costly to describe in DWARF,
20448 both in terms of the size of DWARF info and the time taken in the
20449 unwinder to interpret it. R2 changes, apart from the
20450 calls_eh_return case earlier in this function, are handled by
20451 linux-unwind.h frob_update_context. */
20452 if (rs6000_save_toc_in_prologue_p ())
20454 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
20455 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
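/* 5 * reg_size is the TOC slot in the linkage area: sp+20 for -m32 and
   sp+40 for -m64, the same slot that the "lwz r2,20(r1)" / "ld r2,40(r1)"
   reload matched earlier in this function reads back.  */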
20459 /* Write function prologue. */
20461 static void
20462 rs6000_output_function_prologue (FILE *file,
20463 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20465 rs6000_stack_t *info = rs6000_stack_info ();
20467 if (TARGET_DEBUG_STACK)
20468 debug_stack_info (info);
20470 /* Write .extern for any function we will call to save and restore
20471 fp values. */
20472 if (info->first_fp_reg_save < 64
20473 && !TARGET_MACHO
20474 && !TARGET_ELF)
20476 char *name;
20477 int regno = info->first_fp_reg_save - 32;
20479 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
20481 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20482 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20483 name = rs6000_savres_routine_name (info, regno, sel);
20484 fprintf (file, "\t.extern %s\n", name);
20486 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
20488 bool lr = (info->savres_strategy
20489 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20490 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20491 name = rs6000_savres_routine_name (info, regno, sel);
20492 fprintf (file, "\t.extern %s\n", name);
20496 rs6000_pic_labelno++;
20499 /* Non-zero if vmx regs are restored before the frame pop, zero if
20500 we restore after the pop when possible. */
20501 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
20503 /* Restoring cr is a two-step process: loading a reg from the frame
20504 save, then moving the reg to cr. For ABI_V4 we must let the
20505 unwinder know that the stack location is no longer valid at or
20506 before the stack deallocation, but we can't emit a cfa_restore for
20507 cr at the stack deallocation like we do for other registers.
20508 The trouble is that it is possible for the move to cr to be
20509 scheduled after the stack deallocation. So say exactly where cr
20510 is located on each of the two insns. */
20512 static rtx
20513 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
20515 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
20516 rtx reg = gen_rtx_REG (SImode, regno);
20517 rtx insn = emit_move_insn (reg, mem);
20519 if (!exit_func && DEFAULT_ABI == ABI_V4)
20521 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20522 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
20524 add_reg_note (insn, REG_CFA_REGISTER, set);
20525 RTX_FRAME_RELATED_P (insn) = 1;
20527 return reg;
20530 /* Reload CR from REG. */
20532 static void
20533 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
20535 int count = 0;
20536 int i;
20538 if (using_mfcr_multiple)
20540 for (i = 0; i < 8; i++)
20541 if (save_reg_p (CR0_REGNO + i))
20542 count++;
20543 gcc_assert (count);
20546 if (using_mfcr_multiple && count > 1)
20548 rtvec p;
20549 int ndx;
20551 p = rtvec_alloc (count);
20553 ndx = 0;
20554 for (i = 0; i < 8; i++)
20555 if (save_reg_p (CR0_REGNO + i))
20557 rtvec r = rtvec_alloc (2);
20558 RTVEC_ELT (r, 0) = reg;
20559 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
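/* 1 << (7 - i) is the mtcrf FXM mask bit for CR field i (FXM bit 7
   selects CR0, bit 0 selects CR7), so each SET updates exactly one
   CR field from the corresponding nibble of REG.  */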
20560 RTVEC_ELT (p, ndx) =
20561 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
20562 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
20563 ndx++;
20565 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20566 gcc_assert (ndx == count);
20568 else
20569 for (i = 0; i < 8; i++)
20570 if (save_reg_p (CR0_REGNO + i))
20571 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
20572 reg));
20574 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20576 rtx insn = get_last_insn ();
20577 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20579 add_reg_note (insn, REG_CFA_RESTORE, cr);
20580 RTX_FRAME_RELATED_P (insn) = 1;
20584 /* Like cr, the move to lr instruction can be scheduled after the
20585 stack deallocation, but unlike cr, its stack frame save is still
20586 valid. So we only need to emit the cfa_restore on the correct
20587 instruction. */
20589 static void
20590 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
20592 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
20593 rtx reg = gen_rtx_REG (Pmode, regno);
20595 emit_move_insn (reg, mem);
20598 static void
20599 restore_saved_lr (int regno, bool exit_func)
20601 rtx reg = gen_rtx_REG (Pmode, regno);
20602 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20603 rtx insn = emit_move_insn (lr, reg);
20605 if (!exit_func && flag_shrink_wrap)
20607 add_reg_note (insn, REG_CFA_RESTORE, lr);
20608 RTX_FRAME_RELATED_P (insn) = 1;
20612 static rtx
20613 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
20615 if (info->cr_save_p)
20616 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20617 gen_rtx_REG (SImode, CR2_REGNO),
20618 cfa_restores);
20619 if (info->lr_save_p)
20620 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20621 gen_rtx_REG (Pmode, LR_REGNO),
20622 cfa_restores);
20623 return cfa_restores;
20626 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
20627 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
20628 below the stack pointer that are not clobbered by signals. */
20630 static inline bool
20631 offset_below_red_zone_p (HOST_WIDE_INT offset)
20633 return offset < (DEFAULT_ABI == ABI_V4
20634 ? 0
20635 : TARGET_32BIT ? -220 : -288);
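/* Worked example: on 64-bit AIX/Linux the cushion is 288 bytes, so
   offset_below_red_zone_p (-288) is false (still protected) while
   offset_below_red_zone_p (-289) is true; under ABI_V4 any negative
   offset counts as clobberable.  */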
20638 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
20640 static void
20641 emit_cfa_restores (rtx cfa_restores)
20643 rtx insn = get_last_insn ();
20644 rtx *loc = &REG_NOTES (insn);
20646 while (*loc)
20647 loc = &XEXP (*loc, 1);
20648 *loc = cfa_restores;
20649 RTX_FRAME_RELATED_P (insn) = 1;
20652 /* Emit function epilogue as insns. */
20654 void
20655 rs6000_emit_epilogue (int sibcall)
20657 rs6000_stack_t *info;
20658 int restoring_GPRs_inline;
20659 int restoring_FPRs_inline;
20660 int using_load_multiple;
20661 int using_mtcr_multiple;
20662 int use_backchain_to_restore_sp;
20663 int restore_lr;
20664 int strategy;
20665 HOST_WIDE_INT frame_off = 0;
20666 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
20667 rtx frame_reg_rtx = sp_reg_rtx;
20668 rtx cfa_restores = NULL_RTX;
20669 rtx insn;
20670 rtx cr_save_reg = NULL_RTX;
20671 enum machine_mode reg_mode = Pmode;
20672 int reg_size = TARGET_32BIT ? 4 : 8;
20673 int i;
20674 bool exit_func;
20675 unsigned ptr_regno;
20677 info = rs6000_stack_info ();
20679 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20681 reg_mode = V2SImode;
20682 reg_size = 8;
20685 strategy = info->savres_strategy;
20686 using_load_multiple = strategy & SAVRES_MULTIPLE;
20687 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
20688 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
20689 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
20690 || rs6000_cpu == PROCESSOR_PPC603
20691 || rs6000_cpu == PROCESSOR_PPC750
20692 || optimize_size);
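/* That is, a single mtcrf writing several CR fields at once is only
   preferred on the 601, 603 and 750, or when optimizing for size;
   otherwise restore_saved_cr splits the restore into one mtcrf per
   field.  */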
20693 /* Restore via the backchain when we have a large frame, since this
20694 is more efficient than an addis, addi pair. The second condition
20695 here will not trigger at the moment; we don't actually need a
20696 frame pointer for alloca, but the generic parts of the compiler
20697 give us one anyway. */
20698 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
20699 || (cfun->calls_alloca
20700 && !frame_pointer_needed));
20701 restore_lr = (info->lr_save_p
20702 && (restoring_FPRs_inline
20703 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
20704 && (restoring_GPRs_inline
20705 || info->first_fp_reg_save < 64));
20707 if (WORLD_SAVE_P (info))
20709 int i, j;
20710 char rname[30];
20711 const char *alloc_rname;
20712 rtvec p;
20714 /* eh_rest_world_r10 will return to the location saved in the LR
20715 stack slot (which is not likely to be our caller).
20716 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
20717 rest_world is similar, except any R10 parameter is ignored.
20718 The exception-handling stuff that was here in 2.95 is no
20719 longer necessary. */
20721 p = rtvec_alloc (9
20723 + 32 - info->first_gp_reg_save
20724 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
20725 + 63 + 1 - info->first_fp_reg_save);
20727 strcpy (rname, ((crtl->calls_eh_return) ?
20728 "*eh_rest_world_r10" : "*rest_world"));
20729 alloc_rname = ggc_strdup (rname);
20731 j = 0;
20732 RTVEC_ELT (p, j++) = ret_rtx;
20733 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20734 gen_rtx_REG (Pmode,
20735 LR_REGNO));
20736 RTVEC_ELT (p, j++)
20737 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
20738 /* The instruction pattern requires a clobber here;
20739 it is shared with the restVEC helper. */
20740 RTVEC_ELT (p, j++)
20741 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
20744 /* CR register traditionally saved as CR2. */
20745 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
20746 RTVEC_ELT (p, j++)
20747 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
20748 if (flag_shrink_wrap)
20750 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20751 gen_rtx_REG (Pmode, LR_REGNO),
20752 cfa_restores);
20753 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20757 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20759 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20760 RTVEC_ELT (p, j++)
20761 = gen_frame_load (reg,
20762 frame_reg_rtx, info->gp_save_offset + reg_size * i);
20763 if (flag_shrink_wrap)
20764 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20766 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20768 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
20769 RTVEC_ELT (p, j++)
20770 = gen_frame_load (reg,
20771 frame_reg_rtx, info->altivec_save_offset + 16 * i);
20772 if (flag_shrink_wrap)
20773 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20775 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
20777 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20778 ? DFmode : SFmode),
20779 info->first_fp_reg_save + i);
20780 RTVEC_ELT (p, j++)
20781 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
20782 if (flag_shrink_wrap)
20783 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20785 RTVEC_ELT (p, j++)
20786 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
20787 RTVEC_ELT (p, j++)
20788 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
20789 RTVEC_ELT (p, j++)
20790 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
20791 RTVEC_ELT (p, j++)
20792 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
20793 RTVEC_ELT (p, j++)
20794 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
20795 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
20797 if (flag_shrink_wrap)
20799 REG_NOTES (insn) = cfa_restores;
20800 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20801 RTX_FRAME_RELATED_P (insn) = 1;
20803 return;
20806 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
20807 if (info->push_p)
20808 frame_off = info->total_size;
20810 /* Restore AltiVec registers if we must do so before adjusting the
20811 stack. */
20812 if (TARGET_ALTIVEC_ABI
20813 && info->altivec_size != 0
20814 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20815 || (DEFAULT_ABI != ABI_V4
20816 && offset_below_red_zone_p (info->altivec_save_offset))))
20818 int i;
20819 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20821 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20822 if (use_backchain_to_restore_sp)
20824 int frame_regno = 11;
20826 if ((strategy & REST_INLINE_VRS) == 0)
20828 /* Of r11 and r12, select the one not clobbered by an
20829 out-of-line restore function for the frame register. */
20830 frame_regno = 11 + 12 - scratch_regno;
20832 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
20833 emit_move_insn (frame_reg_rtx,
20834 gen_rtx_MEM (Pmode, sp_reg_rtx));
20835 frame_off = 0;
20837 else if (frame_pointer_needed)
20838 frame_reg_rtx = hard_frame_pointer_rtx;
20840 if ((strategy & REST_INLINE_VRS) == 0)
20842 int end_save = info->altivec_save_offset + info->altivec_size;
20843 int ptr_off;
20844 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20845 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20847 if (end_save + frame_off != 0)
20849 rtx offset = GEN_INT (end_save + frame_off);
20851 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20853 else
20854 emit_move_insn (ptr_reg, frame_reg_rtx);
20856 ptr_off = -end_save;
20857 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20858 info->altivec_save_offset + ptr_off,
20859 0, V4SImode, SAVRES_VR);
20861 else
20863 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20864 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20866 rtx addr, areg, mem, reg;
20868 areg = gen_rtx_REG (Pmode, 0);
20869 emit_move_insn
20870 (areg, GEN_INT (info->altivec_save_offset
20871 + frame_off
20872 + 16 * (i - info->first_altivec_reg_save)));
20874 /* AltiVec addressing mode is [reg+reg]. */
20875 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20876 mem = gen_frame_mem (V4SImode, addr);
20878 reg = gen_rtx_REG (V4SImode, i);
20879 emit_move_insn (reg, mem);
20883 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20884 if (((strategy & REST_INLINE_VRS) == 0
20885 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20886 && (flag_shrink_wrap
20887 || (offset_below_red_zone_p
20888 (info->altivec_save_offset
20889 + 16 * (i - info->first_altivec_reg_save)))))
20891 rtx reg = gen_rtx_REG (V4SImode, i);
20892 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20896 /* Restore VRSAVE if we must do so before adjusting the stack. */
20897 if (TARGET_ALTIVEC
20898 && TARGET_ALTIVEC_VRSAVE
20899 && info->vrsave_mask != 0
20900 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20901 || (DEFAULT_ABI != ABI_V4
20902 && offset_below_red_zone_p (info->vrsave_save_offset))))
20904 rtx reg;
20906 if (frame_reg_rtx == sp_reg_rtx)
20908 if (use_backchain_to_restore_sp)
20910 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20911 emit_move_insn (frame_reg_rtx,
20912 gen_rtx_MEM (Pmode, sp_reg_rtx));
20913 frame_off = 0;
20915 else if (frame_pointer_needed)
20916 frame_reg_rtx = hard_frame_pointer_rtx;
20919 reg = gen_rtx_REG (SImode, 12);
20920 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20921 info->vrsave_save_offset + frame_off));
20923 emit_insn (generate_set_vrsave (reg, info, 1));
20926 insn = NULL_RTX;
20927 /* If we have a large stack frame, restore the old stack pointer
20928 using the backchain. */
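/* The ABI keeps the back chain word at offset 0 of every frame, so a
   single load through r1 recovers the pre-prologue stack pointer no
   matter how large the frame is.  */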
20929 if (use_backchain_to_restore_sp)
20931 if (frame_reg_rtx == sp_reg_rtx)
20933 /* Under V.4, don't reset the stack pointer until after we're done
20934 loading the saved registers. */
20935 if (DEFAULT_ABI == ABI_V4)
20936 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20938 insn = emit_move_insn (frame_reg_rtx,
20939 gen_rtx_MEM (Pmode, sp_reg_rtx));
20940 frame_off = 0;
20942 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20943 && DEFAULT_ABI == ABI_V4)
20944 /* frame_reg_rtx has been set up by the altivec restore. */
20946 else
20948 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
20949 frame_reg_rtx = sp_reg_rtx;
20952 /* If we have a frame pointer, we can restore the old stack pointer
20953 from it. */
20954 else if (frame_pointer_needed)
20956 frame_reg_rtx = sp_reg_rtx;
20957 if (DEFAULT_ABI == ABI_V4)
20958 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20959 /* Prevent reordering memory accesses against stack pointer restore. */
20960 else if (cfun->calls_alloca
20961 || offset_below_red_zone_p (-info->total_size))
20962 rs6000_emit_stack_tie (frame_reg_rtx, true);
20964 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
20965 GEN_INT (info->total_size)));
20966 frame_off = 0;
20968 else if (info->push_p
20969 && DEFAULT_ABI != ABI_V4
20970 && !crtl->calls_eh_return)
20972 /* Prevent reordering memory accesses against stack pointer restore. */
20973 if (cfun->calls_alloca
20974 || offset_below_red_zone_p (-info->total_size))
20975 rs6000_emit_stack_tie (frame_reg_rtx, false);
20976 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
20977 GEN_INT (info->total_size)));
20978 frame_off = 0;
20980 if (insn && frame_reg_rtx == sp_reg_rtx)
20982 if (cfa_restores)
20984 REG_NOTES (insn) = cfa_restores;
20985 cfa_restores = NULL_RTX;
20987 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20988 RTX_FRAME_RELATED_P (insn) = 1;
20991 /* Restore AltiVec registers if we have not done so already. */
20992 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20993 && TARGET_ALTIVEC_ABI
20994 && info->altivec_size != 0
20995 && (DEFAULT_ABI == ABI_V4
20996 || !offset_below_red_zone_p (info->altivec_save_offset)))
20998 int i;
21000 if ((strategy & REST_INLINE_VRS) == 0)
21002 int end_save = info->altivec_save_offset + info->altivec_size;
21003 int ptr_off;
21004 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
21005 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
21006 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
21008 if (end_save + frame_off != 0)
21010 rtx offset = GEN_INT (end_save + frame_off);
21012 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
21014 else
21015 emit_move_insn (ptr_reg, frame_reg_rtx);
21017 ptr_off = -end_save;
21018 insn = rs6000_emit_savres_rtx (info, scratch_reg,
21019 info->altivec_save_offset + ptr_off,
21020 0, V4SImode, SAVRES_VR);
21021 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
21023 /* Frame reg was clobbered by out-of-line save. Restore it
21024 from ptr_reg, and if we are calling out-of-line gpr or
21025 fpr restore set up the correct pointer and offset. */
21026 unsigned newptr_regno = 1;
21027 if (!restoring_GPRs_inline)
21029 bool lr = info->gp_save_offset + info->gp_size == 0;
21030 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
21031 newptr_regno = ptr_regno_for_savres (sel);
21032 end_save = info->gp_save_offset + info->gp_size;
21034 else if (!restoring_FPRs_inline)
21036 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
21037 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21038 newptr_regno = ptr_regno_for_savres (sel);
21039 end_save = info->gp_save_offset + info->gp_size;
21042 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
21043 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
21045 if (end_save + ptr_off != 0)
21047 rtx offset = GEN_INT (end_save + ptr_off);
21049 frame_off = -end_save;
21050 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
21052 else
21054 frame_off = ptr_off;
21055 emit_move_insn (frame_reg_rtx, ptr_reg);
21059 else
21061 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21062 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
21064 rtx addr, areg, mem, reg;
21066 areg = gen_rtx_REG (Pmode, 0);
21067 emit_move_insn
21068 (areg, GEN_INT (info->altivec_save_offset
21069 + frame_off
21070 + 16 * (i - info->first_altivec_reg_save)));
21072 /* AltiVec addressing mode is [reg+reg]. */
21073 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
21074 mem = gen_frame_mem (V4SImode, addr);
21076 reg = gen_rtx_REG (V4SImode, i);
21077 emit_move_insn (reg, mem);
21081 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
21082 if (((strategy & REST_INLINE_VRS) == 0
21083 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
21084 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
21086 rtx reg = gen_rtx_REG (V4SImode, i);
21087 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21091 /* Restore VRSAVE if we have not done so already. */
21092 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
21093 && TARGET_ALTIVEC
21094 && TARGET_ALTIVEC_VRSAVE
21095 && info->vrsave_mask != 0
21096 && (DEFAULT_ABI == ABI_V4
21097 || !offset_below_red_zone_p (info->vrsave_save_offset)))
21099 rtx reg;
21101 reg = gen_rtx_REG (SImode, 12);
21102 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21103 info->vrsave_save_offset + frame_off));
21105 emit_insn (generate_set_vrsave (reg, info, 1));
21108 /* If we exit by an out-of-line restore function on ABI_V4 then that
21109 function will deallocate the stack, so we don't need to worry
21110 about the unwinder restoring cr from an invalid stack frame
21111 location. */
21112 exit_func = (!restoring_FPRs_inline
21113 || (!restoring_GPRs_inline
21114 && info->first_fp_reg_save == 64));
21116 /* Get the old lr if we saved it. If we are restoring registers
21117 out-of-line, then the out-of-line routines can do this for us. */
21118 if (restore_lr && restoring_GPRs_inline)
21119 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21121 /* Get the old cr if we saved it. */
21122 if (info->cr_save_p)
21124 unsigned cr_save_regno = 12;
21126 if (!restoring_GPRs_inline)
21128 /* Ensure we don't use the register used by the out-of-line
21129 gpr register restore below. */
21130 bool lr = info->gp_save_offset + info->gp_size == 0;
21131 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
21132 int gpr_ptr_regno = ptr_regno_for_savres (sel);
21134 if (gpr_ptr_regno == 12)
21135 cr_save_regno = 11;
21136 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
21138 else if (REGNO (frame_reg_rtx) == 12)
21139 cr_save_regno = 11;
21141 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
21142 info->cr_save_offset + frame_off,
21143 exit_func);
21146 /* Set LR here to try to overlap restores below. */
21147 if (restore_lr && restoring_GPRs_inline)
21148 restore_saved_lr (0, exit_func);
21150 /* Load exception handler data registers, if needed. */
21151 if (crtl->calls_eh_return)
21153 unsigned int i, regno;
21155 if (TARGET_AIX)
21157 rtx reg = gen_rtx_REG (reg_mode, 2);
21158 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21159 frame_off + 5 * reg_size));
21162 for (i = 0; ; ++i)
21164 rtx mem;
21166 regno = EH_RETURN_DATA_REGNO (i);
21167 if (regno == INVALID_REGNUM)
21168 break;
21170 /* Note: possible use of r0 here to address SPE regs. */
21171 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
21172 info->ehrd_offset + frame_off
21173 + reg_size * (int) i);
21175 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
21179 /* Restore GPRs. This is done as a PARALLEL if we are using
21180 the load-multiple instructions. */
21181 if (TARGET_SPE_ABI
21182 && info->spe_64bit_regs_used
21183 && info->first_gp_reg_save != 32)
21185 /* Determine whether we can address all of the registers that need
21186 to be saved with an offset from frame_reg_rtx that fits in
21187 the small const field for SPE memory instructions. */
21188 int spe_regs_addressable
21189 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21190 + reg_size * (32 - info->first_gp_reg_save - 1))
21191 && restoring_GPRs_inline);
21193 if (!spe_regs_addressable)
21195 int ool_adjust = 0;
21196 rtx old_frame_reg_rtx = frame_reg_rtx;
21197 /* Make r11 point to the start of the SPE save area. We worried about
21198 not clobbering it when we were saving registers in the prologue.
21199 There's no need to worry here because the static chain is passed
21200 anew to every function. */
21202 if (!restoring_GPRs_inline)
21203 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
21204 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21205 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
21206 GEN_INT (info->spe_gp_save_offset
21207 + frame_off
21208 - ool_adjust)));
21209 /* Keep the invariant that frame_reg_rtx + frame_off points
21210 at the top of the stack frame. */
21211 frame_off = -info->spe_gp_save_offset + ool_adjust;
21214 if (restoring_GPRs_inline)
21216 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
21218 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21219 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21221 rtx offset, addr, mem, reg;
21223 /* We're doing all this to ensure that the immediate offset
21224 fits into the immediate field of 'evldd'. */
21225 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
21227 offset = GEN_INT (spe_offset + reg_size * i);
21228 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
21229 mem = gen_rtx_MEM (V2SImode, addr);
21230 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21232 emit_move_insn (reg, mem);
21235 else
21236 rs6000_emit_savres_rtx (info, frame_reg_rtx,
21237 info->spe_gp_save_offset + frame_off,
21238 info->lr_save_offset + frame_off,
21239 reg_mode,
21240 SAVRES_GPR | SAVRES_LR);
21242 else if (!restoring_GPRs_inline)
21244 /* We are jumping to an out-of-line function. */
21245 rtx ptr_reg;
21246 int end_save = info->gp_save_offset + info->gp_size;
21247 bool can_use_exit = end_save == 0;
21248 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
21249 int ptr_off;
21251 /* Emit stack reset code if we need it. */
21252 ptr_regno = ptr_regno_for_savres (sel);
21253 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21254 if (can_use_exit)
21255 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21256 else if (end_save + frame_off != 0)
21257 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
21258 GEN_INT (end_save + frame_off)));
21259 else if (REGNO (frame_reg_rtx) != ptr_regno)
21260 emit_move_insn (ptr_reg, frame_reg_rtx);
21261 if (REGNO (frame_reg_rtx) == ptr_regno)
21262 frame_off = -end_save;
21264 if (can_use_exit && info->cr_save_p)
21265 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
21267 ptr_off = -end_save;
21268 rs6000_emit_savres_rtx (info, ptr_reg,
21269 info->gp_save_offset + ptr_off,
21270 info->lr_save_offset + ptr_off,
21271 reg_mode, sel);
21273 else if (using_load_multiple)
21275 rtvec p;
21276 p = rtvec_alloc (32 - info->first_gp_reg_save);
21277 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21278 RTVEC_ELT (p, i)
21279 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21280 frame_reg_rtx,
21281 info->gp_save_offset + frame_off + reg_size * i);
21282 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21284 else
21286 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21287 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21288 emit_insn (gen_frame_load
21289 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21290 frame_reg_rtx,
21291 info->gp_save_offset + frame_off + reg_size * i));
21294 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21296 /* If the frame pointer was used then we can't delay emitting
21297 a REG_CFA_DEF_CFA note. This must happen on the insn that
21298 restores the frame pointer, r31. We may have already emitted
21299 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
21300 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
21301 be harmless if emitted. */
21302 if (frame_pointer_needed)
21304 insn = get_last_insn ();
21305 add_reg_note (insn, REG_CFA_DEF_CFA,
21306 plus_constant (Pmode, frame_reg_rtx, frame_off));
21307 RTX_FRAME_RELATED_P (insn) = 1;
21310 /* Set up cfa_restores. We always need these when
21311 shrink-wrapping. If not shrink-wrapping then we only need
21312 the cfa_restore when the stack location is no longer valid.
21313 The cfa_restores must be emitted on or before the insn that
21314 invalidates the stack, and of course must not be emitted
21315 before the insn that actually does the restore. The latter
21316 is why it is a bad idea to emit the cfa_restores as a group
21317 on the last instruction here that actually does a restore:
21318 That insn may be reordered with respect to others doing
21319 restores. */
21320 if (flag_shrink_wrap
21321 && !restoring_GPRs_inline
21322 && info->first_fp_reg_save == 64)
21323 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21325 for (i = info->first_gp_reg_save; i < 32; i++)
21326 if (!restoring_GPRs_inline
21327 || using_load_multiple
21328 || rs6000_reg_live_or_pic_offset_p (i))
21330 rtx reg = gen_rtx_REG (reg_mode, i);
21332 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21336 if (!restoring_GPRs_inline
21337 && info->first_fp_reg_save == 64)
21339 /* We are jumping to an out-of-line function. */
21340 if (cfa_restores)
21341 emit_cfa_restores (cfa_restores);
21342 return;
21345 if (restore_lr && !restoring_GPRs_inline)
21347 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21348 restore_saved_lr (0, exit_func);
21351 /* Restore fpr's if we need to do it without calling a function. */
21352 if (restoring_FPRs_inline)
21353 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21354 if (save_reg_p (info->first_fp_reg_save + i))
21356 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21357 ? DFmode : SFmode),
21358 info->first_fp_reg_save + i);
21359 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21360 info->fp_save_offset + frame_off + 8 * i));
21361 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21362 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21365 /* If we saved cr, restore it here. Just those that were used. */
21366 if (info->cr_save_p)
21367 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
21369 /* If this is V.4, unwind the stack pointer after all of the loads
21370 have been done, or set up r11 if we are restoring fp out of line. */
21371 ptr_regno = 1;
21372 if (!restoring_FPRs_inline)
21374 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21375 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21376 ptr_regno = ptr_regno_for_savres (sel);
21379 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21380 if (REGNO (frame_reg_rtx) == ptr_regno)
21381 frame_off = 0;
21383 if (insn && restoring_FPRs_inline)
21385 if (cfa_restores)
21387 REG_NOTES (insn) = cfa_restores;
21388 cfa_restores = NULL_RTX;
21390 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21391 RTX_FRAME_RELATED_P (insn) = 1;
21394 if (crtl->calls_eh_return)
21396 rtx sa = EH_RETURN_STACKADJ_RTX;
21397 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21400 if (!sibcall)
21402 rtvec p;
21403 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21404 if (! restoring_FPRs_inline)
21406 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21407 RTVEC_ELT (p, 0) = ret_rtx;
21409 else
21411 if (cfa_restores)
21413 /* We can't hang the cfa_restores off a simple return,
21414 since the shrink-wrap code sometimes uses an existing
21415 return. This means there might be a path from
21416 pre-prologue code to this return, and dwarf2cfi code
21417 wants the eh_frame unwinder state to be the same on
21418 all paths to any point. So we need to emit the
21419 cfa_restores before the return. For -m64 we really
21420 don't need epilogue cfa_restores at all, except for
21421 this irritating dwarf2cfi-with-shrink-wrap
21422 requirement; the stack red-zone means eh_frame info
21423 from the prologue telling the unwinder to restore
21424 from the stack is perfectly good right to the end of
21425 the function. */
21426 emit_insn (gen_blockage ());
21427 emit_cfa_restores (cfa_restores);
21428 cfa_restores = NULL_RTX;
21430 p = rtvec_alloc (2);
21431 RTVEC_ELT (p, 0) = simple_return_rtx;
21434 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21435 ? gen_rtx_USE (VOIDmode,
21436 gen_rtx_REG (Pmode, LR_REGNO))
21437 : gen_rtx_CLOBBER (VOIDmode,
21438 gen_rtx_REG (Pmode, LR_REGNO)));
21440 /* If we have to restore more than two FP registers, branch to the
21441 restore function. It will return to our caller. */
21442 if (! restoring_FPRs_inline)
21444 int i;
21445 rtx sym;
21447 if (flag_shrink_wrap)
21448 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21450 sym = rs6000_savres_routine_sym (info,
21451 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21452 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21453 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21454 gen_rtx_REG (Pmode,
21455 DEFAULT_ABI == ABI_AIX
21456 ? 1 : 11));
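/* The base register differs per ABI: the out-of-line FPR restore
   routines address the save area from r1 on AIX but from r11 on the
   other ABIs, and the USE here records that the chosen register is
   live into the jump.  */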
21457 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21459 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21461 RTVEC_ELT (p, i + 4)
21462 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21463 if (flag_shrink_wrap)
21464 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21465 cfa_restores);
21469 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21472 if (cfa_restores)
21474 if (sibcall)
21475 /* Ensure the cfa_restores are hung off an insn that won't
21476 be reordered above other restores. */
21477 emit_insn (gen_blockage ());
21479 emit_cfa_restores (cfa_restores);
21483 /* Write function epilogue. */
21485 static void
21486 rs6000_output_function_epilogue (FILE *file,
21487 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21489 #if TARGET_MACHO
21490 macho_branch_islands ();
21491 /* Mach-O doesn't support labels at the end of objects, so if
21492 it looks like we might want one, insert a NOP. */
21494 rtx insn = get_last_insn ();
21495 rtx deleted_debug_label = NULL_RTX;
21496 while (insn
21497 && NOTE_P (insn)
21498 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21500 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
21501 notes only, instead set their CODE_LABEL_NUMBER to -1,
21502 otherwise there would be code generation differences
21503 between -g and -g0. */
21504 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21505 deleted_debug_label = insn;
21506 insn = PREV_INSN (insn);
21508 if (insn
21509 && (LABEL_P (insn)
21510 || (NOTE_P (insn)
21511 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21512 fputs ("\tnop\n", file);
21513 else if (deleted_debug_label)
21514 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21515 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21516 CODE_LABEL_NUMBER (insn) = -1;
21518 #endif
21520 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21521 on its format.
21523 We don't output a traceback table if -finhibit-size-directive was
21524 used. The documentation for -finhibit-size-directive reads
21525 ``don't output a @code{.size} assembler directive, or anything
21526 else that would cause trouble if the function is split in the
21527 middle, and the two halves are placed at locations far apart in
21528 memory.'' The traceback table has this property, since it
21529 includes the offset from the start of the function to the
21530 traceback table itself.
21532 System V.4 PowerPC (and the embedded ABI derived from it) uses a
21533 different traceback table. */
21534 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21535 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21537 const char *fname = NULL;
21538 const char *language_string = lang_hooks.name;
21539 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21540 int i;
21541 int optional_tbtab;
21542 rs6000_stack_t *info = rs6000_stack_info ();
21544 if (rs6000_traceback == traceback_full)
21545 optional_tbtab = 1;
21546 else if (rs6000_traceback == traceback_part)
21547 optional_tbtab = 0;
21548 else
21549 optional_tbtab = !optimize_size && !TARGET_ELF;
21551 if (optional_tbtab)
21553 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21554 while (*fname == '.') /* V.4 encodes . in the name */
21555 fname++;
21557 /* Need label immediately before tbtab, so we can compute
21558 its offset from the function start. */
21559 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21560 ASM_OUTPUT_LABEL (file, fname);
21563 /* The .tbtab pseudo-op can only be used for the first eight
21564 expressions, since it can't handle the possibly variable
21565 length fields that follow. However, if you omit the optional
21566 fields, the assembler outputs zeros for all optional fields
21567 anyway, giving each variable-length field its minimum length
21568 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
21569 pseudo-op at all. */
21571 /* An all-zero word flags the start of the tbtab, for debuggers
21572 that have to find it by searching forward from the entry
21573 point or from the current pc. */
21574 fputs ("\t.long 0\n", file);
21576 /* Tbtab format type. Use format type 0. */
21577 fputs ("\t.byte 0,", file);
21579 /* Language type. Unfortunately, there does not seem to be any
21580 official way to discover the language being compiled, so we
21581 use language_string.
21582 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21583 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21584 a number, so for now use 9. LTO and Go aren't assigned numbers
21585 either, so for now use 0. */
21586 if (! strcmp (language_string, "GNU C")
21587 || ! strcmp (language_string, "GNU GIMPLE")
21588 || ! strcmp (language_string, "GNU Go"))
21589 i = 0;
21590 else if (! strcmp (language_string, "GNU F77")
21591 || ! strcmp (language_string, "GNU Fortran"))
21592 i = 1;
21593 else if (! strcmp (language_string, "GNU Pascal"))
21594 i = 2;
21595 else if (! strcmp (language_string, "GNU Ada"))
21596 i = 3;
21597 else if (! strcmp (language_string, "GNU C++")
21598 || ! strcmp (language_string, "GNU Objective-C++"))
21599 i = 9;
21600 else if (! strcmp (language_string, "GNU Java"))
21601 i = 13;
21602 else if (! strcmp (language_string, "GNU Objective-C"))
21603 i = 14;
21604 else
21605 gcc_unreachable ();
21606 fprintf (file, "%d,", i);
21608 /* 8 single bit fields: global linkage (not set for C extern linkage,
21609 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21610 from start of procedure stored in tbtab, internal function, function
21611 has controlled storage, function has no toc, function uses fp,
21612 function logs/aborts fp operations. */
21613 /* Assume that fp operations are used if any fp reg must be saved. */
21614 fprintf (file, "%d,",
21615 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
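/* E.g., a full tbtab for a function that saves FPRs prints "34," here:
   bit 5 (offset-to-tbtab present) plus bit 1 (function uses fp).  */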
21617 /* 6 bitfields: function is interrupt handler, name present in
21618 proc table, function calls alloca, on condition directives
21619 (controls stack walks, 3 bits), saves condition reg, saves
21620 link reg. */
21621 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21622 set up as a frame pointer, even when there is no alloca call. */
21623 fprintf (file, "%d,",
21624 ((optional_tbtab << 6)
21625 | ((optional_tbtab & frame_pointer_needed) << 5)
21626 | (info->cr_save_p << 1)
21627 | (info->lr_save_p)));
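/* E.g., a full tbtab with a frame pointer and both CR and LR saved
   prints "99,": 64 + 32 + 2 + 1.  */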
21629 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21630 (6 bits). */
21631 fprintf (file, "%d,",
21632 (info->push_p << 7) | (64 - info->first_fp_reg_save));
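/* For instance, with push_p set and f26..f31 saved (first_fp_reg_save
   == 58 in GCC's register numbering) this prints "134,": 128 + 6.  */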
21634 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21635 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21637 if (optional_tbtab)
21639 /* Compute the parameter info from the function decl argument
21640 list. */
21641 tree decl;
21642 int next_parm_info_bit = 31;
21644 for (decl = DECL_ARGUMENTS (current_function_decl);
21645 decl; decl = DECL_CHAIN (decl))
21647 rtx parameter = DECL_INCOMING_RTL (decl);
21648 enum machine_mode mode = GET_MODE (parameter);
21650 if (GET_CODE (parameter) == REG)
21652 if (SCALAR_FLOAT_MODE_P (mode))
21654 int bits;
21656 float_parms++;
21658 switch (mode)
21660 case SFmode:
21661 case SDmode:
21662 bits = 0x2;
21663 break;
21665 case DFmode:
21666 case DDmode:
21667 case TFmode:
21668 case TDmode:
21669 bits = 0x3;
21670 break;
21672 default:
21673 gcc_unreachable ();
21676 /* If only one bit will fit, don't OR in this entry. */
21677 if (next_parm_info_bit > 0)
21678 parm_info |= (bits << (next_parm_info_bit - 1));
21679 next_parm_info_bit -= 2;
21681 else
21683 fixed_parms += ((GET_MODE_SIZE (mode)
21684 + (UNITS_PER_WORD - 1))
21685 / UNITS_PER_WORD);
21686 next_parm_info_bit -= 1;
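/* Worked example: for f(int, double) the int lowers next_parm_info_bit
   from 31 to 30 without setting any bits, then the double ORs 0x3 in at
   bits 30-29, leaving parm_info == 0x60000000 with fixed_parms == 1 and
   float_parms == 1.  */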
21692 /* Number of fixed point parameters. */
21693 /* This is actually the number of words of fixed point parameters; thus
21694 an 8-byte struct counts as 2, and thus the maximum value is 8. */
21695 fprintf (file, "%d,", fixed_parms);
21697 /* 2 bitfields: number of floating point parameters (7 bits), parameters
21698 all on stack. */
21699 /* This is actually the number of fp registers that hold parameters;
21700 and thus the maximum value is 13. */
21701 /* Set parameters on stack bit if parameters are not in their original
21702 registers, regardless of whether they are on the stack? Xlc
21703 seems to set the bit when not optimizing. */
21704 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
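/* E.g., two FP parameter registers compiled without optimization print
   "5\n" here: (2 << 1) | 1.  */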
21706 if (! optional_tbtab)
21707 return;
21709 /* Optional fields follow. Some are variable length. */
21711 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
21712 11 double float. */
21713 /* There is an entry for each parameter in a register, in the order that
21714 they occur in the parameter list. Any intervening arguments on the
21715 stack are ignored. If the list overflows a long (max possible length
21716 34 bits) then completely leave off all elements that don't fit. */
21717 /* Only emit this long if there was at least one parameter. */
21718 if (fixed_parms || float_parms)
21719 fprintf (file, "\t.long %d\n", parm_info);
21721 /* Offset from start of code to tb table. */
21722 fputs ("\t.long ", file);
21723 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21724 RS6000_OUTPUT_BASENAME (file, fname);
21725 putc ('-', file);
21726 rs6000_output_function_entry (file, fname);
21727 putc ('\n', file);
21729 /* Interrupt handler mask. */
21730 /* Omit this long, since we never set the interrupt handler bit
21731 above. */
21733 /* Number of CTL (controlled storage) anchors. */
21734 /* Omit this long, since the has_ctl bit is never set above. */
21736 /* Displacement into stack of each CTL anchor. */
21737 /* Omit this list of longs, because there are no CTL anchors. */
21739 /* Length of function name. */
21740 if (*fname == '*')
21741 ++fname;
21742 fprintf (file, "\t.short %d\n", (int) strlen (fname));
21744 /* Function name. */
21745 assemble_string (fname, strlen (fname));
21747 /* Register for alloca automatic storage; this is always reg 31.
21748 Only emit this if the alloca bit was set above. */
21749 if (frame_pointer_needed)
21750 fputs ("\t.byte 31\n", file);
21752 fputs ("\t.align 2\n", file);
21756 /* A C compound statement that outputs the assembler code for a thunk
21757 function, used to implement C++ virtual function calls with
21758 multiple inheritance. The thunk acts as a wrapper around a virtual
21759 function, adjusting the implicit object parameter before handing
21760 control off to the real function.
21762 First, emit code to add the integer DELTA to the location that
21763 contains the incoming first argument. Assume that this argument
21764 contains a pointer, and is the one used to pass the `this' pointer
21765 in C++. This is the incoming argument *before* the function
21766 prologue, e.g. `%o0' on a sparc. The addition must preserve the
21767 values of all other incoming arguments.
21769 After the addition, emit code to jump to FUNCTION, which is a
21770 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
21771 not touch the return address. Hence returning from FUNCTION will
21772 return to whoever called the current `thunk'.
21774 The effect must be as if FUNCTION had been called directly with the
21775 adjusted first argument. This macro is responsible for emitting
21776 all of the code for a thunk function; output_function_prologue()
21777 and output_function_epilogue() are not invoked.
21779 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
21780 been extracted from it.) It might possibly be useful on some
21781 targets, but probably not.
21783 If you do not define this macro, the target-independent code in the
21784 C++ frontend will generate a less efficient heavyweight thunk that
21785 calls FUNCTION instead of jumping to it. The generic approach does
21786 not support varargs. */
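/* An illustrative sketch (an assumption about typical output, not text
   from this file): with DELTA == 8, VCALL_OFFSET == 0 and a
   non-aggregate return on 32-bit code, the emitted thunk amounts to

	addi r3,r3,8
	b function

   -- bump the `this' pointer in r3, then tail-jump.  */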
21788 static void
21789 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
21790 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
21791 tree function)
21793 rtx this_rtx, insn, funexp;
21795 reload_completed = 1;
21796 epilogue_completed = 1;
21798 /* Mark the end of the (empty) prologue. */
21799 emit_note (NOTE_INSN_PROLOGUE_END);
21801 /* Find the "this" pointer. If the function returns a structure,
21802 the structure return pointer is in r3. */
21803 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
21804 this_rtx = gen_rtx_REG (Pmode, 4);
21805 else
21806 this_rtx = gen_rtx_REG (Pmode, 3);
21808 /* Apply the constant offset, if required. */
21809 if (delta)
21810 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
21812 /* Apply the offset from the vtable, if required. */
21813 if (vcall_offset)
21815 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
21816 rtx tmp = gen_rtx_REG (Pmode, 12);
21818 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
21819 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
21821 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
21822 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
21824 else
21826 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
21828 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
21830 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
21833 /* Generate a tail call to the target function. */
21834 if (!TREE_USED (function))
21836 assemble_external (function);
21837 TREE_USED (function) = 1;
21839 funexp = XEXP (DECL_RTL (function), 0);
21840 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
21842 #if TARGET_MACHO
21843 if (MACHOPIC_INDIRECT)
21844 funexp = machopic_indirect_call_target (funexp);
21845 #endif
21847 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
21848 generate sibcall RTL explicitly. */
21849 insn = emit_call_insn (
21850 gen_rtx_PARALLEL (VOIDmode,
21851 gen_rtvec (4,
21852 gen_rtx_CALL (VOIDmode,
21853 funexp, const0_rtx),
21854 gen_rtx_USE (VOIDmode, const0_rtx),
21855 gen_rtx_USE (VOIDmode,
21856 gen_rtx_REG (SImode,
21857 LR_REGNO)),
21858 simple_return_rtx)));
21859 SIBLING_CALL_P (insn) = 1;
21860 emit_barrier ();
21862 /* Run just enough of rest_of_compilation to get the insns emitted.
21863 There's not really enough bulk here to make other passes such as
21864 instruction scheduling worthwhile. Note that use_thunk calls
21865 assemble_start_function and assemble_end_function. */
21866 insn = get_insns ();
21867 shorten_branches (insn);
21868 final_start_function (insn, file, 1);
21869 final (insn, file, 1);
21870 final_end_function ();
21872 reload_completed = 0;
21873 epilogue_completed = 0;
21876 /* A quick summary of the various types of 'constant-pool tables'
21877 under PowerPC:
21879 	Target		Flags		Name		One table per
21880 	AIX		(none)		AIX TOC		object file
21881 	AIX		-mfull-toc	AIX TOC		object file
21882 	AIX		-mminimal-toc	AIX minimal TOC	translation unit
21883 	SVR4/EABI	(none)		SVR4 SDATA	object file
21884 	SVR4/EABI	-fpic		SVR4 pic	object file
21885 	SVR4/EABI	-fPIC		SVR4 PIC	translation unit
21886 	SVR4/EABI	-mrelocatable	EABI TOC	function
21887 	SVR4/EABI	-maix		AIX TOC		object file
21888 	SVR4/EABI	-maix -mminimal-toc
21889 			AIX minimal TOC	translation unit
21891 	Name			Reg.	Set by	entries	contains:
21892 				made by	addrs?	fp?	sum?
21894 	AIX TOC			2	crt0	as	Y	option	option
21895 	AIX minimal TOC		30	prolog	gcc	Y	Y	option
21896 	SVR4 SDATA		13	crt0	gcc	N	Y	N
21897 	SVR4 pic		30	prolog	ld	Y	not yet	N
21898 	SVR4 PIC		30	prolog	gcc	Y	option	option
21899 	EABI TOC		30	prolog	gcc	Y	option	option
21903 /* Hash functions for the hash table. */
21905 static unsigned
21906 rs6000_hash_constant (rtx k)
21908 enum rtx_code code = GET_CODE (k);
21909 enum machine_mode mode = GET_MODE (k);
21910 unsigned result = (code << 3) ^ mode;
21911 const char *format;
21912 int flen, fidx;
21914 format = GET_RTX_FORMAT (code);
21915 flen = strlen (format);
21916 fidx = 0;
21918 switch (code)
21920 case LABEL_REF:
21921 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
21923 case CONST_DOUBLE:
21924 if (mode != VOIDmode)
21925 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
21926 flen = 2;
21927 break;
21929 case CODE_LABEL:
21930 fidx = 3;
21931 break;
21933 default:
21934 break;
21937 for (; fidx < flen; fidx++)
21938 switch (format[fidx])
21940 case 's':
21942 unsigned i, len;
21943 const char *str = XSTR (k, fidx);
21944 len = strlen (str);
21945 result = result * 613 + len;
21946 for (i = 0; i < len; i++)
21947 result = result * 613 + (unsigned) str[i];
21948 break;
21950 case 'u':
21951 case 'e':
21952 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
21953 break;
21954 case 'i':
21955 case 'n':
21956 result = result * 613 + (unsigned) XINT (k, fidx);
21957 break;
21958 case 'w':
21959 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
21960 result = result * 613 + (unsigned) XWINT (k, fidx);
21961 else
21963 size_t i;
21964 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
21965 result = result * 613 + (unsigned) (XWINT (k, fidx)
21966 >> CHAR_BIT * i);
21968 break;
21969 case '0':
21970 break;
21971 default:
21972 gcc_unreachable ();
21975 return result;
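#if 0
/* Self-contained restatement of the string case above, added for
   exposition only: plain multiplicative hashing with the constant 613,
   folding in the length first and then each byte.  */
#include <string.h>

static unsigned
mul_hash_string (const char *str, unsigned result)
{
  size_t i, len = strlen (str);

  result = result * 613 + (unsigned) len;
  for (i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif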
21978 static unsigned
21979 toc_hash_function (const void *hash_entry)
21981 const struct toc_hash_struct *thc =
21982 (const struct toc_hash_struct *) hash_entry;
21983 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
21986 /* Compare H1 and H2 for equivalence. */
21988 static int
21989 toc_hash_eq (const void *h1, const void *h2)
21991 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
21992 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
21994 if (((const struct toc_hash_struct *) h1)->key_mode
21995 != ((const struct toc_hash_struct *) h2)->key_mode)
21996 return 0;
21998 return rtx_equal_p (r1, r2);
22001 /* These are the names given by the C++ front-end to vtables, and
22002 vtable-like objects. Ideally, this logic should not be here;
22003 instead, there should be some programmatic way of inquiring as
22004 to whether or not an object is a vtable. */
22006 #define VTABLE_NAME_P(NAME) \
22007 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
22008 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
22009 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
22010 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
22011 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
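/* Example (added for exposition): under the Itanium C++ ABI, "_ZTV4Fred"
   (the vtable for class Fred) matches, as do "_ZTT...", "_ZTI..." and
   "_ZTC..." names for VTTs, typeinfo objects and construction vtables;
   "_vt." covers the old GNU mangling.  A plain symbol such as "fred"
   does not match.  */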
22013 #ifdef NO_DOLLAR_IN_LABEL
22014 /* Return a GGC-allocated character string translating dollar signs in
22015 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
22017 const char *
22018 rs6000_xcoff_strip_dollar (const char *name)
22020 char *strip, *p;
22021 const char *q;
22022 size_t len;
22024 q = (const char *) strchr (name, '$');
22026 if (q == 0 || q == name)
22027 return name;
22029 len = strlen (name);
22030 strip = XALLOCAVEC (char, len + 1);
22031 strcpy (strip, name);
22032 p = strip + (q - name);
22033 while (p)
22035 *p = '_';
22036 p = strchr (p + 1, '$');
22039 return ggc_alloc_string (strip, len);
22041 #endif
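/* Example (added for exposition): rs6000_xcoff_strip_dollar ("foo$bar$baz")
   returns "foo_bar_baz".  A name whose first character is '$' is returned
   unchanged, since q == name in that case.  */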
22043 void
22044 rs6000_output_symbol_ref (FILE *file, rtx x)
22046 /* Currently C++ TOC references to vtables can be emitted before it
22047 is decided whether the vtable is public or private. If this is
22048 the case, then the linker will eventually complain that there is
22049 a reference to an unknown section. Thus, for vtables only,
22050 we emit the TOC reference to reference the symbol and not the
22051 section. */
22052 const char *name = XSTR (x, 0);
22054 if (VTABLE_NAME_P (name))
22056 RS6000_OUTPUT_BASENAME (file, name);
22058 else
22059 assemble_name (file, name);
22062 /* Output a TOC entry. We derive the entry name from what is being
22063 written. */
22065 void
22066 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
22068 char buf[256];
22069 const char *name = buf;
22070 rtx base = x;
22071 HOST_WIDE_INT offset = 0;
22073 gcc_assert (!TARGET_NO_TOC);
22075 /* When the linker won't eliminate them, don't output duplicate
22076 TOC entries (this happens on AIX if there is any kind of TOC,
22077 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
22078 CODE_LABELs. */
22079 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
22081 struct toc_hash_struct *h;
22082 void **found;
22084 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
22085 time because GGC is not initialized at that point. */
22086 if (toc_hash_table == NULL)
22087 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
22088 toc_hash_eq, NULL);
22090 h = ggc_alloc_toc_hash_struct ();
22091 h->key = x;
22092 h->key_mode = mode;
22093 h->labelno = labelno;
22095 found = htab_find_slot (toc_hash_table, h, INSERT);
22096 if (*found == NULL)
22097 *found = h;
22098 else /* This is indeed a duplicate.
22099 Set this label equal to that label. */
22101 fputs ("\t.set ", file);
22102 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
22103 fprintf (file, "%d,", labelno);
22104 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
22105 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
22106 found)->labelno));
22108 #ifdef HAVE_AS_TLS
22109 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
22110 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
22111 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
22113 fputs ("\t.set ", file);
22114 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
22115 fprintf (file, "%d,", labelno);
22116 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
22117 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
22118 found)->labelno));
22120 #endif
22121 return;
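/* Illustrative example of the duplicate case above (the exact label
   prefix is target-dependent): on AIX/XCOFF this can emit something like

       .set LC..5,LC..2

   so the later label simply aliases the entry emitted first.  */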
22125 /* If we're going to put a double constant in the TOC, make sure it's
22126 aligned properly when strict alignment is on. */
22127 if (GET_CODE (x) == CONST_DOUBLE
22128 && STRICT_ALIGNMENT
22129 && GET_MODE_BITSIZE (mode) >= 64
22130 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
22131 ASM_OUTPUT_ALIGN (file, 3);
22134 (*targetm.asm_out.internal_label) (file, "LC", labelno);
22136 /* Handle FP constants specially. Note that if we have a minimal
22137 TOC, things we put here aren't actually in the TOC, so we can allow
22138 FP constants. */
22139 if (GET_CODE (x) == CONST_DOUBLE
22140 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
22142 REAL_VALUE_TYPE rv;
22143 long k[4];
22145 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22146 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22147 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
22148 else
22149 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
22151 if (TARGET_64BIT)
22153 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22154 fputs (DOUBLE_INT_ASM_OP, file);
22155 else
22156 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
22157 k[0] & 0xffffffff, k[1] & 0xffffffff,
22158 k[2] & 0xffffffff, k[3] & 0xffffffff);
22159 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
22160 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
22161 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
22162 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
22163 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
22164 return;
22166 else
22168 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22169 fputs ("\t.long ", file);
22170 else
22171 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
22172 k[0] & 0xffffffff, k[1] & 0xffffffff,
22173 k[2] & 0xffffffff, k[3] & 0xffffffff);
22174 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
22175 k[0] & 0xffffffff, k[1] & 0xffffffff,
22176 k[2] & 0xffffffff, k[3] & 0xffffffff);
22177 return;
22180 else if (GET_CODE (x) == CONST_DOUBLE
22181 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
22183 REAL_VALUE_TYPE rv;
22184 long k[2];
22186 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22188 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22189 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
22190 else
22191 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
22193 if (TARGET_64BIT)
22195 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22196 fputs (DOUBLE_INT_ASM_OP, file);
22197 else
22198 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22199 k[0] & 0xffffffff, k[1] & 0xffffffff);
22200 fprintf (file, "0x%lx%08lx\n",
22201 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
22202 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
22203 return;
22205 else
22207 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22208 fputs ("\t.long ", file);
22209 else
22210 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22211 k[0] & 0xffffffff, k[1] & 0xffffffff);
22212 fprintf (file, "0x%lx,0x%lx\n",
22213 k[0] & 0xffffffff, k[1] & 0xffffffff);
22214 return;
22217 else if (GET_CODE (x) == CONST_DOUBLE
22218 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
22220 REAL_VALUE_TYPE rv;
22221 long l;
22223 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22224 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22225 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
22226 else
22227 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
22229 if (TARGET_64BIT)
22231 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22232 fputs (DOUBLE_INT_ASM_OP, file);
22233 else
22234 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22235 if (WORDS_BIG_ENDIAN)
22236 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
22237 else
22238 fprintf (file, "0x%lx\n", l & 0xffffffff);
22239 return;
22241 else
22243 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22244 fputs ("\t.long ", file);
22245 else
22246 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22247 fprintf (file, "0x%lx\n", l & 0xffffffff);
22248 return;
22251 else if (GET_MODE (x) == VOIDmode
22252 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
22254 unsigned HOST_WIDE_INT low;
22255 HOST_WIDE_INT high;
22257 if (GET_CODE (x) == CONST_DOUBLE)
22259 low = CONST_DOUBLE_LOW (x);
22260 high = CONST_DOUBLE_HIGH (x);
22262 else
22263 #if HOST_BITS_PER_WIDE_INT == 32
22265 low = INTVAL (x);
22266 high = (low & 0x80000000) ? ~0 : 0;
22268 #else
22270 low = INTVAL (x) & 0xffffffff;
22271 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
22273 #endif
22275 /* TOC entries are always Pmode-sized, so when big-endian
22276 smaller integer constants in the TOC need to be padded.
22277 (This is still a win over putting the constants in
22278 a separate constant pool, because then we'd have
22279 to have both a TOC entry _and_ the actual constant.)
22281 For a 32-bit target, CONST_INT values are loaded and shifted
22282 entirely within `low' and can be stored in one TOC entry. */
22285 /* Supporting constants wider than a pointer on 64-bit targets would be easy, but it isn't done now. */
22285 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
22287 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
22289 #if HOST_BITS_PER_WIDE_INT == 32
22290 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
22291 POINTER_SIZE, &low, &high, 0);
22292 #else
22293 low |= high << 32;
22294 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
22295 high = (HOST_WIDE_INT) low >> 32;
22296 low &= 0xffffffff;
22297 #endif
22300 if (TARGET_64BIT)
22302 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22303 fputs (DOUBLE_INT_ASM_OP, file);
22304 else
22305 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22306 (long) high & 0xffffffff, (long) low & 0xffffffff);
22307 fprintf (file, "0x%lx%08lx\n",
22308 (long) high & 0xffffffff, (long) low & 0xffffffff);
22309 return;
22311 else
22313 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
22315 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22316 fputs ("\t.long ", file);
22317 else
22318 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22319 (long) high & 0xffffffff, (long) low & 0xffffffff);
22320 fprintf (file, "0x%lx,0x%lx\n",
22321 (long) high & 0xffffffff, (long) low & 0xffffffff);
22323 else
22325 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22326 fputs ("\t.long ", file);
22327 else
22328 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
22329 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
22331 return;
22335 if (GET_CODE (x) == CONST)
22337 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
22338 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
22340 base = XEXP (XEXP (x, 0), 0);
22341 offset = INTVAL (XEXP (XEXP (x, 0), 1));
22344 switch (GET_CODE (base))
22346 case SYMBOL_REF:
22347 name = XSTR (base, 0);
22348 break;
22350 case LABEL_REF:
22351 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
22352 CODE_LABEL_NUMBER (XEXP (base, 0)));
22353 break;
22355 case CODE_LABEL:
22356 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
22357 break;
22359 default:
22360 gcc_unreachable ();
22363 if (TARGET_ELF || TARGET_MINIMAL_TOC)
22364 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
22365 else
22367 fputs ("\t.tc ", file);
22368 RS6000_OUTPUT_BASENAME (file, name);
22370 if (offset < 0)
22371 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
22372 else if (offset)
22373 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
22375 /* Mark large TOC symbols on AIX with [TE] so they are mapped
22376 after other TOC symbols, reducing overflow of small TOC access
22377 to [TC] symbols. */
22378 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
22379 ? "[TE]," : "[TC],", file);
22382 /* Currently C++ toc references to vtables can be emitted before it
22383 is decided whether the vtable is public or private. If this is
22384 the case, then the linker will eventually complain that there is
22385 a TOC reference to an unknown section. Thus, for vtables only,
22386 we emit the TOC reference to reference the symbol and not the
22387 section. */
22388 if (VTABLE_NAME_P (name))
22390 RS6000_OUTPUT_BASENAME (file, name);
22391 if (offset < 0)
22392 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
22393 else if (offset > 0)
22394 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
22396 else
22397 output_addr_const (file, x);
22399 #if HAVE_AS_TLS
22400 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
22401 && SYMBOL_REF_TLS_MODEL (base) != 0)
22403 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
22404 fputs ("@le", file);
22405 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
22406 fputs ("@ie", file);
22407 /* Use global-dynamic for local-dynamic. */
22408 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
22409 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
22411 putc ('\n', file);
22412 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
22413 fputs ("\t.tc .", file);
22414 RS6000_OUTPUT_BASENAME (file, name);
22415 fputs ("[TC],", file);
22416 output_addr_const (file, x);
22417 fputs ("@m", file);
22420 #endif
22422 putc ('\n', file);
22425 /* Output an assembler pseudo-op to write an ASCII string of N characters
22426 starting at P to FILE.
22428 On the RS/6000, we have to do this using the .byte operation and
22429 write out special characters outside the quoted string.
22430 Also, the assembler is broken; very long strings are truncated,
22431 so we must artificially break them up early. */
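/* Example (added for exposition): for the four input bytes a"b followed
   by a newline, this routine emits roughly

       .byte "a""b"
       .byte 10

   printable characters accumulate inside one quoted string (with quotes
   doubled), while other bytes are written as decimal values.  */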
22433 void
22434 output_ascii (FILE *file, const char *p, int n)
22436 char c;
22437 int i, count_string;
22438 const char *for_string = "\t.byte \"";
22439 const char *for_decimal = "\t.byte ";
22440 const char *to_close = NULL;
22442 count_string = 0;
22443 for (i = 0; i < n; i++)
22445 c = *p++;
22446 if (c >= ' ' && c < 0177)
22448 if (for_string)
22449 fputs (for_string, file);
22450 putc (c, file);
22452 /* Write two quotes to get one. */
22453 if (c == '"')
22455 putc (c, file);
22456 ++count_string;
22459 for_string = NULL;
22460 for_decimal = "\"\n\t.byte ";
22461 to_close = "\"\n";
22462 ++count_string;
22464 if (count_string >= 512)
22466 fputs (to_close, file);
22468 for_string = "\t.byte \"";
22469 for_decimal = "\t.byte ";
22470 to_close = NULL;
22471 count_string = 0;
22474 else
22476 if (for_decimal)
22477 fputs (for_decimal, file);
22478 fprintf (file, "%d", c);
22480 for_string = "\n\t.byte \"";
22481 for_decimal = ", ";
22482 to_close = "\n";
22483 count_string = 0;
22487 /* Now close the string if we have written one. Then end the line. */
22488 if (to_close)
22489 fputs (to_close, file);
22492 /* Generate a unique section name for FILENAME for a section type
22493 represented by SECTION_DESC. Output goes into BUF.
22495 SECTION_DESC can be any string, as long as it is different for each
22496 possible section type.
22498 We name the section in the same manner as xlc. The name begins with an
22499 underscore followed by the filename (after stripping any leading directory
22500 names) with the last period replaced by the string SECTION_DESC. If
22501 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22502 the name. */
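/* Example (added for exposition): FILENAME "src/foo-bar.c" with
   SECTION_DESC "data" yields "_foobardata": the directory prefix is
   dropped, non-alphanumeric characters are skipped, and everything from
   the last period on is replaced by SECTION_DESC.  */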
22504 void
22505 rs6000_gen_section_name (char **buf, const char *filename,
22506 const char *section_desc)
22508 const char *q, *after_last_slash, *last_period = 0;
22509 char *p;
22510 int len;
22512 after_last_slash = filename;
22513 for (q = filename; *q; q++)
22515 if (*q == '/')
22516 after_last_slash = q + 1;
22517 else if (*q == '.')
22518 last_period = q;
22521 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22522 *buf = (char *) xmalloc (len);
22524 p = *buf;
22525 *p++ = '_';
22527 for (q = after_last_slash; *q; q++)
22529 if (q == last_period)
22531 strcpy (p, section_desc);
22532 p += strlen (section_desc);
22533 break;
22536 else if (ISALNUM (*q))
22537 *p++ = *q;
22540 if (last_period == 0)
22541 strcpy (p, section_desc);
22542 else
22543 *p = '\0';
22546 /* Emit profile function. */
22548 void
22549 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22551 /* Non-standard profiling for kernels, which just saves LR then calls
22552 _mcount without worrying about arg saves. The idea is to change
22553 the function prologue as little as possible as it isn't easy to
22554 account for arg save/restore code added just for _mcount. */
22555 if (TARGET_PROFILE_KERNEL)
22556 return;
22558 if (DEFAULT_ABI == ABI_AIX)
22560 #ifndef NO_PROFILE_COUNTERS
22561 # define NO_PROFILE_COUNTERS 0
22562 #endif
22563 if (NO_PROFILE_COUNTERS)
22564 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22565 LCT_NORMAL, VOIDmode, 0);
22566 else
22568 char buf[30];
22569 const char *label_name;
22570 rtx fun;
22572 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22573 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22574 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22576 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22577 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22580 else if (DEFAULT_ABI == ABI_DARWIN)
22582 const char *mcount_name = RS6000_MCOUNT;
22583 int caller_addr_regno = LR_REGNO;
22585 /* Be conservative and always set this, at least for now. */
22586 crtl->uses_pic_offset_table = 1;
22588 #if TARGET_MACHO
22589 /* For PIC code, set up a stub and collect the caller's address
22590 from r0, which is where the prologue puts it. */
22591 if (MACHOPIC_INDIRECT
22592 && crtl->uses_pic_offset_table)
22593 caller_addr_regno = 0;
22594 #endif
22595 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22596 LCT_NORMAL, VOIDmode, 1,
22597 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22601 /* Write function profiler code. */
22603 void
22604 output_function_profiler (FILE *file, int labelno)
22606 char buf[100];
22608 switch (DEFAULT_ABI)
22610 default:
22611 gcc_unreachable ();
22613 case ABI_V4:
22614 if (!TARGET_32BIT)
22616 warning (0, "no profiling of 64-bit code for this ABI");
22617 return;
22619 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22620 fprintf (file, "\tmflr %s\n", reg_names[0]);
22621 if (NO_PROFILE_COUNTERS)
22623 asm_fprintf (file, "\tstw %s,4(%s)\n",
22624 reg_names[0], reg_names[1]);
22626 else if (TARGET_SECURE_PLT && flag_pic)
22628 if (TARGET_LINK_STACK)
22630 char name[32];
22631 get_ppc476_thunk_name (name);
22632 asm_fprintf (file, "\tbl %s\n", name);
22634 else
22635 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22636 asm_fprintf (file, "\tstw %s,4(%s)\n",
22637 reg_names[0], reg_names[1]);
22638 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22639 asm_fprintf (file, "\taddis %s,%s,",
22640 reg_names[12], reg_names[12]);
22641 assemble_name (file, buf);
22642 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
22643 assemble_name (file, buf);
22644 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22646 else if (flag_pic == 1)
22648 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22649 asm_fprintf (file, "\tstw %s,4(%s)\n",
22650 reg_names[0], reg_names[1]);
22651 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22652 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
22653 assemble_name (file, buf);
22654 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22656 else if (flag_pic > 1)
22658 asm_fprintf (file, "\tstw %s,4(%s)\n",
22659 reg_names[0], reg_names[1]);
22660 /* Now, we need to get the address of the label. */
22661 if (TARGET_LINK_STACK)
22663 char name[32];
22664 get_ppc476_thunk_name (name);
22665 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22666 assemble_name (file, buf);
22667 fputs ("-.\n1:", file);
22668 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22669 asm_fprintf (file, "\taddi %s,%s,4\n",
22670 reg_names[11], reg_names[11]);
22672 else
22674 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22675 assemble_name (file, buf);
22676 fputs ("-.\n1:", file);
22677 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22679 asm_fprintf (file, "\tlwz %s,0(%s)\n",
22680 reg_names[0], reg_names[11]);
22681 asm_fprintf (file, "\tadd %s,%s,%s\n",
22682 reg_names[0], reg_names[0], reg_names[11]);
22684 else
22686 asm_fprintf (file, "\tlis %s,", reg_names[12]);
22687 assemble_name (file, buf);
22688 fputs ("@ha\n", file);
22689 asm_fprintf (file, "\tstw %s,4(%s)\n",
22690 reg_names[0], reg_names[1]);
22691 asm_fprintf (file, "\tla %s,", reg_names[0]);
22692 assemble_name (file, buf);
22693 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
22696 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
22697 fprintf (file, "\tbl %s%s\n",
22698 RS6000_MCOUNT, flag_pic ? "@plt" : "");
22699 break;
22701 case ABI_AIX:
22702 case ABI_DARWIN:
22703 if (!TARGET_PROFILE_KERNEL)
22705 /* Don't do anything, done in output_profile_hook (). */
22707 else
22709 gcc_assert (!TARGET_32BIT);
22711 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
22712 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
22714 if (cfun->static_chain_decl != NULL)
22716 asm_fprintf (file, "\tstd %s,24(%s)\n",
22717 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22718 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22719 asm_fprintf (file, "\tld %s,24(%s)\n",
22720 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22722 else
22723 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22725 break;
22731 /* The following variable value is the last issued insn. */
22733 static rtx last_scheduled_insn;
22735 /* The following variable helps balance the issuing of load and
22736 store instructions. */
22738 static int load_store_pendulum;
22740 /* Power4 load update and store update instructions are cracked into a
22741 load or store and an integer insn which are executed in the same cycle.
22742 Branches have their own dispatch slot which does not count against the
22743 GCC issue rate, but it changes the program flow so there are no other
22744 instructions to issue in this cycle. */
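/* Example (added for exposition): with POWER5's issue rate of 5, a
   cracked insn below consumes two issue slots ("more" drops by 2) and a
   microcoded insn ends the dispatch group outright (0 slots remain).  */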
22746 static int
22747 rs6000_variable_issue_1 (rtx insn, int more)
22749 last_scheduled_insn = insn;
22750 if (GET_CODE (PATTERN (insn)) == USE
22751 || GET_CODE (PATTERN (insn)) == CLOBBER)
22753 cached_can_issue_more = more;
22754 return cached_can_issue_more;
22757 if (insn_terminates_group_p (insn, current_group))
22759 cached_can_issue_more = 0;
22760 return cached_can_issue_more;
22763 /* Unrecognized insns have no reservation; leave the issue count unchanged. */
22764 if (recog_memoized (insn) < 0)
22765 return more;
22767 if (rs6000_sched_groups)
22769 if (is_microcoded_insn (insn))
22770 cached_can_issue_more = 0;
22771 else if (is_cracked_insn (insn))
22772 cached_can_issue_more = more > 2 ? more - 2 : 0;
22773 else
22774 cached_can_issue_more = more - 1;
22776 return cached_can_issue_more;
22779 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
22780 return 0;
22782 cached_can_issue_more = more - 1;
22783 return cached_can_issue_more;
22786 static int
22787 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
22789 int r = rs6000_variable_issue_1 (insn, more);
22790 if (verbose)
22791 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
22792 return r;
22795 /* Adjust the cost of a scheduling dependency. Return the new cost of
22796 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
22798 static int
22799 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22801 enum attr_type attr_type;
22803 if (! recog_memoized (insn))
22804 return 0;
22806 switch (REG_NOTE_KIND (link))
22808 case REG_DEP_TRUE:
22810 /* Data dependency; DEP_INSN writes a register that INSN reads
22811 some cycles later. */
22813 /* Separate a load from a narrower, dependent store. */
22814 if (rs6000_sched_groups
22815 && GET_CODE (PATTERN (insn)) == SET
22816 && GET_CODE (PATTERN (dep_insn)) == SET
22817 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
22818 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
22819 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
22820 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
22821 return cost + 14;
22823 attr_type = get_attr_type (insn);
22825 switch (attr_type)
22827 case TYPE_JMPREG:
22828 /* Tell the first scheduling pass about the latency between
22829 a mtctr and bctr (and mtlr and br/blr). The first
22830 scheduling pass will not know about this latency since
22831 the mtctr instruction, which has the latency associated
22832 to it, will be generated by reload. */
22833 return 4;
22834 case TYPE_BRANCH:
22835 /* Leave some extra cycles between a compare and its
22836 dependent branch, to inhibit expensive mispredicts. */
22837 if ((rs6000_cpu_attr == CPU_PPC603
22838 || rs6000_cpu_attr == CPU_PPC604
22839 || rs6000_cpu_attr == CPU_PPC604E
22840 || rs6000_cpu_attr == CPU_PPC620
22841 || rs6000_cpu_attr == CPU_PPC630
22842 || rs6000_cpu_attr == CPU_PPC750
22843 || rs6000_cpu_attr == CPU_PPC7400
22844 || rs6000_cpu_attr == CPU_PPC7450
22845 || rs6000_cpu_attr == CPU_PPCE5500
22846 || rs6000_cpu_attr == CPU_PPCE6500
22847 || rs6000_cpu_attr == CPU_POWER4
22848 || rs6000_cpu_attr == CPU_POWER5
22849 || rs6000_cpu_attr == CPU_POWER7
22850 || rs6000_cpu_attr == CPU_CELL)
22851 && recog_memoized (dep_insn)
22852 && (INSN_CODE (dep_insn) >= 0))
22854 switch (get_attr_type (dep_insn))
22856 case TYPE_CMP:
22857 case TYPE_COMPARE:
22858 case TYPE_DELAYED_COMPARE:
22859 case TYPE_IMUL_COMPARE:
22860 case TYPE_LMUL_COMPARE:
22861 case TYPE_FPCOMPARE:
22862 case TYPE_CR_LOGICAL:
22863 case TYPE_DELAYED_CR:
22864 return cost + 2;
22865 default:
22866 break;
22868 break;
22870 case TYPE_STORE:
22871 case TYPE_STORE_U:
22872 case TYPE_STORE_UX:
22873 case TYPE_FPSTORE:
22874 case TYPE_FPSTORE_U:
22875 case TYPE_FPSTORE_UX:
22876 if ((rs6000_cpu == PROCESSOR_POWER6)
22877 && recog_memoized (dep_insn)
22878 && (INSN_CODE (dep_insn) >= 0))
22881 if (GET_CODE (PATTERN (insn)) != SET)
22882 /* If this happens, we have to extend this to schedule
22883 optimally. Return default for now. */
22884 return cost;
22886 /* Adjust the cost for the case where the value written
22887 by a fixed point operation is used as the address
22888 gen value on a store. */
22889 switch (get_attr_type (dep_insn))
22891 case TYPE_LOAD:
22892 case TYPE_LOAD_U:
22893 case TYPE_LOAD_UX:
22894 case TYPE_CNTLZ:
22896 if (! store_data_bypass_p (dep_insn, insn))
22897 return 4;
22898 break;
22900 case TYPE_LOAD_EXT:
22901 case TYPE_LOAD_EXT_U:
22902 case TYPE_LOAD_EXT_UX:
22903 case TYPE_VAR_SHIFT_ROTATE:
22904 case TYPE_VAR_DELAYED_COMPARE:
22906 if (! store_data_bypass_p (dep_insn, insn))
22907 return 6;
22908 break;
22910 case TYPE_INTEGER:
22911 case TYPE_COMPARE:
22912 case TYPE_FAST_COMPARE:
22913 case TYPE_EXTS:
22914 case TYPE_SHIFT:
22915 case TYPE_INSERT_WORD:
22916 case TYPE_INSERT_DWORD:
22917 case TYPE_FPLOAD_U:
22918 case TYPE_FPLOAD_UX:
22919 case TYPE_STORE_U:
22920 case TYPE_STORE_UX:
22921 case TYPE_FPSTORE_U:
22922 case TYPE_FPSTORE_UX:
22924 if (! store_data_bypass_p (dep_insn, insn))
22925 return 3;
22926 break;
22928 case TYPE_IMUL:
22929 case TYPE_IMUL2:
22930 case TYPE_IMUL3:
22931 case TYPE_LMUL:
22932 case TYPE_IMUL_COMPARE:
22933 case TYPE_LMUL_COMPARE:
22935 if (! store_data_bypass_p (dep_insn, insn))
22936 return 17;
22937 break;
22939 case TYPE_IDIV:
22941 if (! store_data_bypass_p (dep_insn, insn))
22942 return 45;
22943 break;
22945 case TYPE_LDIV:
22947 if (! store_data_bypass_p (dep_insn, insn))
22948 return 57;
22949 break;
22951 default:
22952 break;
22955 break;
22957 case TYPE_LOAD:
22958 case TYPE_LOAD_U:
22959 case TYPE_LOAD_UX:
22960 case TYPE_LOAD_EXT:
22961 case TYPE_LOAD_EXT_U:
22962 case TYPE_LOAD_EXT_UX:
22963 if ((rs6000_cpu == PROCESSOR_POWER6)
22964 && recog_memoized (dep_insn)
22965 && (INSN_CODE (dep_insn) >= 0))
22968 /* Adjust the cost for the case where the value written
22969 by a fixed point instruction is used within the address
22970 gen portion of a subsequent load(u)(x). */
22971 switch (get_attr_type (dep_insn))
22973 case TYPE_LOAD:
22974 case TYPE_LOAD_U:
22975 case TYPE_LOAD_UX:
22976 case TYPE_CNTLZ:
22978 if (set_to_load_agen (dep_insn, insn))
22979 return 4;
22980 break;
22982 case TYPE_LOAD_EXT:
22983 case TYPE_LOAD_EXT_U:
22984 case TYPE_LOAD_EXT_UX:
22985 case TYPE_VAR_SHIFT_ROTATE:
22986 case TYPE_VAR_DELAYED_COMPARE:
22988 if (set_to_load_agen (dep_insn, insn))
22989 return 6;
22990 break;
22992 case TYPE_INTEGER:
22993 case TYPE_COMPARE:
22994 case TYPE_FAST_COMPARE:
22995 case TYPE_EXTS:
22996 case TYPE_SHIFT:
22997 case TYPE_INSERT_WORD:
22998 case TYPE_INSERT_DWORD:
22999 case TYPE_FPLOAD_U:
23000 case TYPE_FPLOAD_UX:
23001 case TYPE_STORE_U:
23002 case TYPE_STORE_UX:
23003 case TYPE_FPSTORE_U:
23004 case TYPE_FPSTORE_UX:
23006 if (set_to_load_agen (dep_insn, insn))
23007 return 3;
23008 break;
23010 case TYPE_IMUL:
23011 case TYPE_IMUL2:
23012 case TYPE_IMUL3:
23013 case TYPE_LMUL:
23014 case TYPE_IMUL_COMPARE:
23015 case TYPE_LMUL_COMPARE:
23017 if (set_to_load_agen (dep_insn, insn))
23018 return 17;
23019 break;
23021 case TYPE_IDIV:
23023 if (set_to_load_agen (dep_insn, insn))
23024 return 45;
23025 break;
23027 case TYPE_LDIV:
23029 if (set_to_load_agen (dep_insn, insn))
23030 return 57;
23031 break;
23033 default:
23034 break;
23037 break;
23039 case TYPE_FPLOAD:
23040 if ((rs6000_cpu == PROCESSOR_POWER6)
23041 && recog_memoized (dep_insn)
23042 && (INSN_CODE (dep_insn) >= 0)
23043 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
23044 return 2;
23046 default:
23047 break;
23050 /* Fall out to return default cost. */
23052 break;
23054 case REG_DEP_OUTPUT:
23055 /* Output dependency; DEP_INSN writes a register that INSN writes some
23056 cycles later. */
23057 if ((rs6000_cpu == PROCESSOR_POWER6)
23058 && recog_memoized (dep_insn)
23059 && (INSN_CODE (dep_insn) >= 0))
23061 attr_type = get_attr_type (insn);
23063 switch (attr_type)
23065 case TYPE_FP:
23066 if (get_attr_type (dep_insn) == TYPE_FP)
23067 return 1;
23068 break;
23069 case TYPE_FPLOAD:
23070 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
23071 return 2;
23072 break;
23073 default:
23074 break;
23077 case REG_DEP_ANTI:
23078 /* Anti dependency; DEP_INSN reads a register that INSN writes some
23079 cycles later. */
23080 return 0;
23082 default:
23083 gcc_unreachable ();
23086 return cost;
23089 /* Debug version of rs6000_adjust_cost. */
23091 static int
23092 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
23094 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
23096 if (ret != cost)
23098 const char *dep;
23100 switch (REG_NOTE_KIND (link))
23102 default: dep = "unknown dependency"; break;
23103 case REG_DEP_TRUE: dep = "data dependency"; break;
23104 case REG_DEP_OUTPUT: dep = "output dependency"; break;
23105 case REG_DEP_ANTI: dep = "anti dependency"; break;
23108 fprintf (stderr,
23109 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
23110 "%s, insn:\n", ret, cost, dep);
23112 debug_rtx (insn);
23115 return ret;
23118 /* The function returns true if INSN is microcoded.
23119 Return false otherwise. */
23121 static bool
23122 is_microcoded_insn (rtx insn)
23124 if (!insn || !NONDEBUG_INSN_P (insn)
23125 || GET_CODE (PATTERN (insn)) == USE
23126 || GET_CODE (PATTERN (insn)) == CLOBBER)
23127 return false;
23129 if (rs6000_cpu_attr == CPU_CELL)
23130 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
23132 if (rs6000_sched_groups)
23134 enum attr_type type = get_attr_type (insn);
23135 if (type == TYPE_LOAD_EXT_U
23136 || type == TYPE_LOAD_EXT_UX
23137 || type == TYPE_LOAD_UX
23138 || type == TYPE_STORE_UX
23139 || type == TYPE_MFCR)
23140 return true;
23143 return false;
23146 /* The function returns true if INSN is cracked into 2 instructions
23147 by the processor (and therefore occupies 2 issue slots). */
23149 static bool
23150 is_cracked_insn (rtx insn)
23152 if (!insn || !NONDEBUG_INSN_P (insn)
23153 || GET_CODE (PATTERN (insn)) == USE
23154 || GET_CODE (PATTERN (insn)) == CLOBBER)
23155 return false;
23157 if (rs6000_sched_groups)
23159 enum attr_type type = get_attr_type (insn);
23160 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
23161 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
23162 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
23163 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
23164 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
23165 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
23166 || type == TYPE_IDIV || type == TYPE_LDIV
23167 || type == TYPE_INSERT_WORD)
23168 return true;
23171 return false;
23174 /* The function returns true if INSN can be issued only from
23175 the branch slot. */
23177 static bool
23178 is_branch_slot_insn (rtx insn)
23180 if (!insn || !NONDEBUG_INSN_P (insn)
23181 || GET_CODE (PATTERN (insn)) == USE
23182 || GET_CODE (PATTERN (insn)) == CLOBBER)
23183 return false;
23185 if (rs6000_sched_groups)
23187 enum attr_type type = get_attr_type (insn);
23188 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
23189 return true;
23190 return false;
23193 return false;
23196 /* The function returns true if OUT_INSN sets a value that is
23197 used in the address generation computation of IN_INSN. */
23198 static bool
23199 set_to_load_agen (rtx out_insn, rtx in_insn)
23201 rtx out_set, in_set;
23203 /* For performance reasons, only handle the simple case where
23204 both insns are a single_set. */
23205 out_set = single_set (out_insn);
23206 if (out_set)
23208 in_set = single_set (in_insn);
23209 if (in_set)
23210 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
23213 return false;
23216 /* Try to determine base/offset/size parts of the given MEM.
23217 Return true if successful, false if the values couldn't
23218 all be determined.
23220 This function only looks for REG or REG+CONST address forms.
23221 REG+REG address form will return false. */
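/* Example (added for exposition): a MEM of known size 8 whose address is
   (plus:DI (reg:DI 9) (const_int 16)) yields BASE = r9, OFFSET = 16,
   SIZE = 8; a REG+REG address such as (plus:DI (reg:DI 9) (reg:DI 10))
   makes the function return false.  */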
23223 static bool
23224 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
23225 HOST_WIDE_INT *size)
23227 rtx addr_rtx;
23228 if (MEM_SIZE_KNOWN_P (mem))
23229 *size = MEM_SIZE (mem);
23230 else
23231 return false;
23233 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
23234 addr_rtx = XEXP (XEXP (mem, 0), 1);
23235 else
23236 addr_rtx = XEXP (mem, 0);
23238 if (GET_CODE (addr_rtx) == REG)
23240 *base = addr_rtx;
23241 *offset = 0;
23243 else if (GET_CODE (addr_rtx) == PLUS
23244 && CONST_INT_P (XEXP (addr_rtx, 1)))
23246 *base = XEXP (addr_rtx, 0);
23247 *offset = INTVAL (XEXP (addr_rtx, 1));
23249 else
23250 return false;
23252 return true;
23255 /* Return true if the target storage location of MEM1 is adjacent
23256 to the target storage location of MEM2. */
23259 static bool
23260 adjacent_mem_locations (rtx mem1, rtx mem2)
23262 rtx reg1, reg2;
23263 HOST_WIDE_INT off1, size1, off2, size2;
23265 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23266 && get_memref_parts (mem2, &reg2, &off2, &size2))
23267 return ((REGNO (reg1) == REGNO (reg2))
23268 && ((off1 + size1 == off2)
23269 || (off2 + size2 == off1)));
23271 return false;
23274 /* This function returns true if it can be determined that the two MEM
23275 locations overlap by at least 1 byte based on base reg/offset/size. */
23277 static bool
23278 mem_locations_overlap (rtx mem1, rtx mem2)
23280 rtx reg1, reg2;
23281 HOST_WIDE_INT off1, size1, off2, size2;
23283 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23284 && get_memref_parts (mem2, &reg2, &off2, &size2))
23285 return ((REGNO (reg1) == REGNO (reg2))
23286 && (((off1 <= off2) && (off1 + size1 > off2))
23287 || ((off2 <= off1) && (off2 + size2 > off1))));
23289 return false;
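#if 0
/* Self-contained restatement of the two interval tests above, added for
   exposition only.  */
#include <stdbool.h>

static bool
intervals_adjacent (long off1, long size1, long off2, long size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static bool
intervals_overlap (long off1, long size1, long off2, long size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}
#endif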
23292 /* A C statement (sans semicolon) to update the integer scheduling
23293 priority INSN_PRIORITY (INSN). Increase the priority to execute the
23294 INSN earlier, reduce the priority to execute INSN later. Do not
23295 define this macro if you do not need to adjust the scheduling
23296 priorities of insns. */
23298 static int
23299 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
23301 rtx load_mem, str_mem;
23302 /* On machines (like the 750) which have asymmetric integer units,
23303 where one integer unit can do multiply and divides and the other
23304 can't, reduce the priority of multiply/divide so it is scheduled
23305 before other integer operations. */
23307 #if 0
23308 if (! INSN_P (insn))
23309 return priority;
23311 if (GET_CODE (PATTERN (insn)) == USE)
23312 return priority;
23314 switch (rs6000_cpu_attr) {
23315 case CPU_PPC750:
23316 switch (get_attr_type (insn))
23318 default:
23319 break;
23321 case TYPE_IMUL:
23322 case TYPE_IDIV:
23323 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
23324 priority, priority);
23325 if (priority >= 0 && priority < 0x01000000)
23326 priority >>= 3;
23327 break;
23330 #endif
23332 if (insn_must_be_first_in_group (insn)
23333 && reload_completed
23334 && current_sched_info->sched_max_insns_priority
23335 && rs6000_sched_restricted_insns_priority)
23338 /* Prioritize insns that can be dispatched only in the first
23339 dispatch slot. */
23340 if (rs6000_sched_restricted_insns_priority == 1)
23341 /* Attach highest priority to insn. This means that in
23342 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
23343 precede 'priority' (critical path) considerations. */
23344 return current_sched_info->sched_max_insns_priority;
23345 else if (rs6000_sched_restricted_insns_priority == 2)
23346 /* Increase priority of insn by a minimal amount. This means that in
23347 haifa-sched.c:ready_sort(), only 'priority' (critical path)
23348 considerations precede dispatch-slot restriction considerations. */
23349 return (priority + 1);
23352 if (rs6000_cpu == PROCESSOR_POWER6
23353 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
23354 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
23355 /* Attach highest priority to insn if the scheduler has just issued two
23356 stores and this instruction is a load, or two loads and this instruction
23357 is a store. Power6 wants loads and stores scheduled alternately
23358 when possible. */
23359 return current_sched_info->sched_max_insns_priority;
23361 return priority;
23364 /* Return true if the instruction is nonpipelined on the Cell. */
23365 static bool
23366 is_nonpipeline_insn (rtx insn)
23368 enum attr_type type;
23369 if (!insn || !NONDEBUG_INSN_P (insn)
23370 || GET_CODE (PATTERN (insn)) == USE
23371 || GET_CODE (PATTERN (insn)) == CLOBBER)
23372 return false;
23374 type = get_attr_type (insn);
23375 if (type == TYPE_IMUL
23376 || type == TYPE_IMUL2
23377 || type == TYPE_IMUL3
23378 || type == TYPE_LMUL
23379 || type == TYPE_IDIV
23380 || type == TYPE_LDIV
23381 || type == TYPE_SDIV
23382 || type == TYPE_DDIV
23383 || type == TYPE_SSQRT
23384 || type == TYPE_DSQRT
23385 || type == TYPE_MFCR
23386 || type == TYPE_MFCRF
23387 || type == TYPE_MFJMPR)
23389 return true;
23391 return false;
23395 /* Return how many instructions the machine can issue per cycle. */
23397 static int
23398 rs6000_issue_rate (void)
23400 /* Unless scheduling for register pressure, use issue rate of 1 for
23401 first scheduling pass to decrease degradation. */
23402 if (!reload_completed && !flag_sched_pressure)
23403 return 1;
23405 switch (rs6000_cpu_attr) {
23406 case CPU_RS64A:
23407 case CPU_PPC601: /* ? */
23408 case CPU_PPC7450:
23409 return 3;
23410 case CPU_PPC440:
23411 case CPU_PPC603:
23412 case CPU_PPC750:
23413 case CPU_PPC7400:
23414 case CPU_PPC8540:
23415 case CPU_PPC8548:
23416 case CPU_CELL:
23417 case CPU_PPCE300C2:
23418 case CPU_PPCE300C3:
23419 case CPU_PPCE500MC:
23420 case CPU_PPCE500MC64:
23421 case CPU_PPCE5500:
23422 case CPU_PPCE6500:
23423 case CPU_TITAN:
23424 return 2;
23425 case CPU_PPC476:
23426 case CPU_PPC604:
23427 case CPU_PPC604E:
23428 case CPU_PPC620:
23429 case CPU_PPC630:
23430 return 4;
23431 case CPU_POWER4:
23432 case CPU_POWER5:
23433 case CPU_POWER6:
23434 case CPU_POWER7:
23435 return 5;
23436 default:
23437 return 1;
23441 /* Return how many instructions to look ahead for better insn
23442 scheduling. */
23444 static int
23445 rs6000_use_sched_lookahead (void)
23447 switch (rs6000_cpu_attr)
23449 case CPU_PPC8540:
23450 case CPU_PPC8548:
23451 return 4;
23453 case CPU_CELL:
23454 return (reload_completed ? 8 : 0);
23456 default:
23457 return 0;
23461 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
23462 static int
23463 rs6000_use_sched_lookahead_guard (rtx insn)
23465 if (rs6000_cpu_attr != CPU_CELL)
23466 return 1;
23468 if (insn == NULL_RTX || !INSN_P (insn))
23469 abort ();
23471 if (!reload_completed
23472 || is_nonpipeline_insn (insn)
23473 || is_microcoded_insn (insn))
23474 return 0;
23476 return 1;
23479 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23480 and return true. */
23482 static bool
23483 find_mem_ref (rtx pat, rtx *mem_ref)
23485 const char * fmt;
23486 int i, j;
23488 /* stack_tie does not produce any real memory traffic. */
23489 if (tie_operand (pat, VOIDmode))
23490 return false;
23492 if (GET_CODE (pat) == MEM)
23494 *mem_ref = pat;
23495 return true;
23498 /* Recursively process the pattern. */
23499 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23501 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23503 if (fmt[i] == 'e')
23505 if (find_mem_ref (XEXP (pat, i), mem_ref))
23506 return true;
23508 else if (fmt[i] == 'E')
23509 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23511 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23512 return true;
23516 return false;
23519 /* Determine if PAT is a PATTERN of a load insn. */
23521 static bool
23522 is_load_insn1 (rtx pat, rtx *load_mem)
23524 if (pat == NULL_RTX)
23525 return false;
23527 if (GET_CODE (pat) == SET)
23528 return find_mem_ref (SET_SRC (pat), load_mem);
23530 if (GET_CODE (pat) == PARALLEL)
23532 int i;
23534 for (i = 0; i < XVECLEN (pat, 0); i++)
23535 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23536 return true;
23539 return false;
23542 /* Determine if INSN loads from memory. */
23544 static bool
23545 is_load_insn (rtx insn, rtx *load_mem)
23547 if (!insn || !INSN_P (insn))
23548 return false;
23550 if (GET_CODE (insn) == CALL_INSN)
23551 return false;
23553 return is_load_insn1 (PATTERN (insn), load_mem);
23556 /* Determine if PAT is a PATTERN of a store insn. */
23558 static bool
23559 is_store_insn1 (rtx pat, rtx *str_mem)
23561 if (pat == NULL_RTX)
23562 return false;
23564 if (GET_CODE (pat) == SET)
23565 return find_mem_ref (SET_DEST (pat), str_mem);
23567 if (GET_CODE (pat) == PARALLEL)
23569 int i;
23571 for (i = 0; i < XVECLEN (pat, 0); i++)
23572 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23573 return true;
23576 return false;
23579 /* Determine if INSN stores to memory. */
23581 static bool
23582 is_store_insn (rtx insn, rtx *str_mem)
23584 if (!insn || !INSN_P (insn))
23585 return false;
23587 return is_store_insn1 (PATTERN (insn), str_mem);
23590 /* Returns whether the dependence between INSN and NEXT is considered
23591 costly by the given target. */
23593 static bool
23594 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23596 rtx insn;
23597 rtx next;
23598 rtx load_mem, str_mem;
23600 /* If the flag is not enabled - no dependence is considered costly;
23601 allow all dependent insns in the same group.
23602 This is the most aggressive option. */
23603 if (rs6000_sched_costly_dep == no_dep_costly)
23604 return false;
23606 /* If the flag is set to 1 - a dependence is always considered costly;
23607 do not allow dependent instructions in the same group.
23608 This is the most conservative option. */
23609 if (rs6000_sched_costly_dep == all_deps_costly)
23610 return true;
23612 insn = DEP_PRO (dep);
23613 next = DEP_CON (dep);
23615 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23616 && is_load_insn (next, &load_mem)
23617 && is_store_insn (insn, &str_mem))
23618 /* Prevent load after store in the same group. */
23619 return true;
23621 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23622 && is_load_insn (next, &load_mem)
23623 && is_store_insn (insn, &str_mem)
23624 && DEP_TYPE (dep) == REG_DEP_TRUE
23625 && mem_locations_overlap(str_mem, load_mem))
23626 /* Prevent load after store in the same group if it is a true
23627 dependence. */
23628 return true;
23630 /* The flag is set to X; dependences with latency >= X are considered costly,
23631 and will not be scheduled in the same group. */
23632 if (rs6000_sched_costly_dep <= max_dep_latency
23633 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23634 return true;
23636 return false;
23639 /* Return the next insn after INSN that is found before TAIL is reached,
23640 skipping any "non-active" insns - insns that will not actually occupy
23641 an issue slot. Return NULL_RTX if such an insn is not found. */
23643 static rtx
23644 get_next_active_insn (rtx insn, rtx tail)
23646 if (insn == NULL_RTX || insn == tail)
23647 return NULL_RTX;
23649 while (1)
23651 insn = NEXT_INSN (insn);
23652 if (insn == NULL_RTX || insn == tail)
23653 return NULL_RTX;
23655 if (CALL_P (insn)
23656 || JUMP_P (insn)
23657 || (NONJUMP_INSN_P (insn)
23658 && GET_CODE (PATTERN (insn)) != USE
23659 && GET_CODE (PATTERN (insn)) != CLOBBER
23660 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23661 break;
23663 return insn;
23666 /* We are about to begin issuing insns for this clock cycle. */
23668 static int
23669 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23670 rtx *ready ATTRIBUTE_UNUSED,
23671 int *pn_ready ATTRIBUTE_UNUSED,
23672 int clock_var ATTRIBUTE_UNUSED)
23674 int n_ready = *pn_ready;
23676 if (sched_verbose)
23677 fprintf (dump, "// rs6000_sched_reorder :\n");
23679 /* Reorder the ready list if the second to last ready insn
23680 is a nonpipelined insn. */
23681 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23683 if (is_nonpipeline_insn (ready[n_ready - 1])
23684 && (recog_memoized (ready[n_ready - 2]) > 0))
23685 /* Simply swap first two insns. */
23687 rtx tmp = ready[n_ready - 1];
23688 ready[n_ready - 1] = ready[n_ready - 2];
23689 ready[n_ready - 2] = tmp;
23693 if (rs6000_cpu == PROCESSOR_POWER6)
23694 load_store_pendulum = 0;
23696 return rs6000_issue_rate ();
23699 /* Like rs6000_sched_reorder, but called after issuing each insn. */
23701 static int
23702 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
23703 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
23705 if (sched_verbose)
23706 fprintf (dump, "// rs6000_sched_reorder2 :\n");
23708 /* For Power6, we need to handle some special cases to try and keep the
23709 store queue from overflowing and triggering expensive flushes.
23711 This code monitors how load and store instructions are being issued
23712 and skews the ready list one way or the other to increase the likelihood
23713 that a desired instruction is issued at the proper time.
23715 A couple of things are done. First, we maintain a "load_store_pendulum"
23716 to track the current state of load/store issue.
23718 - If the pendulum is at zero, then no loads or stores have been
23719 issued in the current cycle so we do nothing.
23721 - If the pendulum is 1, then a single load has been issued in this
23722 cycle and we attempt to locate another load in the ready list to
23723 issue with it.
23725 - If the pendulum is -2, then two stores have already been
23726 issued in this cycle, so we increase the priority of the first load
23727 in the ready list to increase its likelihood of being chosen first
23728 in the next cycle.
23730 - If the pendulum is -1, then a single store has been issued in this
23731 cycle and we attempt to locate another store in the ready list to
23732 issue with it, preferring a store to an adjacent memory location to
23733 facilitate store pairing in the store queue.
23735 - If the pendulum is 2, then two loads have already been
23736 issued in this cycle, so we increase the priority of the first store
23737 in the ready list to increase its likelihood of being chosen first
23738 in the next cycle.
23740 - If the pendulum < -2 or > 2, then do nothing.
23742 Note: This code covers the most common scenarios. There exist
23743 non-load/store instructions which make use of the LSU and which
23744 would need to be accounted for to strictly model the behavior
23745 of the machine. Those instructions are currently unaccounted
23746 for, to help minimize the compile-time overhead of this code. */
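/* Worked example (added for exposition): if two stores issue in one
   cycle, the pendulum reaches -2; the code below then bumps the
   INSN_PRIORITY of the first load on the ready list so the next cycle
   is likely to start with a load, and pushes the pendulum past -2 so
   the bump is applied only once.  */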
23748 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
23750 int pos;
23751 int i;
23752 rtx tmp, load_mem, str_mem;
23754 if (is_store_insn (last_scheduled_insn, &str_mem))
23755 /* Issuing a store, swing the load_store_pendulum to the left. */
23756 load_store_pendulum--;
23757 else if (is_load_insn (last_scheduled_insn, &load_mem))
23758 /* Issuing a load, swing the load_store_pendulum to the right. */
23759 load_store_pendulum++;
23760 else
23761 return cached_can_issue_more;
23763 /* If the pendulum is balanced, or there is only one instruction on
23764 the ready list, then all is well, so return. */
23765 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
23766 return cached_can_issue_more;
23768 if (load_store_pendulum == 1)
23770 /* A load has been issued in this cycle. Scan the ready list
23771 for another load to issue with it. */
23772 pos = *pn_ready-1;
23774 while (pos >= 0)
23776 if (is_load_insn (ready[pos], &load_mem))
23778 /* Found a load. Move it to the head of the ready list,
23779 and adjust its priority so that it is more likely to
23780 stay there. */
23781 tmp = ready[pos];
23782 for (i=pos; i<*pn_ready-1; i++)
23783 ready[i] = ready[i + 1];
23784 ready[*pn_ready-1] = tmp;
23786 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23787 INSN_PRIORITY (tmp)++;
23788 break;
23790 pos--;
23793 else if (load_store_pendulum == -2)
23795 /* Two stores have been issued in this cycle. Increase the
23796 priority of the first load in the ready list to favor it for
23797 issuing in the next cycle. */
23798 pos = *pn_ready-1;
23800 while (pos >= 0)
23802 if (is_load_insn (ready[pos], &load_mem)
23803 && !sel_sched_p ()
23804 && INSN_PRIORITY_KNOWN (ready[pos]))
23806 INSN_PRIORITY (ready[pos])++;
23808 /* Adjust the pendulum to account for the fact that a load
23809 was found and increased in priority. This is to prevent
23810 increasing the priority of multiple loads. */
23811 load_store_pendulum--;
23813 break;
23815 pos--;
23818 else if (load_store_pendulum == -1)
23820 /* A store has been issued in this cycle. Scan the ready list for
23821 another store to issue with it, preferring a store to an adjacent
23822 memory location. */
23823 int first_store_pos = -1;
23825 pos = *pn_ready-1;
23827 while (pos >= 0)
23829 if (is_store_insn (ready[pos], &str_mem))
23831 rtx str_mem2;
23832 /* Maintain the index of the first store found on the
23833 list */
23834 if (first_store_pos == -1)
23835 first_store_pos = pos;
23837 if (is_store_insn (last_scheduled_insn, &str_mem2)
23838 && adjacent_mem_locations (str_mem, str_mem2))
23840 /* Found an adjacent store. Move it to the head of the
23841 ready list, and adjust its priority so that it is
23842 more likely to stay there. */
23843 tmp = ready[pos];
23844 for (i=pos; i<*pn_ready-1; i++)
23845 ready[i] = ready[i + 1];
23846 ready[*pn_ready-1] = tmp;
23848 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23849 INSN_PRIORITY (tmp)++;
23851 first_store_pos = -1;
23853 break;
23856 pos--;
23859 if (first_store_pos >= 0)
23861 /* An adjacent store wasn't found, but a non-adjacent store was,
23862 so move the non-adjacent store to the front of the ready
23863 list, and adjust its priority so that it is more likely to
23864 stay there. */
23865 tmp = ready[first_store_pos];
23866 for (i=first_store_pos; i<*pn_ready-1; i++)
23867 ready[i] = ready[i + 1];
23868 ready[*pn_ready-1] = tmp;
23869 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23870 INSN_PRIORITY (tmp)++;
23873 else if (load_store_pendulum == 2)
23875 /* Two loads have been issued in this cycle. Increase the priority
23876 of the first store in the ready list to favor it for issuing in
23877 the next cycle. */
23878 pos = *pn_ready-1;
23880 while (pos >= 0)
23882 if (is_store_insn (ready[pos], &str_mem)
23883 && !sel_sched_p ()
23884 && INSN_PRIORITY_KNOWN (ready[pos]))
23886 INSN_PRIORITY (ready[pos])++;
23888 /* Adjust the pendulum to account for the fact that a store
23889 was found and increased in priority. This is to prevent
23890 increasing the priority of multiple stores. */
23891 load_store_pendulum++;
23893 break;
23895 pos--;
23900 return cached_can_issue_more;
23903 /* Return whether the presence of INSN causes a dispatch group termination
23904 of group WHICH_GROUP.
23906 If WHICH_GROUP == current_group, this function will return true if INSN
23907 causes the termination of the current group (i.e., the dispatch group to
23908 which INSN belongs). This means that INSN will be the last insn in the
23909 group it belongs to.
23911 If WHICH_GROUP == previous_group, this function will return true if INSN
23912 causes the termination of the previous group (i.e, the dispatch group that
23913 precedes the group to which INSN belongs). This means that INSN will be
23914 the first insn in the group it belongs to. */
23916 static bool
23917 insn_terminates_group_p (rtx insn, enum group_termination which_group)
23919 bool first, last;
23921 if (! insn)
23922 return false;
23924 first = insn_must_be_first_in_group (insn);
23925 last = insn_must_be_last_in_group (insn);
23927 if (first && last)
23928 return true;
23930 if (which_group == current_group)
23931 return last;
23932 else if (which_group == previous_group)
23933 return first;
23935 return false;
23939 static bool
23940 insn_must_be_first_in_group (rtx insn)
23942 enum attr_type type;
23944 if (!insn
23945 || GET_CODE (insn) == NOTE
23946 || DEBUG_INSN_P (insn)
23947 || GET_CODE (PATTERN (insn)) == USE
23948 || GET_CODE (PATTERN (insn)) == CLOBBER)
23949 return false;
23951 switch (rs6000_cpu)
23953 case PROCESSOR_POWER5:
23954 if (is_cracked_insn (insn))
23955 return true;
/* FALLTHRU: POWER5 also applies the POWER4 microcode check below.  */
23956 case PROCESSOR_POWER4:
23957 if (is_microcoded_insn (insn))
23958 return true;
23960 if (!rs6000_sched_groups)
23961 return false;
23963 type = get_attr_type (insn);
23965 switch (type)
23967 case TYPE_MFCR:
23968 case TYPE_MFCRF:
23969 case TYPE_MTCR:
23970 case TYPE_DELAYED_CR:
23971 case TYPE_CR_LOGICAL:
23972 case TYPE_MTJMPR:
23973 case TYPE_MFJMPR:
23974 case TYPE_IDIV:
23975 case TYPE_LDIV:
23976 case TYPE_LOAD_L:
23977 case TYPE_STORE_C:
23978 case TYPE_ISYNC:
23979 case TYPE_SYNC:
23980 return true;
23981 default:
23982 break;
23984 break;
23985 case PROCESSOR_POWER6:
23986 type = get_attr_type (insn);
23988 switch (type)
23990 case TYPE_INSERT_DWORD:
23991 case TYPE_EXTS:
23992 case TYPE_CNTLZ:
23993 case TYPE_SHIFT:
23994 case TYPE_VAR_SHIFT_ROTATE:
23995 case TYPE_TRAP:
23996 case TYPE_IMUL:
23997 case TYPE_IMUL2:
23998 case TYPE_IMUL3:
23999 case TYPE_LMUL:
24000 case TYPE_IDIV:
24001 case TYPE_INSERT_WORD:
24002 case TYPE_DELAYED_COMPARE:
24003 case TYPE_IMUL_COMPARE:
24004 case TYPE_LMUL_COMPARE:
24005 case TYPE_FPCOMPARE:
24006 case TYPE_MFCR:
24007 case TYPE_MTCR:
24008 case TYPE_MFJMPR:
24009 case TYPE_MTJMPR:
24010 case TYPE_ISYNC:
24011 case TYPE_SYNC:
24012 case TYPE_LOAD_L:
24013 case TYPE_STORE_C:
24014 case TYPE_LOAD_U:
24015 case TYPE_LOAD_UX:
24016 case TYPE_LOAD_EXT_UX:
24017 case TYPE_STORE_U:
24018 case TYPE_STORE_UX:
24019 case TYPE_FPLOAD_U:
24020 case TYPE_FPLOAD_UX:
24021 case TYPE_FPSTORE_U:
24022 case TYPE_FPSTORE_UX:
24023 return true;
24024 default:
24025 break;
24027 break;
24028 case PROCESSOR_POWER7:
24029 type = get_attr_type (insn);
24031 switch (type)
24033 case TYPE_CR_LOGICAL:
24034 case TYPE_MFCR:
24035 case TYPE_MFCRF:
24036 case TYPE_MTCR:
24037 case TYPE_IDIV:
24038 case TYPE_LDIV:
24039 case TYPE_COMPARE:
24040 case TYPE_DELAYED_COMPARE:
24041 case TYPE_VAR_DELAYED_COMPARE:
24042 case TYPE_ISYNC:
24043 case TYPE_LOAD_L:
24044 case TYPE_STORE_C:
24045 case TYPE_LOAD_U:
24046 case TYPE_LOAD_UX:
24047 case TYPE_LOAD_EXT:
24048 case TYPE_LOAD_EXT_U:
24049 case TYPE_LOAD_EXT_UX:
24050 case TYPE_STORE_U:
24051 case TYPE_STORE_UX:
24052 case TYPE_FPLOAD_U:
24053 case TYPE_FPLOAD_UX:
24054 case TYPE_FPSTORE_U:
24055 case TYPE_FPSTORE_UX:
24056 case TYPE_MFJMPR:
24057 case TYPE_MTJMPR:
24058 return true;
24059 default:
24060 break;
24062 break;
24063 default:
24064 break;
24067 return false;
24070 static bool
24071 insn_must_be_last_in_group (rtx insn)
24073 enum attr_type type;
24075 if (!insn
24076 || GET_CODE (insn) == NOTE
24077 || DEBUG_INSN_P (insn)
24078 || GET_CODE (PATTERN (insn)) == USE
24079 || GET_CODE (PATTERN (insn)) == CLOBBER)
24080 return false;
24082 switch (rs6000_cpu) {
24083 case PROCESSOR_POWER4:
24084 case PROCESSOR_POWER5:
24085 if (is_microcoded_insn (insn))
24086 return true;
24088 if (is_branch_slot_insn (insn))
24089 return true;
24091 break;
24092 case PROCESSOR_POWER6:
24093 type = get_attr_type (insn);
24095 switch (type)
24097 case TYPE_EXTS:
24098 case TYPE_CNTLZ:
24099 case TYPE_SHIFT:
24100 case TYPE_VAR_SHIFT_ROTATE:
24101 case TYPE_TRAP:
24102 case TYPE_IMUL:
24103 case TYPE_IMUL2:
24104 case TYPE_IMUL3:
24105 case TYPE_LMUL:
24106 case TYPE_IDIV:
24107 case TYPE_DELAYED_COMPARE:
24108 case TYPE_IMUL_COMPARE:
24109 case TYPE_LMUL_COMPARE:
24110 case TYPE_FPCOMPARE:
24111 case TYPE_MFCR:
24112 case TYPE_MTCR:
24113 case TYPE_MFJMPR:
24114 case TYPE_MTJMPR:
24115 case TYPE_ISYNC:
24116 case TYPE_SYNC:
24117 case TYPE_LOAD_L:
24118 case TYPE_STORE_C:
24119 return true;
24120 default:
24121 break;
24123 break;
24124 case PROCESSOR_POWER7:
24125 type = get_attr_type (insn);
24127 switch (type)
24129 case TYPE_ISYNC:
24130 case TYPE_SYNC:
24131 case TYPE_LOAD_L:
24132 case TYPE_STORE_C:
24133 case TYPE_LOAD_EXT_U:
24134 case TYPE_LOAD_EXT_UX:
24135 case TYPE_STORE_UX:
24136 return true;
24137 default:
24138 break;
24140 break;
24141 default:
24142 break;
24145 return false;
24148 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
24149 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
24151 static bool
24152 is_costly_group (rtx *group_insns, rtx next_insn)
24154 int i;
24155 int issue_rate = rs6000_issue_rate ();
24157 for (i = 0; i < issue_rate; i++)
24159 sd_iterator_def sd_it;
24160 dep_t dep;
24161 rtx insn = group_insns[i];
24163 if (!insn)
24164 continue;
24166 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
24168 rtx next = DEP_CON (dep);
24170 if (next == next_insn
24171 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
24172 return true;
24176 return false;
24179 /* Helper for the function redefine_groups.
24180 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
24181 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
24182 to keep it "far" (in a separate group) from GROUP_INSNS, following
24183 one of the following schemes, depending on the value of the flag
24184 -minsert-sched-nops=X:
24185 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
24186 in order to force NEXT_INSN into a separate group.
24187 (2) X < sched_finish_regroup_exact: insert exactly X nops.
24188 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
24189 insertion (whether a group has just ended, how many vacant issue slots remain
24190 last group, and how many dispatch groups were encountered so far). */
24192 static int
24193 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
24194 rtx next_insn, bool *group_end, int can_issue_more,
24195 int *group_count)
24197 rtx nop;
24198 bool force;
24199 int issue_rate = rs6000_issue_rate ();
24200 bool end = *group_end;
24201 int i;
24203 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
24204 return can_issue_more;
24206 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
24207 return can_issue_more;
24209 force = is_costly_group (group_insns, next_insn);
24210 if (!force)
24211 return can_issue_more;
24213 if (sched_verbose > 6)
24214 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
24215 *group_count, can_issue_more);
24217 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
24219 if (*group_end)
24220 can_issue_more = 0;
24222 /* Since only a branch can be issued in the last issue_slot, it is
24223 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
24224 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
24225 in this case the last nop will start a new group and the branch
24226 will be forced to the new group. */
24227 if (can_issue_more && !is_branch_slot_insn (next_insn))
24228 can_issue_more--;
24230 /* Power6 and Power7 have a special group-ending nop. */
24231 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
24233 nop = gen_group_ending_nop ();
24234 emit_insn_before (nop, next_insn);
24235 can_issue_more = 0;
24237 else
24238 while (can_issue_more > 0)
24240 nop = gen_nop ();
24241 emit_insn_before (nop, next_insn);
24242 can_issue_more--;
24245 *group_end = true;
24246 return 0;
24249 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
24251 int n_nops = rs6000_sched_insert_nops;
24253 /* Nops can't be issued from the branch slot, so the effective
24254 issue_rate for nops is 'issue_rate - 1'. */
24255 if (can_issue_more == 0)
24256 can_issue_more = issue_rate;
24257 can_issue_more--;
24258 if (can_issue_more == 0)
24260 can_issue_more = issue_rate - 1;
24261 (*group_count)++;
24262 end = true;
24263 for (i = 0; i < issue_rate; i++)
24265 group_insns[i] = 0;
24269 while (n_nops > 0)
24271 nop = gen_nop ();
24272 emit_insn_before (nop, next_insn);
24273 if (can_issue_more == issue_rate - 1) /* new group begins */
24274 end = false;
24275 can_issue_more--;
24276 if (can_issue_more == 0)
24278 can_issue_more = issue_rate - 1;
24279 (*group_count)++;
24280 end = true;
24281 for (i = 0; i < issue_rate; i++)
24283 group_insns[i] = 0;
24286 n_nops--;
24289 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
24290 can_issue_more++;
24292 /* Is next_insn going to start a new group? */
24293 *group_end
24294 = (end
24295 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24296 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24297 || (can_issue_more < issue_rate &&
24298 insn_terminates_group_p (next_insn, previous_group)));
24299 if (*group_end && end)
24300 (*group_count)--;
24302 if (sched_verbose > 6)
24303 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
24304 *group_count, can_issue_more);
24305 return can_issue_more;
24308 return can_issue_more;
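/* Illustrative sketch (not part of the original source): the slot
   accounting behind scheme (2) above.  Nops cannot occupy the branch
   slot, so a group holds at most issue_rate - 1 of them.  This returns
   how many groups a fixed run of nops closes; the real code above also
   emits the nops and maintains group_insns[] and *group_end.  */
#if 0
static int
count_groups_closed_by_nops (int n_nops, int issue_rate, int can_issue_more)
{
  int groups = 0;

  if (can_issue_more == 0)
    can_issue_more = issue_rate;
  can_issue_more--;			/* reserve the branch slot */
  if (can_issue_more == 0)
    {
      can_issue_more = issue_rate - 1;
      groups++;
    }
  while (n_nops-- > 0)
    {
      can_issue_more--;
      if (can_issue_more == 0)
	{
	  can_issue_more = issue_rate - 1;
	  groups++;
	}
    }
  return groups;
}
#endif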
24311 /* This function tries to synch the dispatch groups that the compiler "sees"
24312 with the dispatch groups that the processor dispatcher is expected to
24313 form in practice. It tries to achieve this synchronization by forcing the
24314 estimated processor grouping on the compiler (as opposed to the function
24315 'pad_groups' which tries to force the scheduler's grouping on the processor).
24317 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
24318 examines the (estimated) dispatch groups that will be formed by the processor
24319 dispatcher. It marks these group boundaries to reflect the estimated
24320 processor grouping, overriding the grouping that the scheduler had marked.
24321 Depending on the value of the flag '-minsert-sched-nops' this function can
24322 force certain insns into separate groups or force a certain distance between
24323 them by inserting nops, for example, if there exists a "costly dependence"
24324 between the insns.
24326 The function estimates the group boundaries that the processor will form as
24327 follows: It keeps track of how many vacant issue slots are available after
24328 each insn. A subsequent insn will start a new group if one of the following
24329 4 cases applies:
24330 - no more vacant issue slots remain in the current dispatch group.
24331 - only the last issue slot, which is the branch slot, is vacant, but the next
24332 insn is not a branch.
24333 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
24334 which means that a cracked insn (which occupies two issue slots) can't be
24335 issued in this group.
24336 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
24337 start a new group. */
24339 static int
24340 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24342 rtx insn, next_insn;
24343 int issue_rate;
24344 int can_issue_more;
24345 int slot, i;
24346 bool group_end;
24347 int group_count = 0;
24348 rtx *group_insns;
24350 /* Initialize. */
24351 issue_rate = rs6000_issue_rate ();
24352 group_insns = XALLOCAVEC (rtx, issue_rate);
24353 for (i = 0; i < issue_rate; i++)
24355 group_insns[i] = 0;
24357 can_issue_more = issue_rate;
24358 slot = 0;
24359 insn = get_next_active_insn (prev_head_insn, tail);
24360 group_end = false;
24362 while (insn != NULL_RTX)
24364 slot = (issue_rate - can_issue_more);
24365 group_insns[slot] = insn;
24366 can_issue_more =
24367 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24368 if (insn_terminates_group_p (insn, current_group))
24369 can_issue_more = 0;
24371 next_insn = get_next_active_insn (insn, tail);
24372 if (next_insn == NULL_RTX)
24373 return group_count + 1;
24375 /* Is next_insn going to start a new group? */
24376 group_end
24377 = (can_issue_more == 0
24378 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24379 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24380 || (can_issue_more < issue_rate &&
24381 insn_terminates_group_p (next_insn, previous_group)));
24383 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
24384 next_insn, &group_end, can_issue_more,
24385 &group_count);
24387 if (group_end)
24389 group_count++;
24390 can_issue_more = 0;
24391 for (i = 0; i < issue_rate; i++)
24393 group_insns[i] = 0;
24397 if (GET_MODE (next_insn) == TImode && can_issue_more)
24398 PUT_MODE (next_insn, VOIDmode);
24399 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
24400 PUT_MODE (next_insn, TImode);
24402 insn = next_insn;
24403 if (can_issue_more == 0)
24404 can_issue_more = issue_rate;
24405 } /* while */
24407 return group_count;
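/* Illustrative sketch (not part of the original source): the four
   group-boundary cases listed before redefine_groups, as a standalone
   predicate.  The three flag arguments are hypothetical stand-ins for
   is_branch_slot_insn, is_cracked_insn and insn_terminates_group_p.  */
#if 0
static int
starts_new_group_p (int can_issue_more, int issue_rate,
		    int is_branch, int is_cracked, int must_start_group)
{
  return (can_issue_more == 0
	  || (can_issue_more == 1 && !is_branch)
	  || (can_issue_more <= 2 && is_cracked)
	  || (can_issue_more < issue_rate && must_start_group));
}
#endif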
24410 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
24411 dispatch group boundaries that the scheduler had marked. Pad with nops
24412 any dispatch groups which have vacant issue slots, in order to force the
24413 scheduler's grouping on the processor dispatcher. The function
24414 returns the number of dispatch groups found. */
24416 static int
24417 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24419 rtx insn, next_insn;
24420 rtx nop;
24421 int issue_rate;
24422 int can_issue_more;
24423 int group_end;
24424 int group_count = 0;
24426 /* Initialize issue_rate. */
24427 issue_rate = rs6000_issue_rate ();
24428 can_issue_more = issue_rate;
24430 insn = get_next_active_insn (prev_head_insn, tail);
24431 next_insn = get_next_active_insn (insn, tail);
24433 while (insn != NULL_RTX)
24435 can_issue_more =
24436 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24438 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24440 if (next_insn == NULL_RTX)
24441 break;
24443 if (group_end)
24445 /* If the scheduler had marked group termination at this location
24446 (between insn and next_insn), and neither insn nor next_insn will
24447 force group termination, pad the group with nops to force group
24448 termination. */
24449 if (can_issue_more
24450 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24451 && !insn_terminates_group_p (insn, current_group)
24452 && !insn_terminates_group_p (next_insn, previous_group))
24454 if (!is_branch_slot_insn (next_insn))
24455 can_issue_more--;
24457 while (can_issue_more)
24459 nop = gen_nop ();
24460 emit_insn_before (nop, next_insn);
24461 can_issue_more--;
24465 can_issue_more = issue_rate;
24466 group_count++;
24469 insn = next_insn;
24470 next_insn = get_next_active_insn (insn, tail);
24473 return group_count;
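/* Illustrative sketch (not part of the original source): the padding
   step above in isolation.  When the next insn is not a branch, one
   fewer nop is needed, because a vacant branch slot already ends the
   group.  emit_nop () is a hypothetical stand-in for
   emit_insn_before (gen_nop (), next_insn).  */
#if 0
static void
pad_group_tail (int can_issue_more, int next_is_branch)
{
  if (!next_is_branch)
    can_issue_more--;		/* the branch slot stays empty */
  while (can_issue_more-- > 0)
    emit_nop ();
}
#endif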
24476 /* We're beginning a new block. Initialize data structures as necessary. */
24478 static void
24479 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24480 int sched_verbose ATTRIBUTE_UNUSED,
24481 int max_ready ATTRIBUTE_UNUSED)
24483 last_scheduled_insn = NULL_RTX;
24484 load_store_pendulum = 0;
24487 /* The following function is called at the end of scheduling BB.
24488 After reload, it inserts nops to enforce insn group bundling. */
24490 static void
24491 rs6000_sched_finish (FILE *dump, int sched_verbose)
24493 int n_groups;
24495 if (sched_verbose)
24496 fprintf (dump, "=== Finishing schedule.\n");
24498 if (reload_completed && rs6000_sched_groups)
24500 /* Do not run the sched_finish hook when selective scheduling is enabled. */
24501 if (sel_sched_p ())
24502 return;
24504 if (rs6000_sched_insert_nops == sched_finish_none)
24505 return;
24507 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24508 n_groups = pad_groups (dump, sched_verbose,
24509 current_sched_info->prev_head,
24510 current_sched_info->next_tail);
24511 else
24512 n_groups = redefine_groups (dump, sched_verbose,
24513 current_sched_info->prev_head,
24514 current_sched_info->next_tail);
24516 if (sched_verbose >= 6)
24518 fprintf (dump, "ngroups = %d\n", n_groups);
24519 print_rtl (dump, current_sched_info->prev_head);
24520 fprintf (dump, "Done finish_sched\n");
24525 struct _rs6000_sched_context
24527 short cached_can_issue_more;
24528 rtx last_scheduled_insn;
24529 int load_store_pendulum;
24532 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24533 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24535 /* Allocate storage for a new scheduling context. */
24536 static void *
24537 rs6000_alloc_sched_context (void)
24539 return xmalloc (sizeof (rs6000_sched_context_def));
24542 /* If CLEAN_P is true, initialize _SC with clean data;
24543 otherwise, initialize it from the global context. */
24544 static void
24545 rs6000_init_sched_context (void *_sc, bool clean_p)
24547 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24549 if (clean_p)
24551 sc->cached_can_issue_more = 0;
24552 sc->last_scheduled_insn = NULL_RTX;
24553 sc->load_store_pendulum = 0;
24555 else
24557 sc->cached_can_issue_more = cached_can_issue_more;
24558 sc->last_scheduled_insn = last_scheduled_insn;
24559 sc->load_store_pendulum = load_store_pendulum;
24563 /* Sets the global scheduling context to the one pointed to by _SC. */
24564 static void
24565 rs6000_set_sched_context (void *_sc)
24567 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24569 gcc_assert (sc != NULL);
24571 cached_can_issue_more = sc->cached_can_issue_more;
24572 last_scheduled_insn = sc->last_scheduled_insn;
24573 load_store_pendulum = sc->load_store_pendulum;
24576 /* Free _SC. */
24577 static void
24578 rs6000_free_sched_context (void *_sc)
24580 gcc_assert (_sc != NULL);
24582 free (_sc);
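/* Illustrative sketch (not part of the original source): how a caller
   such as the selective scheduler might drive the four context hooks
   above to checkpoint and later restore the backend scheduling state.  */
#if 0
static void
checkpoint_example (void)
{
  void *ctx = rs6000_alloc_sched_context ();

  rs6000_init_sched_context (ctx, false);  /* snapshot current globals */
  /* ... schedule speculatively, clobbering the globals ... */
  rs6000_set_sched_context (ctx);	   /* roll the globals back */
  rs6000_free_sched_context (ctx);
}
#endif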
24586 /* Length in units of the trampoline for entering a nested function. */
int
24589 rs6000_trampoline_size (void)
24591 int ret = 0;
24593 switch (DEFAULT_ABI)
24595 default:
24596 gcc_unreachable ();
24598 case ABI_AIX:
24599 ret = (TARGET_32BIT) ? 12 : 24;
24600 break;
24602 case ABI_DARWIN:
24603 case ABI_V4:
24604 ret = (TARGET_32BIT) ? 40 : 48;
24605 break;
24608 return ret;
24611 /* Emit RTL insns to initialize the variable parts of a trampoline.
24612 FNADDR is an RTX for the address of the function's pure code.
24613 CXT is an RTX for the static chain value for the function. */
24615 static void
24616 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24618 int regsize = (TARGET_32BIT) ? 4 : 8;
24619 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24620 rtx ctx_reg = force_reg (Pmode, cxt);
24621 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24623 switch (DEFAULT_ABI)
24625 default:
24626 gcc_unreachable ();
24628 /* Under AIX, just build the 3-word function descriptor. */
24629 case ABI_AIX:
24631 rtx fnmem, fn_reg, toc_reg;
24633 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24634 error ("You cannot take the address of a nested function if you use "
24635 "the -mno-pointers-to-nested-functions option.");
24637 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24638 fn_reg = gen_reg_rtx (Pmode);
24639 toc_reg = gen_reg_rtx (Pmode);
24641 /* Macro to shorten the code expansions below. */
24642 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24644 m_tramp = replace_equiv_address (m_tramp, addr);
24646 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24647 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24648 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24649 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24650 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24652 # undef MEM_PLUS
24654 break;
24656 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24657 case ABI_DARWIN:
24658 case ABI_V4:
24659 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24660 LCT_NORMAL, VOIDmode, 4,
24661 addr, Pmode,
24662 GEN_INT (rs6000_trampoline_size ()), SImode,
24663 fnaddr, Pmode,
24664 ctx_reg, Pmode);
24665 break;
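/* Illustrative sketch (not part of the original source): the 3-word
   AIX function descriptor the ABI_AIX case above fills in, one word
   (regsize bytes) per field.  */
#if 0
struct aix_func_desc
{
  void *entry;		/* code address, copied from FNADDR's descriptor */
  void *toc;		/* TOC pointer, copied from the same descriptor */
  void *static_chain;	/* CXT, stored at offset 2*regsize above */
};
#endif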
24670 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24671 identifier as an argument, so the front end shouldn't look it up. */
24673 static bool
24674 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24676 return is_attribute_p ("altivec", attr_id);
24679 /* Handle the "altivec" attribute. The attribute may have
24680 arguments as follows:
24682 __attribute__((altivec(vector__)))
24683 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24684 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24686 and may appear more than once (e.g., 'vector bool char') in a
24687 given declaration. */
24689 static tree
24690 rs6000_handle_altivec_attribute (tree *node,
24691 tree name ATTRIBUTE_UNUSED,
24692 tree args,
24693 int flags ATTRIBUTE_UNUSED,
24694 bool *no_add_attrs)
24696 tree type = *node, result = NULL_TREE;
24697 enum machine_mode mode;
24698 int unsigned_p;
24699 char altivec_type
24700 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
24701 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
24702 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
24703 : '?');
24705 while (POINTER_TYPE_P (type)
24706 || TREE_CODE (type) == FUNCTION_TYPE
24707 || TREE_CODE (type) == METHOD_TYPE
24708 || TREE_CODE (type) == ARRAY_TYPE)
24709 type = TREE_TYPE (type);
24711 mode = TYPE_MODE (type);
24713 /* Check for invalid AltiVec type qualifiers. */
24714 if (type == long_double_type_node)
24715 error ("use of %<long double%> in AltiVec types is invalid");
24716 else if (type == boolean_type_node)
24717 error ("use of boolean types in AltiVec types is invalid");
24718 else if (TREE_CODE (type) == COMPLEX_TYPE)
24719 error ("use of %<complex%> in AltiVec types is invalid");
24720 else if (DECIMAL_FLOAT_MODE_P (mode))
24721 error ("use of decimal floating point types in AltiVec types is invalid");
24722 else if (!TARGET_VSX)
24724 if (type == long_unsigned_type_node || type == long_integer_type_node)
24726 if (TARGET_64BIT)
24727 error ("use of %<long%> in AltiVec types is invalid for "
24728 "64-bit code without -mvsx");
24729 else if (rs6000_warn_altivec_long)
24730 warning (0, "use of %<long%> in AltiVec types is deprecated; "
24731 "use %<int%>");
24733 else if (type == long_long_unsigned_type_node
24734 || type == long_long_integer_type_node)
24735 error ("use of %<long long%> in AltiVec types is invalid without "
24736 "-mvsx");
24737 else if (type == double_type_node)
24738 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
24741 switch (altivec_type)
24743 case 'v':
24744 unsigned_p = TYPE_UNSIGNED (type);
24745 switch (mode)
24747 case DImode:
24748 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
24749 break;
24750 case SImode:
24751 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
24752 break;
24753 case HImode:
24754 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
24755 break;
24756 case QImode:
24757 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
24758 break;
24759 case SFmode: result = V4SF_type_node; break;
24760 case DFmode: result = V2DF_type_node; break;
24761 /* If the user says 'vector int bool', we may be handed the 'bool'
24762 attribute _before_ the 'vector' attribute, and so select the
24763 proper type in the 'b' case below. */
24764 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
24765 case V2DImode: case V2DFmode:
24766 result = type;
24767 default: break;
24769 break;
24770 case 'b':
24771 switch (mode)
24773 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
24774 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
24775 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
24776 case QImode: case V16QImode: result = bool_V16QI_type_node;
24777 default: break;
24779 break;
24780 case 'p':
24781 switch (mode)
24783 case V8HImode: result = pixel_V8HI_type_node;
24784 default: break;
24786 default: break;
24789 /* Propagate qualifiers attached to the element type
24790 onto the vector type. */
24791 if (result && result != type && TYPE_QUALS (type))
24792 result = build_qualified_type (result, TYPE_QUALS (type));
24794 *no_add_attrs = true; /* No need to hang on to the attribute. */
24796 if (result)
24797 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
24799 return NULL_TREE;
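/* Illustrative sketch (not part of the original source): the keyword
   spellings that expand to the attribute forms handled above, in the
   style of altivec.h, plus one declaration exercising the 'v'/SImode
   case.  */
#if 0
#define vector __attribute__ ((altivec (vector__)))
#define pixel __attribute__ ((altivec (pixel__))) unsigned short
#define bool __attribute__ ((altivec (bool__))) unsigned

vector signed int v4si_var = { 1, 2, 3, 4 };	/* becomes V4SI_type_node */
#endif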
24802 /* AltiVec defines several built-in scalar types that serve as vector
24803 elements; we must teach the compiler how to mangle them. */
24805 static const char *
24806 rs6000_mangle_type (const_tree type)
24808 type = TYPE_MAIN_VARIANT (type);
24810 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24811 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24812 return NULL;
24814 if (type == bool_char_type_node) return "U6__boolc";
24815 if (type == bool_short_type_node) return "U6__bools";
24816 if (type == pixel_type_node) return "u7__pixel";
24817 if (type == bool_int_type_node) return "U6__booli";
24818 if (type == bool_long_type_node) return "U6__booll";
24820 /* Mangle IBM extended float long double as `g' (__float128) on
24821 powerpc*-linux where long-double-64 previously was the default. */
24822 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
24823 && TARGET_ELF
24824 && TARGET_LONG_DOUBLE_128
24825 && !TARGET_IEEEQUAD)
24826 return "g";
24828 /* For all other types, use normal C++ mangling. */
24829 return NULL;
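/* Illustrative sketch (not part of the original source): the mangled
   fragments returned above, paired with the scalar element types they
   encode.  */
#if 0
static const char *const altivec_manglings[][2] = {
  { "bool char",  "U6__boolc" },
  { "bool short", "U6__bools" },
  { "__pixel",    "u7__pixel" },
  { "bool int",   "U6__booli" },
  { "bool long",  "U6__booll" },
};
#endif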
24832 /* Handle a "longcall" or "shortcall" attribute; arguments as in
24833 struct attribute_spec.handler. */
24835 static tree
24836 rs6000_handle_longcall_attribute (tree *node, tree name,
24837 tree args ATTRIBUTE_UNUSED,
24838 int flags ATTRIBUTE_UNUSED,
24839 bool *no_add_attrs)
24841 if (TREE_CODE (*node) != FUNCTION_TYPE
24842 && TREE_CODE (*node) != FIELD_DECL
24843 && TREE_CODE (*node) != TYPE_DECL)
24845 warning (OPT_Wattributes, "%qE attribute only applies to functions",
24846 name);
24847 *no_add_attrs = true;
24850 return NULL_TREE;
24853 /* Set longcall attributes on all functions declared when
24854 rs6000_default_long_calls is true. */
24855 static void
24856 rs6000_set_default_type_attributes (tree type)
24858 if (rs6000_default_long_calls
24859 && (TREE_CODE (type) == FUNCTION_TYPE
24860 || TREE_CODE (type) == METHOD_TYPE))
24861 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
24862 NULL_TREE,
24863 TYPE_ATTRIBUTES (type));
24865 #if TARGET_MACHO
24866 darwin_set_default_type_attributes (type);
24867 #endif
24870 /* Return a reference suitable for calling a function with the
24871 longcall attribute. */
rtx
24874 rs6000_longcall_ref (rtx call_ref)
24876 const char *call_name;
24877 tree node;
24879 if (GET_CODE (call_ref) != SYMBOL_REF)
24880 return call_ref;
24882 /* System V adds '.' to the internal name, so skip any leading dots. */
24883 call_name = XSTR (call_ref, 0);
24884 if (*call_name == '.')
24886 while (*call_name == '.')
24887 call_name++;
24889 node = get_identifier (call_name);
24890 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
24893 return force_reg (Pmode, call_ref);
24896 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
24897 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
24898 #endif
24900 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24901 struct attribute_spec.handler. */
24902 static tree
24903 rs6000_handle_struct_attribute (tree *node, tree name,
24904 tree args ATTRIBUTE_UNUSED,
24905 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24907 tree *type = NULL;
24908 if (DECL_P (*node))
24910 if (TREE_CODE (*node) == TYPE_DECL)
24911 type = &TREE_TYPE (*node);
24913 else
24914 type = node;
24916 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24917 || TREE_CODE (*type) == UNION_TYPE)))
24919 warning (OPT_Wattributes, "%qE attribute ignored", name);
24920 *no_add_attrs = true;
24923 else if ((is_attribute_p ("ms_struct", name)
24924 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24925 || ((is_attribute_p ("gcc_struct", name)
24926 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24928 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
24929 name);
24930 *no_add_attrs = true;
24933 return NULL_TREE;
24936 static bool
24937 rs6000_ms_bitfield_layout_p (const_tree record_type)
24939 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
24940 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24941 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
24944 #ifdef USING_ELFOS_H
24946 /* A get_unnamed_section callback, used for switching to toc_section. */
24948 static void
24949 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24951 if (DEFAULT_ABI == ABI_AIX
24952 && TARGET_MINIMAL_TOC
24953 && !TARGET_RELOCATABLE)
24955 if (!toc_initialized)
24957 toc_initialized = 1;
24958 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24959 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
24960 fprintf (asm_out_file, "\t.tc ");
24961 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
24962 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24963 fprintf (asm_out_file, "\n");
24965 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24966 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24967 fprintf (asm_out_file, " = .+32768\n");
24969 else
24970 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24972 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
24973 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24974 else
24976 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24977 if (!toc_initialized)
24979 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24980 fprintf (asm_out_file, " = .+32768\n");
24981 toc_initialized = 1;
24986 /* Implement TARGET_ASM_INIT_SECTIONS. */
24988 static void
24989 rs6000_elf_asm_init_sections (void)
24991 toc_section
24992 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
24994 sdata2_section
24995 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
24996 SDATA2_SECTION_ASM_OP);
24999 /* Implement TARGET_SELECT_RTX_SECTION. */
25001 static section *
25002 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
25003 unsigned HOST_WIDE_INT align)
25005 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25006 return toc_section;
25007 else
25008 return default_elf_select_rtx_section (mode, x, align);
25011 /* For a SYMBOL_REF, set generic flags and then perform some
25012 target-specific processing.
25014 When the AIX ABI is requested on a non-AIX system, replace the
25015 function name with the real name (with a leading .) rather than the
25016 function descriptor name. This saves a lot of overriding code to
25017 read the prefixes. */
25019 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
25020 static void
25021 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
25023 default_encode_section_info (decl, rtl, first);
25025 if (first
25026 && TREE_CODE (decl) == FUNCTION_DECL
25027 && !TARGET_AIX
25028 && DEFAULT_ABI == ABI_AIX)
25030 rtx sym_ref = XEXP (rtl, 0);
25031 size_t len = strlen (XSTR (sym_ref, 0));
25032 char *str = XALLOCAVEC (char, len + 2);
25033 str[0] = '.';
25034 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
25035 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
25039 static inline bool
25040 compare_section_name (const char *section, const char *templ)
25042 int len;
25044 len = strlen (templ);
25045 return (strncmp (section, templ, len) == 0
25046 && (section[len] == 0 || section[len] == '.'));
25049 bool
25050 rs6000_elf_in_small_data_p (const_tree decl)
25052 if (rs6000_sdata == SDATA_NONE)
25053 return false;
25055 /* We want to merge strings, so we never consider them small data. */
25056 if (TREE_CODE (decl) == STRING_CST)
25057 return false;
25059 /* Functions are never in the small data area. */
25060 if (TREE_CODE (decl) == FUNCTION_DECL)
25061 return false;
25063 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
25065 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
25066 if (compare_section_name (section, ".sdata")
25067 || compare_section_name (section, ".sdata2")
25068 || compare_section_name (section, ".gnu.linkonce.s")
25069 || compare_section_name (section, ".sbss")
25070 || compare_section_name (section, ".sbss2")
25071 || compare_section_name (section, ".gnu.linkonce.sb")
25072 || strcmp (section, ".PPC.EMB.sdata0") == 0
25073 || strcmp (section, ".PPC.EMB.sbss0") == 0)
25074 return true;
25076 else
25078 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
25080 if (size > 0
25081 && size <= g_switch_value
25082 /* If it's not public, and we're not going to reference it there,
25083 there's no need to put it in the small data section. */
25084 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
25085 return true;
25088 return false;
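/* Illustrative sketch (not part of the original source): the size test
   above, reduced to its inputs.  sdata_data_p stands in for
   rs6000_sdata == SDATA_DATA.  */
#if 0
static int
small_data_size_ok_p (long size, long g_value, int sdata_data_p,
		      int is_public)
{
  return size > 0 && size <= g_value && (!sdata_data_p || is_public);
}
#endif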
25091 #endif /* USING_ELFOS_H */
25093 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
25095 static bool
25096 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
25098 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
25101 /* Do not place thread-local symbols refs in the object blocks. */
25103 static bool
25104 rs6000_use_blocks_for_decl_p (const_tree decl)
25106 return !DECL_THREAD_LOCAL_P (decl);
25109 /* Return a REG that occurs in ADDR with coefficient 1.
25110 ADDR can be effectively incremented by incrementing REG.
25112 r0 is special and we must not select it as an address
25113 register by this routine since our caller will try to
25114 increment the returned register via an "la" instruction. */
rtx
25117 find_addr_reg (rtx addr)
25119 while (GET_CODE (addr) == PLUS)
25121 if (GET_CODE (XEXP (addr, 0)) == REG
25122 && REGNO (XEXP (addr, 0)) != 0)
25123 addr = XEXP (addr, 0);
25124 else if (GET_CODE (XEXP (addr, 1)) == REG
25125 && REGNO (XEXP (addr, 1)) != 0)
25126 addr = XEXP (addr, 1);
25127 else if (CONSTANT_P (XEXP (addr, 0)))
25128 addr = XEXP (addr, 1);
25129 else if (CONSTANT_P (XEXP (addr, 1)))
25130 addr = XEXP (addr, 0);
25131 else
25132 gcc_unreachable ();
25134 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
25135 return addr;
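/* Illustrative sketch (not part of the original source): the same walk
   over a toy expression tree.  Descend nested pluses, preferring a
   non-r0 register operand and otherwise stepping away from constants,
   until a register leaf remains.  */
#if 0
struct node { int is_plus, is_reg, is_const, regno; struct node *op[2]; };

static struct node *
find_reg_leaf (struct node *addr)
{
  while (addr->is_plus)
    {
      if (addr->op[0]->is_reg && addr->op[0]->regno != 0)
	addr = addr->op[0];
      else if (addr->op[1]->is_reg && addr->op[1]->regno != 0)
	addr = addr->op[1];
      else if (addr->op[0]->is_const)
	addr = addr->op[1];
      else
	addr = addr->op[0];
    }
  return addr;
}
#endif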
25138 void
25139 rs6000_fatal_bad_address (rtx op)
25141 fatal_insn ("bad address", op);
25144 #if TARGET_MACHO
25146 typedef struct branch_island_d {
25147 tree function_name;
25148 tree label_name;
25149 int line_number;
25150 } branch_island;
25153 static vec<branch_island, va_gc> *branch_islands;
25155 /* Remember to generate a branch island for far calls to the given
25156 function. */
25158 static void
25159 add_compiler_branch_island (tree label_name, tree function_name,
25160 int line_number)
25162 branch_island bi = {function_name, label_name, line_number};
25163 vec_safe_push (branch_islands, bi);
25166 /* Generate far-jump branch islands for everything recorded in
25167 branch_islands. Invoked immediately after the last instruction of
25168 the epilogue has been emitted; the branch islands must be appended
25169 to, and contiguous with, the function body. Mach-O stubs are
25170 generated in machopic_output_stub(). */
25172 static void
25173 macho_branch_islands (void)
25175 char tmp_buf[512];
25177 while (!vec_safe_is_empty (branch_islands))
25179 branch_island *bi = &branch_islands->last ();
25180 const char *label = IDENTIFIER_POINTER (bi->label_name);
25181 const char *name = IDENTIFIER_POINTER (bi->function_name);
25182 char name_buf[512];
25183 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
25184 if (name[0] == '*' || name[0] == '&')
25185 strcpy (name_buf, name+1);
25186 else
25188 name_buf[0] = '_';
25189 strcpy (name_buf+1, name);
25191 strcpy (tmp_buf, "\n");
25192 strcat (tmp_buf, label);
25193 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25194 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25195 dbxout_stabd (N_SLINE, bi->line_number);
25196 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25197 if (flag_pic)
25199 if (TARGET_LINK_STACK)
25201 char name[32];
25202 get_ppc476_thunk_name (name);
25203 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
25204 strcat (tmp_buf, name);
25205 strcat (tmp_buf, "\n");
25206 strcat (tmp_buf, label);
25207 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
25209 else
25211 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
25212 strcat (tmp_buf, label);
25213 strcat (tmp_buf, "_pic\n");
25214 strcat (tmp_buf, label);
25215 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
25218 strcat (tmp_buf, "\taddis r11,r11,ha16(");
25219 strcat (tmp_buf, name_buf);
25220 strcat (tmp_buf, " - ");
25221 strcat (tmp_buf, label);
25222 strcat (tmp_buf, "_pic)\n");
25224 strcat (tmp_buf, "\tmtlr r0\n");
25226 strcat (tmp_buf, "\taddi r12,r11,lo16(");
25227 strcat (tmp_buf, name_buf);
25228 strcat (tmp_buf, " - ");
25229 strcat (tmp_buf, label);
25230 strcat (tmp_buf, "_pic)\n");
25232 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
25234 else
25236 strcat (tmp_buf, ":\nlis r12,hi16(");
25237 strcat (tmp_buf, name_buf);
25238 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
25239 strcat (tmp_buf, name_buf);
25240 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
25242 output_asm_insn (tmp_buf, 0);
25243 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25244 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25245 dbxout_stabd (N_SLINE, bi->line_number);
25246 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25247 branch_islands->pop ();
25251 /* NO_PREVIOUS_DEF checks whether the function name is already in the
25252 branch island list. */
25254 static int
25255 no_previous_def (tree function_name)
25257 branch_island *bi;
25258 unsigned ix;
25260 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
25261 if (function_name == bi->function_name)
25262 return 0;
25263 return 1;
25266 /* GET_PREV_LABEL gets the label name from the previous definition of
25267 the function. */
25269 static tree
25270 get_prev_label (tree function_name)
25272 branch_island *bi;
25273 unsigned ix;
25275 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
25276 if (function_name == bi->function_name)
25277 return bi->label_name;
25278 return NULL_TREE;
25281 /* INSN is either a function call or a millicode call. It may have an
25282 unconditional jump in its delay slot.
25284 CALL_DEST is the routine we are calling. */
25286 char *
25287 output_call (rtx insn, rtx *operands, int dest_operand_number,
25288 int cookie_operand_number)
25290 static char buf[256];
25291 if (darwin_emit_branch_islands
25292 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
25293 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
25295 tree labelname;
25296 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
25298 if (no_previous_def (funname))
25300 rtx label_rtx = gen_label_rtx ();
25301 char *label_buf, temp_buf[256];
25302 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
25303 CODE_LABEL_NUMBER (label_rtx));
25304 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
25305 labelname = get_identifier (label_buf);
25306 add_compiler_branch_island (labelname, funname, insn_line (insn));
25308 else
25309 labelname = get_prev_label (funname);
25311 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
25312 instruction will reach 'foo', otherwise link as 'bl L42'".
25313 "L42" should be a 'branch island', that will do a far jump to
25314 'foo'. Branch islands are generated in
25315 macho_branch_islands(). */
25316 sprintf (buf, "jbsr %%z%d,%.246s",
25317 dest_operand_number, IDENTIFIER_POINTER (labelname));
25319 else
25320 sprintf (buf, "bl %%z%d", dest_operand_number);
25321 return buf;
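/* Illustrative sketch (not part of the original source): the assembly
   a far call produced by output_call resolves to when the linker
   cannot reach the callee with a plain 'bl' (non-PIC island form, as
   emitted by macho_branch_islands above):

	jbsr _foo,L42		; bl _foo if reachable, else bl L42
	...
     L42:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr
*/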
25324 /* Generate PIC and indirect symbol stubs. */
25326 void
25327 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25329 unsigned int length;
25330 char *symbol_name, *lazy_ptr_name;
25331 char *local_label_0;
25332 static int label = 0;
25334 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25335 symb = (*targetm.strip_name_encoding) (symb);
25338 length = strlen (symb);
25339 symbol_name = XALLOCAVEC (char, length + 32);
25340 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25342 lazy_ptr_name = XALLOCAVEC (char, length + 32);
25343 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
25345 if (flag_pic == 2)
25346 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
25347 else
25348 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
25350 if (flag_pic == 2)
25352 fprintf (file, "\t.align 5\n");
25354 fprintf (file, "%s:\n", stub);
25355 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25357 label++;
25358 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
25359 sprintf (local_label_0, "\"L%011d$spb\"", label);
25361 fprintf (file, "\tmflr r0\n");
25362 if (TARGET_LINK_STACK)
25364 char name[32];
25365 get_ppc476_thunk_name (name);
25366 fprintf (file, "\tbl %s\n", name);
25367 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25369 else
25371 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
25372 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25374 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
25375 lazy_ptr_name, local_label_0);
25376 fprintf (file, "\tmtlr r0\n");
25377 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
25378 (TARGET_64BIT ? "ldu" : "lwzu"),
25379 lazy_ptr_name, local_label_0);
25380 fprintf (file, "\tmtctr r12\n");
25381 fprintf (file, "\tbctr\n");
25383 else
25385 fprintf (file, "\t.align 4\n");
25387 fprintf (file, "%s:\n", stub);
25388 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25390 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
25391 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
25392 (TARGET_64BIT ? "ldu" : "lwzu"),
25393 lazy_ptr_name);
25394 fprintf (file, "\tmtctr r12\n");
25395 fprintf (file, "\tbctr\n");
25398 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25399 fprintf (file, "%s:\n", lazy_ptr_name);
25400 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25401 fprintf (file, "%sdyld_stub_binding_helper\n",
25402 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
25405 /* Legitimize PIC addresses. If the address is already
25406 position-independent, we return ORIG. Newly generated
25407 position-independent addresses go into a reg. This is REG if
25408 nonzero; otherwise we allocate register(s) as necessary. */
25410 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
rtx
25413 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
25414 rtx reg)
25416 rtx base, offset;
25418 if (reg == NULL && ! reload_in_progress && ! reload_completed)
25419 reg = gen_reg_rtx (Pmode);
25421 if (GET_CODE (orig) == CONST)
25423 rtx reg_temp;
25425 if (GET_CODE (XEXP (orig, 0)) == PLUS
25426 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
25427 return orig;
25429 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
25431 /* Use a different reg for the intermediate value, as
25432 it will be marked UNCHANGING. */
25433 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25434 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25435 Pmode, reg_temp);
25436 offset =
25437 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25438 Pmode, reg);
25440 if (GET_CODE (offset) == CONST_INT)
25442 if (SMALL_INT (offset))
25443 return plus_constant (Pmode, base, INTVAL (offset));
25444 else if (! reload_in_progress && ! reload_completed)
25445 offset = force_reg (Pmode, offset);
25446 else
25448 rtx mem = force_const_mem (Pmode, orig);
25449 return machopic_legitimize_pic_address (mem, Pmode, reg);
25452 return gen_rtx_PLUS (Pmode, base, offset);
25455 /* Fall back on generic machopic code. */
25456 return machopic_legitimize_pic_address (orig, mode, reg);
25459 /* Output a .machine directive for the Darwin assembler, and call
25460 the generic start_file routine. */
25462 static void
25463 rs6000_darwin_file_start (void)
25465 static const struct
25467 const char *arg;
25468 const char *name;
25469 HOST_WIDE_INT if_set;
25470 } mapping[] = {
25471 { "ppc64", "ppc64", MASK_64BIT },
25472 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25473 { "power4", "ppc970", 0 },
25474 { "G5", "ppc970", 0 },
25475 { "7450", "ppc7450", 0 },
25476 { "7400", "ppc7400", MASK_ALTIVEC },
25477 { "G4", "ppc7400", 0 },
25478 { "750", "ppc750", 0 },
25479 { "740", "ppc750", 0 },
25480 { "G3", "ppc750", 0 },
25481 { "604e", "ppc604e", 0 },
25482 { "604", "ppc604", 0 },
25483 { "603e", "ppc603", 0 },
25484 { "603", "ppc603", 0 },
25485 { "601", "ppc601", 0 },
25486 { NULL, "ppc", 0 } };
25487 const char *cpu_id = "";
25488 size_t i;
25490 rs6000_file_start ();
25491 darwin_file_start ();
25493 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25495 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25496 cpu_id = rs6000_default_cpu;
25498 if (global_options_set.x_rs6000_cpu_index)
25499 cpu_id = processor_target_table[rs6000_cpu_index].name;
25501 /* Look through the mapping array. Pick the first name that either
25502 matches the argument, has a bit set in IF_SET that is also set
25503 in the target flags, or has a NULL name. */
25505 i = 0;
25506 while (mapping[i].arg != NULL
25507 && strcmp (mapping[i].arg, cpu_id) != 0
25508 && (mapping[i].if_set & rs6000_isa_flags) == 0)
25509 i++;
25511 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
25514 #endif /* TARGET_MACHO */
25516 #if TARGET_ELF
25517 static int
25518 rs6000_elf_reloc_rw_mask (void)
25520 if (flag_pic)
25521 return 3;
25522 else if (DEFAULT_ABI == ABI_AIX)
25523 return 2;
25524 else
25525 return 0;
25528 /* Record an element in the table of global constructors. SYMBOL is
25529 a SYMBOL_REF of the function to be called; PRIORITY is a number
25530 between 0 and MAX_INIT_PRIORITY.
25532 This differs from default_named_section_asm_out_constructor in
25533 that we have special handling for -mrelocatable. */
25535 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25536 static void
25537 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25539 const char *section = ".ctors";
25540 char buf[16];
25542 if (priority != DEFAULT_INIT_PRIORITY)
25544 sprintf (buf, ".ctors.%.5u",
25545 /* Invert the numbering so the linker puts us in the proper
25546 order; constructors are run from right to left, and the
25547 linker sorts in increasing order. */
25548 MAX_INIT_PRIORITY - priority);
25549 section = buf;
25552 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25553 assemble_align (POINTER_SIZE);
25555 if (TARGET_RELOCATABLE)
25557 fputs ("\t.long (", asm_out_file);
25558 output_addr_const (asm_out_file, symbol);
25559 fputs (")@fixup\n", asm_out_file);
25561 else
25562 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25565 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25566 static void
25567 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25569 const char *section = ".dtors";
25570 char buf[16];
25572 if (priority != DEFAULT_INIT_PRIORITY)
25574 sprintf (buf, ".dtors.%.5u",
25575 /* Invert the numbering so the linker puts us in the proper
25576 order; destructors are run from right to left, and the
25577 linker sorts in increasing order. */
25578 MAX_INIT_PRIORITY - priority);
25579 section = buf;
25582 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25583 assemble_align (POINTER_SIZE);
25585 if (TARGET_RELOCATABLE)
25587 fputs ("\t.long (", asm_out_file);
25588 output_addr_const (asm_out_file, symbol);
25589 fputs (")@fixup\n", asm_out_file);
25591 else
25592 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25595 void
25596 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25598 if (TARGET_64BIT)
25600 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25601 ASM_OUTPUT_LABEL (file, name);
25602 fputs (DOUBLE_INT_ASM_OP, file);
25603 rs6000_output_function_entry (file, name);
25604 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25605 if (DOT_SYMBOLS)
25607 fputs ("\t.size\t", file);
25608 assemble_name (file, name);
25609 fputs (",24\n\t.type\t.", file);
25610 assemble_name (file, name);
25611 fputs (",@function\n", file);
25612 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25614 fputs ("\t.globl\t.", file);
25615 assemble_name (file, name);
25616 putc ('\n', file);
25619 else
25620 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25621 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25622 rs6000_output_function_entry (file, name);
25623 fputs (":\n", file);
25624 return;
25627 if (TARGET_RELOCATABLE
25628 && !TARGET_SECURE_PLT
25629 && (get_pool_size () != 0 || crtl->profile)
25630 && uses_TOC ())
25632 char buf[256];
25634 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25636 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25637 fprintf (file, "\t.long ");
25638 assemble_name (file, buf);
25639 putc ('-', file);
25640 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25641 assemble_name (file, buf);
25642 putc ('\n', file);
25645 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25646 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25648 if (DEFAULT_ABI == ABI_AIX)
25650 const char *desc_name, *orig_name;
25652 orig_name = (*targetm.strip_name_encoding) (name);
25653 desc_name = orig_name;
25654 while (*desc_name == '.')
25655 desc_name++;
25657 if (TREE_PUBLIC (decl))
25658 fprintf (file, "\t.globl %s\n", desc_name);
25660 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25661 fprintf (file, "%s:\n", desc_name);
25662 fprintf (file, "\t.long %s\n", orig_name);
25663 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25664 if (DEFAULT_ABI == ABI_AIX)
25665 fputs ("\t.long 0\n", file);
25666 fprintf (file, "\t.previous\n");
25668 ASM_OUTPUT_LABEL (file, name);
25671 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25672 static void
25673 rs6000_elf_file_end (void)
25675 #ifdef HAVE_AS_GNU_ATTRIBUTE
25676 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
25678 if (rs6000_passes_float)
25679 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25680 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25681 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25682 : 2));
25683 if (rs6000_passes_vector)
25684 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25685 (TARGET_ALTIVEC_ABI ? 2
25686 : TARGET_SPE_ABI ? 3
25687 : 1));
25688 if (rs6000_returns_struct)
25689 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
25690 aix_struct_return ? 2 : 1);
25692 #endif
25693 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25694 if (TARGET_32BIT)
25695 file_end_indicate_exec_stack ();
25696 #endif
25698 #endif
25700 #if TARGET_XCOFF
25701 static void
25702 rs6000_xcoff_asm_output_anchor (rtx symbol)
25704 char buffer[100];
25706 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
25707 SYMBOL_REF_BLOCK_OFFSET (symbol));
25708 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
25711 static void
25712 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
25714 fputs (GLOBAL_ASM_OP, stream);
25715 RS6000_OUTPUT_BASENAME (stream, name);
25716 putc ('\n', stream);
25719 /* A get_unnamed_decl callback, used for read-only sections. PTR
25720 points to the section string variable. */
25722 static void
25723 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
25725 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
25726 *(const char *const *) directive,
25727 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25730 /* Likewise for read-write sections. */
25732 static void
25733 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
25735 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
25736 *(const char *const *) directive,
25737 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25740 static void
25741 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
25743 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
25744 *(const char *const *) directive,
25745 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25748 /* A get_unnamed_section callback, used for switching to toc_section. */
25750 static void
25751 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25753 if (TARGET_MINIMAL_TOC)
25755 /* toc_section is always selected at least once from
25756 rs6000_xcoff_file_start, so this is guaranteed to
25757 always be defined once and only once in each file. */
25758 if (!toc_initialized)
25760 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
25761 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
25762 toc_initialized = 1;
25764 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
25765 (TARGET_32BIT ? "" : ",3"));
25767 else
25768 fputs ("\t.toc\n", asm_out_file);
25771 /* Implement TARGET_ASM_INIT_SECTIONS. */
25773 static void
25774 rs6000_xcoff_asm_init_sections (void)
25776 read_only_data_section
25777 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25778 &xcoff_read_only_section_name);
25780 private_data_section
25781 = get_unnamed_section (SECTION_WRITE,
25782 rs6000_xcoff_output_readwrite_section_asm_op,
25783 &xcoff_private_data_section_name);
25785 tls_data_section
25786 = get_unnamed_section (SECTION_TLS,
25787 rs6000_xcoff_output_tls_section_asm_op,
25788 &xcoff_tls_data_section_name);
25790 tls_private_data_section
25791 = get_unnamed_section (SECTION_TLS,
25792 rs6000_xcoff_output_tls_section_asm_op,
25793 &xcoff_private_data_section_name);
25795 read_only_private_data_section
25796 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25797 &xcoff_private_data_section_name);
25799 toc_section
25800 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
25802 readonly_data_section = read_only_data_section;
25803 exception_section = data_section;
25806 static int
25807 rs6000_xcoff_reloc_rw_mask (void)
25809 return 3;
25812 static void
25813 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
25814 tree decl ATTRIBUTE_UNUSED)
25816 int smclass;
25817 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
25819 if (flags & SECTION_CODE)
25820 smclass = 0;
25821 else if (flags & SECTION_TLS)
25822 smclass = 3;
25823 else if (flags & SECTION_WRITE)
25824 smclass = 2;
25825 else
25826 smclass = 1;
25828 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
25829 (flags & SECTION_CODE) ? "." : "",
25830 name, suffix[smclass], flags & SECTION_ENTSIZE);
25833 static section *
25834 rs6000_xcoff_select_section (tree decl, int reloc,
25835 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25837 if (decl_readonly_section (decl, reloc))
25839 if (TREE_PUBLIC (decl))
25840 return read_only_data_section;
25841 else
25842 return read_only_private_data_section;
25844 else
25846 #if HAVE_AS_TLS
25847 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
25849 if (TREE_PUBLIC (decl))
25850 return tls_data_section;
25851 else if (bss_initializer_p (decl))
25853 /* Convert to COMMON to emit in BSS. */
25854 DECL_COMMON (decl) = 1;
25855 return tls_comm_section;
25857 else
25858 return tls_private_data_section;
25860 else
25861 #endif
25862 if (TREE_PUBLIC (decl))
25863 return data_section;
25864 else
25865 return private_data_section;
25869 static void
25870 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
25872 const char *name;
25874 /* Use select_section for private and uninitialized data. */
25875 if (!TREE_PUBLIC (decl)
25876 || DECL_COMMON (decl)
25877 || DECL_INITIAL (decl) == NULL_TREE
25878 || DECL_INITIAL (decl) == error_mark_node
25879 || (flag_zero_initialized_in_bss
25880 && initializer_zerop (DECL_INITIAL (decl))))
25881 return;
25883 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
25884 name = (*targetm.strip_name_encoding) (name);
25885 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}

/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}

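/* Illustrative example (not from the original source): given "foo[DS]",
   the trailing four characters "[DS]" are stripped and a fresh
   GC-allocated copy of "foo" is returned; a name with no trailing ']'
   comes back unchanged.  Note the length arithmetic assumes the
   bracketed suffix is exactly two letters long.  */
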
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
                 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
                 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}

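/* Worked example (a sketch under assumed values, not from the original
   source): a 16-byte array of doubles with an 8-byte DECL_ALIGN is
   larger than MIN_UNITS_PER_WORD, so its alignment is raised to
   UNITS_PER_FP_WORD (8).  exact_log2 (8) == 3 is stored in the
   SECTION_ENTSIZE bits of the flags, which
   rs6000_xcoff_asm_named_section later prints as the trailing ",3"
   alignment operand of the .csect directive.  */
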
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
                           main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
                           main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
                           main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
                           main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
                           main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}

/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
         ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
         asm_out_file);
}

#ifdef HAVE_AS_TLS
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (!MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
            || outer_code == PLUS
            || outer_code == MINUS)
           && (satisfies_constraint_I (x)
               || satisfies_constraint_L (x)))
          || (outer_code == AND
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))
                  || mask_operand (x, mode)
                  || (mode == DImode
                      && mask64_operand (x, DImode))))
          || ((outer_code == IOR || outer_code == XOR)
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || outer_code == ASHIFT
          || outer_code == ASHIFTRT
          || outer_code == LSHIFTRT
          || outer_code == ROTATE
          || outer_code == ROTATERT
          || outer_code == ZERO_EXTRACT
          || (outer_code == MULT
              && satisfies_constraint_I (x))
          || ((outer_code == DIV || outer_code == UDIV
               || outer_code == MOD || outer_code == UMOD)
              && exact_log2 (INTVAL (x)) >= 0)
          || (outer_code == COMPARE
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)))
          || ((outer_code == EQ || outer_code == NE)
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || (outer_code == GTU
              && satisfies_constraint_I (x))
          || (outer_code == LTU
              && satisfies_constraint_P (x)))
        {
          *total = 0;
          return true;
        }
      else if ((outer_code == PLUS
                && reg_or_add_cint_operand (x, VOIDmode))
               || (outer_code == MINUS
                   && reg_or_sub_cint_operand (x, VOIDmode))
               || ((outer_code == SET
                    || outer_code == IOR
                    || outer_code == XOR)
                   && (INTVAL (x)
                       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (mode == DImode && code == CONST_DOUBLE)
        {
          if ((outer_code == IOR || outer_code == XOR)
              && CONST_DOUBLE_HIGH (x) == 0
              && (CONST_DOUBLE_LOW (x)
                  & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
            {
              *total = 0;
              return true;
            }
          else if ((outer_code == AND && and64_2_operand (x, DImode))
                   || ((outer_code == SET
                        || outer_code == IOR
                        || outer_code == XOR)
                       && CONST_DOUBLE_HIGH (x) == 0))
            {
              *total = COSTS_N_INSNS (1);
              return true;
            }
        }
      /* FALLTHRU */

    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
         than generating an address, e.g., (plus (reg) (const)).
         L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->fp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && satisfies_constraint_I (XEXP (x, 1)))
        {
          if (INTVAL (XEXP (x, 1)) >= -256
              && INTVAL (XEXP (x, 1)) <= 255)
            *total = rs6000_cost->mulsi_const9;
          else
            *total = rs6000_cost->mulsi_const;
        }
      else if (mode == SFmode)
        *total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->dmul;
      else if (mode == DImode)
        *total = rs6000_cost->muldi;
      else
        *total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
        *total = rs6000_cost->fp;
      else
        *total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
        {
          *total = mode == DFmode ? rs6000_cost->ddiv
                                  : rs6000_cost->sdiv;
          return false;
        }
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
        {
          if (code == DIV || code == MOD)
            /* Shift, addze */
            *total = COSTS_N_INSNS (2);
          else
            /* Shift */
            *total = COSTS_N_INSNS (1);
        }
      else
        {
          if (GET_MODE (XEXP (x, 1)) == DImode)
            *total = rs6000_cost->divdi;
          else
            *total = rs6000_cost->divsi;
        }
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
        *total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
        {
          *total = 0;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
          && GET_CODE (XEXP (x, 0)) == MULT)
        {
          if (mode == DImode)
            *total = rs6000_cost->muldi;
          else
            *total = rs6000_cost->mulsi;
          return true;
        }
      else if (outer_code == AND)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
        *total = 0;
      else
        *total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
        {
        case UNSPEC_FRSP:
          *total = rs6000_cost->fp;
          return true;

        default:
          break;
        }
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else if (FLOAT_MODE_P (mode)
               && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
        {
          *total = rs6000_cost->fp;
          return false;
        }
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
         NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
          && (outer_code == NEG || outer_code == PLUS))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      if (outer_code == SET)
        {
          if (XEXP (x, 1) == const0_rtx)
            {
              if (TARGET_ISEL && !TARGET_MFCRF)
                *total = COSTS_N_INSNS (8);
              else
                *total = COSTS_N_INSNS (2);
              return true;
            }
          else if (mode == Pmode)
            {
              *total = COSTS_N_INSNS (3);
              return false;
            }
        }
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
        {
          if (TARGET_ISEL && !TARGET_MFCRF)
            *total = COSTS_N_INSNS (8);
          else
            *total = COSTS_N_INSNS (2);
          return true;
        }
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
        {
          *total = 0;
          return true;
        }
      break;

    default:
      break;
    }

  return false;
}

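/* Worked example (not from the original source): for
   (set (reg) (plus (reg) (const_int 100))), the CONST_INT case sees
   outer_code == PLUS and the constant satisfies constraint 'I' (a
   signed 16-bit immediate), so it costs nothing -- it rides along in a
   single addi.  A constant with only its low 32 bits set that does not
   fit a single immediate is charged COSTS_N_INSNS (1) on top of the
   containing operation, roughly the lis/ori pair needed to build it.  */
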
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
                        bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
           "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
           "opno = %d, total = %d, speed = %s, x:\n",
           ret ? "complete" : "scan inner",
           GET_RTX_NAME (code),
           GET_RTX_NAME (outer_code),
           opno,
           *total,
           speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}

/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, enum machine_mode mode,
                           addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
           ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}

/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
                           reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
        rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
        ret = (rs6000_memory_move_cost (mode, rclass, false)
               + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
         shift.  */
      else if (rclass == CR_REGS)
        ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
         expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
                || rs6000_cpu == PROCESSOR_POWER7)
               && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
        ret = 6 * hard_regno_nregs[0][mode];

      else
        /* A move will cost one instruction per GPR moved.  */
        ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_UNIT_VSX_P (mode)
           && reg_classes_intersect_p (to, VSX_REGS)
           && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
           + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[from],
                 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}

/* A C expression returning the cost of moving data of MODE from a register to
   or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
                         bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}

/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
                           bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
        return NULL_TREE;
      }

  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}

/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}

/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate an FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}

/* Newton-Raphson approximation of floating point divide with just 2 passes
   (either single precision floating point, or newer machines with higher
   accuracy estimates).  Support both scalar and vector divide.  Assumes no
   trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, y1, u0, v0;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);
  rtx one = rs6000_load_constant_and_splat (mode, dconst1);

  gcc_assert (code != CODE_FOR_nothing);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
                                          UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);           /* e0 = 1. - (d * x0) */

  e1 = gen_reg_rtx (mode);
  rs6000_emit_madd (e1, e0, e0, e0);            /* e1 = (e0 * e0) + e0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e1, x0, x0);            /* y1 = (e1 * x0) + x0 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y1));              /* u0 = n * y1 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);             /* v0 = n - (d * u0) */

  rs6000_emit_madd (dst, v0, y1, u0);           /* dst = (v0 * y1) + u0 */
}

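/* Worked derivation (a sketch, not from the original source): with
   e0 = 1 - d*x0, the exact reciprocal is
   1/d = x0 / (1 - e0) = x0 * (1 + e0 + e0^2 + ...), so
   y1 = x0 + (e0 + e0*e0)*x0 truncates the series after e0^2, leaving a
   relative error of roughly e0^3.  The final residual step computes
   dst = u0 + (n - d*u0)*y1 = n*y1*(2 - d*y1), which squares that error
   again -- roughly e0^6, ample for double precision when the hardware
   reciprocal estimate is accurate (the -mrecip-precision case).  */
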
/* Newton-Raphson approximation of floating point divide that has a low
   precision estimate.  Assumes no trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
                                          UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);           /* e0 = 1. - d * x0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e0, x0, x0);            /* y1 = x0 + e0 * x0 */

  e1 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e1, e0, e0));             /* e1 = e0 * e0 */

  y2 = gen_reg_rtx (mode);
  rs6000_emit_madd (y2, e1, y1, y1);            /* y2 = y1 + e1 * y1 */

  e2 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e2, e1, e1));             /* e2 = e1 * e1 */

  y3 = gen_reg_rtx (mode);
  rs6000_emit_madd (y3, e2, y2, y2);            /* y3 = y2 + e2 * y2 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y3));              /* u0 = n * y3 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);             /* v0 = n - d * u0 */

  rs6000_emit_madd (dst, v0, y3, u0);           /* dst = u0 + v0 * y3 */
}

/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);

  if (RS6000_RECIP_HIGH_PRECISION_P (mode))
    rs6000_emit_swdiv_high_precision (dst, n, d);
  else
    rs6000_emit_swdiv_low_precision (dst, n, d);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}

/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);
  int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
  REAL_VALUE_TYPE dconst3_2;
  int i;
  rtx halfthree;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
                                          UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
  return;
}

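/* Worked derivation (a sketch, not from the original source): each pass
   applies the Newton-Raphson step for 1/sqrt(src),

       x1 = x0 * (1.5 - 0.5*src * x0*x0),

   where y holds 0.5*src (formed as 1.5*src - src to reuse the 1.5
   constant) and v = 1.5 - y*u is computed by the fnmsub helper above.
   The error roughly squares on every pass, hence two passes suffice
   when the hardware estimate is accurate and three are used otherwise.  */
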
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
        emit_insn (gen_popcntdsi2 (dst, src));
      else
        emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
                          GEN_INT ((HOST_WIDE_INT)
                                   0x01010101 << 32 | 0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}

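/* Worked example (not from the original source): for the SImode
   fallback, popcntb leaves a per-byte population count in each byte of
   tmp1, say [c3][c2][c1][c0] with each ci <= 8.  Multiplying by
   0x01010101 accumulates c0+c1+c2+c3 into the most significant byte
   (no carry can overflow a byte, since the sum is at most 32), and the
   final shift right by 24 extracts it.  The DImode variant does the
   same with 0x0101010101010101 and a shift of 56.  */
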
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
        {
          emit_insn (gen_popcntbsi2 (tmp, src));
          emit_insn (gen_paritysi2_cmpb (dst, tmp));
        }
      else
        {
          emit_insn (gen_popcntbdi2 (tmp, src));
          emit_insn (gen_paritydi2_cmpb (dst, tmp));
        }
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
        {
          rtx tmp1, tmp2, tmp3, tmp4;

          tmp1 = gen_reg_rtx (SImode);
          emit_insn (gen_popcntbsi2 (tmp1, src));

          tmp2 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
          tmp3 = gen_reg_rtx (SImode);
          emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
          emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
        {
          rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

          tmp1 = gen_reg_rtx (DImode);
          emit_insn (gen_popcntbdi2 (tmp1, src));

          tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
          tmp3 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
          tmp5 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

          tmp6 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
          emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}

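/* Worked example (not from the original source): parity is the sum of
   the per-byte popcntb counts mod 2, and XOR preserves sums mod 2, so
   folding the counts together with shift/XOR (32->16->8 bits for
   SImode) leaves that sum's low bit in the bottom byte; the trailing
   AND with 1 extracts it.  The shift/XOR ladder is only chosen when
   the multiply used by rs6000_emit_popcount would be slower.  */
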
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { CODE_FOR_altivec_vpkuhum,
      {  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { CODE_FOR_altivec_vpkuwum,
      {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { CODE_FOR_altivec_vmrghb,
      {  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23 } },
    { CODE_FOR_altivec_vmrghh,
      {  0,  1, 16, 17,  2,  3, 18, 19,  4,  5, 20, 21,  6,  7, 22, 23 } },
    { CODE_FOR_altivec_vmrghw,
      {  0,  1,  2,  3, 16, 17, 18, 19,  4,  5,  6,  7, 20, 21, 22, 23 } },
    { CODE_FOR_altivec_vmrglb,
      {  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { CODE_FOR_altivec_vmrglh,
      {  8,  9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { CODE_FOR_altivec_vmrglw,
      {  8,  9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
        break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
        perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
        if (perm[i] != elt)
          break;
      if (i == 16)
        {
          emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
          return true;
        }

      if (elt % 2 == 0)
        {
          for (i = 0; i < 16; i += 2)
            if (perm[i] != elt || perm[i + 1] != elt + 1)
              break;
          if (i == 16)
            {
              x = gen_reg_rtx (V8HImode);
              emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
                                             GEN_INT (elt / 2)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }

      if (elt % 4 == 0)
        {
          for (i = 0; i < 16; i += 4)
            if (perm[i] != elt
                || perm[i + 1] != elt + 1
                || perm[i + 2] != elt + 2
                || perm[i + 3] != elt + 3)
              break;
          if (i == 16)
            {
              x = gen_reg_rtx (V4SImode);
              emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
                                             GEN_INT (elt / 4)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
        swapped = false;
      else if (perm[0] == elt + 16)
        swapped = true;
      else
        continue;
      for (i = 1; i < 16; ++i)
        {
          elt = patterns[j].perm[i];
          if (swapped)
            elt = (elt >= 16 ? elt - 16 : elt + 16);
          else if (one_vec && elt >= 16)
            elt -= 16;
          if (perm[i] != elt)
            break;
        }
      if (i == 16)
        {
          enum insn_code icode = patterns[j].impl;
          enum machine_mode omode = insn_data[icode].operand[0].mode;
          enum machine_mode imode = insn_data[icode].operand[1].mode;

          if (swapped)
            x = op0, op0 = op1, op1 = x;
          if (imode != V16QImode)
            {
              op0 = gen_lowpart (imode, op0);
              op1 = gen_lowpart (imode, op1);
            }
          if (omode == V16QImode)
            x = target;
          else
            x = gen_reg_rtx (omode);
          emit_insn (GEN_FCN (icode) (x, op0, op1));
          if (omode != V16QImode)
            emit_move_insn (target, gen_lowpart (V16QImode, x));
          return true;
        }
    }

  return false;
}

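/* Illustrative example (not from the original source): a byte selector
   of { 5, 5, ..., 5 } referencing a single input becomes vspltb with
   element 5; the selector { 0, 16, 1, 17, ..., 7, 23 }, drawing
   alternately from both inputs, matches the vmrghb entry in the table
   above.  Anything unmatched falls back to the generic vperm.  */
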
/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
        op0 = op1;
      else
        op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
        return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success! */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);

      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}

bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}

/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}

/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}

/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}

/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}

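/* Worked example (a sketch, not from the original source): for V4SImode
   with highp set on a big-endian target, high == 0 and the selector
   built above is { 0, 4, 1, 5 }, interleaving the first two elements of
   each input; rs6000_expand_extract_even would instead build
   { 0, 2, 4, 6 }.  */
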
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
                          const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}

/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true,
                                           /* retval= */ true);
      if (valret)
        return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* A long long return value needs to be split in the 32-bit
         -mpowerpc64 ABI.  */
      return gen_rtx_PARALLEL (DImode,
                               gen_rtvec (2,
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode, GP_ARG_RETURN),
                                                             const0_rtx),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 1),
                                                             GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
                               gen_rtvec (4,
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode, GP_ARG_RETURN),
                                                             const0_rtx),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 1),
                                                             GEN_INT (4)),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 2),
                                                             GEN_INT (8)),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 3),
                                                             GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
           && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
           && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}

/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */
rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* A long long return value needs to be split in the 32-bit
         -mpowerpc64 ABI.  */
      return gen_rtx_PARALLEL (DImode,
                               gen_rtvec (2,
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode, GP_ARG_RETURN),
                                                             const0_rtx),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 1),
                                                             GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
           && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}

/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
            ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
            : true);
}

/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */
HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}

static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && regno < 32
      && (SPE_VECTOR_MODE (GET_MODE (reg))
          || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
              && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
        }
      else
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
        }
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}

/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN (i);
          HOST_WIDE_INT offset
            = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}

/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}

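/* Illustrative mapping (derived from the cases above, not itself part
   of the original source): GPRs and FPRs 0-63 map to themselves,
   LR -> 108, CTR -> 109, CR2 -> 88 (86 + 2), the XER carry bit -> 101,
   AltiVec v0 -> 1124 and VRSAVE -> 356 -- the numbers debuggers and
   unwinders expect to see in DWARF2 output.  */
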
/* Target hook for eh_return_filter_mode.  */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}

/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else
    return default_scalar_mode_supported_p (mode);
}

/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
    return true;

  else
    return false;
}

/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}

/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}

void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
                           int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = INSN_LOCATION (insn);

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
        return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting conditional microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}

/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

#if TARGET_ELF
static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
#endif

/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;             /* option name */
  HOST_WIDE_INT mask;           /* mask to set */
  bool invert;                  /* invert sense of mask */
  bool valid_target;            /* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",          OPTION_MASK_ALTIVEC,            false, true  },
  { "cmpb",             OPTION_MASK_CMPB,               false, true  },
  { "dlmzb",            OPTION_MASK_DLMZB,              false, true  },
  { "fprnd",            OPTION_MASK_FPRND,              false, true  },
  { "hard-dfp",         OPTION_MASK_DFP,                false, true  },
  { "isel",             OPTION_MASK_ISEL,               false, true  },
  { "mfcrf",            OPTION_MASK_MFCRF,              false, true  },
  { "mfpgpr",           OPTION_MASK_MFPGPR,             false, true  },
  { "mulhw",            OPTION_MASK_MULHW,              false, true  },
  { "multiple",         OPTION_MASK_MULTIPLE,           false, true  },
  { "update",           OPTION_MASK_NO_UPDATE,          true,  true  },
  { "popcntb",          OPTION_MASK_POPCNTB,            false, true  },
  { "popcntd",          OPTION_MASK_POPCNTD,            false, true  },
  { "powerpc-gfxopt",   OPTION_MASK_PPC_GFXOPT,         false, true  },
  { "powerpc-gpopt",    OPTION_MASK_PPC_GPOPT,          false, true  },
  { "recip-precision",  OPTION_MASK_RECIP_PRECISION,    false, true  },
  { "string",           OPTION_MASK_STRING,             false, true  },
  { "vsx",              OPTION_MASK_VSX,                false, true  },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",            OPTION_MASK_64BIT,              false, false },
  { "aix32",            OPTION_MASK_64BIT,              true,  false },
#else
  { "64",               OPTION_MASK_64BIT,              false, false },
  { "32",               OPTION_MASK_64BIT,              true,  false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi",             OPTION_MASK_EABI,               false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little",           OPTION_MASK_LITTLE_ENDIAN,      false, false },
  { "big",              OPTION_MASK_LITTLE_ENDIAN,      true,  false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable",      OPTION_MASK_RELOCATABLE,        false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align",     OPTION_MASK_STRICT_ALIGN,       false, false },
#endif
  { "soft-float",       OPTION_MASK_SOFT_FLOAT,         false, false },
  { "string",           OPTION_MASK_STRING,             false, false },
};

/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",          RS6000_BTM_ALTIVEC,     false, false },
  { "vsx",              RS6000_BTM_VSX,         false, false },
  { "spe",              RS6000_BTM_SPE,         false, false },
  { "paired",           RS6000_BTM_PAIRED,      false, false },
  { "fre",              RS6000_BTM_FRE,         false, false },
  { "fres",             RS6000_BTM_FRES,        false, false },
  { "frsqrte",          RS6000_BTM_FRSQRTE,     false, false },
  { "frsqrtes",         RS6000_BTM_FRSQRTES,    false, false },
  { "popcntd",          RS6000_BTM_POPCNTD,     false, false },
  { "cell",             RS6000_BTM_CELL,        false, false },
};

/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;             /* option name */
  size_t global_offset;         /* offset of the option in global_options.  */
  size_t target_offset;         /* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
};

/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;
          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        rs6000_isa_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec.  */
                        if (mask == OPTION_MASK_VSX && !invert)
                          mask |= OPTION_MASK_ALTIVEC;

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          rs6000_isa_flags &= ~mask;
                        else
                          rs6000_isa_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                {
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                      {
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;
                        error_p = false;
                        break;
                      }
                }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return ret;
}

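/* Worked example (not from the original source): given
   #pragma GCC target ("no-vsx,cpu=power7"), the first token strips its
   "no-" prefix, matches the "vsx" entry in rs6000_opt_masks and clears
   OPTION_MASK_VSX from rs6000_isa_flags; the second looks "power7" up
   via rs6000_cpu_name_lookup and records it in rs6000_cpu_index.  A
   bare "vsx" would also set OPTION_MASK_ALTIVEC, since VSX implies
   AltiVec.  */
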
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
          prefix = ", ";
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              rs6000_debug_target_options (value, prefix);
              prefix = ", ";
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return;
}

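/* For instance (illustrative), a STRING_CST of "cpu=power7,vsx" printed
   with an initial prefix of " " appears on stderr as

      "cpu=power7", "vsx"

   because the prefix switches to ", " after the first token.  */
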
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize = build_optimization_node ();
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node ();
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node ();

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}

/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node ();
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node ()) == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags = prev_opt->x_rs6000_isa_flags;

      cur_opt = TREE_TARGET_OPTION (cur_tree);
      cur_flags = cur_opt->x_rs6000_isa_flags;
      cur_bumask = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}

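/* Illustrative use from the source level (the macro bookkeeping is done by
   whatever rs6000_target_modify_macros_ptr points at, normally
   rs6000_target_modify_macros in rs6000-c.c):

     #pragma GCC push_options
     #pragma GCC target ("vsx")
     ... __VSX__ is defined here ...
     #pragma GCC pop_options
     ... previous macro set restored via POP_TARGET ...

   Only macros whose controlling bits appear in the XOR masks computed
   above are deleted and redefined.  */
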
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  tree old_tree = (rs6000_previous_fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
                   : NULL_TREE);

  tree new_tree = (fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
                   : NULL_TREE);

  if (TARGET_DEBUG_TARGET)
    {
      bool print_final = false;
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
      if (new_tree)
        {
          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
          print_final = true;
        }

      if (old_tree)
        {
          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
          print_final = true;
        }

      if (print_final)
        fprintf (stderr, "--------------------\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != rs6000_previous_fndecl)
    {
      rs6000_previous_fndecl = fndecl;
      if (old_tree == new_tree)
        ;

      else if (new_tree)
        {
          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          target_reinit ();
        }

      else if (old_tree)
        {
          struct cl_target_option *def
            = TREE_TARGET_OPTION (target_option_current_node);

          cl_target_option_restore (&global_options, def);
          target_reinit ();
        }
    }
}

/* Save the current options.  */

static void
rs6000_function_specific_save (struct cl_target_option *ptr)
{
  ptr->x_rs6000_isa_flags = rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = rs6000_isa_flags_explicit;
}

/* Restore the current options.  */

static void
rs6000_function_specific_restore (struct cl_target_option *ptr)
{
  rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}

/* Print the current options.  */

static void
rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
                            ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
                            ptr->x_rs6000_isa_flags_explicit);
}

/* Helper function to print the current isa or misc options on a line.  */

static void
rs6000_print_options_internal (FILE *file,
                               int indent,
                               const char *string,
                               HOST_WIDE_INT flags,
                               const char *prefix,
                               const struct rs6000_opt_mask *opts,
                               size_t num_elements)
{
  size_t i;
  size_t start_column = 0;
  size_t cur_column;
  size_t max_column = 76;
  const char *comma = "";
  const char *nl = "\n";

  if (indent)
    start_column += fprintf (file, "%*s", indent, "");

  if (!flags)
    {
      fprintf (file, DEBUG_FMT_S, string, "<none>");
      return;
    }

  start_column += fprintf (file, DEBUG_FMT_WX, string, flags);

  /* Print the various mask options.  Note: write everything to FILE, not
     stderr, and index OPTS rather than rs6000_opt_masks, so that the
     builtin-mask caller prints its own table.  */
  cur_column = start_column;
  for (i = 0; i < num_elements; i++)
    {
      if ((flags & opts[i].mask) != 0)
        {
          const char *no_str = opts[i].invert ? "no-" : "";
          size_t len = (strlen (comma)
                        + strlen (prefix)
                        + strlen (no_str)
                        + strlen (opts[i].name));

          cur_column += len;
          if (cur_column > max_column)
            {
              fprintf (file, ", \\\n%*s", (int)start_column, "");
              cur_column = start_column + len;
              comma = "";
              nl = "\n\n";
            }

          fprintf (file, "%s%s%s%s", comma, prefix, no_str, opts[i].name);
          flags &= ~ opts[i].mask;
          comma = ", ";
        }
    }

  fputs (nl, file);
}

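/* Sample output (illustrative; the exact layout depends on DEBUG_FMT_WX
   and the flag values involved): a call such as

     rs6000_print_isa_options (stderr, 0, "Isa options set",
                               OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);

   would print a line along the lines of

     Isa options set = 0x... -maltivec, -mvsx

   continuing onto a backslash-terminated new line once column 76 is
   exceeded.  */
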
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
                          HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
                                 &rs6000_opt_masks[0],
                                 ARRAY_SIZE (rs6000_opt_masks));
}

/* Helper function to print the current builtin options on a line.  */

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
                              HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
                                 &rs6000_builtin_mask_names[0],
                                 ARRAY_SIZE (rs6000_builtin_mask_names));
}

/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If the callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If the caller has no option attributes, but the callee does, then it is
     not ok to inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* The callee's options must be a subset of the caller's, i.e. a vsx
         function can inline an altivec function, but a non-vsx function
         can't inline a vsx function.  */
      if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
          == callee_opts->x_rs6000_isa_flags)
        ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
             (DECL_NAME (caller)
              ? IDENTIFIER_POINTER (DECL_NAME (caller))
              : "<unknown>"),
             (DECL_NAME (callee)
              ? IDENTIFIER_POINTER (DECL_NAME (callee))
              : "<unknown>"),
             (ret ? "can" : "cannot"));

  return ret;
}

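/* Illustrative example of the subset rule (attribute strings as parsed by
   rs6000_inner_target_options; since "vsx" also turns on Altivec, the
   callee's flags below are a subset of the caller's):

     __attribute__((target("altivec"))) static int helper (void);
     __attribute__((target("vsx"))) int wrapper (void);

   Here WRAPPER may inline HELPER, but with the attributes swapped the
   callee would require flags the caller lacks, so inlining is refused.  */
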
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}

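/* A typical call (illustrative; DFmode is an arbitrary choice and
   SOURCE_REG is a hypothetical register) from code that needs a scratch
   slot it can address with a simple offset:

     rtx slot = rs6000_allocate_stack_temp (DFmode, true, false);
     emit_move_insn (slot, source_reg);
  */
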
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}

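/* Schematic sketch (not verbatim RTL) of the rewrite done above for a
   pre-increment address:

     (mem (pre_inc (reg R)))
       ==>  emit: R = R + <mode size>
            use:  (mem (reg R2))   where R2 is a fresh copy of R

   The side effect is made explicit first, and the remaining plain address
   is then forced into a register so it matches reg or reg+reg forms.  */
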
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
          && !legitimate_indirect_address_p (addr, strict_p))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}

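/* The AND with -16 mirrors the hardware: the AltiVec loads and stores
   (lvx/stvx) ignore the low four bits of the effective address.
   Schematically (illustrative):

     (mem (reg R))  ==>  (mem (and (reg R) (const_int -16)))

   so the RTL describes the 16-byte-aligned access that is actually
   performed.  */
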
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable; most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (TARGET_ELF && rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}

/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */

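/* Illustrative picture of that data area (word size follows the ABI, 4
   bytes for 32-bit, 8 for 64-bit; the struct and field names are made up
   for exposition):

     struct aix_func_desc
     {
       void *code;           -- word 0: actual function address
       void *toc;            -- word 1: callee's TOC pointer
       void *static_chain;   -- word 2: value loaded into r11
     };
  */
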
void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx func_addr;
  rtx toc_reg;
  rtx sc_reg;
  rtx stack_ptr;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix32bit;
          call_value_func = gen_call_value_indirect_aix32bit;
        }
      else
        {
          call_func = gen_call_indirect_aix32bit_nor11;
          call_value_func = gen_call_value_indirect_aix32bit_nor11;
        }
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix64bit;
          call_value_func = gen_call_value_indirect_aix64bit;
        }
      else
        {
          call_func = gen_call_indirect_aix64bit_nor11;
          call_value_func = gen_call_value_indirect_aix64bit_nor11;
        }
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_ptr,
                                               stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
                              gen_rtx_PLUS (Pmode,
                                            func_desc,
                                            func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      func_sc_mem = gen_rtx_MEM (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               func_desc,
                                               func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
                            stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}

/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}

/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

#if RS6000_WEAK
  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
#endif
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}

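/* Illustrative summary of how the thunk is used: callers reach it with
   "bl __ppc476.get_thunk" and the single blr emitted above returns
   immediately, so the matched bl/blr pair keeps the 476 hardware link
   stack balanced while still leaving the caller's return address in LR
   (see the callers of get_ppc476_thunk_name for the exact sequences).  */
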
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"