/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-attr.h"
31 #include "flags.h"
32 #include "recog.h"
33 #include "obstack.h"
34 #include "tree.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "except.h"
38 #include "function.h"
39 #include "output.h"
40 #include "dbxout.h"
41 #include "basic-block.h"
42 #include "diagnostic-core.h"
43 #include "toplev.h"
44 #include "ggc.h"
45 #include "hashtab.h"
46 #include "tm_p.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "common/common-target.h"
50 #include "langhooks.h"
51 #include "reload.h"
52 #include "cfgloop.h"
53 #include "sched-int.h"
54 #include "gimple.h"
55 #include "tree-flow.h"
56 #include "intl.h"
57 #include "params.h"
58 #include "tm-constrs.h"
59 #include "opts.h"
60 #include "tree-vectorizer.h"
61 #include "dumpfile.h"
62 #if TARGET_XCOFF
63 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
64 #endif
65 #if TARGET_MACHO
66 #include "gstab.h" /* for N_SLINE */
67 #endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;       /* offset to save spe 64-bit gprs  */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in save_size */
  int vrsave_size;              /* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;     /* size of altivec alignment padding if
                                   not in save_size */
  int spe_gp_size;              /* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
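/* (For orientation: the C-family front ends are the ones expected to point
   this at the macro-updating routine in rs6000-c.c, so that #pragma and
   attribute driven target changes can define/undefine macros such as
   __VSX__; the exact wiring is a detail of rs6000-c.c, not of this file.)  */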
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;    /* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add).  */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
  32,                       /* l1 cache */
  512,                      /* l2 cache */
  6,                        /* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
                                   enum machine_mode, enum machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
                                             int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
                                                   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           enum machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
                                            enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
                                                  enum reg_class,
                                                  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
                                             enum machine_mode,
                                             enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
                                                   enum machine_mode,
                                                   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
                                             int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
                                            enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
                                             enum machine_mode,
                                             enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
     "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "vrsave", "vscr",
     /* SPE registers.  */
     "spe_acc", "spefscr",
     /* Soft frame pointer.  */
     "sfp"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
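/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0)
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */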
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT		\
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_7400_MASK = MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs,
   we don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user
   wants either, the user must explicitly specify them and we won't interfere
   with the user's specification.  */

enum {
  POWERPC_MASKS = (MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};

/* Masks for the instructions introduced at the various PowerPC ISA
   levels.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't
     add ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04,
     fsel, fre, fsqrt, etc. were no longer documented as optional.  Group
     masks by server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};

/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
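
/* For instance, rs6000_cpu_name_lookup ("power7") returns the index of the
   corresponding entry generated from rs6000-cpus.def, while a string that
   names no entry, say "power99", returns -1.  */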
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
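
/* Worked example: with 32-bit GPRs (reg_size == 4), a DFmode value of
   GET_MODE_SIZE 8 needs (8 + 4 - 1) / 4 == 2 consecutive registers,
   while the same value held in an FPR (reg_size == 8) needs only 1.  */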
1580 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1581 MODE. */
1582 static int
1583 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1585 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1587 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1588 implementations. Don't allow an item to be split between a FP register
1589 and an Altivec register. */
1590 if (VECTOR_MEM_VSX_P (mode))
1592 if (FP_REGNO_P (regno))
1593 return FP_REGNO_P (last_regno);
1595 if (ALTIVEC_REGNO_P (regno))
1596 return ALTIVEC_REGNO_P (last_regno);
1599 /* The GPRs can hold any mode, but values bigger than one register
1600 cannot go past R31. */
1601 if (INT_REGNO_P (regno))
1602 return INT_REGNO_P (last_regno);
1604 /* The float registers (except for VSX vector modes) can only hold floating
1605 modes and DImode. This excludes the 32-bit decimal float mode for
1606 now. */
1607 if (FP_REGNO_P (regno))
1609 if (SCALAR_FLOAT_MODE_P (mode)
1610 && (mode != TDmode || (regno % 2) == 0)
1611 && FP_REGNO_P (last_regno))
1612 return 1;
1614 if (GET_MODE_CLASS (mode) == MODE_INT
1615 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1616 return 1;
1618 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1619 && PAIRED_VECTOR_MODE (mode))
1620 return 1;
1622 return 0;
1625 /* The CR register can only hold CC modes. */
1626 if (CR_REGNO_P (regno))
1627 return GET_MODE_CLASS (mode) == MODE_CC;
1629 if (CA_REGNO_P (regno))
1630 return mode == BImode;
1632 /* AltiVec only in AltiVec registers. */
1633 if (ALTIVEC_REGNO_P (regno))
1634 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1636 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1637 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1638 return 1;
1640 /* We cannot put TImode anywhere except the general registers, and it must
1641 fit within the register set. In the future, allow TImode in the
1642 Altivec or VSX registers. */
1644 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1647 /* Print interesting facts about registers. */
1648 static void
1649 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1651 int r, m;
1653 for (r = first_regno; r <= last_regno; ++r)
1655 const char *comma = "";
1656 int len;
1658 if (first_regno == last_regno)
1659 fprintf (stderr, "%s:\t", reg_name);
1660 else
1661 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1663 len = 8;
1664 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1665 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1667 if (len > 70)
1669 fprintf (stderr, ",\n\t");
1670 len = 8;
1671 comma = "";
1674 if (rs6000_hard_regno_nregs[m][r] > 1)
1675 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1676 rs6000_hard_regno_nregs[m][r]);
1677 else
1678 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1680 comma = ", ";
1683 if (call_used_regs[r])
1685 if (len > 70)
1687 fprintf (stderr, ",\n\t");
1688 len = 8;
1689 comma = "";
1692 len += fprintf (stderr, "%s%s", comma, "call-used");
1693 comma = ", ";
1696 if (fixed_regs[r])
1698 if (len > 70)
1700 fprintf (stderr, ",\n\t");
1701 len = 8;
1702 comma = "";
1705 len += fprintf (stderr, "%s%s", comma, "fixed");
1706 comma = ", ";
1709 if (len > 70)
1711 fprintf (stderr, ",\n\t");
1712 comma = "";
1715 fprintf (stderr, "%sregno = %d\n", comma, r);
1719 #define DEBUG_FMT_D "%-32s= %d\n"
1720 #define DEBUG_FMT_X "%-32s= 0x%x\n"
1721 #define DEBUG_FMT_S "%-32s= %s\n"
1723 /* Print various interesting information with -mdebug=reg. */
1724 static void
1725 rs6000_debug_reg_global (void)
1727 static const char *const tf[2] = { "false", "true" };
1728 const char *nl = (const char *)0;
1729 int m;
1730 char costly_num[20];
1731 char nop_num[20];
1732 const char *costly_str;
1733 const char *nop_str;
1734 const char *trace_str;
1735 const char *abi_str;
1736 const char *cmodel_str;
1738 /* Map enum rs6000_vector to string. */
1739 static const char *rs6000_debug_vector_unit[] = {
1740 "none",
1741 "altivec",
1742 "vsx",
1743 "paired",
1744 "spe",
1745 "other"
1748 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1749 LAST_VIRTUAL_REGISTER);
1750 rs6000_debug_reg_print (0, 31, "gr");
1751 rs6000_debug_reg_print (32, 63, "fp");
1752 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1753 LAST_ALTIVEC_REGNO,
1754 "vs");
1755 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1756 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1757 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1758 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1759 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1760 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1761 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1762 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1764 fprintf (stderr,
1765 "\n"
1766 "d reg_class = %s\n"
1767 "f reg_class = %s\n"
1768 "v reg_class = %s\n"
1769 "wa reg_class = %s\n"
1770 "wd reg_class = %s\n"
1771 "wf reg_class = %s\n"
1772 "ws reg_class = %s\n\n",
1773 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1774 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1775 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1776 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1777 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1778 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1779 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1781 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1782 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1784 nl = "\n";
1785 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1786 GET_MODE_NAME (m),
1787 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1788 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1791 if (nl)
1792 fputs (nl, stderr);
1794 if (rs6000_recip_control)
1796 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1798 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1799 if (rs6000_recip_bits[m])
1801 fprintf (stderr,
1802 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1803 GET_MODE_NAME (m),
1804 (RS6000_RECIP_AUTO_RE_P (m)
1805 ? "auto"
1806 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1807 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1808 ? "auto"
1809 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1812 fputs ("\n", stderr);
1815 if (rs6000_cpu_index >= 0)
1816 fprintf (stderr, DEBUG_FMT_S, "cpu",
1817 processor_target_table[rs6000_cpu_index].name);
1819 if (rs6000_tune_index >= 0)
1820 fprintf (stderr, DEBUG_FMT_S, "tune",
1821 processor_target_table[rs6000_tune_index].name);
1823 switch (rs6000_sched_costly_dep)
1825 case max_dep_latency:
1826 costly_str = "max_dep_latency";
1827 break;
1829 case no_dep_costly:
1830 costly_str = "no_dep_costly";
1831 break;
1833 case all_deps_costly:
1834 costly_str = "all_deps_costly";
1835 break;
1837 case true_store_to_load_dep_costly:
1838 costly_str = "true_store_to_load_dep_costly";
1839 break;
1841 case store_to_load_dep_costly:
1842 costly_str = "store_to_load_dep_costly";
1843 break;
1845 default:
1846 costly_str = costly_num;
1847 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1848 break;
1851 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
1853 switch (rs6000_sched_insert_nops)
1855 case sched_finish_regroup_exact:
1856 nop_str = "sched_finish_regroup_exact";
1857 break;
1859 case sched_finish_pad_groups:
1860 nop_str = "sched_finish_pad_groups";
1861 break;
1863 case sched_finish_none:
1864 nop_str = "sched_finish_none";
1865 break;
1867 default:
1868 nop_str = nop_num;
1869 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1870 break;
1873 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
1875 switch (rs6000_sdata)
1877 default:
1878 case SDATA_NONE:
1879 break;
1881 case SDATA_DATA:
1882 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
1883 break;
1885 case SDATA_SYSV:
1886 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
1887 break;
1889 case SDATA_EABI:
1890 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
1891 break;
1895 switch (rs6000_traceback)
1897 case traceback_default: trace_str = "default"; break;
1898 case traceback_none: trace_str = "none"; break;
1899 case traceback_part: trace_str = "part"; break;
1900 case traceback_full: trace_str = "full"; break;
1901 default: trace_str = "unknown"; break;
1904 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
1906 switch (rs6000_current_cmodel)
1908 case CMODEL_SMALL: cmodel_str = "small"; break;
1909 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
1910 case CMODEL_LARGE: cmodel_str = "large"; break;
1911 default: cmodel_str = "unknown"; break;
1914 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
1916 switch (rs6000_current_abi)
1918 case ABI_NONE: abi_str = "none"; break;
1919 case ABI_AIX: abi_str = "aix"; break;
1920 case ABI_V4: abi_str = "V4"; break;
1921 case ABI_DARWIN: abi_str = "darwin"; break;
1922 default: abi_str = "unknown"; break;
1925 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
1927 if (rs6000_altivec_abi)
1928 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
1930 if (rs6000_spe_abi)
1931 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
1933 if (rs6000_darwin64_abi)
1934 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
1936 if (rs6000_float_gprs)
1937 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
1939 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
1940 fprintf (stderr, DEBUG_FMT_S, "align_branch",
1941 tf[!!rs6000_align_branch_targets]);
1942 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
1943 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
1944 rs6000_long_double_type_size);
1945 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
1946 (int)rs6000_sched_restricted_insns_priority);
1947 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
1948 (int)END_BUILTINS);
1949 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
1950 (int)RS6000_BUILTIN_COUNT);
1951 fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
1954 /* Initialize the various global tables that are based on register size. */
1955 static void
1956 rs6000_init_hard_regno_mode_ok (bool global_init_p)
1958 int r, m, c;
1959 int align64;
1960 int align32;
1962 /* Precalculate REGNO_REG_CLASS. */
1963 rs6000_regno_regclass[0] = GENERAL_REGS;
1964 for (r = 1; r < 32; ++r)
1965 rs6000_regno_regclass[r] = BASE_REGS;
1967 for (r = 32; r < 64; ++r)
1968 rs6000_regno_regclass[r] = FLOAT_REGS;
1970 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1971 rs6000_regno_regclass[r] = NO_REGS;
1973 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1974 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1976 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1977 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1978 rs6000_regno_regclass[r] = CR_REGS;
1980 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1981 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1982 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1983 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1984 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1985 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1986 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1987 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1988 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1990 /* Precalculate vector information; this must be set up before calling
1991 rs6000_hard_regno_nregs_internal below. */
1992 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1994 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1995 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1996 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
1999 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2000 rs6000_constraints[c] = NO_REGS;
2002 /* The VSX hardware allows native alignment for vectors; control whether the
2003 compiler believes it can use native alignment or must still use 128-bit alignment. */
2004 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2006 align64 = 64;
2007 align32 = 32;
2009 else
2011 align64 = 128;
2012 align32 = 128;
2015 /* V2DF mode, VSX only. */
2016 if (TARGET_VSX)
2018 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2019 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2020 rs6000_vector_align[V2DFmode] = align64;
2023 /* V4SF mode, either VSX or Altivec. */
2024 if (TARGET_VSX)
2026 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2027 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2028 rs6000_vector_align[V4SFmode] = align32;
2030 else if (TARGET_ALTIVEC)
2032 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2033 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2034 rs6000_vector_align[V4SFmode] = align32;
2037 /* V16QImode, V8HImode, V4SImode are Altivec only, but they can possibly use
2038 VSX loads and stores. */
2039 if (TARGET_ALTIVEC)
2041 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2042 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2043 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2044 rs6000_vector_align[V4SImode] = align32;
2045 rs6000_vector_align[V8HImode] = align32;
2046 rs6000_vector_align[V16QImode] = align32;
2048 if (TARGET_VSX)
2050 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2051 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2052 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2054 else
2056 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2057 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2058 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2062 /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
2063 Altivec doesn't have 64-bit support. */
2064 if (TARGET_VSX)
2066 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2067 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
2068 rs6000_vector_align[V2DImode] = align64;
2071 /* DFmode, see if we want to use the VSX unit. */
2072 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2074 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2075 rs6000_vector_mem[DFmode]
2076 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2077 rs6000_vector_align[DFmode] = align64;
2080 /* TODO add SPE and paired floating point vector support. */
2082 /* Register class constraints for the constraints that depend on compile
2083 switches. */
2084 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2085 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2087 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2088 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2090 if (TARGET_VSX)
2092 /* At present, we just use VSX_REGS, but we have different constraints
2093 based on the use, in case we want to fine tune the default register
2094 class used. wa = any VSX register, wf = register class to use for
2095 V4SF, wd = register class to use for V2DF, and ws = register class to
2096 use for DF scalars. */
2097 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2098 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2099 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2100 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2101 ? VSX_REGS
2102 : FLOAT_REGS);
2105 if (TARGET_ALTIVEC)
2106 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2108 /* Set up the reload helper functions. */
2109 if (TARGET_VSX || TARGET_ALTIVEC)
2111 if (TARGET_64BIT)
2113 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2114 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2115 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2116 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2117 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2118 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2119 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2120 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2121 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2122 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2123 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2124 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2125 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2127 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2128 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2131 else
2133 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2134 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2135 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2136 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2137 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2138 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2139 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2140 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2141 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2142 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2143 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2144 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2145 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2147 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2148 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2153 /* Precalculate HARD_REGNO_NREGS. */
2154 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2155 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2156 rs6000_hard_regno_nregs[m][r]
2157 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2159 /* Precalculate HARD_REGNO_MODE_OK. */
2160 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2161 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2162 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2163 rs6000_hard_regno_mode_ok_p[m][r] = true;
2165 /* Precalculate CLASS_MAX_NREGS sizes. */
2166 for (c = 0; c < LIM_REG_CLASSES; ++c)
2168 int reg_size;
2170 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2171 reg_size = UNITS_PER_VSX_WORD;
2173 else if (c == ALTIVEC_REGS)
2174 reg_size = UNITS_PER_ALTIVEC_WORD;
2176 else if (c == FLOAT_REGS)
2177 reg_size = UNITS_PER_FP_WORD;
2179 else
2180 reg_size = UNITS_PER_WORD;
2182 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2183 rs6000_class_max_nregs[m][c]
2184 = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
2187 if (TARGET_E500_DOUBLE)
2188 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2190 /* Calculate for which modes to automatically generate code using the
2191 reciprocal divide and square root instructions. In the future, possibly
2192 automatically generate the instructions even if the user did not specify
2193 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2194 not accurate enough. */
2195 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2196 if (TARGET_FRES)
2197 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2198 if (TARGET_FRE)
2199 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2200 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2201 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2202 if (VECTOR_UNIT_VSX_P (V2DFmode))
2203 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2205 if (TARGET_FRSQRTES)
2206 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2207 if (TARGET_FRSQRTE)
2208 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2209 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2210 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2211 if (VECTOR_UNIT_VSX_P (V2DFmode))
2212 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2214 if (rs6000_recip_control)
2216 if (!flag_finite_math_only)
2217 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2218 if (flag_trapping_math)
2219 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2220 if (!flag_reciprocal_math)
2221 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2222 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2224 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2225 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2226 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2228 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2229 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2230 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2232 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2233 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2234 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2236 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2237 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2238 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2240 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2241 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2242 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2244 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2245 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2246 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2248 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2249 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2250 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2252 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2253 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2254 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2258 if (global_init_p || TARGET_DEBUG_TARGET)
2260 if (TARGET_DEBUG_REG)
2261 rs6000_debug_reg_global ();
2263 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2264 fprintf (stderr,
2265 "SImode variable mult cost = %d\n"
2266 "SImode constant mult cost = %d\n"
2267 "SImode short constant mult cost = %d\n"
2268 "DImode multipliciation cost = %d\n"
2269 "SImode division cost = %d\n"
2270 "DImode division cost = %d\n"
2271 "Simple fp operation cost = %d\n"
2272 "DFmode multiplication cost = %d\n"
2273 "SFmode division cost = %d\n"
2274 "DFmode division cost = %d\n"
2275 "cache line size = %d\n"
2276 "l1 cache size = %d\n"
2277 "l2 cache size = %d\n"
2278 "simultaneous prefetches = %d\n"
2279 "\n",
2280 rs6000_cost->mulsi,
2281 rs6000_cost->mulsi_const,
2282 rs6000_cost->mulsi_const9,
2283 rs6000_cost->muldi,
2284 rs6000_cost->divsi,
2285 rs6000_cost->divdi,
2286 rs6000_cost->fp,
2287 rs6000_cost->dmul,
2288 rs6000_cost->sdiv,
2289 rs6000_cost->ddiv,
2290 rs6000_cost->cache_line_size,
2291 rs6000_cost->l1_cache_size,
2292 rs6000_cost->l2_cache_size,
2293 rs6000_cost->simultaneous_prefetches);
2297 #if TARGET_MACHO
2298 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2300 static void
2301 darwin_rs6000_override_options (void)
2303 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2304 off. */
2305 rs6000_altivec_abi = 1;
2306 TARGET_ALTIVEC_VRSAVE = 1;
2307 rs6000_current_abi = ABI_DARWIN;
2309 if (DEFAULT_ABI == ABI_DARWIN
2310 && TARGET_64BIT)
2311 darwin_one_byte_bool = 1;
2313 if (TARGET_64BIT && ! TARGET_POWERPC64)
2315 target_flags |= MASK_POWERPC64;
2316 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2318 if (flag_mkernel)
2320 rs6000_default_long_calls = 1;
2321 target_flags |= MASK_SOFT_FLOAT;
2324 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2325 Altivec. */
2326 if (!flag_mkernel && !flag_apple_kext
2327 && TARGET_64BIT
2328 && ! (target_flags_explicit & MASK_ALTIVEC))
2329 target_flags |= MASK_ALTIVEC;
2331 /* Unless the user (not the configurer) has explicitly overridden
2332 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2333 G4 unless targeting the kernel. */
2334 if (!flag_mkernel
2335 && !flag_apple_kext
2336 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2337 && ! (target_flags_explicit & MASK_ALTIVEC)
2338 && ! global_options_set.x_rs6000_cpu_index)
2340 target_flags |= MASK_ALTIVEC;
2343 #endif
2345 /* If not otherwise specified by a target, make 'long double' equivalent to
2346 'double'. */
2348 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2349 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2350 #endif
2352 /* Return the builtin mask of the various options used that could affect which
2353 builtins were used. In the past we used target_flags, but we've run out of
2354 bits, and some options like SPE and PAIRED are no longer in
2355 target_flags. */
2357 unsigned
2358 rs6000_builtin_mask_calculate (void)
2360 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2361 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2362 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2363 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2364 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2365 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2366 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2367 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2368 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2369 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
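/* Illustrative example, not part of the original source: for -mcpu=power7,
   AltiVec, VSX and the popcntd instruction are all enabled, so the mask
   computed here would include RS6000_BTM_ALTIVEC, RS6000_BTM_VSX and
   RS6000_BTM_POPCNTD (plus the reciprocal-estimate bits), but not
   RS6000_BTM_SPE, RS6000_BTM_PAIRED or RS6000_BTM_CELL.  */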
2372 /* Override command line options. Mostly we process the processor type and
2373 sometimes adjust other TARGET_ options. */
2375 static bool
2376 rs6000_option_override_internal (bool global_init_p)
2378 bool ret = true;
2379 bool have_cpu = false;
2381 /* The default cpu requested at configure time, if any. */
2382 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2384 int set_masks;
2385 int cpu_index;
2386 int tune_index;
2387 struct cl_target_option *main_target_opt
2388 = ((global_init_p || target_option_default_node == NULL)
2389 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2391 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2392 library functions, so warn about it. The flag may be useful for
2393 performance studies from time to time though, so don't disable it
2394 entirely. */
2395 if (global_options_set.x_rs6000_alignment_flags
2396 && rs6000_alignment_flags == MASK_ALIGN_POWER
2397 && DEFAULT_ABI == ABI_DARWIN
2398 && TARGET_64BIT)
2399 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2400 " it is incompatible with the installed C and C++ libraries");
2402 /* Numerous experiments show that IRA-based loop pressure
2403 calculation works better for RTL loop invariant motion on targets
2404 with enough (>= 32) registers. It is an expensive optimization,
2405 so it is enabled only when optimizing for peak performance. */
2406 if (optimize >= 3 && global_init_p)
2407 flag_ira_loop_pressure = 1;
2409 /* Set the pointer size. */
2410 if (TARGET_64BIT)
2412 rs6000_pmode = (int)DImode;
2413 rs6000_pointer_size = 64;
2415 else
2417 rs6000_pmode = (int)SImode;
2418 rs6000_pointer_size = 32;
2421 set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
2422 #ifdef OS_MISSING_POWERPC64
2423 if (OS_MISSING_POWERPC64)
2424 set_masks &= ~MASK_POWERPC64;
2425 #endif
2426 #ifdef OS_MISSING_ALTIVEC
2427 if (OS_MISSING_ALTIVEC)
2428 set_masks &= ~MASK_ALTIVEC;
2429 #endif
2431 /* Don't let the processor default override flags given explicitly. */
2432 set_masks &= ~target_flags_explicit;
2434 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
2435 the cpu in a target attribute or pragma, but did not specify a tuning
2436 option, use the cpu for the tuning option rather than the option specified
2437 with -mtune on the command line. Process a '--with-cpu' configuration
2438 request as an implicit --cpu. */
2439 if (rs6000_cpu_index >= 0)
2441 cpu_index = rs6000_cpu_index;
2442 have_cpu = true;
2444 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2446 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2447 have_cpu = true;
2449 else
2451 const char *default_cpu =
2452 (implicit_cpu ? implicit_cpu
2453 : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));
2455 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2456 have_cpu = implicit_cpu != 0;
2459 gcc_assert (cpu_index >= 0);
2461 target_flags &= ~set_masks;
2462 target_flags |= (processor_target_table[cpu_index].target_enable
2463 & set_masks);
2465 if (rs6000_tune_index >= 0)
2466 tune_index = rs6000_tune_index;
2467 else if (have_cpu)
2468 rs6000_tune_index = tune_index = cpu_index;
2469 else
2471 size_t i;
2472 enum processor_type tune_proc
2473 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2475 tune_index = -1;
2476 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2477 if (processor_target_table[i].processor == tune_proc)
2479 rs6000_tune_index = tune_index = i;
2480 break;
2484 gcc_assert (tune_index >= 0);
2485 rs6000_cpu = processor_target_table[tune_index].processor;
2487 /* Pick defaults for SPE related control flags. Do this early to make sure
2488 that the TARGET_ macros are representative ASAP. */
2490 int spe_capable_cpu =
2491 (rs6000_cpu == PROCESSOR_PPC8540
2492 || rs6000_cpu == PROCESSOR_PPC8548);
2494 if (!global_options_set.x_rs6000_spe_abi)
2495 rs6000_spe_abi = spe_capable_cpu;
2497 if (!global_options_set.x_rs6000_spe)
2498 rs6000_spe = spe_capable_cpu;
2500 if (!global_options_set.x_rs6000_float_gprs)
2501 rs6000_float_gprs =
2502 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2503 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2504 : 0);
2507 if (global_options_set.x_rs6000_spe_abi
2508 && rs6000_spe_abi
2509 && !TARGET_SPE_ABI)
2510 error ("not configured for SPE ABI");
2512 if (global_options_set.x_rs6000_spe
2513 && rs6000_spe
2514 && !TARGET_SPE)
2515 error ("not configured for SPE instruction set");
2517 if (main_target_opt != NULL
2518 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2519 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2520 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2521 error ("target attribute or pragma changes SPE ABI");
2523 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2524 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2525 || rs6000_cpu == PROCESSOR_PPCE5500)
2527 if (TARGET_ALTIVEC)
2528 error ("AltiVec not supported in this target");
2529 if (TARGET_SPE)
2530 error ("SPE not supported in this target");
2532 if (rs6000_cpu == PROCESSOR_PPCE6500)
2534 if (TARGET_SPE)
2535 error ("SPE not supported in this target");
2538 /* Disable Cell microcode if we are optimizing for the Cell
2539 and not optimizing for size. */
2540 if (rs6000_gen_cell_microcode == -1)
2541 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2542 && !optimize_size);
2544 /* If we are optimizing big endian systems for space and it's OK to
2545 use instructions that would be microcoded on the Cell, use the
2546 load/store multiple and string instructions. */
2547 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2548 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
2550 /* Don't allow -mmultiple or -mstring on little endian systems
2551 unless the cpu is a 750, because the hardware doesn't support the
2552 instructions used in little endian mode, and they cause an alignment
2553 trap. The 750 does not cause an alignment trap (except when the
2554 target is unaligned). */
2556 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2558 if (TARGET_MULTIPLE)
2560 target_flags &= ~MASK_MULTIPLE;
2561 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
2562 warning (0, "-mmultiple is not supported on little endian systems");
2565 if (TARGET_STRING)
2567 target_flags &= ~MASK_STRING;
2568 if ((target_flags_explicit & MASK_STRING) != 0)
2569 warning (0, "-mstring is not supported on little endian systems");
2573 /* Add some warnings for VSX. */
2574 if (TARGET_VSX)
2576 const char *msg = NULL;
2577 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2578 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2580 if (target_flags_explicit & MASK_VSX)
2581 msg = N_("-mvsx requires hardware floating point");
2582 else
2583 target_flags &= ~ MASK_VSX;
2585 else if (TARGET_PAIRED_FLOAT)
2586 msg = N_("-mvsx and -mpaired are incompatible");
2587 /* The hardware will allow VSX and little endian, but until we make sure
2588 things like vector select, etc. work, don't allow VSX on little endian
2589 systems at this point. */
2590 else if (!BYTES_BIG_ENDIAN)
2591 msg = N_("-mvsx used with little endian code");
2592 else if (TARGET_AVOID_XFORM > 0)
2593 msg = N_("-mvsx needs indexed addressing");
2594 else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
2596 if (target_flags_explicit & MASK_VSX)
2597 msg = N_("-mvsx and -mno-altivec are incompatible");
2598 else
2599 msg = N_("-mno-altivec disables vsx");
2602 if (msg)
2604 warning (0, msg);
2605 target_flags &= ~ MASK_VSX;
2606 target_flags_explicit |= MASK_VSX;
2610 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2611 unless the user explicitly used a -mno-<option> switch to disable the code. */
2612 if (TARGET_VSX)
2613 target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
2614 else if (TARGET_POPCNTD)
2615 target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
2616 else if (TARGET_DFP)
2617 target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
2618 else if (TARGET_CMPB)
2619 target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
2620 else if (TARGET_FPRND)
2621 target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
2622 else if (TARGET_POPCNTB)
2623 target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
2624 else if (TARGET_ALTIVEC)
2625 target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);
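/* Illustrative example, not part of the original source: a bare -mvsx pulls
   in all of ISA_2_6_MASKS_SERVER (AltiVec, popcntd, DFP, cmpb, fprnd, ...),
   but bits the user set explicitly are filtered out first.  E.g. with
   "-mvsx -mno-popcntd", the & ~target_flags_explicit term keeps the
   explicit MASK_POPCNTD choice from being overridden.  */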
2627 /* E500mc does "better" if we inline more aggressively. Respect the
2628 user's opinion, though. */
2629 if (rs6000_block_move_inline_limit == 0
2630 && (rs6000_cpu == PROCESSOR_PPCE500MC
2631 || rs6000_cpu == PROCESSOR_PPCE500MC64
2632 || rs6000_cpu == PROCESSOR_PPCE5500
2633 || rs6000_cpu == PROCESSOR_PPCE6500))
2634 rs6000_block_move_inline_limit = 128;
2636 /* store_one_arg depends on expand_block_move to handle at least the
2637 size of reg_parm_stack_space. */
2638 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2639 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
2641 if (global_init_p)
2643 /* If the appropriate debug option is enabled, replace the target hooks
2644 with debug versions that call the real version and then print
2645 debugging information. */
2646 if (TARGET_DEBUG_COST)
2648 targetm.rtx_costs = rs6000_debug_rtx_costs;
2649 targetm.address_cost = rs6000_debug_address_cost;
2650 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2653 if (TARGET_DEBUG_ADDR)
2655 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2656 targetm.legitimize_address = rs6000_debug_legitimize_address;
2657 rs6000_secondary_reload_class_ptr
2658 = rs6000_debug_secondary_reload_class;
2659 rs6000_secondary_memory_needed_ptr
2660 = rs6000_debug_secondary_memory_needed;
2661 rs6000_cannot_change_mode_class_ptr
2662 = rs6000_debug_cannot_change_mode_class;
2663 rs6000_preferred_reload_class_ptr
2664 = rs6000_debug_preferred_reload_class;
2665 rs6000_legitimize_reload_address_ptr
2666 = rs6000_debug_legitimize_reload_address;
2667 rs6000_mode_dependent_address_ptr
2668 = rs6000_debug_mode_dependent_address;
2671 if (rs6000_veclibabi_name)
2673 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2674 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2675 else
2677 error ("unknown vectorization library ABI type (%s) for "
2678 "-mveclibabi= switch", rs6000_veclibabi_name);
2679 ret = false;
2684 if (!global_options_set.x_rs6000_long_double_type_size)
2686 if (main_target_opt != NULL
2687 && (main_target_opt->x_rs6000_long_double_type_size
2688 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2689 error ("target attribute or pragma changes long double size");
2690 else
2691 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2694 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2695 if (!global_options_set.x_rs6000_ieeequad)
2696 rs6000_ieeequad = 1;
2697 #endif
2699 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2700 target attribute or pragma which automatically enables both options,
2701 unless the altivec ABI was set. This is set by default for 64-bit, but
2702 not for 32-bit. */
2703 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2704 target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);
2706 /* Enable Altivec ABI for AIX -maltivec. */
2707 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2709 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2710 error ("target attribute or pragma changes AltiVec ABI");
2711 else
2712 rs6000_altivec_abi = 1;
2715 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2716 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2717 be explicitly overridden in either case. */
2718 if (TARGET_ELF)
2720 if (!global_options_set.x_rs6000_altivec_abi
2721 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2723 if (main_target_opt != NULL &&
2724 !main_target_opt->x_rs6000_altivec_abi)
2725 error ("target attribute or pragma changes AltiVec ABI");
2726 else
2727 rs6000_altivec_abi = 1;
2731 /* Set the Darwin64 ABI as default for 64-bit Darwin.
2732 So far, the only darwin64 targets are also MACH-O. */
2733 if (TARGET_MACHO
2734 && DEFAULT_ABI == ABI_DARWIN
2735 && TARGET_64BIT)
2737 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
2738 error ("target attribute or pragma changes darwin64 ABI");
2739 else
2741 rs6000_darwin64_abi = 1;
2742 /* Default to natural alignment, for better performance. */
2743 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2747 /* Place FP constants in the constant pool instead of TOC
2748 if section anchors are enabled. */
2749 if (flag_section_anchors)
2750 TARGET_NO_FP_IN_TOC = 1;
2752 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2753 SUBTARGET_OVERRIDE_OPTIONS;
2754 #endif
2755 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2756 SUBSUBTARGET_OVERRIDE_OPTIONS;
2757 #endif
2758 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2759 SUB3TARGET_OVERRIDE_OPTIONS;
2760 #endif
2762 /* For the E500 family of cores, reset the single/double FP flags to let us
2763 check that they remain constant across attributes or pragmas. Also,
2764 clear a possible request for string instructions, which are not supported
2765 and which we might have silently enabled above for -Os.
2767 For other families, clear ISEL in case it was set implicitly.
2770 switch (rs6000_cpu)
2772 case PROCESSOR_PPC8540:
2773 case PROCESSOR_PPC8548:
2774 case PROCESSOR_PPCE500MC:
2775 case PROCESSOR_PPCE500MC64:
2776 case PROCESSOR_PPCE5500:
2777 case PROCESSOR_PPCE6500:
2779 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
2780 rs6000_double_float = TARGET_E500_DOUBLE;
2782 target_flags &= ~MASK_STRING;
2784 break;
2786 default:
2788 if (have_cpu && !(target_flags_explicit & MASK_ISEL))
2789 target_flags &= ~MASK_ISEL;
2791 break;
2794 if (main_target_opt)
2796 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
2797 error ("target attribute or pragma changes single precision floating "
2798 "point");
2799 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
2800 error ("target attribute or pragma changes double precision floating "
2801 "point");
2804 /* Detect invalid option combinations with E500. */
2805 CHECK_E500_OPTIONS;
2807 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2808 && rs6000_cpu != PROCESSOR_POWER5
2809 && rs6000_cpu != PROCESSOR_POWER6
2810 && rs6000_cpu != PROCESSOR_POWER7
2811 && rs6000_cpu != PROCESSOR_PPCA2
2812 && rs6000_cpu != PROCESSOR_CELL
2813 && rs6000_cpu != PROCESSOR_PPC476);
2814 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2815 || rs6000_cpu == PROCESSOR_POWER5
2816 || rs6000_cpu == PROCESSOR_POWER7);
2817 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2818 || rs6000_cpu == PROCESSOR_POWER5
2819 || rs6000_cpu == PROCESSOR_POWER6
2820 || rs6000_cpu == PROCESSOR_POWER7
2821 || rs6000_cpu == PROCESSOR_PPCE500MC
2822 || rs6000_cpu == PROCESSOR_PPCE500MC64
2823 || rs6000_cpu == PROCESSOR_PPCE5500
2824 || rs6000_cpu == PROCESSOR_PPCE6500);
2826 /* Allow debug switches to override the above settings. These are set to -1
2827 in rs6000.opt to indicate the user hasn't directly set the switch. */
2828 if (TARGET_ALWAYS_HINT >= 0)
2829 rs6000_always_hint = TARGET_ALWAYS_HINT;
2831 if (TARGET_SCHED_GROUPS >= 0)
2832 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2834 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
2835 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2837 rs6000_sched_restricted_insns_priority
2838 = (rs6000_sched_groups ? 1 : 0);
2840 /* Handle -msched-costly-dep option. */
2841 rs6000_sched_costly_dep
2842 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
2844 if (rs6000_sched_costly_dep_str)
2846 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2847 rs6000_sched_costly_dep = no_dep_costly;
2848 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2849 rs6000_sched_costly_dep = all_deps_costly;
2850 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2851 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2852 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2853 rs6000_sched_costly_dep = store_to_load_dep_costly;
2854 else
2855 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2856 atoi (rs6000_sched_costly_dep_str));
2859 /* Handle -minsert-sched-nops option. */
2860 rs6000_sched_insert_nops
2861 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2863 if (rs6000_sched_insert_nops_str)
2865 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2866 rs6000_sched_insert_nops = sched_finish_none;
2867 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2868 rs6000_sched_insert_nops = sched_finish_pad_groups;
2869 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2870 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2871 else
2872 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2873 atoi (rs6000_sched_insert_nops_str));
2876 if (global_init_p)
2878 #ifdef TARGET_REGNAMES
2879 /* If the user desires alternate register names, copy in the
2880 alternate names now. */
2881 if (TARGET_REGNAMES)
2882 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2883 #endif
2885 /* Set aix_struct_return last, after the ABI is determined.
2886 If -maix-struct-return or -msvr4-struct-return was explicitly
2887 used, don't override with the ABI default. */
2888 if (!global_options_set.x_aix_struct_return)
2889 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2891 #if 0
2892 /* IBM XL compiler defaults to unsigned bitfields. */
2893 if (TARGET_XL_COMPAT)
2894 flag_signed_bitfields = 0;
2895 #endif
2897 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2898 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2900 if (TARGET_TOC)
2901 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2903 /* We can only guarantee the availability of DI pseudo-ops when
2904 assembling for 64-bit targets. */
2905 if (!TARGET_64BIT)
2907 targetm.asm_out.aligned_op.di = NULL;
2908 targetm.asm_out.unaligned_op.di = NULL;
2912 /* Set branch target alignment, if not optimizing for size. */
2913 if (!optimize_size)
2915 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
2916 aligned 8-byte to avoid misprediction by the branch predictor. */
2917 if (rs6000_cpu == PROCESSOR_TITAN
2918 || rs6000_cpu == PROCESSOR_CELL)
2920 if (align_functions <= 0)
2921 align_functions = 8;
2922 if (align_jumps <= 0)
2923 align_jumps = 8;
2924 if (align_loops <= 0)
2925 align_loops = 8;
2927 if (rs6000_align_branch_targets)
2929 if (align_functions <= 0)
2930 align_functions = 16;
2931 if (align_jumps <= 0)
2932 align_jumps = 16;
2933 if (align_loops <= 0)
2935 can_override_loop_align = 1;
2936 align_loops = 16;
2939 if (align_jumps_max_skip <= 0)
2940 align_jumps_max_skip = 15;
2941 if (align_loops_max_skip <= 0)
2942 align_loops_max_skip = 15;
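/* Illustrative note, not part of the original source: a max skip of 15 pairs
   with the 16-byte (2**4) alignment requested above -- the assembler may
   insert at most 16 - 1 == 15 bytes of padding and skips the alignment
   altogether if more would be required.  */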
2945 /* Arrange to save and restore machine status around nested functions. */
2946 init_machine_status = rs6000_init_machine_status;
2948 /* We should always be splitting complex arguments, but we can't break
2949 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2950 if (DEFAULT_ABI != ABI_AIX)
2951 targetm.calls.split_complex_arg = NULL;
2954 /* Initialize rs6000_cost with the appropriate target costs. */
2955 if (optimize_size)
2956 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2957 else
2958 switch (rs6000_cpu)
2960 case PROCESSOR_RS64A:
2961 rs6000_cost = &rs64a_cost;
2962 break;
2964 case PROCESSOR_MPCCORE:
2965 rs6000_cost = &mpccore_cost;
2966 break;
2968 case PROCESSOR_PPC403:
2969 rs6000_cost = &ppc403_cost;
2970 break;
2972 case PROCESSOR_PPC405:
2973 rs6000_cost = &ppc405_cost;
2974 break;
2976 case PROCESSOR_PPC440:
2977 rs6000_cost = &ppc440_cost;
2978 break;
2980 case PROCESSOR_PPC476:
2981 rs6000_cost = &ppc476_cost;
2982 break;
2984 case PROCESSOR_PPC601:
2985 rs6000_cost = &ppc601_cost;
2986 break;
2988 case PROCESSOR_PPC603:
2989 rs6000_cost = &ppc603_cost;
2990 break;
2992 case PROCESSOR_PPC604:
2993 rs6000_cost = &ppc604_cost;
2994 break;
2996 case PROCESSOR_PPC604e:
2997 rs6000_cost = &ppc604e_cost;
2998 break;
3000 case PROCESSOR_PPC620:
3001 rs6000_cost = &ppc620_cost;
3002 break;
3004 case PROCESSOR_PPC630:
3005 rs6000_cost = &ppc630_cost;
3006 break;
3008 case PROCESSOR_CELL:
3009 rs6000_cost = &ppccell_cost;
3010 break;
3012 case PROCESSOR_PPC750:
3013 case PROCESSOR_PPC7400:
3014 rs6000_cost = &ppc750_cost;
3015 break;
3017 case PROCESSOR_PPC7450:
3018 rs6000_cost = &ppc7450_cost;
3019 break;
3021 case PROCESSOR_PPC8540:
3022 case PROCESSOR_PPC8548:
3023 rs6000_cost = &ppc8540_cost;
3024 break;
3026 case PROCESSOR_PPCE300C2:
3027 case PROCESSOR_PPCE300C3:
3028 rs6000_cost = &ppce300c2c3_cost;
3029 break;
3031 case PROCESSOR_PPCE500MC:
3032 rs6000_cost = &ppce500mc_cost;
3033 break;
3035 case PROCESSOR_PPCE500MC64:
3036 rs6000_cost = &ppce500mc64_cost;
3037 break;
3039 case PROCESSOR_PPCE5500:
3040 rs6000_cost = &ppce5500_cost;
3041 break;
3043 case PROCESSOR_PPCE6500:
3044 rs6000_cost = &ppce6500_cost;
3045 break;
3047 case PROCESSOR_TITAN:
3048 rs6000_cost = &titan_cost;
3049 break;
3051 case PROCESSOR_POWER4:
3052 case PROCESSOR_POWER5:
3053 rs6000_cost = &power4_cost;
3054 break;
3056 case PROCESSOR_POWER6:
3057 rs6000_cost = &power6_cost;
3058 break;
3060 case PROCESSOR_POWER7:
3061 rs6000_cost = &power7_cost;
3062 break;
3064 case PROCESSOR_PPCA2:
3065 rs6000_cost = &ppca2_cost;
3066 break;
3068 default:
3069 gcc_unreachable ();
3072 if (global_init_p)
3074 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3075 rs6000_cost->simultaneous_prefetches,
3076 global_options.x_param_values,
3077 global_options_set.x_param_values);
3078 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3079 global_options.x_param_values,
3080 global_options_set.x_param_values);
3081 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3082 rs6000_cost->cache_line_size,
3083 global_options.x_param_values,
3084 global_options_set.x_param_values);
3085 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3086 global_options.x_param_values,
3087 global_options_set.x_param_values);
3089 /* If using typedef char *va_list, signal that
3090 __builtin_va_start (&ap, 0) can be optimized to
3091 ap = __builtin_next_arg (0). */
3092 if (DEFAULT_ABI != ABI_V4)
3093 targetm.expand_builtin_va_start = NULL;
3096 /* Set up single/double float flags.
3097 If TARGET_HARD_FLOAT is set, but neither single or double is set,
3098 then set both flags. */
3099 if (TARGET_HARD_FLOAT && TARGET_FPRS
3100 && rs6000_single_float == 0 && rs6000_double_float == 0)
3101 rs6000_single_float = rs6000_double_float = 1;
3103 /* If not explicitly specified via option, decide whether to generate indexed
3104 load/store instructions. */
3105 if (TARGET_AVOID_XFORM == -1)
3106 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3107 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
3108 need indexed accesses and the type used is the scalar type of the element
3109 being loaded or stored. */
3110 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3111 && !TARGET_ALTIVEC);
3113 /* Set the -mrecip options. */
3114 if (rs6000_recip_name)
3116 char *p = ASTRDUP (rs6000_recip_name);
3117 char *q;
3118 unsigned int mask, i;
3119 bool invert;
3121 while ((q = strtok (p, ",")) != NULL)
3123 p = NULL;
3124 if (*q == '!')
3126 invert = true;
3127 q++;
3129 else
3130 invert = false;
3132 if (!strcmp (q, "default"))
3133 mask = ((TARGET_RECIP_PRECISION)
3134 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3135 else
3137 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3138 if (!strcmp (q, recip_options[i].string))
3140 mask = recip_options[i].mask;
3141 break;
3144 if (i == ARRAY_SIZE (recip_options))
3146 error ("unknown option for -mrecip=%s", q);
3147 invert = false;
3148 mask = 0;
3149 ret = false;
3153 if (invert)
3154 rs6000_recip_control &= ~mask;
3155 else
3156 rs6000_recip_control |= mask;
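/* Illustrative example, not part of the original source: the option string
   is a comma-separated list and '!' inverts an entry, so something like
   -mrecip=default,!divd would first set the precision-appropriate default
   mask and then clear the DFmode divide bit.  The exact keywords come from
   the recip_options table defined elsewhere in this file; "divd" is used
   here only for illustration.  */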
3160 /* Set the builtin mask of the various options used that could affect which
3161 builtins were used. In the past we used target_flags, but we've run out
3162 of bits, and some options like SPE and PAIRED are no longer in
3163 target_flags. */
3164 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3165 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3166 fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
3167 (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
3168 (rs6000_builtin_mask & RS6000_BTM_VSX) ? ", vsx" : "",
3169 (rs6000_builtin_mask & RS6000_BTM_PAIRED) ? ", paired" : "",
3170 (rs6000_builtin_mask & RS6000_BTM_SPE) ? ", spe" : "");
3172 /* Initialize all of the registers. */
3173 rs6000_init_hard_regno_mode_ok (global_init_p);
3175 /* Save the initial options in case the user uses function-specific options. */
3176 if (global_init_p)
3177 target_option_default_node = target_option_current_node
3178 = build_target_option_node ();
3180 /* If not explicitly specified via option, decide whether to generate the
3181 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3182 if (TARGET_LINK_STACK == -1)
3183 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3185 return ret;
3188 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3189 define the target cpu type. */
3191 static void
3192 rs6000_option_override (void)
3194 (void) rs6000_option_override_internal (true);
3198 /* Implement targetm.vectorize.builtin_mask_for_load. */
3199 static tree
3200 rs6000_builtin_mask_for_load (void)
3202 if (TARGET_ALTIVEC || TARGET_VSX)
3203 return altivec_builtin_mask_for_load;
3204 else
3205 return 0;
3208 /* Implement LOOP_ALIGN. */
3209 int
3210 rs6000_loop_align (rtx label)
3212 basic_block bb;
3213 int ninsns;
3215 /* Don't override loop alignment if -falign-loops was specified. */
3216 if (!can_override_loop_align)
3217 return align_loops_log;
3219 bb = BLOCK_FOR_INSN (label);
3220 ninsns = num_loop_insns (bb->loop_father);
3222 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3223 if (ninsns > 4 && ninsns <= 8
3224 && (rs6000_cpu == PROCESSOR_POWER4
3225 || rs6000_cpu == PROCESSOR_POWER5
3226 || rs6000_cpu == PROCESSOR_POWER6
3227 || rs6000_cpu == PROCESSOR_POWER7))
3228 return 5;
3229 else
3230 return align_loops_log;
3233 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3234 static int
3235 rs6000_loop_align_max_skip (rtx label)
3237 return (1 << rs6000_loop_align (label)) - 1;
3240 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3241 after applying N iterations. This routine does not determine
3242 how many iterations are required to reach the desired alignment. */
3244 static bool
3245 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3247 if (is_packed)
3248 return false;
3250 if (TARGET_32BIT)
3252 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3253 return true;
3255 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3256 return true;
3258 return false;
3260 else
3262 if (TARGET_MACHO)
3263 return false;
3265 /* Assume that all other types are naturally aligned. CHECKME! */
3266 return true;
3270 /* Return true if the vector misalignment factor is supported by the
3271 target. */
3272 static bool
3273 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3274 const_tree type,
3275 int misalignment,
3276 bool is_packed)
3278 if (TARGET_VSX)
3280 /* Return false if the movmisalign pattern is not supported for this mode. */
3281 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3282 return false;
3284 if (misalignment == -1)
3286 /* Misalignment factor is unknown at compile time but we know
3287 it's word aligned. */
3288 if (rs6000_vector_alignment_reachable (type, is_packed))
3290 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3292 if (element_size == 64 || element_size == 32)
3293 return true;
3296 return false;
3299 /* VSX supports word-aligned vectors. */
3300 if (misalignment % 4 == 0)
3301 return true;
3303 return false;
3306 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3307 static int
3308 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3309 tree vectype, int misalign)
3311 unsigned elements;
3312 tree elem_type;
3314 switch (type_of_cost)
3316 case scalar_stmt:
3317 case scalar_load:
3318 case scalar_store:
3319 case vector_stmt:
3320 case vector_load:
3321 case vector_store:
3322 case vec_to_scalar:
3323 case scalar_to_vec:
3324 case cond_branch_not_taken:
3325 return 1;
3327 case vec_perm:
3328 if (TARGET_VSX)
3329 return 3;
3330 else
3331 return 1;
3333 case vec_promote_demote:
3334 if (TARGET_VSX)
3335 return 4;
3336 else
3337 return 1;
3339 case cond_branch_taken:
3340 return 3;
3342 case unaligned_load:
3343 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3345 elements = TYPE_VECTOR_SUBPARTS (vectype);
3346 if (elements == 2)
3347 /* Double word aligned. */
3348 return 2;
3350 if (elements == 4)
3352 switch (misalign)
3354 case 8:
3355 /* Double word aligned. */
3356 return 2;
3358 case -1:
3359 /* Unknown misalignment. */
3360 case 4:
3361 case 12:
3362 /* Word aligned. */
3363 return 22;
3365 default:
3366 gcc_unreachable ();
3371 if (TARGET_ALTIVEC)
3372 /* Misaligned loads are not supported. */
3373 gcc_unreachable ();
3375 return 2;
3377 case unaligned_store:
3378 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3380 elements = TYPE_VECTOR_SUBPARTS (vectype);
3381 if (elements == 2)
3382 /* Double word aligned. */
3383 return 2;
3385 if (elements == 4)
3387 switch (misalign)
3389 case 8:
3390 /* Double word aligned. */
3391 return 2;
3393 case -1:
3394 /* Unknown misalignment. */
3395 case 4:
3396 case 12:
3397 /* Word aligned. */
3398 return 23;
3400 default:
3401 gcc_unreachable ();
3406 if (TARGET_ALTIVEC)
3407 /* Misaligned stores are not supported. */
3408 gcc_unreachable ();
3410 return 2;
3412 case vec_construct:
3413 elements = TYPE_VECTOR_SUBPARTS (vectype);
3414 elem_type = TREE_TYPE (vectype);
3415 /* 32-bit vectors loaded into registers are stored as double
3416 precision, so we need n/2 converts in addition to the usual
3417 n/2 merges to construct a vector of short floats from them. */
3418 if (SCALAR_FLOAT_TYPE_P (elem_type)
3419 && TYPE_PRECISION (elem_type) == 32)
3420 return elements + 1;
3421 else
3422 return elements / 2 + 1;
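/* Illustrative example, not part of the original source: constructing a
   V4SF vector costs 4 + 1 == 5 (n/2 converts plus n/2 merges, plus one),
   while constructing a V2DF vector costs 2 / 2 + 1 == 2.  */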
3424 default:
3425 gcc_unreachable ();
3429 /* Implement targetm.vectorize.preferred_simd_mode. */
3431 static enum machine_mode
3432 rs6000_preferred_simd_mode (enum machine_mode mode)
3434 if (TARGET_VSX)
3435 switch (mode)
3437 case DFmode:
3438 return V2DFmode;
3439 default:;
3441 if (TARGET_ALTIVEC || TARGET_VSX)
3442 switch (mode)
3444 case SFmode:
3445 return V4SFmode;
3446 case DImode:
3447 return V2DImode;
3448 case SImode:
3449 return V4SImode;
3450 case HImode:
3451 return V8HImode;
3452 case QImode:
3453 return V16QImode;
3454 default:;
3456 if (TARGET_SPE)
3457 switch (mode)
3459 case SFmode:
3460 return V2SFmode;
3461 case SImode:
3462 return V2SImode;
3463 default:;
3465 if (TARGET_PAIRED_FLOAT
3466 && mode == SFmode)
3467 return V2SFmode;
3468 return word_mode;
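/* Illustrative example, not part of the original source: a DFmode loop
   prefers V2DFmode when VSX is available; an SFmode loop prefers V4SFmode
   under AltiVec/VSX and V2SFmode under SPE or paired float; anything
   unhandled falls back to word_mode, i.e. no vectorization preference.  */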
3471 typedef struct _rs6000_cost_data
3473 struct loop *loop_info;
3474 unsigned cost[3];
3475 } rs6000_cost_data;
3477 /* Test for likely overcommitment of vector hardware resources. If a
3478 loop iteration is relatively large, and too large a percentage of
3479 instructions in the loop are vectorized, the cost model may not
3480 adequately reflect delays from unavailable vector resources.
3481 Penalize the loop body cost for this case. */
3483 static void
3484 rs6000_density_test (rs6000_cost_data *data)
3486 const int DENSITY_PCT_THRESHOLD = 85;
3487 const int DENSITY_SIZE_THRESHOLD = 70;
3488 const int DENSITY_PENALTY = 10;
3489 struct loop *loop = data->loop_info;
3490 basic_block *bbs = get_loop_body (loop);
3491 int nbbs = loop->num_nodes;
3492 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3493 int i, density_pct;
3495 for (i = 0; i < nbbs; i++)
3497 basic_block bb = bbs[i];
3498 gimple_stmt_iterator gsi;
3500 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3502 gimple stmt = gsi_stmt (gsi);
3503 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3505 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3506 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3507 not_vec_cost++;
3511 free (bbs);
3512 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3514 if (density_pct > DENSITY_PCT_THRESHOLD
3515 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3517 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3518 if (dump_kind_p (MSG_NOTE))
3519 dump_printf_loc (MSG_NOTE, vect_location,
3520 "density %d%%, cost %d exceeds threshold, penalizing "
3521 "loop body cost by %d%%", density_pct,
3522 vec_cost + not_vec_cost, DENSITY_PENALTY);
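/* A worked instance of the density test above (illustrative numbers):
   with vec_cost = 90 and not_vec_cost = 10, density_pct is
   90 * 100 / 100 = 90 > 85 and the loop size 100 > 70, so the body
   cost is penalized to 90 * (100 + 10) / 100 = 99.  With
   not_vec_cost = 20 instead, density_pct is 9000 / 110 = 81 and no
   penalty is applied.  */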
3526 /* Implement targetm.vectorize.init_cost. */
3528 static void *
3529 rs6000_init_cost (struct loop *loop_info)
3531 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3532 data->loop_info = loop_info;
3533 data->cost[vect_prologue] = 0;
3534 data->cost[vect_body] = 0;
3535 data->cost[vect_epilogue] = 0;
3536 return data;
3539 /* Implement targetm.vectorize.add_stmt_cost. */
3541 static unsigned
3542 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3543 struct _stmt_vec_info *stmt_info, int misalign,
3544 enum vect_cost_model_location where)
3546 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3547 unsigned retval = 0;
3549 if (flag_vect_cost_model)
3551 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
3552 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
3553 misalign);
3554 /* Statements in an inner loop relative to the loop being
3555 vectorized are weighted more heavily. The value here is
3556 arbitrary and could potentially be improved with analysis. */
3557 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
3558 count *= 50; /* FIXME. */
3560 retval = (unsigned) (count * stmt_cost);
3561 cost_data->cost[where] += retval;
3564 return retval;
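/* A worked instance of the weighting above (illustrative): a body
   statement with count = 2 and per-statement cost 3 that lies in an
   inner loop relative to the loop being vectorized contributes
   (2 * 50) * 3 = 300 units to cost_data->cost[vect_body].  */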
3567 /* Implement targetm.vectorize.finish_cost. */
3569 static void
3570 rs6000_finish_cost (void *data, unsigned *prologue_cost,
3571 unsigned *body_cost, unsigned *epilogue_cost)
3573 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3575 if (cost_data->loop_info)
3576 rs6000_density_test (cost_data);
3578 *prologue_cost = cost_data->cost[vect_prologue];
3579 *body_cost = cost_data->cost[vect_body];
3580 *epilogue_cost = cost_data->cost[vect_epilogue];
3583 /* Implement targetm.vectorize.destroy_cost_data. */
3585 static void
3586 rs6000_destroy_cost_data (void *data)
3588 free (data);
3591 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
3592 library with vectorized intrinsics. */
3594 static tree
3595 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3597 char name[32];
3598 const char *suffix = NULL;
3599 tree fntype, new_fndecl, bdecl = NULL_TREE;
3600 int n_args = 1;
3601 const char *bname;
3602 enum machine_mode el_mode, in_mode;
3603 int n, in_n;
3605 /* Libmass is suitable for unsafe math only, as it does not correctly handle
3606 parts of IEEE, such as denormals, with the required precision. Only
3607 support it if we have VSX, so the simd d2 or f4 functions can be used.
3608 XXX: Add variable length support. */
3609 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3610 return NULL_TREE;
3612 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3613 n = TYPE_VECTOR_SUBPARTS (type_out);
3614 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3615 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3616 if (el_mode != in_mode
3617 || n != in_n)
3618 return NULL_TREE;
3620 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3622 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3623 switch (fn)
3625 case BUILT_IN_ATAN2:
3626 case BUILT_IN_HYPOT:
3627 case BUILT_IN_POW:
3628 n_args = 2;
3629 /* fall through */
3631 case BUILT_IN_ACOS:
3632 case BUILT_IN_ACOSH:
3633 case BUILT_IN_ASIN:
3634 case BUILT_IN_ASINH:
3635 case BUILT_IN_ATAN:
3636 case BUILT_IN_ATANH:
3637 case BUILT_IN_CBRT:
3638 case BUILT_IN_COS:
3639 case BUILT_IN_COSH:
3640 case BUILT_IN_ERF:
3641 case BUILT_IN_ERFC:
3642 case BUILT_IN_EXP2:
3643 case BUILT_IN_EXP:
3644 case BUILT_IN_EXPM1:
3645 case BUILT_IN_LGAMMA:
3646 case BUILT_IN_LOG10:
3647 case BUILT_IN_LOG1P:
3648 case BUILT_IN_LOG2:
3649 case BUILT_IN_LOG:
3650 case BUILT_IN_SIN:
3651 case BUILT_IN_SINH:
3652 case BUILT_IN_SQRT:
3653 case BUILT_IN_TAN:
3654 case BUILT_IN_TANH:
3655 bdecl = builtin_decl_implicit (fn);
3656 suffix = "d2"; /* pow -> powd2 */
3657 if (el_mode != DFmode
3658 || n != 2)
3659 return NULL_TREE;
3660 break;
3662 case BUILT_IN_ATAN2F:
3663 case BUILT_IN_HYPOTF:
3664 case BUILT_IN_POWF:
3665 n_args = 2;
3666 /* fall through */
3668 case BUILT_IN_ACOSF:
3669 case BUILT_IN_ACOSHF:
3670 case BUILT_IN_ASINF:
3671 case BUILT_IN_ASINHF:
3672 case BUILT_IN_ATANF:
3673 case BUILT_IN_ATANHF:
3674 case BUILT_IN_CBRTF:
3675 case BUILT_IN_COSF:
3676 case BUILT_IN_COSHF:
3677 case BUILT_IN_ERFF:
3678 case BUILT_IN_ERFCF:
3679 case BUILT_IN_EXP2F:
3680 case BUILT_IN_EXPF:
3681 case BUILT_IN_EXPM1F:
3682 case BUILT_IN_LGAMMAF:
3683 case BUILT_IN_LOG10F:
3684 case BUILT_IN_LOG1PF:
3685 case BUILT_IN_LOG2F:
3686 case BUILT_IN_LOGF:
3687 case BUILT_IN_SINF:
3688 case BUILT_IN_SINHF:
3689 case BUILT_IN_SQRTF:
3690 case BUILT_IN_TANF:
3691 case BUILT_IN_TANHF:
3692 bdecl = builtin_decl_implicit (fn);
3693 suffix = "4"; /* powf -> powf4 */
3694 if (el_mode != SFmode
3695 || n != 4)
3696 return NULL_TREE;
3697 break;
3699 default:
3700 return NULL_TREE;
3703 else
3704 return NULL_TREE;
3706 gcc_assert (suffix != NULL);
3707 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
3708 strcpy (name, bname + sizeof ("__builtin_") - 1);
3709 strcat (name, suffix);
3711 if (n_args == 1)
3712 fntype = build_function_type_list (type_out, type_in, NULL);
3713 else if (n_args == 2)
3714 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
3715 else
3716 gcc_unreachable ();
3718 /* Build a function declaration for the vectorized function. */
3719 new_fndecl = build_decl (BUILTINS_LOCATION,
3720 FUNCTION_DECL, get_identifier (name), fntype);
3721 TREE_PUBLIC (new_fndecl) = 1;
3722 DECL_EXTERNAL (new_fndecl) = 1;
3723 DECL_IS_NOVOPS (new_fndecl) = 1;
3724 TREE_READONLY (new_fndecl) = 1;
3726 return new_fndecl;
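/* An illustrative sketch of the name mangling above: for
   __builtin_sinf with V4SFmode in and out, bname is "__builtin_sinf";
   stripping the "__builtin_" prefix and appending the "4" suffix
   yields a declaration for the MASS routine "sinf4".  Likewise
   __builtin_pow with V2DFmode becomes "powd2".  */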
3729 /* Returns a function decl for a vectorized version of the builtin function
3730 FNDECL, taking vectors of type TYPE_IN and returning vectors of type
3731 TYPE_OUT, or NULL_TREE if no such version is available. */
3733 static tree
3734 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3735 tree type_in)
3737 enum machine_mode in_mode, out_mode;
3738 int in_n, out_n;
3740 if (TARGET_DEBUG_BUILTIN)
3741 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
3742 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
3743 GET_MODE_NAME (TYPE_MODE (type_out)),
3744 GET_MODE_NAME (TYPE_MODE (type_in)));
3746 if (TREE_CODE (type_out) != VECTOR_TYPE
3747 || TREE_CODE (type_in) != VECTOR_TYPE
3748 || !TARGET_VECTORIZE_BUILTINS)
3749 return NULL_TREE;
3751 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3752 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3753 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3754 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3756 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3758 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3759 switch (fn)
3761 case BUILT_IN_COPYSIGN:
3762 if (VECTOR_UNIT_VSX_P (V2DFmode)
3763 && out_mode == DFmode && out_n == 2
3764 && in_mode == DFmode && in_n == 2)
3765 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3766 break;
3767 case BUILT_IN_COPYSIGNF:
3768 if (out_mode != SFmode || out_n != 4
3769 || in_mode != SFmode || in_n != 4)
3770 break;
3771 if (VECTOR_UNIT_VSX_P (V4SFmode))
3772 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3773 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3774 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3775 break;
3776 case BUILT_IN_SQRT:
3777 if (VECTOR_UNIT_VSX_P (V2DFmode)
3778 && out_mode == DFmode && out_n == 2
3779 && in_mode == DFmode && in_n == 2)
3780 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3781 break;
3782 case BUILT_IN_SQRTF:
3783 if (VECTOR_UNIT_VSX_P (V4SFmode)
3784 && out_mode == SFmode && out_n == 4
3785 && in_mode == SFmode && in_n == 4)
3786 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3787 break;
3788 case BUILT_IN_CEIL:
3789 if (VECTOR_UNIT_VSX_P (V2DFmode)
3790 && out_mode == DFmode && out_n == 2
3791 && in_mode == DFmode && in_n == 2)
3792 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3793 break;
3794 case BUILT_IN_CEILF:
3795 if (out_mode != SFmode || out_n != 4
3796 || in_mode != SFmode || in_n != 4)
3797 break;
3798 if (VECTOR_UNIT_VSX_P (V4SFmode))
3799 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3800 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3801 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3802 break;
3803 case BUILT_IN_FLOOR:
3804 if (VECTOR_UNIT_VSX_P (V2DFmode)
3805 && out_mode == DFmode && out_n == 2
3806 && in_mode == DFmode && in_n == 2)
3807 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3808 break;
3809 case BUILT_IN_FLOORF:
3810 if (out_mode != SFmode || out_n != 4
3811 || in_mode != SFmode || in_n != 4)
3812 break;
3813 if (VECTOR_UNIT_VSX_P (V4SFmode))
3814 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3815 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3816 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3817 break;
3818 case BUILT_IN_FMA:
3819 if (VECTOR_UNIT_VSX_P (V2DFmode)
3820 && out_mode == DFmode && out_n == 2
3821 && in_mode == DFmode && in_n == 2)
3822 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
3823 break;
3824 case BUILT_IN_FMAF:
3825 if (VECTOR_UNIT_VSX_P (V4SFmode)
3826 && out_mode == SFmode && out_n == 4
3827 && in_mode == SFmode && in_n == 4)
3828 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
3829 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
3830 && out_mode == SFmode && out_n == 4
3831 && in_mode == SFmode && in_n == 4)
3832 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
3833 break;
3834 case BUILT_IN_TRUNC:
3835 if (VECTOR_UNIT_VSX_P (V2DFmode)
3836 && out_mode == DFmode && out_n == 2
3837 && in_mode == DFmode && in_n == 2)
3838 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3839 break;
3840 case BUILT_IN_TRUNCF:
3841 if (out_mode != SFmode || out_n != 4
3842 || in_mode != SFmode || in_n != 4)
3843 break;
3844 if (VECTOR_UNIT_VSX_P (V4SFmode))
3845 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3846 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3847 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3848 break;
3849 case BUILT_IN_NEARBYINT:
3850 if (VECTOR_UNIT_VSX_P (V2DFmode)
3851 && flag_unsafe_math_optimizations
3852 && out_mode == DFmode && out_n == 2
3853 && in_mode == DFmode && in_n == 2)
3854 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3855 break;
3856 case BUILT_IN_NEARBYINTF:
3857 if (VECTOR_UNIT_VSX_P (V4SFmode)
3858 && flag_unsafe_math_optimizations
3859 && out_mode == SFmode && out_n == 4
3860 && in_mode == SFmode && in_n == 4)
3861 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3862 break;
3863 case BUILT_IN_RINT:
3864 if (VECTOR_UNIT_VSX_P (V2DFmode)
3865 && !flag_trapping_math
3866 && out_mode == DFmode && out_n == 2
3867 && in_mode == DFmode && in_n == 2)
3868 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3869 break;
3870 case BUILT_IN_RINTF:
3871 if (VECTOR_UNIT_VSX_P (V4SFmode)
3872 && !flag_trapping_math
3873 && out_mode == SFmode && out_n == 4
3874 && in_mode == SFmode && in_n == 4)
3875 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
3876 break;
3877 default:
3878 break;
3882 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
3884 enum rs6000_builtins fn
3885 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
3886 switch (fn)
3888 case RS6000_BUILTIN_RSQRTF:
3889 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3890 && out_mode == SFmode && out_n == 4
3891 && in_mode == SFmode && in_n == 4)
3892 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
3893 break;
3894 case RS6000_BUILTIN_RSQRT:
3895 if (VECTOR_UNIT_VSX_P (V2DFmode)
3896 && out_mode == DFmode && out_n == 2
3897 && in_mode == DFmode && in_n == 2)
3898 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
3899 break;
3900 case RS6000_BUILTIN_RECIPF:
3901 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3902 && out_mode == SFmode && out_n == 4
3903 && in_mode == SFmode && in_n == 4)
3904 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
3905 break;
3906 case RS6000_BUILTIN_RECIP:
3907 if (VECTOR_UNIT_VSX_P (V2DFmode)
3908 && out_mode == DFmode && out_n == 2
3909 && in_mode == DFmode && in_n == 2)
3910 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
3911 break;
3912 default:
3913 break;
3917 /* Generate calls to libmass if appropriate. */
3918 if (rs6000_veclib_handler)
3919 return rs6000_veclib_handler (fndecl, type_out, type_in);
3921 return NULL_TREE;
3924 /* Default CPU string for rs6000*_file_start functions. */
3925 static const char *rs6000_default_cpu;
3927 /* Do anything needed at the start of the asm file. */
3929 static void
3930 rs6000_file_start (void)
3932 char buffer[80];
3933 const char *start = buffer;
3934 FILE *file = asm_out_file;
3936 rs6000_default_cpu = TARGET_CPU_DEFAULT;
3938 default_file_start ();
3940 if (flag_verbose_asm)
3942 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3944 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
3946 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
3947 start = "";
3950 if (global_options_set.x_rs6000_cpu_index)
3952 fprintf (file, "%s -mcpu=%s", start,
3953 processor_target_table[rs6000_cpu_index].name);
3954 start = "";
3957 if (global_options_set.x_rs6000_tune_index)
3959 fprintf (file, "%s -mtune=%s", start,
3960 processor_target_table[rs6000_tune_index].name);
3961 start = "";
3964 if (PPC405_ERRATUM77)
3966 fprintf (file, "%s PPC405CR_ERRATUM77", start);
3967 start = "";
3970 #ifdef USING_ELFOS_H
3971 switch (rs6000_sdata)
3973 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
3974 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
3975 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
3976 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
3979 if (rs6000_sdata && g_switch_value)
3981 fprintf (file, "%s -G %d", start,
3982 g_switch_value);
3983 start = "";
3985 #endif
3987 if (*start == '\0')
3988 putc ('\n', file);
3991 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
3993 switch_to_section (toc_section);
3994 switch_to_section (text_section);
3999 /* Return nonzero if this function is known to have a null epilogue. */
4001 int
4002 direct_return (void)
4004 if (reload_completed)
4006 rs6000_stack_t *info = rs6000_stack_info ();
4008 if (info->first_gp_reg_save == 32
4009 && info->first_fp_reg_save == 64
4010 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4011 && ! info->lr_save_p
4012 && ! info->cr_save_p
4013 && info->vrsave_mask == 0
4014 && ! info->push_p)
4015 return 1;
4018 return 0;
4021 /* Return the number of instructions it takes to form a constant in an
4022 integer register. */
4024 static int
4025 num_insns_constant_wide (HOST_WIDE_INT value)
4027 /* signed constant loadable with addi */
4028 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4029 return 1;
4031 /* constant loadable with addis */
4032 else if ((value & 0xffff) == 0
4033 && (value >> 31 == -1 || value >> 31 == 0))
4034 return 1;
4036 #if HOST_BITS_PER_WIDE_INT == 64
4037 else if (TARGET_POWERPC64)
4039 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4040 HOST_WIDE_INT high = value >> 31;
4042 if (high == 0 || high == -1)
4043 return 2;
4045 high >>= 1;
4047 if (low == 0)
4048 return num_insns_constant_wide (high) + 1;
4049 else if (high == 0)
4050 return num_insns_constant_wide (low) + 1;
4051 else
4052 return (num_insns_constant_wide (high)
4053 + num_insns_constant_wide (low) + 1);
4055 #endif
4057 else
4058 return 2;
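/* Worked instances of the counting above (illustrative):
   0x7fff fits a signed 16-bit immediate, so 1 insn (li);
   0x12340000 has zero low bits and sign-extends from the high part,
   so 1 insn (lis); 0x12345678 needs lis + ori, so 2; on a 64-bit
   target, 0x123456789 splits into high = 0x1 (1 insn) and
   low = 0x23456789 (2 insns), plus one insn to combine the halves,
   giving 1 + 2 + 1 = 4.  */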
4061 int
4062 num_insns_constant (rtx op, enum machine_mode mode)
4064 HOST_WIDE_INT low, high;
4066 switch (GET_CODE (op))
4068 case CONST_INT:
4069 #if HOST_BITS_PER_WIDE_INT == 64
4070 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4071 && mask64_operand (op, mode))
4072 return 2;
4073 else
4074 #endif
4075 return num_insns_constant_wide (INTVAL (op));
4077 case CONST_DOUBLE:
4078 if (mode == SFmode || mode == SDmode)
4080 long l;
4081 REAL_VALUE_TYPE rv;
4083 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4084 if (DECIMAL_FLOAT_MODE_P (mode))
4085 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4086 else
4087 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4088 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4091 if (mode == VOIDmode || mode == DImode)
4093 high = CONST_DOUBLE_HIGH (op);
4094 low = CONST_DOUBLE_LOW (op);
4096 else
4098 long l[2];
4099 REAL_VALUE_TYPE rv;
4101 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4102 if (DECIMAL_FLOAT_MODE_P (mode))
4103 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4104 else
4105 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4106 high = l[WORDS_BIG_ENDIAN == 0];
4107 low = l[WORDS_BIG_ENDIAN != 0];
4110 if (TARGET_32BIT)
4111 return (num_insns_constant_wide (low)
4112 + num_insns_constant_wide (high));
4113 else
4115 if ((high == 0 && low >= 0)
4116 || (high == -1 && low < 0))
4117 return num_insns_constant_wide (low);
4119 else if (mask64_operand (op, mode))
4120 return 2;
4122 else if (low == 0)
4123 return num_insns_constant_wide (high) + 1;
4125 else
4126 return (num_insns_constant_wide (high)
4127 + num_insns_constant_wide (low) + 1);
4130 default:
4131 gcc_unreachable ();
4135 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4136 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4137 corresponding element of the vector, but for V4SFmode and V2SFmode,
4138 the corresponding "float" is interpreted as an SImode integer. */
4140 HOST_WIDE_INT
4141 const_vector_elt_as_int (rtx op, unsigned int elt)
4143 rtx tmp;
4145 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4146 gcc_assert (GET_MODE (op) != V2DImode
4147 && GET_MODE (op) != V2DFmode);
4149 tmp = CONST_VECTOR_ELT (op, elt);
4150 if (GET_MODE (op) == V4SFmode
4151 || GET_MODE (op) == V2SFmode)
4152 tmp = gen_lowpart (SImode, tmp);
4153 return INTVAL (tmp);
4156 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4157 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4158 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4159 all items are set to the same value and contain COPIES replicas of the
4160 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4161 operand and the others are set to the value of the operand's msb. */
4163 static bool
4164 vspltis_constant (rtx op, unsigned step, unsigned copies)
4166 enum machine_mode mode = GET_MODE (op);
4167 enum machine_mode inner = GET_MODE_INNER (mode);
4169 unsigned i;
4170 unsigned nunits;
4171 unsigned bitsize;
4172 unsigned mask;
4174 HOST_WIDE_INT val;
4175 HOST_WIDE_INT splat_val;
4176 HOST_WIDE_INT msb_val;
4178 if (mode == V2DImode || mode == V2DFmode)
4179 return false;
4181 nunits = GET_MODE_NUNITS (mode);
4182 bitsize = GET_MODE_BITSIZE (inner);
4183 mask = GET_MODE_MASK (inner);
4185 val = const_vector_elt_as_int (op, nunits - 1);
4186 splat_val = val;
4187 msb_val = val > 0 ? 0 : -1;
4189 /* Construct the value to be splatted, if possible. If not, return 0. */
4190 for (i = 2; i <= copies; i *= 2)
4192 HOST_WIDE_INT small_val;
4193 bitsize /= 2;
4194 small_val = splat_val >> bitsize;
4195 mask >>= bitsize;
4196 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4197 return false;
4198 splat_val = small_val;
4201 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4202 if (EASY_VECTOR_15 (splat_val))
4205 /* Also check if we can splat, and then add the result to itself. Do so if
4206 the value is positive, or if the splat instruction is using OP's mode;
4207 for splat_val < 0, the splat and the add should use the same mode. */
4208 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4209 && (splat_val >= 0 || (step == 1 && copies == 1)))
4212 /* Also check if we are loading up the most significant bit, which can be done
4213 by loading up -1 and shifting that value left by -1 (i.e. by the element width minus one, since vector shift counts are taken modulo the width). */
4214 else if (EASY_VECTOR_MSB (splat_val, inner))
4217 else
4218 return false;
4220 /* Check if VAL is present in every STEP-th element, and the
4221 other elements are filled with its most significant bit. */
4222 for (i = 0; i < nunits - 1; ++i)
4224 HOST_WIDE_INT desired_val;
4225 if (((i + 1) & (step - 1)) == 0)
4226 desired_val = val;
4227 else
4228 desired_val = msb_val;
4230 if (desired_val != const_vector_elt_as_int (op, i))
4231 return false;
4234 return true;
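/* Illustrative instances of the STEP/COPIES matching above: the
   V8HImode vector { 0, 1, 0, 1, 0, 1, 0, 1 } is accepted with
   step = 2 and synthesized as "vspltisw %v,1", each 32-bit word
   0x00000001 being viewed as two halfwords; the V8HImode vector with
   all elements 0x0101 is accepted with copies = 2 and synthesized as
   "vspltisb %v,1", each halfword being two copies of the QImode
   value 1.  */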
4238 /* Return true if OP is of the given MODE and can be synthesized
4239 with a vspltisb, vspltish or vspltisw. */
4241 bool
4242 easy_altivec_constant (rtx op, enum machine_mode mode)
4244 unsigned step, copies;
4246 if (mode == VOIDmode)
4247 mode = GET_MODE (op);
4248 else if (mode != GET_MODE (op))
4249 return false;
4251 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4252 constants. */
4253 if (mode == V2DFmode)
4254 return zero_constant (op, mode);
4256 if (mode == V2DImode)
4258 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4259 easy. */
4260 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4261 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4262 return false;
4264 if (zero_constant (op, mode))
4265 return true;
4267 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4268 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4269 return true;
4271 return false;
4274 /* Start with a vspltisw. */
4275 step = GET_MODE_NUNITS (mode) / 4;
4276 copies = 1;
4278 if (vspltis_constant (op, step, copies))
4279 return true;
4281 /* Then try with a vspltish. */
4282 if (step == 1)
4283 copies <<= 1;
4284 else
4285 step >>= 1;
4287 if (vspltis_constant (op, step, copies))
4288 return true;
4290 /* And finally a vspltisb. */
4291 if (step == 1)
4292 copies <<= 1;
4293 else
4294 step >>= 1;
4296 if (vspltis_constant (op, step, copies))
4297 return true;
4299 return false;
4302 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4303 result is OP. Abort if it is not possible. */
4305 rtx
4306 gen_easy_altivec_constant (rtx op)
4308 enum machine_mode mode = GET_MODE (op);
4309 int nunits = GET_MODE_NUNITS (mode);
4310 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4311 unsigned step = nunits / 4;
4312 unsigned copies = 1;
4314 /* Start with a vspltisw. */
4315 if (vspltis_constant (op, step, copies))
4316 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4318 /* Then try with a vspltish. */
4319 if (step == 1)
4320 copies <<= 1;
4321 else
4322 step >>= 1;
4324 if (vspltis_constant (op, step, copies))
4325 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4327 /* And finally a vspltisb. */
4328 if (step == 1)
4329 copies <<= 1;
4330 else
4331 step >>= 1;
4333 if (vspltis_constant (op, step, copies))
4334 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4336 gcc_unreachable ();
4339 const char *
4340 output_vec_const_move (rtx *operands)
4342 int cst, cst2;
4343 enum machine_mode mode;
4344 rtx dest, vec;
4346 dest = operands[0];
4347 vec = operands[1];
4348 mode = GET_MODE (dest);
4350 if (TARGET_VSX)
4352 if (zero_constant (vec, mode))
4353 return "xxlxor %x0,%x0,%x0";
4355 if (mode == V2DImode
4356 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4357 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4358 return "vspltisw %0,-1";
4361 if (TARGET_ALTIVEC)
4363 rtx splat_vec;
4364 if (zero_constant (vec, mode))
4365 return "vxor %0,%0,%0";
4367 splat_vec = gen_easy_altivec_constant (vec);
4368 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4369 operands[1] = XEXP (splat_vec, 0);
4370 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4371 return "#";
4373 switch (GET_MODE (splat_vec))
4375 case V4SImode:
4376 return "vspltisw %0,%1";
4378 case V8HImode:
4379 return "vspltish %0,%1";
4381 case V16QImode:
4382 return "vspltisb %0,%1";
4384 default:
4385 gcc_unreachable ();
4389 gcc_assert (TARGET_SPE);
4391 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4392 pattern of V1DI, V4HI, and V2SF.
4394 FIXME: We should probably return # and add post reload
4395 splitters for these, but this way is so easy ;-). */
4396 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4397 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4398 operands[1] = CONST_VECTOR_ELT (vec, 0);
4399 operands[2] = CONST_VECTOR_ELT (vec, 1);
4400 if (cst == cst2)
4401 return "li %0,%1\n\tevmergelo %0,%0,%0";
4402 else
4403 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4406 /* Initialize TARGET of vector PAIRED to VALS. */
4408 void
4409 paired_expand_vector_init (rtx target, rtx vals)
4411 enum machine_mode mode = GET_MODE (target);
4412 int n_elts = GET_MODE_NUNITS (mode);
4413 int n_var = 0;
4414 rtx x, new_rtx, tmp, constant_op, op1, op2;
4415 int i;
4417 for (i = 0; i < n_elts; ++i)
4419 x = XVECEXP (vals, 0, i);
4420 if (!(CONST_INT_P (x)
4421 || GET_CODE (x) == CONST_DOUBLE
4422 || GET_CODE (x) == CONST_FIXED))
4423 ++n_var;
4425 if (n_var == 0)
4427 /* Load from constant pool. */
4428 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4429 return;
4432 if (n_var == 2)
4434 /* The vector is initialized only with non-constants. */
4435 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4436 XVECEXP (vals, 0, 1));
4438 emit_move_insn (target, new_rtx);
4439 return;
4442 /* One field is non-constant and the other one is a constant. Load the
4443 constant from the constant pool and use ps_merge instruction to
4444 construct the whole vector. */
4445 op1 = XVECEXP (vals, 0, 0);
4446 op2 = XVECEXP (vals, 0, 1);
4448 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4450 tmp = gen_reg_rtx (GET_MODE (constant_op));
4451 emit_move_insn (tmp, constant_op);
4453 if (CONSTANT_P (op1))
4454 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4455 else
4456 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4458 emit_move_insn (target, new_rtx);
4461 void
4462 paired_expand_vector_move (rtx operands[])
4464 rtx op0 = operands[0], op1 = operands[1];
4466 emit_move_insn (op0, op1);
4469 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4470 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4471 operands for the relation operation RCODE. This is a recursive
4472 function. */
4474 static void
4475 paired_emit_vector_compare (enum rtx_code rcode,
4476 rtx dest, rtx op0, rtx op1,
4477 rtx cc_op0, rtx cc_op1)
4479 rtx tmp = gen_reg_rtx (V2SFmode);
4480 rtx tmp1, max, min;
4482 gcc_assert (TARGET_PAIRED_FLOAT);
4483 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4485 switch (rcode)
4487 case LT:
4488 case LTU:
4489 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4490 return;
4491 case GE:
4492 case GEU:
4493 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4494 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4495 return;
4496 case LE:
4497 case LEU:
4498 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4499 return;
4500 case GT:
4501 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4502 return;
4503 case EQ:
4504 tmp1 = gen_reg_rtx (V2SFmode);
4505 max = gen_reg_rtx (V2SFmode);
4506 min = gen_reg_rtx (V2SFmode);
4509 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4510 emit_insn (gen_selv2sf4
4511 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4512 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4513 emit_insn (gen_selv2sf4
4514 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4515 emit_insn (gen_subv2sf3 (tmp1, min, max));
4516 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4517 return;
4518 case NE:
4519 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4520 return;
4521 case UNLE:
4522 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4523 return;
4524 case UNLT:
4525 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4526 return;
4527 case UNGE:
4528 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4529 return;
4530 case UNGT:
4531 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4532 return;
4533 default:
4534 gcc_unreachable ();
4537 return;
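/* A sketch of the EQ case above, assuming the usual ps_sel semantics
   (each lane selects the second source when the first is >= 0):
   max = sel (a-b, a, b) is max (a, b) and min = sel (b-a, a, b) is
   min (a, b), so min - max is <= 0 with equality exactly when a == b;
   the final sel therefore picks op0 only in lanes where the compare
   operands are equal.  */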
4540 /* Emit vector conditional expression.
4541 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4542 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4544 int
4545 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4546 rtx cond, rtx cc_op0, rtx cc_op1)
4548 enum rtx_code rcode = GET_CODE (cond);
4550 if (!TARGET_PAIRED_FLOAT)
4551 return 0;
4553 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4555 return 1;
4558 /* Initialize vector TARGET to VALS. */
4560 void
4561 rs6000_expand_vector_init (rtx target, rtx vals)
4563 enum machine_mode mode = GET_MODE (target);
4564 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4565 int n_elts = GET_MODE_NUNITS (mode);
4566 int n_var = 0, one_var = -1;
4567 bool all_same = true, all_const_zero = true;
4568 rtx x, mem;
4569 int i;
4571 for (i = 0; i < n_elts; ++i)
4573 x = XVECEXP (vals, 0, i);
4574 if (!(CONST_INT_P (x)
4575 || GET_CODE (x) == CONST_DOUBLE
4576 || GET_CODE (x) == CONST_FIXED))
4577 ++n_var, one_var = i;
4578 else if (x != CONST0_RTX (inner_mode))
4579 all_const_zero = false;
4581 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4582 all_same = false;
4585 if (n_var == 0)
4587 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4588 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4589 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4591 /* Zero register. */
4592 emit_insn (gen_rtx_SET (VOIDmode, target,
4593 gen_rtx_XOR (mode, target, target)));
4594 return;
4596 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4598 /* Splat immediate. */
4599 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4600 return;
4602 else
4604 /* Load from constant pool. */
4605 emit_move_insn (target, const_vec);
4606 return;
4610 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4611 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4613 rtx op0 = XVECEXP (vals, 0, 0);
4614 rtx op1 = XVECEXP (vals, 0, 1);
4615 if (all_same)
4617 if (!MEM_P (op0) && !REG_P (op0))
4618 op0 = force_reg (inner_mode, op0);
4619 if (mode == V2DFmode)
4620 emit_insn (gen_vsx_splat_v2df (target, op0));
4621 else
4622 emit_insn (gen_vsx_splat_v2di (target, op0));
4624 else
4626 op0 = force_reg (inner_mode, op0);
4627 op1 = force_reg (inner_mode, op1);
4628 if (mode == V2DFmode)
4629 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4630 else
4631 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4633 return;
4636 /* Single precision floating point on VSX is internally represented as a
4637 double, so either make two V2DF vectors and convert those vectors to
4638 single precision, or do one conversion and splat the result to the
4639 other elements. */
4640 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4642 if (all_same)
4644 rtx freg = gen_reg_rtx (V4SFmode);
4645 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4647 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4648 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4650 else
4652 rtx dbl_even = gen_reg_rtx (V2DFmode);
4653 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4654 rtx flt_even = gen_reg_rtx (V4SFmode);
4655 rtx flt_odd = gen_reg_rtx (V4SFmode);
4656 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4657 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4658 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4659 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4661 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4662 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4663 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4664 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4665 rs6000_expand_extract_even (target, flt_even, flt_odd);
4667 return;
4670 /* Store value to stack temp. Load vector element. Splat. However, splat
4671 of 64-bit items is not supported on Altivec. */
4672 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4674 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4675 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4676 XVECEXP (vals, 0, 0));
4677 x = gen_rtx_UNSPEC (VOIDmode,
4678 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4679 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4680 gen_rtvec (2,
4681 gen_rtx_SET (VOIDmode,
4682 target, mem),
4683 x)));
4684 x = gen_rtx_VEC_SELECT (inner_mode, target,
4685 gen_rtx_PARALLEL (VOIDmode,
4686 gen_rtvec (1, const0_rtx)));
4687 emit_insn (gen_rtx_SET (VOIDmode, target,
4688 gen_rtx_VEC_DUPLICATE (mode, x)));
4689 return;
4692 /* One field is non-constant. Load constant then overwrite
4693 varying field. */
4694 if (n_var == 1)
4696 rtx copy = copy_rtx (vals);
4698 /* Load constant part of vector, substitute neighboring value for
4699 varying element. */
4700 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4701 rs6000_expand_vector_init (target, copy);
4703 /* Insert variable. */
4704 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4705 return;
4708 /* Construct the vector in memory one field at a time
4709 and load the whole vector. */
4710 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4711 for (i = 0; i < n_elts; i++)
4712 emit_move_insn (adjust_address_nv (mem, inner_mode,
4713 i * GET_MODE_SIZE (inner_mode)),
4714 XVECEXP (vals, 0, i));
4715 emit_move_insn (target, mem);
4718 /* Set field ELT of TARGET to VAL. */
4720 void
4721 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4723 enum machine_mode mode = GET_MODE (target);
4724 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4725 rtx reg = gen_reg_rtx (mode);
4726 rtx mask, mem, x;
4727 int width = GET_MODE_SIZE (inner_mode);
4728 int i;
4730 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4732 rtx (*set_func) (rtx, rtx, rtx, rtx)
4733 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4734 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4735 return;
4738 /* Load single variable value. */
4739 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4740 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4741 x = gen_rtx_UNSPEC (VOIDmode,
4742 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4743 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4744 gen_rtvec (2,
4745 gen_rtx_SET (VOIDmode,
4746 reg, mem),
4747 x)));
4749 /* Linear sequence. */
4750 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4751 for (i = 0; i < 16; ++i)
4752 XVECEXP (mask, 0, i) = GEN_INT (i);
4754 /* Set permute mask to insert element into target. */
4755 for (i = 0; i < width; ++i)
4756 XVECEXP (mask, 0, elt*width + i)
4757 = GEN_INT (i + 0x10);
4758 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4759 x = gen_rtx_UNSPEC (mode,
4760 gen_rtvec (3, target, reg,
4761 force_reg (V16QImode, x)),
4762 UNSPEC_VPERM);
4763 emit_insn (gen_rtx_SET (VOIDmode, target, x));
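/* An illustrative instance of the permute mask built above: inserting
   element 1 of a V4SImode target gives the byte-select mask
   { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }; indexes below 16
   copy bytes from the original target, while 16..19 pull the first
   four bytes of REG, which holds the newly loaded value.  */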
4766 /* Extract field ELT from VEC into TARGET. */
4768 void
4769 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4771 enum machine_mode mode = GET_MODE (vec);
4772 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4773 rtx mem;
4775 if (VECTOR_MEM_VSX_P (mode))
4777 switch (mode)
4779 default:
4780 break;
4781 case V2DFmode:
4782 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
4783 return;
4784 case V2DImode:
4785 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
4786 return;
4787 case V4SFmode:
4788 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
4789 return;
4793 /* Allocate mode-sized buffer. */
4794 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4796 emit_move_insn (mem, vec);
4798 /* Add offset to field within buffer matching vector element. */
4799 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
4801 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
4804 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
4805 implement ANDing by the mask IN. */
4806 void
4807 build_mask64_2_operands (rtx in, rtx *out)
4809 #if HOST_BITS_PER_WIDE_INT >= 64
4810 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4811 int shift;
4813 gcc_assert (GET_CODE (in) == CONST_INT);
4815 c = INTVAL (in);
4816 if (c & 1)
4818 /* Assume c initially something like 0x00fff000000fffff. The idea
4819 is to rotate the word so that the middle ^^^^^^ group of zeros
4820 is at the MS end and can be cleared with an rldicl mask. We then
4821 rotate back and clear off the MS ^^ group of zeros with a
4822 second rldicl. */
4823 c = ~c; /* c == 0xff000ffffff00000 */
4824 lsb = c & -c; /* lsb == 0x0000000000100000 */
4825 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4826 c = ~c; /* c == 0x00fff000000fffff */
4827 c &= -lsb; /* c == 0x00fff00000000000 */
4828 lsb = c & -c; /* lsb == 0x0000100000000000 */
4829 c = ~c; /* c == 0xff000fffffffffff */
4830 c &= -lsb; /* c == 0xff00000000000000 */
4831 shift = 0;
4832 while ((lsb >>= 1) != 0)
4833 shift++; /* shift == 44 on exit from loop */
4834 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4835 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4836 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4838 else
4840 /* Assume c initially something like 0xff000f0000000000. The idea
4841 is to rotate the word so that the ^^^ middle group of zeros
4842 is at the LS end and can be cleared with an rldicr mask. We then
4843 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4844 a second rldicr. */
4845 lsb = c & -c; /* lsb == 0x0000010000000000 */
4846 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4847 c = ~c; /* c == 0x00fff0ffffffffff */
4848 c &= -lsb; /* c == 0x00fff00000000000 */
4849 lsb = c & -c; /* lsb == 0x0000100000000000 */
4850 c = ~c; /* c == 0xff000fffffffffff */
4851 c &= -lsb; /* c == 0xff00000000000000 */
4852 shift = 0;
4853 while ((lsb >>= 1) != 0)
4854 shift++; /* shift == 44 on exit from loop */
4855 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4856 m1 >>= shift; /* m1 == 0x0000000000000fff */
4857 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4860 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4861 masks will be all 1's. We are guaranteed more than one transition. */
4862 out[0] = GEN_INT (64 - shift);
4863 out[1] = GEN_INT (m1);
4864 out[2] = GEN_INT (shift);
4865 out[3] = GEN_INT (m2);
4866 #else
4867 (void)in;
4868 (void)out;
4869 gcc_unreachable ();
4870 #endif
4873 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4875 bool
4876 invalid_e500_subreg (rtx op, enum machine_mode mode)
4878 if (TARGET_E500_DOUBLE)
4880 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4881 subreg:TI and reg:TF. Decimal float modes are like integer
4882 modes (only low part of each register used) for this
4883 purpose. */
4884 if (GET_CODE (op) == SUBREG
4885 && (mode == SImode || mode == DImode || mode == TImode
4886 || mode == DDmode || mode == TDmode)
4887 && REG_P (SUBREG_REG (op))
4888 && (GET_MODE (SUBREG_REG (op)) == DFmode
4889 || GET_MODE (SUBREG_REG (op)) == TFmode))
4890 return true;
4892 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4893 reg:TI. */
4894 if (GET_CODE (op) == SUBREG
4895 && (mode == DFmode || mode == TFmode)
4896 && REG_P (SUBREG_REG (op))
4897 && (GET_MODE (SUBREG_REG (op)) == DImode
4898 || GET_MODE (SUBREG_REG (op)) == TImode
4899 || GET_MODE (SUBREG_REG (op)) == DDmode
4900 || GET_MODE (SUBREG_REG (op)) == TDmode))
4901 return true;
4904 if (TARGET_SPE
4905 && GET_CODE (op) == SUBREG
4906 && mode == SImode
4907 && REG_P (SUBREG_REG (op))
4908 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4909 return true;
4911 return false;
4914 /* AIX increases natural record alignment to doubleword if the first
4915 field is an FP double while the FP fields remain word aligned. */
4917 unsigned int
4918 rs6000_special_round_type_align (tree type, unsigned int computed,
4919 unsigned int specified)
4921 unsigned int align = MAX (computed, specified);
4922 tree field = TYPE_FIELDS (type);
4924 /* Skip all non-field decls. */
4925 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4926 field = DECL_CHAIN (field);
4928 if (field != NULL && field != type)
4930 type = TREE_TYPE (field);
4931 while (TREE_CODE (type) == ARRAY_TYPE)
4932 type = TREE_TYPE (type);
4934 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4935 align = MAX (align, 64);
4938 return align;
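/* An illustrative instance of the rule above: on AIX,
   struct { double d; int i; } would normally have 32-bit record
   alignment, but because its first field is a DFmode double the
   function raises the record alignment to 64 bits.  */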
4941 /* Darwin increases record alignment to the natural alignment of
4942 the first field. */
4944 unsigned int
4945 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
4946 unsigned int specified)
4948 unsigned int align = MAX (computed, specified);
4950 if (TYPE_PACKED (type))
4951 return align;
4953 /* Find the first field, looking down into aggregates. */
4954 do {
4955 tree field = TYPE_FIELDS (type);
4956 /* Skip all non-field decls. */
4957 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4958 field = DECL_CHAIN (field);
4959 if (! field)
4960 break;
4961 /* A packed field does not contribute any extra alignment. */
4962 if (DECL_PACKED (field))
4963 return align;
4964 type = TREE_TYPE (field);
4965 while (TREE_CODE (type) == ARRAY_TYPE)
4966 type = TREE_TYPE (type);
4967 } while (AGGREGATE_TYPE_P (type));
4969 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
4970 align = MAX (align, TYPE_ALIGN (type));
4972 return align;
4975 /* Return 1 for an operand in small memory on V.4/eabi. */
4977 int
4978 small_data_operand (rtx op ATTRIBUTE_UNUSED,
4979 enum machine_mode mode ATTRIBUTE_UNUSED)
4981 #if TARGET_ELF
4982 rtx sym_ref;
4984 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
4985 return 0;
4987 if (DEFAULT_ABI != ABI_V4)
4988 return 0;
4990 /* Vector and float memory instructions have a limited offset on the
4991 SPE, so using a vector or float variable directly as an operand is
4992 not useful. */
4993 if (TARGET_SPE
4994 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
4995 return 0;
4997 if (GET_CODE (op) == SYMBOL_REF)
4998 sym_ref = op;
5000 else if (GET_CODE (op) != CONST
5001 || GET_CODE (XEXP (op, 0)) != PLUS
5002 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5003 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5004 return 0;
5006 else
5008 rtx sum = XEXP (op, 0);
5009 HOST_WIDE_INT summand;
5011 /* We have to be careful here, because it is the referenced address
5012 that must be 32k from _SDA_BASE_, not just the symbol. */
5013 summand = INTVAL (XEXP (sum, 1));
5014 if (summand < 0 || summand > g_switch_value)
5015 return 0;
5017 sym_ref = XEXP (sum, 0);
5020 return SYMBOL_REF_SMALL_P (sym_ref);
5021 #else
5022 return 0;
5023 #endif
5026 /* Return true if either operand is a general purpose register. */
5028 bool
5029 gpr_or_gpr_p (rtx op0, rtx op1)
5031 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5032 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5035 /* Given an address, return a constant offset term if one exists. */
5037 static rtx
5038 address_offset (rtx op)
5040 if (GET_CODE (op) == PRE_INC
5041 || GET_CODE (op) == PRE_DEC)
5042 op = XEXP (op, 0);
5043 else if (GET_CODE (op) == PRE_MODIFY
5044 || GET_CODE (op) == LO_SUM)
5045 op = XEXP (op, 1);
5047 if (GET_CODE (op) == CONST)
5048 op = XEXP (op, 0);
5050 if (GET_CODE (op) == PLUS)
5051 op = XEXP (op, 1);
5053 if (CONST_INT_P (op))
5054 return op;
5056 return NULL_RTX;
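/* Illustrative instances of address_offset:
   (plus (reg) (const_int 16)) yields (const_int 16);
   (lo_sum (reg) (const (plus (symbol_ref) (const_int 8)))) yields
   (const_int 8); a bare (reg) yields NULL_RTX.  */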
5059 /* Return true if the MEM operand is a memory operand suitable for use
5060 with a (full width, possibly multiple) gpr load/store. On
5061 powerpc64 this means the offset must be divisible by 4.
5062 Implements 'Y' constraint.
5064 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5065 a constraint function we know the operand has satisfied a suitable
5066 memory predicate. Also accept some odd rtl generated by reload
5067 (see rs6000_legitimize_reload_address for various forms). It is
5068 important that reload rtl be accepted by appropriate constraints
5069 but not by the operand predicate.
5071 Offsetting a lo_sum should not be allowed, except where we know by
5072 alignment that a 32k boundary is not crossed, but see the ???
5073 comment in rs6000_legitimize_reload_address. Note that by
5074 "offsetting" here we mean a further offset to access parts of the
5075 MEM. It's fine to have a lo_sum where the inner address is offset
5076 from a sym, since the same sym+offset will appear in the high part
5077 of the address calculation. */
5079 bool
5080 mem_operand_gpr (rtx op, enum machine_mode mode)
5082 unsigned HOST_WIDE_INT offset;
5083 int extra;
5084 rtx addr = XEXP (op, 0);
5086 op = address_offset (addr);
5087 if (op == NULL_RTX)
5088 return true;
5090 offset = INTVAL (op);
5091 if (TARGET_POWERPC64 && (offset & 3) != 0)
5092 return false;
5094 if (GET_CODE (addr) == LO_SUM)
5095 /* We know by alignment that ABI_AIX medium/large model toc refs
5096 will not cross a 32k boundary, since all entries in the
5097 constant pool are naturally aligned and we check alignment for
5098 other medium model toc-relative addresses. For ABI_V4 and
5099 ABI_DARWIN lo_sum addresses, we just check that 64-bit
5100 offsets are 4-byte aligned. */
5101 return true;
5103 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5104 gcc_assert (extra >= 0);
5105 return offset + 0x8000 < 0x10000u - extra;
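/* Illustrative instances of the checks above: a DImode access on
   powerpc64 at offset 32766 is rejected because the offset is not a
   multiple of 4; a TImode access (extra = 8) at offset 32756 is
   accepted, while offset 32760 is rejected because 32760 + 0x8000 is
   not below 0x10000 - 8.  */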
5108 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5110 static bool
5111 reg_offset_addressing_ok_p (enum machine_mode mode)
5113 switch (mode)
5115 case V16QImode:
5116 case V8HImode:
5117 case V4SFmode:
5118 case V4SImode:
5119 case V2DFmode:
5120 case V2DImode:
5121 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
5122 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5123 return false;
5124 break;
5126 case V4HImode:
5127 case V2SImode:
5128 case V1DImode:
5129 case V2SFmode:
5130 /* Paired vector modes. Only reg+reg addressing is valid. */
5131 if (TARGET_PAIRED_FLOAT)
5132 return false;
5133 break;
5135 default:
5136 break;
5139 return true;
5142 static bool
5143 virtual_stack_registers_memory_p (rtx op)
5145 int regnum;
5147 if (GET_CODE (op) == REG)
5148 regnum = REGNO (op);
5150 else if (GET_CODE (op) == PLUS
5151 && GET_CODE (XEXP (op, 0)) == REG
5152 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5153 regnum = REGNO (XEXP (op, 0));
5155 else
5156 return false;
5158 return (regnum >= FIRST_VIRTUAL_REGISTER
5159 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5162 /* Return true if memory accesses to OP are known to never straddle
5163 a 32k boundary. */
5165 static bool
5166 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5167 enum machine_mode mode)
5169 tree decl, type;
5170 unsigned HOST_WIDE_INT dsize, dalign;
5172 if (GET_CODE (op) != SYMBOL_REF)
5173 return false;
5175 decl = SYMBOL_REF_DECL (op);
5176 if (!decl)
5178 if (GET_MODE_SIZE (mode) == 0)
5179 return false;
5181 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5182 replacing memory addresses with an anchor plus offset. We
5183 could find the decl by rummaging around in the block->objects
5184 VEC for the given offset but that seems like too much work. */
5185 dalign = 1;
5186 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5187 && SYMBOL_REF_ANCHOR_P (op)
5188 && SYMBOL_REF_BLOCK (op) != NULL)
5190 struct object_block *block = SYMBOL_REF_BLOCK (op);
5191 HOST_WIDE_INT lsb, mask;
5193 /* Given the alignment of the block.. */
5194 dalign = block->alignment;
5195 mask = dalign / BITS_PER_UNIT - 1;
5197 /* ..and the combined offset of the anchor and any offset
5198 to this block object.. */
5199 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5200 lsb = offset & -offset;
5202 /* ..find how many bits of the alignment we know for the
5203 object. */
5204 mask &= lsb - 1;
5205 dalign = mask + 1;
5207 return dalign >= GET_MODE_SIZE (mode);
5210 if (DECL_P (decl))
5212 if (TREE_CODE (decl) == FUNCTION_DECL)
5213 return true;
5215 if (!DECL_SIZE_UNIT (decl))
5216 return false;
5218 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5219 return false;
5221 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5222 if (dsize > 32768)
5223 return false;
5225 dalign = DECL_ALIGN_UNIT (decl);
5226 return dalign >= dsize;
5229 type = TREE_TYPE (decl);
5231 if (TREE_CODE (decl) == STRING_CST)
5232 dsize = TREE_STRING_LENGTH (decl);
5233 else if (TYPE_SIZE_UNIT (type)
5234 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5235 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5236 else
5237 return false;
5238 if (dsize > 32768)
5239 return false;
5241 dalign = TYPE_ALIGN (type);
5242 if (CONSTANT_CLASS_P (decl))
5243 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5244 else
5245 dalign = DATA_ALIGNMENT (decl, dalign);
5246 dalign /= BITS_PER_UNIT;
5247 return dalign >= dsize;
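/* An illustrative instance of the section-anchor arithmetic above:
   with a block alignment of 128 bits (mask 15) and a combined offset
   of 40, lsb is 8, mask becomes 7, and the provable alignment is 8
   bytes; an 8-byte access is then known not to straddle a 32k
   boundary, whereas a 16-byte access is not.  */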
5250 static bool
5251 constant_pool_expr_p (rtx op)
5253 rtx base, offset;
5255 split_const (op, &base, &offset);
5256 return (GET_CODE (base) == SYMBOL_REF
5257 && CONSTANT_POOL_ADDRESS_P (base)
5258 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5261 static const_rtx tocrel_base, tocrel_offset;
5263 /* Return true if OP is a toc pointer relative address (the output
5264 of create_TOC_reference). If STRICT, do not match high part or
5265 non-split -mcmodel=large/medium toc pointer relative addresses. */
5267 bool
5268 toc_relative_expr_p (const_rtx op, bool strict)
5270 if (!TARGET_TOC)
5271 return false;
5273 if (TARGET_CMODEL != CMODEL_SMALL)
5275 /* Only match the low part. */
5276 if (GET_CODE (op) == LO_SUM
5277 && REG_P (XEXP (op, 0))
5278 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5279 op = XEXP (op, 1);
5280 else if (strict)
5281 return false;
5284 tocrel_base = op;
5285 tocrel_offset = const0_rtx;
5286 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5288 tocrel_base = XEXP (op, 0);
5289 tocrel_offset = XEXP (op, 1);
5292 return (GET_CODE (tocrel_base) == UNSPEC
5293 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5296 /* Return true if X is a constant pool address, and also for cmodel=medium
5297 if X is a toc-relative address known to be offsettable within MODE. */
5299 bool
5300 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5301 bool strict)
5303 return (toc_relative_expr_p (x, strict)
5304 && (TARGET_CMODEL != CMODEL_MEDIUM
5305 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5306 || mode == QImode
5307 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5308 INTVAL (tocrel_offset), mode)));
5311 static bool
5312 legitimate_small_data_p (enum machine_mode mode, rtx x)
5314 return (DEFAULT_ABI == ABI_V4
5315 && !flag_pic && !TARGET_TOC
5316 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5317 && small_data_operand (x, mode));
5320 /* SPE offset addressing is limited to 5 bits' worth of double words, i.e. byte offsets 0, 8, ..., 248. */
5321 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
5323 bool
5324 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5325 bool strict, bool worst_case)
5327 unsigned HOST_WIDE_INT offset;
5328 unsigned int extra;
5330 if (GET_CODE (x) != PLUS)
5331 return false;
5332 if (!REG_P (XEXP (x, 0)))
5333 return false;
5334 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5335 return false;
5336 if (!reg_offset_addressing_ok_p (mode))
5337 return virtual_stack_registers_memory_p (x);
5338 if (legitimate_constant_pool_address_p (x, mode, strict))
5339 return true;
5340 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5341 return false;
5343 offset = INTVAL (XEXP (x, 1));
5344 extra = 0;
5345 switch (mode)
5347 case V4HImode:
5348 case V2SImode:
5349 case V1DImode:
5350 case V2SFmode:
5351 /* SPE vector modes. */
5352 return SPE_CONST_OFFSET_OK (offset);
5354 case DFmode:
5355 case DDmode:
5356 case DImode:
5357 /* On e500v2, we may have:
5359 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
5361 Which gets addressed with evldd instructions. */
5362 if (TARGET_E500_DOUBLE)
5363 return SPE_CONST_OFFSET_OK (offset);
5365 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5366 addressing. */
5367 if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
5368 return false;
5370 if (!worst_case)
5371 break;
5372 if (!TARGET_POWERPC64)
5373 extra = 4;
5374 else if (offset & 3)
5375 return false;
5376 break;
5378 case TFmode:
5379 case TDmode:
5380 case TImode:
5381 if (TARGET_E500_DOUBLE)
5382 return (SPE_CONST_OFFSET_OK (offset)
5383 && SPE_CONST_OFFSET_OK (offset + 8));
5385 extra = 8;
5386 if (!worst_case)
5387 break;
5388 if (!TARGET_POWERPC64)
5389 extra = 12;
5390 else if (offset & 3)
5391 return false;
5392 break;
5394 default:
5395 break;
5398 offset += 0x8000;
5399 return offset < 0x10000 - extra;
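/* Illustrative instances of the final range check: for SImode,
   extra = 0 and any offset in [-32768, 32767] is accepted; for
   TImode in the worst case on 32-bit (extra = 12), the multi-insn
   sequence must also reach offset + 12, so only offsets up to 32755
   pass.  */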
5402 bool
5403 legitimate_indexed_address_p (rtx x, int strict)
5405 rtx op0, op1;
5407 if (GET_CODE (x) != PLUS)
5408 return false;
5410 op0 = XEXP (x, 0);
5411 op1 = XEXP (x, 1);
5413 /* Recognize the rtl generated by reload which we know will later be
5414 replaced with proper base and index regs. */
5415 if (!strict
5416 && reload_in_progress
5417 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5418 && REG_P (op1))
5419 return true;
5421 return (REG_P (op0) && REG_P (op1)
5422 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5423 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5424 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5425 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5428 bool
5429 avoiding_indexed_address_p (enum machine_mode mode)
5431 /* Avoid indexed addressing for modes that have non-indexed
5432 load/store instruction forms. */
5433 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5436 inline bool
5437 legitimate_indirect_address_p (rtx x, int strict)
5439 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5442 bool
5443 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5445 if (!TARGET_MACHO || !flag_pic
5446 || mode != SImode || GET_CODE (x) != MEM)
5447 return false;
5448 x = XEXP (x, 0);
5450 if (GET_CODE (x) != LO_SUM)
5451 return false;
5452 if (GET_CODE (XEXP (x, 0)) != REG)
5453 return false;
5454 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5455 return false;
5456 x = XEXP (x, 1);
5458 return CONSTANT_P (x);
5461 static bool
5462 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5464 if (GET_CODE (x) != LO_SUM)
5465 return false;
5466 if (GET_CODE (XEXP (x, 0)) != REG)
5467 return false;
5468 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5469 return false;
5470 /* Restrict addressing for DI because of our SUBREG hackery. */
5471 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
5472 return false;
5473 x = XEXP (x, 1);
5475 if (TARGET_ELF || TARGET_MACHO)
5477 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5478 return false;
5479 if (TARGET_TOC)
5480 return false;
5481 if (GET_MODE_NUNITS (mode) != 1)
5482 return false;
5483 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5484 && !(/* ??? Assume floating point reg based on mode? */
5485 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5486 && (mode == DFmode || mode == DDmode)))
5487 return false;
5489 return CONSTANT_P (x);
5492 return false;
5496 /* Try machine-dependent ways of modifying an illegitimate address
5497 to be legitimate. If we find one, return the new, valid address.
5498 This is used from only one place: `memory_address' in explow.c.
5500 OLDX is the address as it was before break_out_memory_refs was
5501 called. In some cases it is useful to look at this to decide what
5502 needs to be done.
5504 It is always safe for this function to do nothing. It exists to
5505 recognize opportunities to optimize the output.
5507 On RS/6000, first check for the sum of a register with a constant
5508 integer that is out of range. If so, generate code to add the
5509 constant with the low-order 16 bits masked to the register and force
5510 this result into another register (this can be done with `cau').
5511 Then generate an address of REG+(CONST&0xffff), allowing for the
5512 possibility of bit 16 being a one.
5514 Then check for the sum of a register and something not constant, try to
5515 load the other things into a register and return the sum. */
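/* A worked instance of the out-of-range case handled below
   (illustrative): for (plus (reg) (const_int 0x12345)), low_int is
   ((0x2345 ^ 0x8000) - 0x8000) = 0x2345 and high_int is 0x10000, so
   an addis adds 0x10000 to the register and the returned address is
   (plus (new reg) (const_int 0x2345)), which fits a 16-bit
   displacement.  */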
5517 static rtx
5518 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5519 enum machine_mode mode)
5521 unsigned int extra;
5523 if (!reg_offset_addressing_ok_p (mode))
5525 if (virtual_stack_registers_memory_p (x))
5526 return x;
5528 /* In theory we should not be seeing addresses of the form reg+0,
5529 but just in case it is generated, optimize it away. */
5530 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5531 return force_reg (Pmode, XEXP (x, 0));
5533 /* Make sure both operands are registers. */
5534 else if (GET_CODE (x) == PLUS)
5535 return gen_rtx_PLUS (Pmode,
5536 force_reg (Pmode, XEXP (x, 0)),
5537 force_reg (Pmode, XEXP (x, 1)));
5538 else
5539 return force_reg (Pmode, x);
5541 if (GET_CODE (x) == SYMBOL_REF)
5543 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5544 if (model != 0)
5545 return rs6000_legitimize_tls_address (x, model);
5548 extra = 0;
5549 switch (mode)
5551 case TFmode:
5552 case TDmode:
5553 case TImode:
5554 /* As in legitimate_offset_address_p we do not assume
5555 worst-case. The mode here is just a hint as to the registers
5556 used. A TImode is usually in gprs, but may actually be in
5557 fprs. Leave worst-case scenario for reload to handle via
5558 insn constraints. */
5559 extra = 8;
5560 break;
5561 default:
5562 break;
5565 if (GET_CODE (x) == PLUS
5566 && GET_CODE (XEXP (x, 0)) == REG
5567 && GET_CODE (XEXP (x, 1)) == CONST_INT
5568 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5569 >= 0x10000 - extra)
5570 && !(SPE_VECTOR_MODE (mode)
5571 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
5573 HOST_WIDE_INT high_int, low_int;
5574 rtx sum;
5575 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5576 if (low_int >= 0x8000 - extra)
5577 low_int = 0;
5578 high_int = INTVAL (XEXP (x, 1)) - low_int;
5579 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5580 GEN_INT (high_int)), 0);
5581 return plus_constant (Pmode, sum, low_int);
5583 else if (GET_CODE (x) == PLUS
5584 && GET_CODE (XEXP (x, 0)) == REG
5585 && GET_CODE (XEXP (x, 1)) != CONST_INT
5586 && GET_MODE_NUNITS (mode) == 1
5587 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5588 || (/* ??? Assume floating point reg based on mode? */
5589 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5590 && (mode == DFmode || mode == DDmode)))
5591 && !avoiding_indexed_address_p (mode))
5593 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5594 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5596 else if (SPE_VECTOR_MODE (mode)
5597 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
5599 if (mode == DImode)
5600 return x;
5601 /* We accept [reg + reg] and [reg + OFFSET]. */
5603 if (GET_CODE (x) == PLUS)
5605 rtx op1 = XEXP (x, 0);
5606 rtx op2 = XEXP (x, 1);
5607 rtx y;
5609 op1 = force_reg (Pmode, op1);
5611 if (GET_CODE (op2) != REG
5612 && (GET_CODE (op2) != CONST_INT
5613 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5614 || (GET_MODE_SIZE (mode) > 8
5615 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5616 op2 = force_reg (Pmode, op2);
5618 /* We can't always do [reg + reg] for these, because [reg +
5619 reg + offset] is not a legitimate addressing mode. */
5620 y = gen_rtx_PLUS (Pmode, op1, op2);
5622 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5623 return force_reg (Pmode, y);
5624 else
5625 return y;
5628 return force_reg (Pmode, x);
5630 else if ((TARGET_ELF
5631 #if TARGET_MACHO
5632 || !MACHO_DYNAMIC_NO_PIC_P
5633 #endif
5635 && TARGET_32BIT
5636 && TARGET_NO_TOC
5637 && ! flag_pic
5638 && GET_CODE (x) != CONST_INT
5639 && GET_CODE (x) != CONST_DOUBLE
5640 && CONSTANT_P (x)
5641 && GET_MODE_NUNITS (mode) == 1
5642 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5643 || (/* ??? Assume floating point reg based on mode? */
5644 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5645 && (mode == DFmode || mode == DDmode))))
5647 rtx reg = gen_reg_rtx (Pmode);
5648 if (TARGET_ELF)
5649 emit_insn (gen_elf_high (reg, x));
5650 else
5651 emit_insn (gen_macho_high (reg, x));
5652 return gen_rtx_LO_SUM (Pmode, reg, x);
5654 else if (TARGET_TOC
5655 && GET_CODE (x) == SYMBOL_REF
5656 && constant_pool_expr_p (x)
5657 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5658 return create_TOC_reference (x, NULL_RTX);
5659 else
5660 return x;
5663 /* Debug version of rs6000_legitimize_address. */
5664 static rtx
5665 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5667 rtx ret;
5668 rtx insns;
5670 start_sequence ();
5671 ret = rs6000_legitimize_address (x, oldx, mode);
5672 insns = get_insns ();
5673 end_sequence ();
5675 if (ret != x)
5677 fprintf (stderr,
5678 "\nrs6000_legitimize_address: mode %s, old code %s, "
5679 "new code %s, modified\n",
5680 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5681 GET_RTX_NAME (GET_CODE (ret)));
5683 fprintf (stderr, "Original address:\n");
5684 debug_rtx (x);
5686 fprintf (stderr, "oldx:\n");
5687 debug_rtx (oldx);
5689 fprintf (stderr, "New address:\n");
5690 debug_rtx (ret);
5692 if (insns)
5694 fprintf (stderr, "Insns added:\n");
5695 debug_rtx_list (insns, 20);
5698 else
5700 fprintf (stderr,
5701 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5702 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5704 debug_rtx (x);
5707 if (insns)
5708 emit_insn (insns);
5710 return ret;
5713 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5714 We need to emit DTP-relative relocations. */
5716 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5717 static void
5718 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5720 switch (size)
5722 case 4:
5723 fputs ("\t.long\t", file);
5724 break;
5725 case 8:
5726 fputs (DOUBLE_INT_ASM_OP, file);
5727 break;
5728 default:
5729 gcc_unreachable ();
5731 output_addr_const (file, x);
5732 fputs ("@dtprel+0x8000", file);
5735 /* In the name of slightly smaller debug output, and to cater to
5736 general assembler lossage, recognize various UNSPEC sequences
5737 and turn them back into a direct symbol reference. */
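/* For example (illustrative only, medium code model): an address like
   (lo_sum (reg r9) (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL))
   is unwrapped below to the plain (symbol_ref "x"), with any CONST_INT
   addend re-applied on top of the recovered symbol.  */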
5739 static rtx
5740 rs6000_delegitimize_address (rtx orig_x)
5742 rtx x, y, offset;
5744 orig_x = delegitimize_mem_from_attrs (orig_x);
5745 x = orig_x;
5746 if (MEM_P (x))
5747 x = XEXP (x, 0);
5749 y = x;
5750 if (TARGET_CMODEL != CMODEL_SMALL
5751 && GET_CODE (y) == LO_SUM)
5752 y = XEXP (y, 1);
5754 offset = NULL_RTX;
5755 if (GET_CODE (y) == PLUS
5756 && GET_MODE (y) == Pmode
5757 && CONST_INT_P (XEXP (y, 1)))
5759 offset = XEXP (y, 1);
5760 y = XEXP (y, 0);
5763 if (GET_CODE (y) == UNSPEC
5764 && XINT (y, 1) == UNSPEC_TOCREL)
5766 #ifdef ENABLE_CHECKING
5767 if (REG_P (XVECEXP (y, 0, 1))
5768 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
5770 /* All good. */
5772 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
5774 /* Weirdness alert. df_note_compute can replace r2 with a
5775 debug_expr when this unspec is in a debug_insn.
5776 Seen in gcc.dg/pr51957-1.c */
5778 else
5780 debug_rtx (orig_x);
5781 abort ();
5783 #endif
5784 y = XVECEXP (y, 0, 0);
5785 if (offset != NULL_RTX)
5786 y = gen_rtx_PLUS (Pmode, y, offset);
5787 if (!MEM_P (orig_x))
5788 return y;
5789 else
5790 return replace_equiv_address_nv (orig_x, y);
5793 if (TARGET_MACHO
5794 && GET_CODE (orig_x) == LO_SUM
5795 && GET_CODE (XEXP (orig_x, 1)) == CONST)
5797 y = XEXP (XEXP (orig_x, 1), 0);
5798 if (GET_CODE (y) == UNSPEC
5799 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5800 return XVECEXP (y, 0, 0);
5803 return orig_x;
5806 /* Return true if X shouldn't be emitted into the debug info.
5807 The linker doesn't like .toc section references from
5808 .debug_* sections, so reject .toc section symbols. */
5810 static bool
5811 rs6000_const_not_ok_for_debug_p (rtx x)
5813 if (GET_CODE (x) == SYMBOL_REF
5814 && CONSTANT_POOL_ADDRESS_P (x))
5816 rtx c = get_pool_constant (x);
5817 enum machine_mode cmode = get_pool_mode (x);
5818 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
5819 return true;
5822 return false;
5825 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5827 static GTY(()) rtx rs6000_tls_symbol;
5828 static rtx
5829 rs6000_tls_get_addr (void)
5831 if (!rs6000_tls_symbol)
5832 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5834 return rs6000_tls_symbol;
5837 /* Construct the SYMBOL_REF for TLS GOT references. */
5839 static GTY(()) rtx rs6000_got_symbol;
5840 static rtx
5841 rs6000_got_sym (void)
5843 if (!rs6000_got_symbol)
5845 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5846 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5847 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5850 return rs6000_got_symbol;
5853 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5854 this (thread-local) address. */
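/* A rough illustration (the assembly is a sketch, not verbatim
   compiler output): for TLS_MODEL_LOCAL_EXEC with rs6000_tls_size == 16
   on a 64-bit target, the code below emits a single
   "addi dest,r13,sym@tprel" against the r13 thread pointer; 32-bit
   targets use r2 instead.  The dynamic models instead build a call
   to __tls_get_addr.  */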
5856 static rtx
5857 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
5859 rtx dest, insn;
5861 dest = gen_reg_rtx (Pmode);
5862 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
5864 rtx tlsreg;
5866 if (TARGET_64BIT)
5868 tlsreg = gen_rtx_REG (Pmode, 13);
5869 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
5871 else
5873 tlsreg = gen_rtx_REG (Pmode, 2);
5874 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
5876 emit_insn (insn);
5878 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
5880 rtx tlsreg, tmp;
5882 tmp = gen_reg_rtx (Pmode);
5883 if (TARGET_64BIT)
5885 tlsreg = gen_rtx_REG (Pmode, 13);
5886 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
5888 else
5890 tlsreg = gen_rtx_REG (Pmode, 2);
5891 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
5893 emit_insn (insn);
5894 if (TARGET_64BIT)
5895 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
5896 else
5897 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
5898 emit_insn (insn);
5900 else
5902 rtx r3, got, tga, tmp1, tmp2, call_insn;
5904 /* We currently use relocations like @got@tlsgd for tls, which
5905 means the linker will handle allocation of tls entries, placing
5906 them in the .got section. So use a pointer to the .got section,
5907 not one to secondary TOC sections used by 64-bit -mminimal-toc,
5908 or to secondary GOT sections used by 32-bit -fPIC. */
5909 if (TARGET_64BIT)
5910 got = gen_rtx_REG (Pmode, 2);
5911 else
5913 if (flag_pic == 1)
5914 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
5915 else
5917 rtx gsym = rs6000_got_sym ();
5918 got = gen_reg_rtx (Pmode);
5919 if (flag_pic == 0)
5920 rs6000_emit_move (got, gsym, Pmode);
5921 else
5923 rtx mem, lab, last;
5925 tmp1 = gen_reg_rtx (Pmode);
5926 tmp2 = gen_reg_rtx (Pmode);
5927 mem = gen_const_mem (Pmode, tmp1);
5928 lab = gen_label_rtx ();
5929 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
5930 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
5931 if (TARGET_LINK_STACK)
5932 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
5933 emit_move_insn (tmp2, mem);
5934 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
5935 set_unique_reg_note (last, REG_EQUAL, gsym);
5940 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
5942 tga = rs6000_tls_get_addr ();
5943 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
5944 1, const0_rtx, Pmode);
5946 r3 = gen_rtx_REG (Pmode, 3);
5947 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5948 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
5949 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5950 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
5951 else if (DEFAULT_ABI == ABI_V4)
5952 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
5953 else
5954 gcc_unreachable ();
5955 call_insn = last_call_insn ();
5956 PATTERN (call_insn) = insn;
5957 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5958 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5959 pic_offset_table_rtx);
5961 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
5963 tga = rs6000_tls_get_addr ();
5964 tmp1 = gen_reg_rtx (Pmode);
5965 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
5966 1, const0_rtx, Pmode);
5968 r3 = gen_rtx_REG (Pmode, 3);
5969 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5970 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
5971 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5972 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
5973 else if (DEFAULT_ABI == ABI_V4)
5974 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
5975 else
5976 gcc_unreachable ();
5977 call_insn = last_call_insn ();
5978 PATTERN (call_insn) = insn;
5979 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5980 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5981 pic_offset_table_rtx);
5983 if (rs6000_tls_size == 16)
5985 if (TARGET_64BIT)
5986 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
5987 else
5988 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
5990 else if (rs6000_tls_size == 32)
5992 tmp2 = gen_reg_rtx (Pmode);
5993 if (TARGET_64BIT)
5994 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
5995 else
5996 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
5997 emit_insn (insn);
5998 if (TARGET_64BIT)
5999 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6000 else
6001 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6003 else
6005 tmp2 = gen_reg_rtx (Pmode);
6006 if (TARGET_64BIT)
6007 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6008 else
6009 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6010 emit_insn (insn);
6011 insn = gen_rtx_SET (Pmode, dest,
6012 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6014 emit_insn (insn);
6016 else
6018 /* IE, or 64-bit offset LE. */
6019 tmp2 = gen_reg_rtx (Pmode);
6020 if (TARGET_64BIT)
6021 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6022 else
6023 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6024 emit_insn (insn);
6025 if (TARGET_64BIT)
6026 insn = gen_tls_tls_64 (dest, tmp2, addr);
6027 else
6028 insn = gen_tls_tls_32 (dest, tmp2, addr);
6029 emit_insn (insn);
6033 return dest;
6036 /* Return 1 if X contains a thread-local symbol. */
6038 static bool
6039 rs6000_tls_referenced_p (rtx x)
6041 if (! TARGET_HAVE_TLS)
6042 return false;
6044 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6047 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6049 static bool
6050 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6052 if (GET_CODE (x) == HIGH
6053 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6054 return true;
6056 return rs6000_tls_referenced_p (x);
6059 /* Return 1 if *X is a thread-local symbol. This is the same as
6060 rs6000_tls_symbol_ref except for the type of the unused argument. */
6062 static int
6063 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6065 return RS6000_SYMBOL_REF_TLS_P (*x);
6068 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6069 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6070 can be addressed relative to the toc pointer. */
6072 static bool
6073 use_toc_relative_ref (rtx sym)
6075 return ((constant_pool_expr_p (sym)
6076 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6077 get_pool_mode (sym)))
6078 || (TARGET_CMODEL == CMODEL_MEDIUM
6079 && !CONSTANT_POOL_ADDRESS_P (sym)
6080 && SYMBOL_REF_LOCAL_P (sym)));
6083 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6084 replace the input X, or the original X if no replacement is called for.
6085 The output parameter *WIN is 1 if the calling macro should goto WIN,
6086 0 if it should not.
6088 For RS/6000, we wish to handle large displacements off a base
6089 register by splitting the addend across an addi/addis and the mem insn.
6090 This cuts the number of extra insns needed from 3 to 1.
6092 On Darwin, we use this to generate code for floating point constants.
6093 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6094 The Darwin code is inside #if TARGET_MACHO because only then are the
6095 machopic_* functions defined. */
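/* Worked example of the displacement split handled below (a sketch):
   reloading (plus (reg r9) (const_int 0x12340)) computes
       low  = ((0x12340 & 0xffff) ^ 0x8000) - 0x8000 = 0x2340
       high = 0x12340 - 0x2340 = 0x10000
   and returns (plus (plus (reg r9) (const_int 0x10000))
   (const_int 0x2340)), asking reload to put only the inner PLUS
   into a base register.  */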
6096 static rtx
6097 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6098 int opnum, int type,
6099 int ind_levels ATTRIBUTE_UNUSED, int *win)
6101 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6103 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6104 DFmode/DImode MEM. */
6105 if (reg_offset_p
6106 && opnum == 1
6107 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6108 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6109 reg_offset_p = false;
6111 /* We must recognize output that we have already generated ourselves. */
6112 if (GET_CODE (x) == PLUS
6113 && GET_CODE (XEXP (x, 0)) == PLUS
6114 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6115 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6116 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6118 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6119 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6120 opnum, (enum reload_type) type);
6121 *win = 1;
6122 return x;
6125 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6126 if (GET_CODE (x) == LO_SUM
6127 && GET_CODE (XEXP (x, 0)) == HIGH)
6129 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6130 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6131 opnum, (enum reload_type) type);
6132 *win = 1;
6133 return x;
6136 #if TARGET_MACHO
6137 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6138 && GET_CODE (x) == LO_SUM
6139 && GET_CODE (XEXP (x, 0)) == PLUS
6140 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6141 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6142 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6143 && machopic_operand_p (XEXP (x, 1)))
6145 /* Result of previous invocation of this function on Darwin
6146 floating point constant. */
6147 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6148 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6149 opnum, (enum reload_type) type);
6150 *win = 1;
6151 return x;
6153 #endif
6155 if (TARGET_CMODEL != CMODEL_SMALL
6156 && reg_offset_p
6157 && small_toc_ref (x, VOIDmode))
6159 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6160 x = gen_rtx_LO_SUM (Pmode, hi, x);
6161 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6162 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6163 opnum, (enum reload_type) type);
6164 *win = 1;
6165 return x;
6168 /* Force ld/std non-word aligned offset into base register by wrapping
6169 in offset 0. */
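/* E.g. (a sketch): 64-bit "ld" needs a DS-form offset that is a
   multiple of 4, so a DImode access at (plus (reg r9) (const_int 6))
   becomes (plus (plus (reg r9) (const_int 6)) (const_int 0)) and the
   inner sum is reloaded into a base register.  */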
6170 if (GET_CODE (x) == PLUS
6171 && GET_CODE (XEXP (x, 0)) == REG
6172 && REGNO (XEXP (x, 0)) < 32
6173 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6174 && GET_CODE (XEXP (x, 1)) == CONST_INT
6175 && reg_offset_p
6176 && (INTVAL (XEXP (x, 1)) & 3) != 0
6177 && VECTOR_MEM_NONE_P (mode)
6178 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
6179 && TARGET_POWERPC64)
6181 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
6182 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6183 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6184 opnum, (enum reload_type) type);
6185 *win = 1;
6186 return x;
6189 if (GET_CODE (x) == PLUS
6190 && GET_CODE (XEXP (x, 0)) == REG
6191 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6192 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6193 && GET_CODE (XEXP (x, 1)) == CONST_INT
6194 && reg_offset_p
6195 && !SPE_VECTOR_MODE (mode)
6196 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6197 || mode == DDmode || mode == TDmode
6198 || mode == DImode))
6199 && VECTOR_MEM_NONE_P (mode))
6201 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6202 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6203 HOST_WIDE_INT high
6204 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6206 /* Check for 32-bit overflow. */
6207 if (high + low != val)
6209 *win = 0;
6210 return x;
6213 /* Reload the high part into a base reg; leave the low part
6214 in the mem directly. */
6216 x = gen_rtx_PLUS (GET_MODE (x),
6217 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6218 GEN_INT (high)),
6219 GEN_INT (low));
6221 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6222 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6223 opnum, (enum reload_type) type);
6224 *win = 1;
6225 return x;
6228 if (GET_CODE (x) == SYMBOL_REF
6229 && reg_offset_p
6230 && VECTOR_MEM_NONE_P (mode)
6231 && !SPE_VECTOR_MODE (mode)
6232 #if TARGET_MACHO
6233 && DEFAULT_ABI == ABI_DARWIN
6234 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6235 && machopic_symbol_defined_p (x)
6236 #else
6237 && DEFAULT_ABI == ABI_V4
6238 && !flag_pic
6239 #endif
6240 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6241 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6242 without fprs.
6243 ??? Assume floating point reg based on mode? This assumption is
6244 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6245 where reload ends up doing a DFmode load of a constant from
6246 mem using two gprs. Unfortunately, at this point reload
6247 hasn't yet selected regs so poking around in reload data
6248 won't help and even if we could figure out the regs reliably,
6249 we'd still want to allow this transformation when the mem is
6250 naturally aligned. Since we say the address is good here, we
6251 can't disable offsets from LO_SUMs in mem_operand_gpr.
6252 FIXME: Allow offset from lo_sum for other modes too, when
6253 mem is sufficiently aligned. */
6254 && mode != TFmode
6255 && mode != TDmode
6256 && (mode != DImode || TARGET_POWERPC64)
6257 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6258 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6260 #if TARGET_MACHO
6261 if (flag_pic)
6263 rtx offset = machopic_gen_offset (x);
6264 x = gen_rtx_LO_SUM (GET_MODE (x),
6265 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6266 gen_rtx_HIGH (Pmode, offset)), offset);
6268 else
6269 #endif
6270 x = gen_rtx_LO_SUM (GET_MODE (x),
6271 gen_rtx_HIGH (Pmode, x), x);
6273 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6274 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6275 opnum, (enum reload_type) type);
6276 *win = 1;
6277 return x;
6280 /* Reload an offset address wrapped by an AND that represents the
6281 masking of the lower bits. Strip the outer AND and let reload
6282 convert the offset address into an indirect address. For VSX,
6283 force reload to create the address with an AND in a separate
6284 register, because we can't guarantee an altivec register will
6285 be used. */
6286 if (VECTOR_MEM_ALTIVEC_P (mode)
6287 && GET_CODE (x) == AND
6288 && GET_CODE (XEXP (x, 0)) == PLUS
6289 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6291 && GET_CODE (XEXP (x, 1)) == CONST_INT
6292 && INTVAL (XEXP (x, 1)) == -16)
6294 x = XEXP (x, 0);
6295 *win = 1;
6296 return x;
6299 if (TARGET_TOC
6300 && reg_offset_p
6301 && GET_CODE (x) == SYMBOL_REF
6302 && use_toc_relative_ref (x))
6304 x = create_TOC_reference (x, NULL_RTX);
6305 if (TARGET_CMODEL != CMODEL_SMALL)
6306 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6307 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6308 opnum, (enum reload_type) type);
6309 *win = 1;
6310 return x;
6312 *win = 0;
6313 return x;
6316 /* Debug version of rs6000_legitimize_reload_address. */
6317 static rtx
6318 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6319 int opnum, int type,
6320 int ind_levels, int *win)
6322 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6323 ind_levels, win);
6324 fprintf (stderr,
6325 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6326 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6327 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6328 debug_rtx (x);
6330 if (x == ret)
6331 fprintf (stderr, "Same address returned\n");
6332 else if (!ret)
6333 fprintf (stderr, "NULL returned\n");
6334 else
6336 fprintf (stderr, "New address:\n");
6337 debug_rtx (ret);
6340 return ret;
6343 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6344 that is a valid memory address for an instruction.
6345 The MODE argument is the machine mode for the MEM expression
6346 that wants to use this address.
6348 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
6349 refers to a constant pool entry of an address (or the sum of it
6350 plus a constant), a short (16-bit signed) constant plus a register,
6351 the sum of two registers, or a register indirect, possibly with an
6352 auto-increment. For DFmode, DDmode and DImode with a constant plus
6353 register, we must ensure that both words are addressable or PowerPC64
6354 with offset word aligned.
6356 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6357 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6358 because adjacent memory cells are accessed by adding word-sized offsets
6359 during assembly output. */
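/* Concretely, the forms accepted below include (examples only; the
   register numbers are hypothetical):
       (reg r9)                                  register indirect
       (plus (reg r9) (const_int -4))            16-bit signed offset
       (plus (reg r9) (reg r10))                 indexed
       (pre_inc (reg r9)), (pre_modify ...)      auto-increment
       (lo_sum (reg r9) (symbol_ref "x"))        lo_sum
   all subject to the per-mode restrictions tested in the body.  */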
6360 static bool
6361 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6363 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6365 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6366 if (VECTOR_MEM_ALTIVEC_P (mode)
6367 && GET_CODE (x) == AND
6368 && GET_CODE (XEXP (x, 1)) == CONST_INT
6369 && INTVAL (XEXP (x, 1)) == -16)
6370 x = XEXP (x, 0);
6372 if (RS6000_SYMBOL_REF_TLS_P (x))
6373 return 0;
6374 if (legitimate_indirect_address_p (x, reg_ok_strict))
6375 return 1;
6376 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6377 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6378 && !SPE_VECTOR_MODE (mode)
6379 && mode != TFmode
6380 && mode != TDmode
6381 /* Restrict addressing for DI because of our SUBREG hackery. */
6382 && !(TARGET_E500_DOUBLE
6383 && (mode == DFmode || mode == DDmode || mode == DImode))
6384 && TARGET_UPDATE
6385 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6386 return 1;
6387 if (virtual_stack_registers_memory_p (x))
6388 return 1;
6389 if (reg_offset_p && legitimate_small_data_p (mode, x))
6390 return 1;
6391 if (reg_offset_p
6392 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6393 return 1;
6394 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
6395 if (! reg_ok_strict
6396 && reg_offset_p
6397 && GET_CODE (x) == PLUS
6398 && GET_CODE (XEXP (x, 0)) == REG
6399 && (XEXP (x, 0) == virtual_stack_vars_rtx
6400 || XEXP (x, 0) == arg_pointer_rtx)
6401 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6402 return 1;
6403 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
6404 return 1;
6405 if (mode != TImode
6406 && mode != TFmode
6407 && mode != TDmode
6408 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6409 || TARGET_POWERPC64
6410 || (mode != DFmode && mode != DDmode)
6411 || (TARGET_E500_DOUBLE && mode != DDmode))
6412 && (TARGET_POWERPC64 || mode != DImode)
6413 && !avoiding_indexed_address_p (mode)
6414 && legitimate_indexed_address_p (x, reg_ok_strict))
6415 return 1;
6416 if (GET_CODE (x) == PRE_MODIFY
6417 && mode != TImode
6418 && mode != TFmode
6419 && mode != TDmode
6420 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6421 || TARGET_POWERPC64
6422 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6423 && (TARGET_POWERPC64 || mode != DImode)
6424 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6425 && !SPE_VECTOR_MODE (mode)
6426 /* Restrict addressing for DI because of our SUBREG hackery. */
6427 && !(TARGET_E500_DOUBLE
6428 && (mode == DFmode || mode == DDmode || mode == DImode))
6429 && TARGET_UPDATE
6430 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6431 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
6432 reg_ok_strict, false)
6433 || (!avoiding_indexed_address_p (mode)
6434 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6435 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6436 return 1;
6437 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6438 return 1;
6439 return 0;
6442 /* Debug version of rs6000_legitimate_address_p. */
6443 static bool
6444 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6445 bool reg_ok_strict)
6447 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6448 fprintf (stderr,
6449 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6450 "strict = %d, code = %s\n",
6451 ret ? "true" : "false",
6452 GET_MODE_NAME (mode),
6453 reg_ok_strict,
6454 GET_RTX_NAME (GET_CODE (x)));
6455 debug_rtx (x);
6457 return ret;
6460 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6462 static bool
6463 rs6000_mode_dependent_address_p (const_rtx addr,
6464 addr_space_t as ATTRIBUTE_UNUSED)
6466 return rs6000_mode_dependent_address_ptr (addr);
6469 /* Go to LABEL if ADDR (a legitimate address expression)
6470 has an effect that depends on the machine mode it is used for.
6472 On the RS/6000 this is true of all integral offsets (since AltiVec
6473 and VSX modes don't allow them) or is a pre-increment or decrement.
6475 ??? Except that due to conceptual problems in offsettable_address_p
6476 we can't really report the problems of integral offsets. So leave
6477 this assuming that the adjustable offset must be valid for the
6478 sub-words of a TFmode operand, which is what we had before. */
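/* For instance (a sketch): (plus (reg r1) (const_int 32760)) is fine
   for SImode, but reaching the last word of a TFmode value needs
   offset 32760 + 12, which overflows a 16-bit signed displacement,
   so the PLUS case below reports the address as mode dependent
   (32760 + 0x8000 = 0xfff8 >= 0x10000 - 12 on 32-bit targets).  */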
6480 static bool
6481 rs6000_mode_dependent_address (const_rtx addr)
6483 switch (GET_CODE (addr))
6485 case PLUS:
6486 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6487 is considered a legitimate address before reload, so there
6488 are no offset restrictions in that case. Note that this
6489 condition is safe in strict mode because any address involving
6490 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6491 been rejected as illegitimate. */
6492 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6493 && XEXP (addr, 0) != arg_pointer_rtx
6494 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6496 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
6497 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
6499 break;
6501 case LO_SUM:
6502 /* Anything in the constant pool is sufficiently aligned that
6503 all bytes have the same high part address. */
6504 return !legitimate_constant_pool_address_p (addr, QImode, false);
6506 /* Auto-increment cases are now treated generically in recog.c. */
6507 case PRE_MODIFY:
6508 return TARGET_UPDATE;
6510 /* AND is only allowed in Altivec loads. */
6511 case AND:
6512 return true;
6514 default:
6515 break;
6518 return false;
6521 /* Debug version of rs6000_mode_dependent_address. */
6522 static bool
6523 rs6000_debug_mode_dependent_address (const_rtx addr)
6525 bool ret = rs6000_mode_dependent_address (addr);
6527 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6528 ret ? "true" : "false");
6529 debug_rtx (addr);
6531 return ret;
6534 /* Implement FIND_BASE_TERM. */
6537 rs6000_find_base_term (rtx op)
6539 rtx base;
6541 base = op;
6542 if (GET_CODE (base) == CONST)
6543 base = XEXP (base, 0);
6544 if (GET_CODE (base) == PLUS)
6545 base = XEXP (base, 0);
6546 if (GET_CODE (base) == UNSPEC)
6547 switch (XINT (base, 1))
6549 case UNSPEC_TOCREL:
6550 case UNSPEC_MACHOPIC_OFFSET:
6551 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6552 for aliasing purposes. */
6553 return XVECEXP (base, 0, 0);
6556 return op;
6559 /* More elaborate version of recog's offsettable_memref_p predicate
6560 that works around the ??? note of rs6000_mode_dependent_address.
6561 In particular it accepts
6563 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6565 in 32-bit mode, which the recog predicate rejects. */
6567 static bool
6568 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
6570 bool worst_case;
6572 if (!MEM_P (op))
6573 return false;
6575 /* First mimic offsettable_memref_p. */
6576 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
6577 return true;
6579 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6580 the latter predicate knows nothing about the mode of the memory
6581 reference and, therefore, assumes that it is the largest supported
6582 mode (TFmode). As a consequence, legitimate offsettable memory
6583 references are rejected. rs6000_legitimate_offset_address_p contains
6584 the correct logic for the PLUS case of rs6000_mode_dependent_address,
6585 at least with a little bit of help here given that we know the
6586 actual registers used. */
6587 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
6588 || GET_MODE_SIZE (reg_mode) == 4);
6589 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
6590 true, worst_case);
6593 /* Change register usage conditional on target flags. */
6594 static void
6595 rs6000_conditional_register_usage (void)
6597 int i;
6599 if (TARGET_DEBUG_TARGET)
6600 fprintf (stderr, "rs6000_conditional_register_usage called\n");
6602 /* Set MQ register fixed (already call_used) so that it will not be
6603 allocated. */
6604 fixed_regs[64] = 1;
6606 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6607 if (TARGET_64BIT)
6608 fixed_regs[13] = call_used_regs[13]
6609 = call_really_used_regs[13] = 1;
6611 /* Conditionally disable FPRs. */
6612 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6613 for (i = 32; i < 64; i++)
6614 fixed_regs[i] = call_used_regs[i]
6615 = call_really_used_regs[i] = 1;
6617 /* The TOC register is not killed across calls in a way that is
6618 visible to the compiler. */
6619 if (DEFAULT_ABI == ABI_AIX)
6620 call_really_used_regs[2] = 0;
6622 if (DEFAULT_ABI == ABI_V4
6623 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6624 && flag_pic == 2)
6625 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6627 if (DEFAULT_ABI == ABI_V4
6628 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6629 && flag_pic == 1)
6630 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6631 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6632 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6634 if (DEFAULT_ABI == ABI_DARWIN
6635 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6636 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6637 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6638 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6640 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6641 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6642 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6644 if (TARGET_SPE)
6646 global_regs[SPEFSCR_REGNO] = 1;
6647 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6648 registers in prologues and epilogues. We no longer use r14
6649 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6650 pool for link-compatibility with older versions of GCC. Once
6651 "old" code has died out, we can return r14 to the allocation
6652 pool. */
6653 fixed_regs[14]
6654 = call_used_regs[14]
6655 = call_really_used_regs[14] = 1;
6658 if (!TARGET_ALTIVEC && !TARGET_VSX)
6660 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6661 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6662 call_really_used_regs[VRSAVE_REGNO] = 1;
6665 if (TARGET_ALTIVEC || TARGET_VSX)
6666 global_regs[VSCR_REGNO] = 1;
6668 if (TARGET_ALTIVEC_ABI)
6670 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6671 call_used_regs[i] = call_really_used_regs[i] = 1;
6673 /* AIX reserves VR20:31 in non-extended ABI mode. */
6674 if (TARGET_XCOFF)
6675 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6676 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6680 /* Try to output insns to set TARGET equal to the constant C if it can
6681 be done in less than N insns. Do all computations in MODE.
6682 Returns the place where the output has been placed if it can be
6683 done and the insns have been emitted. If it would take more than N
6684 insns, zero is returned and no insns are emitted. */
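/* A usage sketch (hypothetical caller):
     rtx r = gen_reg_rtx (SImode);
     rs6000_emit_set_const (r, SImode, GEN_INT (0x12345678), 2);
   emits "lis r,0x1234" followed by "ori r,r,0x5678", matching the
   SImode case below that first sets the high 16 bits and then IORs
   in the low 16 bits.  */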
6687 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6688 rtx source, int n ATTRIBUTE_UNUSED)
6690 rtx result, insn, set;
6691 HOST_WIDE_INT c0, c1;
6693 switch (mode)
6695 case QImode:
6696 case HImode:
6697 if (dest == NULL)
6698 dest = gen_reg_rtx (mode);
6699 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6700 return dest;
6702 case SImode:
6703 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
6705 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6706 GEN_INT (INTVAL (source)
6707 & (~ (HOST_WIDE_INT) 0xffff))));
6708 emit_insn (gen_rtx_SET (VOIDmode, dest,
6709 gen_rtx_IOR (SImode, copy_rtx (result),
6710 GEN_INT (INTVAL (source) & 0xffff))));
6711 result = dest;
6712 break;
6714 case DImode:
6715 switch (GET_CODE (source))
6717 case CONST_INT:
6718 c0 = INTVAL (source);
6719 c1 = -(c0 < 0);
6720 break;
6722 case CONST_DOUBLE:
6723 #if HOST_BITS_PER_WIDE_INT >= 64
6724 c0 = CONST_DOUBLE_LOW (source);
6725 c1 = -(c0 < 0);
6726 #else
6727 c0 = CONST_DOUBLE_LOW (source);
6728 c1 = CONST_DOUBLE_HIGH (source);
6729 #endif
6730 break;
6732 default:
6733 gcc_unreachable ();
6736 result = rs6000_emit_set_long_const (dest, c0, c1);
6737 break;
6739 default:
6740 gcc_unreachable ();
6743 insn = get_last_insn ();
6744 set = single_set (insn);
6745 if (! CONSTANT_P (SET_SRC (set)))
6746 set_unique_reg_note (insn, REG_EQUAL, source);
6748 return result;
6751 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
6752 fall back to a straightforward decomposition. We do this to avoid
6753 exponential run times encountered when looking for longer sequences
6754 with rs6000_emit_set_const. */
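/* Worked 64-bit example (a sketch): for c = 0x123456789abcdef0 the
   decomposition below gives ud4:ud3:ud2:ud1
   = 0x1234:0x5678:0x9abc:0xdef0, and the emitted sequence is roughly
       lis  r,0x1234
       ori  r,r,0x5678
       sldi r,r,32
       oris r,r,0x9abc
       ori  r,r,0xdef0  */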
6755 static rtx
6756 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6758 if (!TARGET_POWERPC64)
6760 rtx operand1, operand2;
6762 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6763 DImode);
6764 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6765 DImode);
6766 emit_move_insn (operand1, GEN_INT (c1));
6767 emit_move_insn (operand2, GEN_INT (c2));
6769 else
6771 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6773 ud1 = c1 & 0xffff;
6774 ud2 = (c1 & 0xffff0000) >> 16;
6775 #if HOST_BITS_PER_WIDE_INT >= 64
6776 c2 = c1 >> 32;
6777 #endif
6778 ud3 = c2 & 0xffff;
6779 ud4 = (c2 & 0xffff0000) >> 16;
6781 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6782 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6784 if (ud1 & 0x8000)
6785 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6786 else
6787 emit_move_insn (dest, GEN_INT (ud1));
6790 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6791 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6793 if (ud2 & 0x8000)
6794 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6795 - 0x80000000));
6796 else
6797 emit_move_insn (dest, GEN_INT (ud2 << 16));
6798 if (ud1 != 0)
6799 emit_move_insn (copy_rtx (dest),
6800 gen_rtx_IOR (DImode, copy_rtx (dest),
6801 GEN_INT (ud1)));
6803 else if (ud3 == 0 && ud4 == 0)
6805 gcc_assert (ud2 & 0x8000);
6806 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6807 - 0x80000000));
6808 if (ud1 != 0)
6809 emit_move_insn (copy_rtx (dest),
6810 gen_rtx_IOR (DImode, copy_rtx (dest),
6811 GEN_INT (ud1)));
6812 emit_move_insn (copy_rtx (dest),
6813 gen_rtx_ZERO_EXTEND (DImode,
6814 gen_lowpart (SImode,
6815 copy_rtx (dest))));
6817 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6818 || (ud4 == 0 && ! (ud3 & 0x8000)))
6820 if (ud3 & 0x8000)
6821 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6822 - 0x80000000));
6823 else
6824 emit_move_insn (dest, GEN_INT (ud3 << 16));
6826 if (ud2 != 0)
6827 emit_move_insn (copy_rtx (dest),
6828 gen_rtx_IOR (DImode, copy_rtx (dest),
6829 GEN_INT (ud2)));
6830 emit_move_insn (copy_rtx (dest),
6831 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6832 GEN_INT (16)));
6833 if (ud1 != 0)
6834 emit_move_insn (copy_rtx (dest),
6835 gen_rtx_IOR (DImode, copy_rtx (dest),
6836 GEN_INT (ud1)));
6838 else
6840 if (ud4 & 0x8000)
6841 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6842 - 0x80000000));
6843 else
6844 emit_move_insn (dest, GEN_INT (ud4 << 16));
6846 if (ud3 != 0)
6847 emit_move_insn (copy_rtx (dest),
6848 gen_rtx_IOR (DImode, copy_rtx (dest),
6849 GEN_INT (ud3)));
6851 emit_move_insn (copy_rtx (dest),
6852 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6853 GEN_INT (32)));
6854 if (ud2 != 0)
6855 emit_move_insn (copy_rtx (dest),
6856 gen_rtx_IOR (DImode, copy_rtx (dest),
6857 GEN_INT (ud2 << 16)));
6858 if (ud1 != 0)
6859 emit_move_insn (copy_rtx (dest),
6860 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
6863 return dest;
6866 /* Helper for the following. Get rid of [r+r] memory refs
6867 in cases where it won't work (TImode, TFmode, TDmode). */
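/* E.g. (illustrative): a TImode operand (mem:TI (plus (reg r9)
   (reg r10))) is rewritten as (mem:TI (reg r11)) with the sum first
   copied into the fresh base register r11, since [reg+reg] cannot be
   used for the multi-word moves these modes expand to.  */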
6869 static void
6870 rs6000_eliminate_indexed_memrefs (rtx operands[2])
6872 if (reload_in_progress)
6873 return;
6875 if (GET_CODE (operands[0]) == MEM
6876 && GET_CODE (XEXP (operands[0], 0)) != REG
6877 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
6878 GET_MODE (operands[0]), false))
6879 operands[0]
6880 = replace_equiv_address (operands[0],
6881 copy_addr_to_reg (XEXP (operands[0], 0)));
6883 if (GET_CODE (operands[1]) == MEM
6884 && GET_CODE (XEXP (operands[1], 0)) != REG
6885 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
6886 GET_MODE (operands[1]), false))
6887 operands[1]
6888 = replace_equiv_address (operands[1],
6889 copy_addr_to_reg (XEXP (operands[1], 0)));
6892 /* Emit a move from SOURCE to DEST in mode MODE. */
6893 void
6894 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
6896 rtx operands[2];
6897 operands[0] = dest;
6898 operands[1] = source;
6900 if (TARGET_DEBUG_ADDR)
6902 fprintf (stderr,
6903 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
6904 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
6905 GET_MODE_NAME (mode),
6906 reload_in_progress,
6907 reload_completed,
6908 can_create_pseudo_p ());
6909 debug_rtx (dest);
6910 fprintf (stderr, "source:\n");
6911 debug_rtx (source);
6914 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
6915 if (GET_CODE (operands[1]) == CONST_DOUBLE
6916 && ! FLOAT_MODE_P (mode)
6917 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6919 /* FIXME. This should never happen. */
6920 /* Since it seems that it does, do the safe thing and convert
6921 to a CONST_INT. */
6922 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
6924 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
6925 || FLOAT_MODE_P (mode)
6926 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
6927 || CONST_DOUBLE_LOW (operands[1]) < 0)
6928 && (CONST_DOUBLE_HIGH (operands[1]) != -1
6929 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
6931 /* Check if GCC is setting up a block move that will end up using FP
6932 registers as temporaries. We must make sure this is acceptable. */
6933 if (GET_CODE (operands[0]) == MEM
6934 && GET_CODE (operands[1]) == MEM
6935 && mode == DImode
6936 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
6937 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
6938 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
6939 ? 32 : MEM_ALIGN (operands[0])))
6940 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
6941 ? 32
6942 : MEM_ALIGN (operands[1]))))
6943 && ! MEM_VOLATILE_P (operands [0])
6944 && ! MEM_VOLATILE_P (operands [1]))
6946 emit_move_insn (adjust_address (operands[0], SImode, 0),
6947 adjust_address (operands[1], SImode, 0));
6948 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
6949 adjust_address (copy_rtx (operands[1]), SImode, 4));
6950 return;
6953 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
6954 && !gpc_reg_operand (operands[1], mode))
6955 operands[1] = force_reg (mode, operands[1]);
6957 /* Recognize the case where operand[1] is a reference to thread-local
6958 data and load its address to a register. */
6959 if (rs6000_tls_referenced_p (operands[1]))
6961 enum tls_model model;
6962 rtx tmp = operands[1];
6963 rtx addend = NULL;
6965 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
6967 addend = XEXP (XEXP (tmp, 0), 1);
6968 tmp = XEXP (XEXP (tmp, 0), 0);
6971 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
6972 model = SYMBOL_REF_TLS_MODEL (tmp);
6973 gcc_assert (model != 0);
6975 tmp = rs6000_legitimize_tls_address (tmp, model);
6976 if (addend)
6978 tmp = gen_rtx_PLUS (mode, tmp, addend);
6979 tmp = force_operand (tmp, operands[0]);
6981 operands[1] = tmp;
6984 /* Handle the case where reload calls us with an invalid address. */
6985 if (reload_in_progress && mode == Pmode
6986 && (! general_operand (operands[1], mode)
6987 || ! nonimmediate_operand (operands[0], mode)))
6988 goto emit_set;
6990 /* 128-bit constant floating-point values on Darwin should really be
6991 loaded as two parts. */
6992 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
6993 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
6995 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
6996 simplify_gen_subreg (DFmode, operands[1], mode, 0),
6997 DFmode);
6998 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
6999 GET_MODE_SIZE (DFmode)),
7000 simplify_gen_subreg (DFmode, operands[1], mode,
7001 GET_MODE_SIZE (DFmode)),
7002 DFmode);
7003 return;
7006 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7007 cfun->machine->sdmode_stack_slot =
7008 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7010 if (reload_in_progress
7011 && mode == SDmode
7012 && MEM_P (operands[0])
7013 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7014 && REG_P (operands[1]))
7016 if (FP_REGNO_P (REGNO (operands[1])))
7018 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7019 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7020 emit_insn (gen_movsd_store (mem, operands[1]));
7022 else if (INT_REGNO_P (REGNO (operands[1])))
7024 rtx mem = adjust_address_nv (operands[0], mode, 4);
7025 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7026 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7028 else
7029 gcc_unreachable();
7030 return;
7032 if (reload_in_progress
7033 && mode == SDmode
7034 && REG_P (operands[0])
7035 && MEM_P (operands[1])
7036 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7038 if (FP_REGNO_P (REGNO (operands[0])))
7040 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7041 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7042 emit_insn (gen_movsd_load (operands[0], mem));
7044 else if (INT_REGNO_P (REGNO (operands[0])))
7046 rtx mem = adjust_address_nv (operands[1], mode, 4);
7047 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7048 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7050 else
7051 gcc_unreachable();
7052 return;
7055 /* FIXME: In the long term, this switch statement should go away
7056 and be replaced by a sequence of tests based on things like
7057 mode == Pmode. */
7058 switch (mode)
7060 case HImode:
7061 case QImode:
7062 if (CONSTANT_P (operands[1])
7063 && GET_CODE (operands[1]) != CONST_INT)
7064 operands[1] = force_const_mem (mode, operands[1]);
7065 break;
7067 case TFmode:
7068 case TDmode:
7069 rs6000_eliminate_indexed_memrefs (operands);
7070 /* fall through */
7072 case DFmode:
7073 case DDmode:
7074 case SFmode:
7075 case SDmode:
7076 if (CONSTANT_P (operands[1])
7077 && ! easy_fp_constant (operands[1], mode))
7078 operands[1] = force_const_mem (mode, operands[1]);
7079 break;
7081 case V16QImode:
7082 case V8HImode:
7083 case V4SFmode:
7084 case V4SImode:
7085 case V4HImode:
7086 case V2SFmode:
7087 case V2SImode:
7088 case V1DImode:
7089 case V2DFmode:
7090 case V2DImode:
7091 if (CONSTANT_P (operands[1])
7092 && !easy_vector_constant (operands[1], mode))
7093 operands[1] = force_const_mem (mode, operands[1]);
7094 break;
7096 case SImode:
7097 case DImode:
7098 /* Use the default pattern for the address of ELF small data. */
7099 if (TARGET_ELF
7100 && mode == Pmode
7101 && DEFAULT_ABI == ABI_V4
7102 && (GET_CODE (operands[1]) == SYMBOL_REF
7103 || GET_CODE (operands[1]) == CONST)
7104 && small_data_operand (operands[1], mode))
7106 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7107 return;
7110 if (DEFAULT_ABI == ABI_V4
7111 && mode == Pmode && mode == SImode
7112 && flag_pic == 1 && got_operand (operands[1], mode))
7114 emit_insn (gen_movsi_got (operands[0], operands[1]));
7115 return;
7118 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7119 && TARGET_NO_TOC
7120 && ! flag_pic
7121 && mode == Pmode
7122 && CONSTANT_P (operands[1])
7123 && GET_CODE (operands[1]) != HIGH
7124 && GET_CODE (operands[1]) != CONST_INT)
7126 rtx target = (!can_create_pseudo_p ()
7127 ? operands[0]
7128 : gen_reg_rtx (mode));
7130 /* If this is a function address on -mcall-aixdesc,
7131 convert it to the address of the descriptor. */
7132 if (DEFAULT_ABI == ABI_AIX
7133 && GET_CODE (operands[1]) == SYMBOL_REF
7134 && XSTR (operands[1], 0)[0] == '.')
7136 const char *name = XSTR (operands[1], 0);
7137 rtx new_ref;
7138 while (*name == '.')
7139 name++;
7140 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7141 CONSTANT_POOL_ADDRESS_P (new_ref)
7142 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7143 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7144 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7145 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7146 operands[1] = new_ref;
7149 if (DEFAULT_ABI == ABI_DARWIN)
7151 #if TARGET_MACHO
7152 if (MACHO_DYNAMIC_NO_PIC_P)
7154 /* Take care of any required data indirection. */
7155 operands[1] = rs6000_machopic_legitimize_pic_address (
7156 operands[1], mode, operands[0]);
7157 if (operands[0] != operands[1])
7158 emit_insn (gen_rtx_SET (VOIDmode,
7159 operands[0], operands[1]));
7160 return;
7162 #endif
7163 emit_insn (gen_macho_high (target, operands[1]));
7164 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7165 return;
7168 emit_insn (gen_elf_high (target, operands[1]));
7169 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7170 return;
7173 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7174 and we have put it in the TOC, we just need to make a TOC-relative
7175 reference to it. */
7176 if (TARGET_TOC
7177 && GET_CODE (operands[1]) == SYMBOL_REF
7178 && use_toc_relative_ref (operands[1]))
7179 operands[1] = create_TOC_reference (operands[1], operands[0]);
7180 else if (mode == Pmode
7181 && CONSTANT_P (operands[1])
7182 && GET_CODE (operands[1]) != HIGH
7183 && ((GET_CODE (operands[1]) != CONST_INT
7184 && ! easy_fp_constant (operands[1], mode))
7185 || (GET_CODE (operands[1]) == CONST_INT
7186 && (num_insns_constant (operands[1], mode)
7187 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7188 || (GET_CODE (operands[0]) == REG
7189 && FP_REGNO_P (REGNO (operands[0]))))
7190 && !toc_relative_expr_p (operands[1], false)
7191 && (TARGET_CMODEL == CMODEL_SMALL
7192 || can_create_pseudo_p ()
7193 || (REG_P (operands[0])
7194 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7197 #if TARGET_MACHO
7198 /* Darwin uses a special PIC legitimizer. */
7199 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7201 operands[1] =
7202 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7203 operands[0]);
7204 if (operands[0] != operands[1])
7205 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7206 return;
7208 #endif
7210 /* If we are to limit the number of things we put in the TOC and
7211 this is a symbol plus a constant we can add in one insn,
7212 just put the symbol in the TOC and add the constant. Don't do
7213 this if reload is in progress. */
7214 if (GET_CODE (operands[1]) == CONST
7215 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7216 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7217 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7218 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7219 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7220 && ! side_effects_p (operands[0]))
7222 rtx sym =
7223 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7224 rtx other = XEXP (XEXP (operands[1], 0), 1);
7226 sym = force_reg (mode, sym);
7227 emit_insn (gen_add3_insn (operands[0], sym, other));
7228 return;
7231 operands[1] = force_const_mem (mode, operands[1]);
7233 if (TARGET_TOC
7234 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7235 && constant_pool_expr_p (XEXP (operands[1], 0))
7236 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7237 get_pool_constant (XEXP (operands[1], 0)),
7238 get_pool_mode (XEXP (operands[1], 0))))
7240 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7241 operands[0]);
7242 operands[1] = gen_const_mem (mode, tocref);
7243 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7246 break;
7248 case TImode:
7249 rs6000_eliminate_indexed_memrefs (operands);
7250 break;
7252 default:
7253 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7256 /* Above, we may have called force_const_mem which may have returned
7257 an invalid address. If we can, fix this up; otherwise, reload will
7258 have to deal with it. */
7259 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7260 operands[1] = validize_mem (operands[1]);
7262 emit_set:
7263 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7266 /* Return true if a structure, union or array containing FIELD should be
7267 accessed using `BLKMODE'.
7269 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7270 entire thing in a DI and use subregs to access the internals.
7271 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7272 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7273 best thing to do is set structs to BLKmode and avoid Severe Tire
7274 Damage.
7276 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
7277 fit in one register, whereas DI still needs two.  */
7279 static bool
7280 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7282 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7283 || (TARGET_E500_DOUBLE && mode == DFmode));
7286 /* Nonzero if we can use a floating-point register to pass this arg. */
7287 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7288 (SCALAR_FLOAT_MODE_P (MODE) \
7289 && (CUM)->fregno <= FP_ARG_MAX_REG \
7290 && TARGET_HARD_FLOAT && TARGET_FPRS)
7292 /* Nonzero if we can use an AltiVec register to pass this arg. */
7293 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7294 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7295 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7296 && TARGET_ALTIVEC_ABI \
7297 && (NAMED))
7299 /* Return a nonzero value to say to return the function value in
7300 memory, just as large structures are always returned. TYPE will be
7301 the data type of the value, and FNTYPE will be the type of the
7302 function doing the returning, or @code{NULL} for libcalls.
7304 The AIX ABI for the RS/6000 specifies that all structures are
7305 returned in memory. The Darwin ABI does the same.
7307 For the Darwin 64 Bit ABI, a function result can be returned in
7308 registers or in memory, depending on the size of the return data
7309 type. If it is returned in registers, the value occupies the same
7310 registers as it would if it were the first and only function
7311 argument. Otherwise, the function places its result in memory at
7312 the location pointed to by GPR3.
7314 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7315 but a draft put them in memory, and GCC used to implement the draft
7316 instead of the final standard. Therefore, aix_struct_return
7317 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7318 compatibility can change DRAFT_V4_STRUCT_RET to override the
7319 default, and -m switches get the final word. See
7320 rs6000_option_override_internal for more details.
7322 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
7323 long double support is enabled. These values are returned in memory.
7325 int_size_in_bytes returns -1 for variable size objects, which always
7326 go in memory. The cast to unsigned makes -1 > 8. */
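/* Two quick examples (a sketch): under the SVR4 rules above,
   "struct { int a, b; }" (8 bytes) comes back in r3/r4 when
   aix_struct_return is false, while a 12-byte struct is returned in
   memory; with aix_struct_return set, both go to memory.  */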
7328 static bool
7329 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7331 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7332 if (TARGET_MACHO
7333 && rs6000_darwin64_abi
7334 && TREE_CODE (type) == RECORD_TYPE
7335 && int_size_in_bytes (type) > 0)
7337 CUMULATIVE_ARGS valcum;
7338 rtx valret;
7340 valcum.words = 0;
7341 valcum.fregno = FP_ARG_MIN_REG;
7342 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7343 /* Do a trial code generation as if this were going to be passed
7344 as an argument; if any part goes in memory, we return NULL. */
7345 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7346 if (valret)
7347 return false;
7348 /* Otherwise fall through to more conventional ABI rules. */
7351 if (AGGREGATE_TYPE_P (type)
7352 && (aix_struct_return
7353 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7354 return true;
7356 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7357 modes only exist for GCC vector types if -maltivec. */
7358 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7359 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7360 return false;
7362 /* Return synthetic vectors in memory. */
7363 if (TREE_CODE (type) == VECTOR_TYPE
7364 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7366 static bool warned_for_return_big_vectors = false;
7367 if (!warned_for_return_big_vectors)
7369 warning (0, "GCC vector returned by reference: "
7370 "non-standard ABI extension with no compatibility guarantee");
7371 warned_for_return_big_vectors = true;
7373 return true;
7376 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7377 return true;
7379 return false;
7382 #ifdef HAVE_AS_GNU_ATTRIBUTE
7383 /* Return TRUE if a call to function FNDECL may be one that
7384 potentially affects the function calling ABI of the object file. */
static bool
call_ABI_of_interest (tree fndecl)
{
  if (cgraph_state == CGRAPH_STATE_EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_get_node (fndecl);
      c_node = cgraph_function_or_thunk_node (c_node, NULL);
      return !cgraph_only_called_directly_p (c_node);
    }
  return false;
}
7408 #endif
7410 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7411 for a call to a function whose data type is FNTYPE.
7412 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
7414 For incoming args we set the number of arguments in the prototype large
7415 so we never return a PARALLEL. */
7417 void
7418 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7419 rtx libname ATTRIBUTE_UNUSED, int incoming,
7420 int libcall, int n_named_args,
7421 tree fndecl ATTRIBUTE_UNUSED,
7422 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7424 static CUMULATIVE_ARGS zero_cumulative;
7426 *cum = zero_cumulative;
7427 cum->words = 0;
7428 cum->fregno = FP_ARG_MIN_REG;
7429 cum->vregno = ALTIVEC_ARG_MIN_REG;
7430 cum->prototype = (fntype && prototype_p (fntype));
7431 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7432 ? CALL_LIBCALL : CALL_NORMAL);
7433 cum->sysv_gregno = GP_ARG_MIN_REG;
7434 cum->stdarg = stdarg_p (fntype);
7436 cum->nargs_prototype = 0;
7437 if (incoming || cum->prototype)
7438 cum->nargs_prototype = n_named_args;
7440 /* Check for a longcall attribute. */
7441 if ((!fntype && rs6000_default_long_calls)
7442 || (fntype
7443 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7444 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7445 cum->call_cookie |= CALL_LONG;
7447 if (TARGET_DEBUG_ARG)
7449 fprintf (stderr, "\ninit_cumulative_args:");
7450 if (fntype)
7452 tree ret_type = TREE_TYPE (fntype);
7453 fprintf (stderr, " ret code = %s,",
7454 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7457 if (cum->call_cookie & CALL_LONG)
7458 fprintf (stderr, " longcall,");
7460 fprintf (stderr, " proto = %d, nargs = %d\n",
7461 cum->prototype, cum->nargs_prototype);
7464 #ifdef HAVE_AS_GNU_ATTRIBUTE
7465 if (DEFAULT_ABI == ABI_V4)
7467 cum->escapes = call_ABI_of_interest (fndecl);
7468 if (cum->escapes)
7470 tree return_type;
7472 if (fntype)
7474 return_type = TREE_TYPE (fntype);
7475 return_mode = TYPE_MODE (return_type);
7477 else
7478 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7480 if (return_type != NULL)
7482 if (TREE_CODE (return_type) == RECORD_TYPE
7483 && TYPE_TRANSPARENT_AGGR (return_type))
7485 return_type = TREE_TYPE (first_field (return_type));
7486 return_mode = TYPE_MODE (return_type);
7488 if (AGGREGATE_TYPE_P (return_type)
7489 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7490 <= 8))
7491 rs6000_returns_struct = true;
7493 if (SCALAR_FLOAT_MODE_P (return_mode))
7494 rs6000_passes_float = true;
7495 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7496 || SPE_VECTOR_MODE (return_mode))
7497 rs6000_passes_vector = true;
7500 #endif
7502 if (fntype
7503 && !TARGET_ALTIVEC
7504 && TARGET_ALTIVEC_ABI
7505 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7507 error ("cannot return value in vector register because"
7508 " altivec instructions are disabled, use -maltivec"
7509 " to enable them");
7513 /* Return true if TYPE must be passed on the stack and not in registers. */
static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
7524 /* If defined, a C expression which determines whether, and in which
7525 direction, to pad out an argument with extra space. The value
7526 should be of type `enum direction': either `upward' to pad above
7527 the argument, `downward' to pad below, or `none' to inhibit
7528 padding.
7530 For the AIX ABI structs are always stored left shifted in their
7531 argument slot. */
enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
7582 /* If defined, a C expression that gives the alignment boundary, in bits,
7583 of an argument with the specified mode and type. If it is not defined,
7584 PARM_BOUNDARY is used for all arguments.
7586 V.4 wants long longs and doubles to be double word aligned. Just
7587 testing the mode size is a boneheaded way to do this as it means
7588 that other types such as complex int are also double word aligned.
7589 However, we're stuck with this because changing the ABI might break
7590 existing library interfaces.
7592 Doubleword align SPE vectors.
7593 Quadword align Altivec/VSX vectors.
7594 Quadword align large synthetic vector types. */
7596 static unsigned int
7597 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7599 if (DEFAULT_ABI == ABI_V4
7600 && (GET_MODE_SIZE (mode) == 8
7601 || (TARGET_HARD_FLOAT
7602 && TARGET_FPRS
7603 && (mode == TFmode || mode == TDmode))))
7604 return 64;
7605 else if (SPE_VECTOR_MODE (mode)
7606 || (type && TREE_CODE (type) == VECTOR_TYPE
7607 && int_size_in_bytes (type) >= 8
7608 && int_size_in_bytes (type) < 16))
7609 return 64;
7610 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7611 || (type && TREE_CODE (type) == VECTOR_TYPE
7612 && int_size_in_bytes (type) >= 16))
7613 return 128;
7614 else if (TARGET_MACHO
7615 && rs6000_darwin64_abi
7616 && mode == BLKmode
7617 && type && TYPE_ALIGN (type) > 64)
7618 return 128;
7619 else
7620 return PARM_BOUNDARY;
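/* Illustrative cases: under the V.4 ABI a long long or DFmode double
   (GET_MODE_SIZE == 8) gets 64-bit alignment, a 16-byte AltiVec
   vector gets 128, and a plain int falls through to PARM_BOUNDARY.  */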
7623 /* For a function parm of MODE and TYPE, return the starting word in
7624 the parameter area. NWORDS of the parameter area are already used. */
static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
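/* A worked example with assumed values: under 64-bit AIX the
   parameter save area begins 6 words past the SP, so for a 16-byte
   vector (boundary 128, PARM_BOUNDARY 64) ALIGN is 128/64 - 1 == 1,
   and with NWORDS == 3 the result is 3 + (-(6 + 3) & 1) == 4,
   bumping the argument to an even doubleword.  */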
7638 /* Compute the size (in words) of a function argument. */
static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
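/* E.g. a DFmode argument (8 bytes) needs (8 + 3) >> 2 == 2 words
   when TARGET_32BIT but (8 + 7) >> 3 == 1 otherwise, and a 13-byte
   BLKmode struct rounds up to 4 and 2 words respectively.  */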
7656 /* Use this to flush pending int fields. */
7658 static void
7659 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7660 HOST_WIDE_INT bitpos, int final)
7662 unsigned int startbit, endbit;
7663 int intregs, intoffset;
7664 enum machine_mode mode;
  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by an 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
7671 if (cum->floats_in_gpr == 1
7672 && (cum->intoffset % 64 == 0
7673 || (cum->intoffset == -1 && final)))
7675 cum->words++;
7676 cum->floats_in_gpr = 0;
7679 if (cum->intoffset == -1)
7680 return;
7682 intoffset = cum->intoffset;
7683 cum->intoffset = -1;
7684 cum->floats_in_gpr = 0;
7686 if (intoffset % BITS_PER_WORD != 0)
7688 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7689 MODE_INT, 0);
7690 if (mode == BLKmode)
      /* We couldn't find an appropriate mode, which happens,
	 e.g., in packed structs when there are 3 bytes to load.
	 Move intoffset back to the beginning of the word in this
	 case.  */
7696 intoffset = intoffset & -BITS_PER_WORD;
7700 startbit = intoffset & -BITS_PER_WORD;
7701 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7702 intregs = (endbit - startbit) / BITS_PER_WORD;
7703 cum->words += intregs;
7704 /* words should be unsigned. */
7705 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
7707 int pad = (endbit/BITS_PER_WORD) - cum->words;
7708 cum->words += pad;
7712 /* The darwin64 ABI calls for us to recurse down through structs,
7713 looking for elements passed in registers. Unfortunately, we have
7714 to track int register count here also because of misalignments
7715 in powerpc alignment mode. */
7717 static void
7718 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7719 const_tree type,
7720 HOST_WIDE_INT startbitpos)
7722 tree f;
7724 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
7725 if (TREE_CODE (f) == FIELD_DECL)
7727 HOST_WIDE_INT bitpos = startbitpos;
7728 tree ftype = TREE_TYPE (f);
7729 enum machine_mode mode;
7730 if (ftype == error_mark_node)
7731 continue;
7732 mode = TYPE_MODE (ftype);
7734 if (DECL_SIZE (f) != 0
7735 && host_integerp (bit_position (f), 1))
7736 bitpos += int_bit_position (f);
7738 /* ??? FIXME: else assume zero offset. */
7740 if (TREE_CODE (ftype) == RECORD_TYPE)
7741 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7742 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7744 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
7745 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7746 cum->fregno += n_fpregs;
7747 /* Single-precision floats present a special problem for
7748 us, because they are smaller than an 8-byte GPR, and so
7749 the structure-packing rules combined with the standard
7750 varargs behavior mean that we want to pack float/float
7751 and float/int combinations into a single register's
7752 space. This is complicated by the arg advance flushing,
7753 which works on arbitrarily large groups of int-type
7754 fields. */
7755 if (mode == SFmode)
7757 if (cum->floats_in_gpr == 1)
7759 /* Two floats in a word; count the word and reset
7760 the float count. */
7761 cum->words++;
7762 cum->floats_in_gpr = 0;
7764 else if (bitpos % 64 == 0)
	      /* A float at the beginning of an 8-byte word;
		 count it and put off adjusting cum->words until
		 we see if an arg advance flush is going to do it
		 for us.  */
7770 cum->floats_in_gpr++;
7772 else
7774 /* The float is at the end of a word, preceded
7775 by integer fields, so the arg advance flush
7776 just above has already set cum->words and
7777 everything is taken care of. */
7780 else
7781 cum->words += n_fpregs;
7783 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7785 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7786 cum->vregno++;
7787 cum->words += 2;
7789 else if (cum->intoffset == -1)
7790 cum->intoffset = bitpos;
/* Check for an item that needs to be considered specially under the
   Darwin 64-bit ABI.  These are record types where the mode is BLKmode
   or the structure is 8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
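/* For illustration: struct { char c[9]; } (BLKmode, size 9) and a
   record whose mode is DFmode but whose size is 8 both satisfy this
   check, while struct { int i; } (SImode, size 4) is handled by the
   ordinary rules.  */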
7808 /* Update the data in CUM to advance over an argument
7809 of mode MODE and data type TYPE.
7810 (TYPE is null for libcalls where that information may not be available.)
7812 Note that for args passed by reference, function_arg will be called
7813 with MODE and TYPE set to that of the pointer to the arg, not the arg
7814 itself. */
7816 static void
7817 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7818 const_tree type, bool named, int depth)
7820 /* Only tick off an argument if we're not recursing. */
7821 if (depth == 0)
7822 cum->nargs_prototype--;
7824 #ifdef HAVE_AS_GNU_ATTRIBUTE
7825 if (DEFAULT_ABI == ABI_V4
7826 && cum->escapes)
7828 if (SCALAR_FLOAT_MODE_P (mode))
7829 rs6000_passes_float = true;
7830 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
7831 rs6000_passes_vector = true;
7832 else if (SPE_VECTOR_MODE (mode)
7833 && !cum->stdarg
7834 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7835 rs6000_passes_vector = true;
7837 #endif
7839 if (TARGET_ALTIVEC_ABI
7840 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7841 || (type && TREE_CODE (type) == VECTOR_TYPE
7842 && int_size_in_bytes (type) == 16)))
7844 bool stack = false;
7846 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7848 cum->vregno++;
7849 if (!TARGET_ALTIVEC)
7850 error ("cannot pass argument in vector register because"
7851 " altivec instructions are disabled, use -maltivec"
7852 " to enable them");
7854 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7855 even if it is going to be passed in a vector register.
7856 Darwin does the same for variable-argument functions. */
7857 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
7858 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
7859 stack = true;
7861 else
7862 stack = true;
7864 if (stack)
7866 int align;
7868 /* Vector parameters must be 16-byte aligned. This places
7869 them at 2 mod 4 in terms of words in 32-bit mode, since
7870 the parameter save area starts at offset 24 from the
7871 stack. In 64-bit mode, they just have to start on an
7872 even word, since the parameter save area is 16-byte
7873 aligned. Space for GPRs is reserved even if the argument
7874 will be passed in memory. */
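	  /* E.g. with cum->words == 5 in 32-bit mode, align is
	     (2 - 5) & 3 == 1, so the vector starts at word 6:
	     byte offset 24 + 6*4 == 48, a 16-byte boundary.  */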
7875 if (TARGET_32BIT)
7876 align = (2 - cum->words) & 3;
7877 else
7878 align = cum->words & 1;
7879 cum->words += align + rs6000_arg_size (mode, type);
7881 if (TARGET_DEBUG_ARG)
7883 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
7884 cum->words, align);
7885 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
7886 cum->nargs_prototype, cum->prototype,
7887 GET_MODE_NAME (mode));
7891 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
7892 && !cum->stdarg
7893 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7894 cum->sysv_gregno++;
7896 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
7898 int size = int_size_in_bytes (type);
7899 /* Variable sized types have size == -1 and are
7900 treated as if consisting entirely of ints.
7901 Pad to 16 byte boundary if needed. */
7902 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7903 && (cum->words % 2) != 0)
7904 cum->words++;
7905 /* For varargs, we can just go up by the size of the struct. */
7906 if (!named)
7907 cum->words += (size + 7) / 8;
7908 else
7910 /* It is tempting to say int register count just goes up by
7911 sizeof(type)/8, but this is wrong in a case such as
7912 { int; double; int; } [powerpc alignment]. We have to
7913 grovel through the fields for these too. */
7914 cum->intoffset = 0;
7915 cum->floats_in_gpr = 0;
7916 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
7917 rs6000_darwin64_record_arg_advance_flush (cum,
7918 size * BITS_PER_UNIT, 1);
7920 if (TARGET_DEBUG_ARG)
7922 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
7923 cum->words, TYPE_ALIGN (type), size);
7924 fprintf (stderr,
7925 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
7926 cum->nargs_prototype, cum->prototype,
7927 GET_MODE_NAME (mode));
7930 else if (DEFAULT_ABI == ABI_V4)
7932 if (TARGET_HARD_FLOAT && TARGET_FPRS
7933 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7934 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7935 || (mode == TFmode && !TARGET_IEEEQUAD)
7936 || mode == SDmode || mode == DDmode || mode == TDmode))
7938 /* _Decimal128 must use an even/odd register pair. This assumes
7939 that the register number is odd when fregno is odd. */
7940 if (mode == TDmode && (cum->fregno % 2) == 1)
7941 cum->fregno++;
7943 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7944 <= FP_ARG_V4_MAX_REG)
7945 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7946 else
7948 cum->fregno = FP_ARG_V4_MAX_REG + 1;
7949 if (mode == DFmode || mode == TFmode
7950 || mode == DDmode || mode == TDmode)
7951 cum->words += cum->words & 1;
7952 cum->words += rs6000_arg_size (mode, type);
7955 else
7957 int n_words = rs6000_arg_size (mode, type);
7958 int gregno = cum->sysv_gregno;
	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10), as is any other 2-word item such
	     as complex int, due to a historical mistake.  */
7963 if (n_words == 2)
7964 gregno += (1 - gregno) & 1;
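	  /* The expression (1 - gregno) & 1 adds one exactly when
	     GREGNO is even: e.g. r4 is bumped so the pair lands in
	     (r5,r6), while r3 is left alone for (r3,r4).  */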
7966 /* Multi-reg args are not split between registers and stack. */
7967 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7969 /* Long long and SPE vectors are aligned on the stack.
7970 So are other 2 word items such as complex int due to
7971 a historical mistake. */
7972 if (n_words == 2)
7973 cum->words += cum->words & 1;
7974 cum->words += n_words;
	  /* Note: we continue to accumulate gregno even after we have
	     started spilling to the stack; expand_builtin_saveregs
	     looks at this to tell that spilling has begun.  */
7980 cum->sysv_gregno = gregno + n_words;
7983 if (TARGET_DEBUG_ARG)
7985 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7986 cum->words, cum->fregno);
7987 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
7988 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
7989 fprintf (stderr, "mode = %4s, named = %d\n",
7990 GET_MODE_NAME (mode), named);
7993 else
7995 int n_words = rs6000_arg_size (mode, type);
7996 int start_words = cum->words;
7997 int align_words = rs6000_parm_start (mode, type, start_words);
7999 cum->words = align_words + n_words;
8001 if (SCALAR_FLOAT_MODE_P (mode)
8002 && TARGET_HARD_FLOAT && TARGET_FPRS)
8004 /* _Decimal128 must be passed in an even/odd float register pair.
8005 This assumes that the register number is odd when fregno is
8006 odd. */
8007 if (mode == TDmode && (cum->fregno % 2) == 1)
8008 cum->fregno++;
8009 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8012 if (TARGET_DEBUG_ARG)
8014 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8015 cum->words, cum->fregno);
8016 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8017 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8018 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8019 named, align_words - start_words, depth);
static void
rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}
8032 static rtx
8033 spe_build_register_parallel (enum machine_mode mode, int gregno)
8035 rtx r1, r3, r5, r7;
8037 switch (mode)
8039 case DFmode:
8040 r1 = gen_rtx_REG (DImode, gregno);
8041 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8042 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8044 case DCmode:
8045 case TFmode:
8046 r1 = gen_rtx_REG (DImode, gregno);
8047 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8048 r3 = gen_rtx_REG (DImode, gregno + 2);
8049 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8050 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8052 case TCmode:
8053 r1 = gen_rtx_REG (DImode, gregno);
8054 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8055 r3 = gen_rtx_REG (DImode, gregno + 2);
8056 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8057 r5 = gen_rtx_REG (DImode, gregno + 4);
8058 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8059 r7 = gen_rtx_REG (DImode, gregno + 6);
8060 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8061 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8063 default:
8064 gcc_unreachable ();
8068 /* Determine where to put a SIMD argument on the SPE. */
8069 static rtx
8070 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8071 const_tree type)
8073 int gregno = cum->sysv_gregno;
  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     doubles are passed and returned in a pair of GPRs for ABI
     compatibility.  */
8077 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8078 || mode == DCmode || mode == TCmode))
8080 int n_words = rs6000_arg_size (mode, type);
8082 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8083 if (mode == DFmode)
8084 gregno += (1 - gregno) & 1;
8086 /* Multi-reg args are not split between registers and stack. */
8087 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8088 return NULL_RTX;
8090 return spe_build_register_parallel (mode, gregno);
8092 if (cum->stdarg)
8094 int n_words = rs6000_arg_size (mode, type);
8096 /* SPE vectors are put in odd registers. */
8097 if (n_words == 2 && (gregno & 1) == 0)
8098 gregno += 1;
8100 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8102 rtx r1, r2;
8103 enum machine_mode m = SImode;
8105 r1 = gen_rtx_REG (m, gregno);
8106 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8107 r2 = gen_rtx_REG (m, gregno + 1);
8108 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8109 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8111 else
8112 return NULL_RTX;
8114 else
8116 if (gregno <= GP_ARG_MAX_REG)
8117 return gen_rtx_REG (mode, gregno);
8118 else
8119 return NULL_RTX;
8123 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8124 structure between cum->intoffset and bitpos to integer registers. */
8126 static void
8127 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8128 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8130 enum machine_mode mode;
8131 unsigned int regno;
8132 unsigned int startbit, endbit;
8133 int this_regno, intregs, intoffset;
8134 rtx reg;
8136 if (cum->intoffset == -1)
8137 return;
8139 intoffset = cum->intoffset;
8140 cum->intoffset = -1;
  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but we may wish to revisit this.  */
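  /* E.g. with 64-bit words and INTOFFSET == 32, mode_for_size below
     yields SImode and only the trailing four bytes are loaded; when a
     packed struct leaves 24 bits, mode_for_size returns BLKmode and
     we fall back to loading the whole word.  */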
8147 if (intoffset % BITS_PER_WORD != 0)
8149 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8150 MODE_INT, 0);
8151 if (mode == BLKmode)
      /* We couldn't find an appropriate mode, which happens,
	 e.g., in packed structs when there are 3 bytes to load.
	 Move intoffset back to the beginning of the word in this
	 case.  */
8157 intoffset = intoffset & -BITS_PER_WORD;
8158 mode = word_mode;
8161 else
8162 mode = word_mode;
8164 startbit = intoffset & -BITS_PER_WORD;
8165 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8166 intregs = (endbit - startbit) / BITS_PER_WORD;
8167 this_regno = cum->words + intoffset / BITS_PER_WORD;
8169 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8170 cum->use_stack = 1;
8172 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8173 if (intregs <= 0)
8174 return;
  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
8192 /* Recursive workhorse for the following. */
8194 static void
8195 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8196 HOST_WIDE_INT startbitpos, rtx rvec[],
8197 int *k)
8199 tree f;
8201 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8202 if (TREE_CODE (f) == FIELD_DECL)
8204 HOST_WIDE_INT bitpos = startbitpos;
8205 tree ftype = TREE_TYPE (f);
8206 enum machine_mode mode;
8207 if (ftype == error_mark_node)
8208 continue;
8209 mode = TYPE_MODE (ftype);
8211 if (DECL_SIZE (f) != 0
8212 && host_integerp (bit_position (f), 1))
8213 bitpos += int_bit_position (f);
8215 /* ??? FIXME: else assume zero offset. */
8217 if (TREE_CODE (ftype) == RECORD_TYPE)
8218 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8219 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8221 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8222 #if 0
8223 switch (mode)
8225 case SCmode: mode = SFmode; break;
8226 case DCmode: mode = DFmode; break;
8227 case TCmode: mode = TFmode; break;
8228 default: break;
8230 #endif
8231 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8232 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8234 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8235 && (mode == TFmode || mode == TDmode));
8236 /* Long double or _Decimal128 split over regs and memory. */
8237 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
	  cum->use_stack = 1;
8240 rvec[(*k)++]
8241 = gen_rtx_EXPR_LIST (VOIDmode,
8242 gen_rtx_REG (mode, cum->fregno++),
8243 GEN_INT (bitpos / BITS_PER_UNIT));
8244 if (mode == TFmode || mode == TDmode)
8245 cum->fregno++;
8247 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8249 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8250 rvec[(*k)++]
8251 = gen_rtx_EXPR_LIST (VOIDmode,
8252 gen_rtx_REG (mode, cum->vregno++),
8253 GEN_INT (bitpos / BITS_PER_UNIT));
8255 else if (cum->intoffset == -1)
8256 cum->intoffset = bitpos;
8260 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8261 the register(s) to be used for each field and subfield of a struct
8262 being passed by value, along with the offset of where the
8263 register's value may be found in the block. FP fields go in FP
8264 register, vector fields go in vector registers, and everything
8265 else goes in int registers, packed as in memory.
8267 This code is also used for function return values. RETVAL indicates
8268 whether this is the case.
8270 Much of this is taken from the SPARC V9 port, which has a similar
8271 calling convention. */
8273 static rtx
8274 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8275 bool named, bool retval)
8277 rtx rvec[FIRST_PSEUDO_REGISTER];
8278 int k = 1, kbase = 1;
8279 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8280 /* This is a copy; modifications are not visible to our caller. */
8281 CUMULATIVE_ARGS copy_cum = *orig_cum;
8282 CUMULATIVE_ARGS *cum = &copy_cum;
8284 /* Pad to 16 byte boundary if needed. */
8285 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8286 && (cum->words % 2) != 0)
8287 cum->words++;
8289 cum->intoffset = 0;
8290 cum->use_stack = 0;
8291 cum->named = named;
8293 /* Put entries into rvec[] for individual FP and vector fields, and
8294 for the chunks of memory that go in int regs. Note we start at
8295 element 1; 0 is reserved for an indication of using memory, and
8296 may or may not be filled in below. */
8297 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8298 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8300 /* If any part of the struct went on the stack put all of it there.
8301 This hack is because the generic code for
8302 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8303 parts of the struct are not at the beginning. */
8304 if (cum->use_stack)
8306 if (retval)
8307 return NULL_RTX; /* doesn't go in registers at all */
8308 kbase = 0;
8309 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8311 if (k > 1 || cum->use_stack)
8312 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8313 else
8314 return NULL_RTX;
8317 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8319 static rtx
8320 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8321 int align_words)
8323 int n_units;
8324 int i, k;
8325 rtx rvec[GP_ARG_NUM_REG + 1];
8327 if (align_words >= GP_ARG_NUM_REG)
8328 return NULL_RTX;
8330 n_units = rs6000_arg_size (mode, type);
8332 /* Optimize the simple case where the arg fits in one gpr, except in
8333 the case of BLKmode due to assign_parms assuming that registers are
8334 BITS_PER_WORD wide. */
8335 if (n_units == 0
8336 || (n_units == 1 && mode != BLKmode))
8337 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8339 k = 0;
8340 if (align_words + n_units > GP_ARG_NUM_REG)
8341 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8342 using a magic NULL_RTX component.
8343 This is not strictly correct. Only some of the arg belongs in
8344 memory, not all of it. However, the normal scheme using
8345 function_arg_partial_nregs can result in unusual subregs, eg.
8346 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8347 store the whole arg to memory is often more efficient than code
8348 to store pieces, and we know that space is available in the right
8349 place for the whole arg. */
8350 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8361 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
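/* As a sketch of the result: a DFmode arg landing at ALIGN_WORDS == 7
   produces a PARALLEL of a NULL_RTX element (the part in memory) plus
   (reg:SI r10) at offset 0, since only the first of its two words
   still fits in the GPRs.  */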
8364 /* Determine where to put an argument to a function.
8365 Value is zero to push the argument on the stack,
8366 or a hard register in which to store the argument.
8368 MODE is the argument's machine mode.
8369 TYPE is the data type of the argument (as a tree).
8370 This is null for libcalls where that information may
8371 not be available.
8372 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8373 the preceding args and about the function being called. It is
8374 not modified in this routine.
8375 NAMED is nonzero if this argument is a named parameter
8376 (otherwise it is an extra parameter matching an ellipsis).
8378 On RS/6000 the first eight words of non-FP are normally in registers
8379 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8380 Under V.4, the first 8 FP args are in registers.
8382 If this is floating-point and no prototype is specified, we use
8383 both an FP and integer register (or possibly FP reg and stack). Library
8384 functions (when CALL_LIBCALL is set) always have the proper types for args,
8385 so we can pass the FP value just in one register. emit_library_function
8386 doesn't support PARALLEL anyway.
8388 Note that for args passed by reference, function_arg will be called
8389 with MODE and TYPE set to that of the pointer to the arg, not the arg
8390 itself. */
8392 static rtx
8393 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8394 const_tree type, bool named)
8396 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8397 enum rs6000_abi abi = DEFAULT_ABI;
8399 /* Return a marker to indicate whether CR1 needs to set or clear the
8400 bit that V.4 uses to say fp args were passed in registers.
8401 Assume that we don't need the marker for software floating point,
8402 or compiler generated library calls. */
8403 if (mode == VOIDmode)
8405 if (abi == ABI_V4
8406 && (cum->call_cookie & CALL_LIBCALL) == 0
8407 && (cum->stdarg
8408 || (cum->nargs_prototype < 0
8409 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8411 /* For the SPE, we need to crxor CR6 always. */
8412 if (TARGET_SPE_ABI)
8413 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8414 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8415 return GEN_INT (cum->call_cookie
8416 | ((cum->fregno == FP_ARG_MIN_REG)
8417 ? CALL_V4_SET_FP_ARGS
8418 : CALL_V4_CLEAR_FP_ARGS));
8421 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8424 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8426 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8427 if (rslt != NULL_RTX)
8428 return rslt;
8429 /* Else fall through to usual handling. */
8432 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8433 if (TARGET_64BIT && ! cum->prototype)
	/* Vector parameters are passed in a vector register and also
	   in GPRs or memory, in the absence of a prototype.  */
8437 int align_words;
8438 rtx slot;
8439 align_words = (cum->words + 1) & ~1;
8441 if (align_words >= GP_ARG_NUM_REG)
8443 slot = NULL_RTX;
8445 else
8447 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8449 return gen_rtx_PARALLEL (mode,
8450 gen_rtvec (2,
8451 gen_rtx_EXPR_LIST (VOIDmode,
8452 slot, const0_rtx),
8453 gen_rtx_EXPR_LIST (VOIDmode,
8454 gen_rtx_REG (mode, cum->vregno),
8455 const0_rtx)));
8457 else
8458 return gen_rtx_REG (mode, cum->vregno);
8459 else if (TARGET_ALTIVEC_ABI
8460 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8461 || (type && TREE_CODE (type) == VECTOR_TYPE
8462 && int_size_in_bytes (type) == 16)))
8464 if (named || abi == ABI_V4)
8465 return NULL_RTX;
8466 else
8468 /* Vector parameters to varargs functions under AIX or Darwin
8469 get passed in memory and possibly also in GPRs. */
8470 int align, align_words, n_words;
8471 enum machine_mode part_mode;
8473 /* Vector parameters must be 16-byte aligned. This places them at
8474 2 mod 4 in terms of words in 32-bit mode, since the parameter
8475 save area starts at offset 24 from the stack. In 64-bit mode,
8476 they just have to start on an even word, since the parameter
8477 save area is 16-byte aligned. */
8478 if (TARGET_32BIT)
8479 align = (2 - cum->words) & 3;
8480 else
8481 align = cum->words & 1;
8482 align_words = cum->words + align;
8484 /* Out of registers? Memory, then. */
8485 if (align_words >= GP_ARG_NUM_REG)
8486 return NULL_RTX;
8488 if (TARGET_32BIT && TARGET_POWERPC64)
8489 return rs6000_mixed_function_arg (mode, type, align_words);
8491 /* The vector value goes in GPRs. Only the part of the
8492 value in GPRs is reported here. */
8493 part_mode = mode;
8494 n_words = rs6000_arg_size (mode, type);
8495 if (align_words + n_words > GP_ARG_NUM_REG)
	  /* Fortunately, there are only two possibilities: the value
	     is either wholly in GPRs or half in GPRs and half not.  */
8498 part_mode = DImode;
8500 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8503 else if (TARGET_SPE_ABI && TARGET_SPE
8504 && (SPE_VECTOR_MODE (mode)
8505 || (TARGET_E500_DOUBLE && (mode == DFmode
8506 || mode == DCmode
8507 || mode == TFmode
8508 || mode == TCmode))))
8509 return rs6000_spe_function_arg (cum, mode, type);
8511 else if (abi == ABI_V4)
8513 if (TARGET_HARD_FLOAT && TARGET_FPRS
8514 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8515 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8516 || (mode == TFmode && !TARGET_IEEEQUAD)
8517 || mode == SDmode || mode == DDmode || mode == TDmode))
8519 /* _Decimal128 must use an even/odd register pair. This assumes
8520 that the register number is odd when fregno is odd. */
8521 if (mode == TDmode && (cum->fregno % 2) == 1)
8522 cum->fregno++;
8524 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8525 <= FP_ARG_V4_MAX_REG)
8526 return gen_rtx_REG (mode, cum->fregno);
8527 else
8528 return NULL_RTX;
8530 else
8532 int n_words = rs6000_arg_size (mode, type);
8533 int gregno = cum->sysv_gregno;
	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10), as is any other 2-word item such
	     as complex int, due to a historical mistake.  */
8538 if (n_words == 2)
8539 gregno += (1 - gregno) & 1;
8541 /* Multi-reg args are not split between registers and stack. */
8542 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8543 return NULL_RTX;
8545 if (TARGET_32BIT && TARGET_POWERPC64)
8546 return rs6000_mixed_function_arg (mode, type,
8547 gregno - GP_ARG_MIN_REG);
8548 return gen_rtx_REG (mode, gregno);
8551 else
8553 int align_words = rs6000_parm_start (mode, type, cum->words);
8555 /* _Decimal128 must be passed in an even/odd float register pair.
8556 This assumes that the register number is odd when fregno is odd. */
8557 if (mode == TDmode && (cum->fregno % 2) == 1)
8558 cum->fregno++;
8560 if (USE_FP_FOR_ARG_P (cum, mode, type))
8562 rtx rvec[GP_ARG_NUM_REG + 1];
8563 rtx r;
8564 int k;
8565 bool needs_psave;
8566 enum machine_mode fmode = mode;
8567 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8569 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8571 /* Currently, we only ever need one reg here because complex
8572 doubles are split. */
8573 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8574 && (fmode == TFmode || fmode == TDmode));
8576 /* Long double or _Decimal128 split over regs and memory. */
8577 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8580 /* Do we also need to pass this arg in the parameter save
8581 area? */
8582 needs_psave = (type
8583 && (cum->nargs_prototype <= 0
8584 || (DEFAULT_ABI == ABI_AIX
8585 && TARGET_XL_COMPAT
8586 && align_words >= GP_ARG_NUM_REG)));
8588 if (!needs_psave && mode == fmode)
8589 return gen_rtx_REG (fmode, cum->fregno);
8591 k = 0;
8592 if (needs_psave)
8594 /* Describe the part that goes in gprs or the stack.
8595 This piece must come first, before the fprs. */
8596 if (align_words < GP_ARG_NUM_REG)
8598 unsigned long n_words = rs6000_arg_size (mode, type);
8600 if (align_words + n_words > GP_ARG_NUM_REG
8601 || (TARGET_32BIT && TARGET_POWERPC64))
8603 /* If this is partially on the stack, then we only
8604 include the portion actually in registers here. */
8605 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8606 rtx off;
8607 int i = 0;
8608 if (align_words + n_words > GP_ARG_NUM_REG)
8609 /* Not all of the arg fits in gprs. Say that it
8610 goes in memory too, using a magic NULL_RTX
8611 component. Also see comment in
8612 rs6000_mixed_function_arg for why the normal
8613 function_arg_partial_nregs scheme doesn't work
8614 in this case. */
8615 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
8616 const0_rtx);
	      do
		{
		  r = gen_rtx_REG (rmode,
				   GP_ARG_MIN_REG + align_words);
		  off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
		  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
		}
	      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
8626 else
8628 /* The whole arg fits in gprs. */
8629 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8630 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8633 else
8634 /* It's entirely in memory. */
8635 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8638 /* Describe where this piece goes in the fprs. */
8639 r = gen_rtx_REG (fmode, cum->fregno);
8640 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8642 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8644 else if (align_words < GP_ARG_NUM_REG)
8646 if (TARGET_32BIT && TARGET_POWERPC64)
8647 return rs6000_mixed_function_arg (mode, type, align_words);
8649 if (mode == BLKmode)
8650 mode = Pmode;
8652 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8654 else
8655 return NULL_RTX;
8659 /* For an arg passed partly in registers and partly in memory, this is
8660 the number of bytes passed in registers. For args passed entirely in
8661 registers or entirely in memory, zero. When an arg is described by a
8662 PARALLEL, perhaps using more than one register type, this function
8663 returns the number of bytes used by the first element of the PARALLEL. */
8665 static int
8666 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
8667 tree type, bool named)
8669 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8670 int ret = 0;
8671 int align_words;
8673 if (DEFAULT_ABI == ABI_V4)
8674 return 0;
8676 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
8677 && cum->nargs_prototype >= 0)
8678 return 0;
8680 /* In this complicated case we just disable the partial_nregs code. */
8681 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8682 return 0;
8684 align_words = rs6000_parm_start (mode, type, cum->words);
8686 if (USE_FP_FOR_ARG_P (cum, mode, type))
8688 /* If we are passing this arg in the fixed parameter save area
8689 (gprs or memory) as well as fprs, then this function should
8690 return the number of partial bytes passed in the parameter
8691 save area rather than partial bytes passed in fprs. */
8692 if (type
8693 && (cum->nargs_prototype <= 0
8694 || (DEFAULT_ABI == ABI_AIX
8695 && TARGET_XL_COMPAT
8696 && align_words >= GP_ARG_NUM_REG)))
8697 return 0;
8698 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
8699 > FP_ARG_MAX_REG + 1)
8700 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
8701 else if (cum->nargs_prototype >= 0)
8702 return 0;
8705 if (align_words < GP_ARG_NUM_REG
8706 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
8707 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
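  /* E.g. a 16-byte struct starting at ALIGN_WORDS == 7 on a 64-bit
     target straddles the boundary, so (8 - 7) * 8 == 8 bytes travel
     in the last GPR and the rest goes to the stack.  */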
8709 if (ret != 0 && TARGET_DEBUG_ARG)
8710 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
8712 return ret;
8715 /* A C expression that indicates when an argument must be passed by
8716 reference. If nonzero for an argument, a copy of that argument is
8717 made in memory and a pointer to the argument is passed instead of
8718 the argument itself. The pointer is passed in whatever way is
8719 appropriate for passing a pointer to that type.
8721 Under V.4, aggregates and long double are passed by reference.
8723 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
8724 reference unless the AltiVec vector extension ABI is in force.
8726 As an extension to all ABIs, variable sized types are passed by
8727 reference. */
8729 static bool
8730 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
8731 enum machine_mode mode, const_tree type,
8732 bool named ATTRIBUTE_UNUSED)
8734 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
8736 if (TARGET_DEBUG_ARG)
8737 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
8738 return 1;
8741 if (!type)
8742 return 0;
8744 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
8746 if (TARGET_DEBUG_ARG)
8747 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
8748 return 1;
8751 if (int_size_in_bytes (type) < 0)
8753 if (TARGET_DEBUG_ARG)
8754 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8755 return 1;
8758 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8759 modes only exist for GCC vector types if -maltivec. */
8760 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8762 if (TARGET_DEBUG_ARG)
8763 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8764 return 1;
8767 /* Pass synthetic vectors in memory. */
8768 if (TREE_CODE (type) == VECTOR_TYPE
8769 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8771 static bool warned_for_pass_big_vectors = false;
8772 if (TARGET_DEBUG_ARG)
8773 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8774 if (!warned_for_pass_big_vectors)
8776 warning (0, "GCC vector passed by reference: "
8777 "non-standard ABI extension with no compatibility guarantee");
8778 warned_for_pass_big_vectors = true;
8780 return 1;
8783 return 0;
8786 static void
8787 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8789 int i;
8790 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8792 if (nregs == 0)
8793 return;
8795 for (i = 0; i < nregs; i++)
8797 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
8798 if (reload_completed)
8800 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8801 tem = NULL_RTX;
8802 else
8803 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8804 i * GET_MODE_SIZE (reg_mode));
8806 else
8807 tem = replace_equiv_address (tem, XEXP (tem, 0));
8809 gcc_assert (tem);
8811 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */
8829 static void
8830 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
8831 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8832 int no_rtl)
8834 CUMULATIVE_ARGS next_cum;
8835 int reg_size = TARGET_32BIT ? 4 : 8;
8836 rtx save_area = NULL_RTX, mem;
8837 int first_reg_offset;
8838 alias_set_type set;
8840 /* Skip the last named argument. */
8841 next_cum = *get_cumulative_args (cum);
8842 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
8844 if (DEFAULT_ABI == ABI_V4)
8846 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8848 if (! no_rtl)
8850 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8851 HOST_WIDE_INT offset = 0;
      /* Try to optimize the size of the varargs save area.
	 The ABI requires that ap.reg_save_area is doubleword
	 aligned, but we don't need to allocate space for all
	 the bytes, only for those in which we will actually
	 save anything.  */
8858 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
8859 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
8860 if (TARGET_HARD_FLOAT && TARGET_FPRS
8861 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8862 && cfun->va_list_fpr_size)
8864 if (gpr_reg_num)
8865 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
8866 * UNITS_PER_FP_WORD;
8867 if (cfun->va_list_fpr_size
8868 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8869 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
8870 else
8871 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8872 * UNITS_PER_FP_WORD;
8874 if (gpr_reg_num)
8876 offset = -((first_reg_offset * reg_size) & ~7);
8877 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
8879 gpr_reg_num = cfun->va_list_gpr_size;
8880 if (reg_size == 4 && (first_reg_offset & 1))
8881 gpr_reg_num++;
8883 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
8885 else if (fpr_size)
8886 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
8887 * UNITS_PER_FP_WORD
8888 - (int) (GP_ARG_NUM_REG * reg_size);
8890 if (gpr_size + fpr_size)
8892 rtx reg_save_area
8893 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
8894 gcc_assert (GET_CODE (reg_save_area) == MEM);
8895 reg_save_area = XEXP (reg_save_area, 0);
8896 if (GET_CODE (reg_save_area) == PLUS)
8898 gcc_assert (XEXP (reg_save_area, 0)
8899 == virtual_stack_vars_rtx);
8900 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
8901 offset += INTVAL (XEXP (reg_save_area, 1));
8903 else
8904 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
8907 cfun->machine->varargs_save_offset = offset;
8908 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
8911 else
8913 first_reg_offset = next_cum.words;
8914 save_area = virtual_incoming_args_rtx;
8916 if (targetm.calls.must_pass_in_stack (mode, type))
8917 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
8920 set = get_varargs_alias_set ();
8921 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
8922 && cfun->va_list_gpr_size)
8924 int nregs = GP_ARG_NUM_REG - first_reg_offset;
8926 if (va_list_gpr_counter_field)
8928 /* V4 va_list_gpr_size counts number of registers needed. */
8929 if (nregs > cfun->va_list_gpr_size)
8930 nregs = cfun->va_list_gpr_size;
8932 else
8934 /* char * va_list instead counts number of bytes needed. */
8935 if (nregs > cfun->va_list_gpr_size / reg_size)
8936 nregs = cfun->va_list_gpr_size / reg_size;
8939 mem = gen_rtx_MEM (BLKmode,
8940 plus_constant (Pmode, save_area,
8941 first_reg_offset * reg_size));
8942 MEM_NOTRAP_P (mem) = 1;
8943 set_mem_alias_set (mem, set);
8944 set_mem_align (mem, BITS_PER_WORD);
8946 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
8947 nregs);
8950 /* Save FP registers if needed. */
8951 if (DEFAULT_ABI == ABI_V4
8952 && TARGET_HARD_FLOAT && TARGET_FPRS
8953 && ! no_rtl
8954 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8955 && cfun->va_list_fpr_size)
8957 int fregno = next_cum.fregno, nregs;
8958 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
8959 rtx lab = gen_label_rtx ();
8960 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
8961 * UNITS_PER_FP_WORD);
8963 emit_jump_insn
8964 (gen_rtx_SET (VOIDmode,
8965 pc_rtx,
8966 gen_rtx_IF_THEN_ELSE (VOIDmode,
8967 gen_rtx_NE (VOIDmode, cr1,
8968 const0_rtx),
8969 gen_rtx_LABEL_REF (VOIDmode, lab),
8970 pc_rtx)));
8972 for (nregs = 0;
8973 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
8974 fregno++, off += UNITS_PER_FP_WORD, nregs++)
8976 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8977 ? DFmode : SFmode,
8978 plus_constant (Pmode, save_area, off));
8979 MEM_NOTRAP_P (mem) = 1;
8980 set_mem_alias_set (mem, set);
8981 set_mem_align (mem, GET_MODE_ALIGNMENT (
8982 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8983 ? DFmode : SFmode));
8984 emit_move_insn (mem, gen_rtx_REG (
8985 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8986 ? DFmode : SFmode, fregno));
8989 emit_label (lab);
8993 /* Create the va_list data type. */
8995 static tree
8996 rs6000_build_builtin_va_list (void)
8998 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9000 /* For AIX, prefer 'char *' because that's what the system
9001 header files like. */
9002 if (DEFAULT_ABI != ABI_V4)
9003 return build_pointer_type (char_type_node);
9005 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9006 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9007 get_identifier ("__va_list_tag"), record);
9009 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9010 unsigned_char_type_node);
9011 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9012 unsigned_char_type_node);
9013 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9014 every user file. */
9015 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9016 get_identifier ("reserved"), short_unsigned_type_node);
9017 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9018 get_identifier ("overflow_arg_area"),
9019 ptr_type_node);
9020 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9021 get_identifier ("reg_save_area"),
9022 ptr_type_node);
9024 va_list_gpr_counter_field = f_gpr;
9025 va_list_fpr_counter_field = f_fpr;
9027 DECL_FIELD_CONTEXT (f_gpr) = record;
9028 DECL_FIELD_CONTEXT (f_fpr) = record;
9029 DECL_FIELD_CONTEXT (f_res) = record;
9030 DECL_FIELD_CONTEXT (f_ovf) = record;
9031 DECL_FIELD_CONTEXT (f_sav) = record;
9033 TYPE_STUB_DECL (record) = type_decl;
9034 TYPE_NAME (record) = type_decl;
9035 TYPE_FIELDS (record) = f_gpr;
9036 DECL_CHAIN (f_gpr) = f_fpr;
9037 DECL_CHAIN (f_fpr) = f_res;
9038 DECL_CHAIN (f_res) = f_ovf;
9039 DECL_CHAIN (f_ovf) = f_sav;
9041 layout_type (record);
9043 /* The correct type is an array type of one element. */
9044 return build_array_type (record, build_index_type (size_zero_node));
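/* The record built above makes the V.4 va_list roughly equivalent to:

     struct __va_list_tag
     {
       unsigned char gpr;	     -- GPRs consumed so far (0..8)
       unsigned char fpr;	     -- FPRs consumed so far (0..8)
       unsigned short reserved;      -- named padding, for -Wpadded
       void *overflow_arg_area;      -- arguments passed on the stack
       void *reg_save_area;	     -- where the prologue saved regs
     };

   exposed to the user as a one-element array of that record.  */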
9047 /* Implement va_start. */
9049 static void
9050 rs6000_va_start (tree valist, rtx nextarg)
9052 HOST_WIDE_INT words, n_gpr, n_fpr;
9053 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9054 tree gpr, fpr, ovf, sav, t;
9056 /* Only SVR4 needs something special. */
9057 if (DEFAULT_ABI != ABI_V4)
9059 std_expand_builtin_va_start (valist, nextarg);
9060 return;
9063 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9064 f_fpr = DECL_CHAIN (f_gpr);
9065 f_res = DECL_CHAIN (f_fpr);
9066 f_ovf = DECL_CHAIN (f_res);
9067 f_sav = DECL_CHAIN (f_ovf);
9069 valist = build_simple_mem_ref (valist);
9070 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9071 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9072 f_fpr, NULL_TREE);
9073 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9074 f_ovf, NULL_TREE);
9075 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9076 f_sav, NULL_TREE);
9078 /* Count number of gp and fp argument registers used. */
9079 words = crtl->args.info.words;
9080 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9081 GP_ARG_NUM_REG);
9082 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9083 FP_ARG_NUM_REG);
9085 if (TARGET_DEBUG_ARG)
9086 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9087 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9088 words, n_gpr, n_fpr);
9090 if (cfun->va_list_gpr_size)
9092 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9093 build_int_cst (NULL_TREE, n_gpr));
9094 TREE_SIDE_EFFECTS (t) = 1;
9095 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9098 if (cfun->va_list_fpr_size)
9100 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9101 build_int_cst (NULL_TREE, n_fpr));
9102 TREE_SIDE_EFFECTS (t) = 1;
9103 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9105 #ifdef HAVE_AS_GNU_ATTRIBUTE
9106 if (call_ABI_of_interest (cfun->decl))
9107 rs6000_passes_float = true;
9108 #endif
9111 /* Find the overflow area. */
9112 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9113 if (words != 0)
9114 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9115 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9116 TREE_SIDE_EFFECTS (t) = 1;
9117 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9119 /* If there were no va_arg invocations, don't set up the register
9120 save area. */
9121 if (!cfun->va_list_gpr_size
9122 && !cfun->va_list_fpr_size
9123 && n_gpr < GP_ARG_NUM_REG
9124 && n_fpr < FP_ARG_V4_MAX_REG)
9125 return;
9127 /* Find the register save area. */
9128 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9129 if (cfun->machine->varargs_save_offset)
9130 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9131 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9132 TREE_SIDE_EFFECTS (t) = 1;
9133 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9136 /* Implement va_arg. */
9138 static tree
9139 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9140 gimple_seq *post_p)
9142 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9143 tree gpr, fpr, ovf, sav, reg, t, u;
9144 int size, rsize, n_reg, sav_ofs, sav_scale;
9145 tree lab_false, lab_over, addr;
9146 int align;
9147 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9148 int regalign = 0;
9149 gimple stmt;
9151 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9153 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9154 return build_va_arg_indirect_ref (t);
9157 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9158 earlier version of gcc, with the property that it always applied alignment
9159 adjustments to the va-args (even for zero-sized types). The cheapest way
9160 to deal with this is to replicate the effect of the part of
9161 std_gimplify_va_arg_expr that carries out the align adjust, for the case
9162 of relevance.
9163 We don't need to check for pass-by-reference because of the test above.
   We can return a simplified answer, since we know there's no offset to add.  */
9166 if (TARGET_MACHO
9167 && rs6000_darwin64_abi
9168 && integer_zerop (TYPE_SIZE (type)))
9170 unsigned HOST_WIDE_INT align, boundary;
9171 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9172 align = PARM_BOUNDARY / BITS_PER_UNIT;
9173 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9174 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9175 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9176 boundary /= BITS_PER_UNIT;
9177 if (boundary > align)
      tree t;
9180 /* This updates arg ptr by the amount that would be necessary
9181 to align the zero-sized (but not zero-alignment) item. */
9182 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9183 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9184 gimplify_and_add (t, pre_p);
9186 t = fold_convert (sizetype, valist_tmp);
9187 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9188 fold_convert (TREE_TYPE (valist),
9189 fold_build2 (BIT_AND_EXPR, sizetype, t,
9190 size_int (-boundary))));
9191 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9192 gimplify_and_add (t, pre_p);
9194 /* Since it is zero-sized there's no increment for the item itself. */
9195 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9196 return build_va_arg_indirect_ref (valist_tmp);
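/* Editorial sketch of the round-up-and-mask idiom used above, with
   assumed example values (not from the original source): for
   boundary == 16 and an incoming valist of 0x1003,
     valist += 16 - 1;    =>  0x1012
     valist &= -16;       =>  0x1010
   i.e. the next 16-byte-aligned address at or above the original.  */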
9199 if (DEFAULT_ABI != ABI_V4)
9201 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9203 tree elem_type = TREE_TYPE (type);
9204 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9205 int elem_size = GET_MODE_SIZE (elem_mode);
9207 if (elem_size < UNITS_PER_WORD)
9209 tree real_part, imag_part;
9210 gimple_seq post = NULL;
9212 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9213 &post);
9214 /* Copy the value into a temporary, lest the formal temporary
9215 be reused out from under us. */
9216 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9217 gimple_seq_add_seq (pre_p, post);
9219 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9220 post_p);
9222 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9226 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9229 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9230 f_fpr = DECL_CHAIN (f_gpr);
9231 f_res = DECL_CHAIN (f_fpr);
9232 f_ovf = DECL_CHAIN (f_res);
9233 f_sav = DECL_CHAIN (f_ovf);
9235 valist = build_va_arg_indirect_ref (valist);
9236 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9237 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9238 f_fpr, NULL_TREE);
9239 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9240 f_ovf, NULL_TREE);
9241 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9242 f_sav, NULL_TREE);
9244 size = int_size_in_bytes (type);
9245 rsize = (size + 3) / 4;
9246 align = 1;
9248 if (TARGET_HARD_FLOAT && TARGET_FPRS
9249 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9250 || (TARGET_DOUBLE_FLOAT
9251 && (TYPE_MODE (type) == DFmode
9252 || TYPE_MODE (type) == TFmode
9253 || TYPE_MODE (type) == SDmode
9254 || TYPE_MODE (type) == DDmode
9255 || TYPE_MODE (type) == TDmode))))
9257 /* FP args go in FP registers, if present. */
9258 reg = fpr;
9259 n_reg = (size + 7) / 8;
9260 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9261 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9262 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9263 align = 8;
9265 else
9267 /* Otherwise into GP registers. */
9268 reg = gpr;
9269 n_reg = rsize;
9270 sav_ofs = 0;
9271 sav_scale = 4;
9272 if (n_reg == 2)
9273 align = 8;
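/* Editorial worked example (illustrative only): for a V4 `long long',
   size == 8, so rsize == (8 + 3) / 4 == 2 words; it travels in GPRs
   with n_reg == 2 and hence align == 8.  A `double' with hard FP
   takes the FPR path above instead: n_reg == (8 + 7) / 8 == 1 and
   sav_scale == 8.  */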
9276 /* Pull the value out of the saved registers.... */
9278 lab_over = NULL;
9279 addr = create_tmp_var (ptr_type_node, "addr");
9281 /* AltiVec vectors never go in registers when -mabi=altivec. */
9282 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9283 align = 16;
9284 else
9286 lab_false = create_artificial_label (input_location);
9287 lab_over = create_artificial_label (input_location);
9289 /* Long long and SPE vectors are aligned in the registers,
9290 as is any other 2-GPR item such as complex int, due to a
9291 historical mistake. */
9292 u = reg;
9293 if (n_reg == 2 && reg == gpr)
9295 regalign = 1;
9296 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9297 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9298 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9299 unshare_expr (reg), u);
9301 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9302 reg number is 0 for f1, so we want to make it odd. */
9303 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9305 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9306 build_int_cst (TREE_TYPE (reg), 1));
9307 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
9310 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9311 t = build2 (GE_EXPR, boolean_type_node, u, t);
9312 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9313 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9314 gimplify_and_add (t, pre_p);
9316 t = sav;
9317 if (sav_ofs)
9318 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9320 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9321 build_int_cst (TREE_TYPE (reg), n_reg));
9322 u = fold_convert (sizetype, u);
9323 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9324 t = fold_build_pointer_plus (t, u);
9326 /* _Decimal32 varargs are located in the second word of the 64-bit
9327 FP register for 32-bit binaries. */
9328 if (!TARGET_POWERPC64
9329 && TARGET_HARD_FLOAT && TARGET_FPRS
9330 && TYPE_MODE (type) == SDmode)
9331 t = fold_build_pointer_plus_hwi (t, size);
9333 gimplify_assign (addr, t, pre_p);
9335 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9337 stmt = gimple_build_label (lab_false);
9338 gimple_seq_add_stmt (pre_p, stmt);
9340 if ((n_reg == 2 && !regalign) || n_reg > 2)
9342 /* Ensure that we don't find any more args in regs.
9343 Alignment has already taken care of the special cases. */
9344 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9348 /* ... otherwise out of the overflow area. */
9350 /* Care for on-stack alignment if needed. */
9351 t = ovf;
9352 if (align != 1)
9354 t = fold_build_pointer_plus_hwi (t, align - 1);
9355 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9356 build_int_cst (TREE_TYPE (t), -align));
9358 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9360 gimplify_assign (unshare_expr (addr), t, pre_p);
9362 t = fold_build_pointer_plus_hwi (t, size);
9363 gimplify_assign (unshare_expr (ovf), t, pre_p);
9365 if (lab_over)
9367 stmt = gimple_build_label (lab_over);
9368 gimple_seq_add_stmt (pre_p, stmt);
9371 if (STRICT_ALIGNMENT
9372 && (TYPE_ALIGN (type)
9373 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9375 /* The value (of type complex double, for example) may not be
9376 aligned in memory in the saved registers, so copy via a
9377 temporary. (This is the same code as used for SPARC.) */
9378 tree tmp = create_tmp_var (type, "va_arg_tmp");
9379 tree dest_addr = build_fold_addr_expr (tmp);
9381 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9382 3, dest_addr, addr, size_int (rsize * 4));
9384 gimplify_and_add (copy, pre_p);
9385 addr = dest_addr;
9388 addr = fold_convert (ptrtype, addr);
9389 return build_va_arg_indirect_ref (addr);
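/* Editorial illustration (assumed example, not from the original
   source): under the 32-bit SVR4 ABI, for
       double f (int n, ...)
       { va_list ap; va_start (ap, n); return va_arg (ap, double); }
   the code above emits, roughly:
       if (ap->fpr < 8)
         load the result from sav + 32 + ap->fpr++ * 8
       else
         load it from the overflow area, with the pointer first
         rounded up to an 8-byte boundary
   where 32 is sav_ofs (past the eight 4-byte GPR slots) and 8 is
   sav_scale for doubles.  */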
9392 /* Builtins. */
9394 static void
9395 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9397 tree t;
9398 unsigned classify = rs6000_builtin_info[(int)code].attr;
9399 const char *attr_string = "";
9401 gcc_assert (name != NULL);
9402 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
9404 if (rs6000_builtin_decls[(int)code])
9405 fatal_error ("internal error: builtin function %s already processed", name);
9407 rs6000_builtin_decls[(int)code] = t =
9408 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9410 /* Set any special attributes. */
9411 if ((classify & RS6000_BTC_CONST) != 0)
9413 /* const function: the result depends only on the inputs. */
9414 TREE_READONLY (t) = 1;
9415 TREE_NOTHROW (t) = 1;
9416 attr_string = ", const";
9418 else if ((classify & RS6000_BTC_PURE) != 0)
9420 /* pure function: may read global memory, but does not modify any
9421 external state. */
9422 DECL_PURE_P (t) = 1;
9423 TREE_NOTHROW (t) = 1;
9424 attr_string = ", pure";
9426 else if ((classify & RS6000_BTC_FP) != 0)
9428 /* Function is a math function. If -frounding-math is in effect, treat
9429 the function as not reading global memory, though it may have arbitrary
9430 side effects on the FP environment. Otherwise assume it is a const
9431 function. This mimics the ATTR_MATHFN_FPROUNDING attribute in
9432 builtin-attrs.def that is used for the math functions. */
9433 TREE_NOTHROW (t) = 1;
9434 if (flag_rounding_math)
9436 DECL_PURE_P (t) = 1;
9437 DECL_IS_NOVOPS (t) = 1;
9438 attr_string = ", fp, pure";
9440 else
9442 TREE_READONLY (t) = 1;
9443 attr_string = ", fp, const";
9446 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9447 gcc_unreachable ();
9449 if (TARGET_DEBUG_BUILTIN)
9450 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9451 (int)code, name, attr_string);
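/* Editorial usage sketch: the *_init_builtins routines later in this
   file register each builtin through calls of the shape
       def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si,
                    ALTIVEC_BUILTIN_MTVSCR);
   (a representative call; the exact name/type pairs come from the
   builtin tables and are not reproduced here).  */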
9454 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9456 #undef RS6000_BUILTIN_1
9457 #undef RS6000_BUILTIN_2
9458 #undef RS6000_BUILTIN_3
9459 #undef RS6000_BUILTIN_A
9460 #undef RS6000_BUILTIN_D
9461 #undef RS6000_BUILTIN_E
9462 #undef RS6000_BUILTIN_P
9463 #undef RS6000_BUILTIN_Q
9464 #undef RS6000_BUILTIN_S
9465 #undef RS6000_BUILTIN_X
9467 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9468 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9469 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9470 { MASK, ICODE, NAME, ENUM },
9472 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9473 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9474 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9475 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9476 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9477 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9478 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9480 static const struct builtin_description bdesc_3arg[] =
9482 #include "rs6000-builtin.def"
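/* Editorial note on the X-macro scheme above: each table defines
   exactly one RS6000_BUILTIN_* macro to emit an initializer and the
   rest to expand to nothing, so re-including rs6000-builtin.def picks
   out just that class of builtins.  A hypothetical .def line such as
       RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                         RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
                         CODE_FOR_altivec_vmaddfp)
   would expand here to
       { RS6000_BTM_ALTIVEC, CODE_FOR_altivec_vmaddfp,
         "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
   and to nothing in every other table.  */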
9485 /* DST operations: void foo (void *, const int, const char). */
9487 #undef RS6000_BUILTIN_1
9488 #undef RS6000_BUILTIN_2
9489 #undef RS6000_BUILTIN_3
9490 #undef RS6000_BUILTIN_A
9491 #undef RS6000_BUILTIN_D
9492 #undef RS6000_BUILTIN_E
9493 #undef RS6000_BUILTIN_P
9494 #undef RS6000_BUILTIN_Q
9495 #undef RS6000_BUILTIN_S
9496 #undef RS6000_BUILTIN_X
9498 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9499 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9500 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9501 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9502 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9503 { MASK, ICODE, NAME, ENUM },
9505 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9506 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9507 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9508 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9509 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9511 static const struct builtin_description bdesc_dst[] =
9513 #include "rs6000-builtin.def"
9516 /* Simple binary operations: VECc = foo (VECa, VECb). */
9518 #undef RS6000_BUILTIN_1
9519 #undef RS6000_BUILTIN_2
9520 #undef RS6000_BUILTIN_3
9521 #undef RS6000_BUILTIN_A
9522 #undef RS6000_BUILTIN_D
9523 #undef RS6000_BUILTIN_E
9524 #undef RS6000_BUILTIN_P
9525 #undef RS6000_BUILTIN_Q
9526 #undef RS6000_BUILTIN_S
9527 #undef RS6000_BUILTIN_X
9529 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9530 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9531 { MASK, ICODE, NAME, ENUM },
9533 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9534 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9535 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9536 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9537 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9538 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9539 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9540 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9542 static const struct builtin_description bdesc_2arg[] =
9544 #include "rs6000-builtin.def"
9547 #undef RS6000_BUILTIN_1
9548 #undef RS6000_BUILTIN_2
9549 #undef RS6000_BUILTIN_3
9550 #undef RS6000_BUILTIN_A
9551 #undef RS6000_BUILTIN_D
9552 #undef RS6000_BUILTIN_E
9553 #undef RS6000_BUILTIN_P
9554 #undef RS6000_BUILTIN_Q
9555 #undef RS6000_BUILTIN_S
9556 #undef RS6000_BUILTIN_X
9558 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9559 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9560 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9561 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9562 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9563 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9564 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9565 { MASK, ICODE, NAME, ENUM },
9567 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9568 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9569 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9571 /* AltiVec predicates. */
9573 static const struct builtin_description bdesc_altivec_preds[] =
9575 #include "rs6000-builtin.def"
9578 /* SPE predicates. */
9579 #undef RS6000_BUILTIN_1
9580 #undef RS6000_BUILTIN_2
9581 #undef RS6000_BUILTIN_3
9582 #undef RS6000_BUILTIN_A
9583 #undef RS6000_BUILTIN_D
9584 #undef RS6000_BUILTIN_E
9585 #undef RS6000_BUILTIN_P
9586 #undef RS6000_BUILTIN_Q
9587 #undef RS6000_BUILTIN_S
9588 #undef RS6000_BUILTIN_X
9590 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9591 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9592 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9593 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9594 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9595 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9596 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9597 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9598 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9599 { MASK, ICODE, NAME, ENUM },
9601 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9603 static const struct builtin_description bdesc_spe_predicates[] =
9605 #include "rs6000-builtin.def"
9608 /* SPE evsel predicates. */
9609 #undef RS6000_BUILTIN_1
9610 #undef RS6000_BUILTIN_2
9611 #undef RS6000_BUILTIN_3
9612 #undef RS6000_BUILTIN_A
9613 #undef RS6000_BUILTIN_D
9614 #undef RS6000_BUILTIN_E
9615 #undef RS6000_BUILTIN_P
9616 #undef RS6000_BUILTIN_Q
9617 #undef RS6000_BUILTIN_S
9618 #undef RS6000_BUILTIN_X
9620 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9621 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9622 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9623 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9624 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9625 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
9626 { MASK, ICODE, NAME, ENUM },
9628 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9629 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9630 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9631 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9633 static const struct builtin_description bdesc_spe_evsel[] =
9635 #include "rs6000-builtin.def"
9638 /* PAIRED predicates. */
9639 #undef RS6000_BUILTIN_1
9640 #undef RS6000_BUILTIN_2
9641 #undef RS6000_BUILTIN_3
9642 #undef RS6000_BUILTIN_A
9643 #undef RS6000_BUILTIN_D
9644 #undef RS6000_BUILTIN_E
9645 #undef RS6000_BUILTIN_P
9646 #undef RS6000_BUILTIN_Q
9647 #undef RS6000_BUILTIN_S
9648 #undef RS6000_BUILTIN_X
9650 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9651 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9652 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9653 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9654 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9655 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9656 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9657 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
9658 { MASK, ICODE, NAME, ENUM },
9660 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9661 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9663 static const struct builtin_description bdesc_paired_preds[] =
9665 #include "rs6000-builtin.def"
9668 /* ABS* operations. */
9670 #undef RS6000_BUILTIN_1
9671 #undef RS6000_BUILTIN_2
9672 #undef RS6000_BUILTIN_3
9673 #undef RS6000_BUILTIN_A
9674 #undef RS6000_BUILTIN_D
9675 #undef RS6000_BUILTIN_E
9676 #undef RS6000_BUILTIN_P
9677 #undef RS6000_BUILTIN_Q
9678 #undef RS6000_BUILTIN_S
9679 #undef RS6000_BUILTIN_X
9681 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9682 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9683 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9684 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
9685 { MASK, ICODE, NAME, ENUM },
9687 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9688 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9689 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9690 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9691 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9692 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9694 static const struct builtin_description bdesc_abs[] =
9696 #include "rs6000-builtin.def"
9699 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
9700 foo (VECa). */
9702 #undef RS6000_BUILTIN_1
9703 #undef RS6000_BUILTIN_2
9704 #undef RS6000_BUILTIN_3
9705 #undef RS6000_BUILTIN_A
9706 #undef RS6000_BUILTIN_E
9707 #undef RS6000_BUILTIN_D
9708 #undef RS6000_BUILTIN_P
9709 #undef RS6000_BUILTIN_Q
9710 #undef RS6000_BUILTIN_S
9711 #undef RS6000_BUILTIN_X
9713 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
9714 { MASK, ICODE, NAME, ENUM },
9716 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9717 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9718 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9719 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9720 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9721 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9722 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9723 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9724 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9726 static const struct builtin_description bdesc_1arg[] =
9728 #include "rs6000-builtin.def"
9731 #undef RS6000_BUILTIN_1
9732 #undef RS6000_BUILTIN_2
9733 #undef RS6000_BUILTIN_3
9734 #undef RS6000_BUILTIN_A
9735 #undef RS6000_BUILTIN_D
9736 #undef RS6000_BUILTIN_E
9737 #undef RS6000_BUILTIN_P
9738 #undef RS6000_BUILTIN_Q
9739 #undef RS6000_BUILTIN_S
9740 #undef RS6000_BUILTIN_X
9742 /* Return true if a builtin function is overloaded. */
9743 bool
9744 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
9746 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
9749 /* Expand an expression EXP that calls a builtin without arguments. */
9750 static rtx
9751 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
9753 rtx pat;
9754 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9756 if (icode == CODE_FOR_nothing)
9757 /* Builtin not supported on this processor. */
9758 return 0;
9760 if (target == 0
9761 || GET_MODE (target) != tmode
9762 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9763 target = gen_reg_rtx (tmode);
9765 pat = GEN_FCN (icode) (target);
9766 if (! pat)
9767 return 0;
9768 emit_insn (pat);
9770 return target;
9774 static rtx
9775 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9777 rtx pat;
9778 tree arg0 = CALL_EXPR_ARG (exp, 0);
9779 rtx op0 = expand_normal (arg0);
9780 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9781 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9783 if (icode == CODE_FOR_nothing)
9784 /* Builtin not supported on this processor. */
9785 return 0;
9788 /* If we got invalid arguments, bail out before generating bad rtl. */
9788 if (arg0 == error_mark_node)
9789 return const0_rtx;
9791 if (icode == CODE_FOR_altivec_vspltisb
9792 || icode == CODE_FOR_altivec_vspltish
9793 || icode == CODE_FOR_altivec_vspltisw
9794 || icode == CODE_FOR_spe_evsplatfi
9795 || icode == CODE_FOR_spe_evsplati)
9797 /* Only allow 5-bit *signed* literals. */
9798 if (GET_CODE (op0) != CONST_INT
9799 || INTVAL (op0) > 15
9800 || INTVAL (op0) < -16)
9802 error ("argument 1 must be a 5-bit signed literal");
9803 return const0_rtx;
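/* Editorial note: the vspltis[bhw] and evsplat* immediates occupy a
   5-bit signed field, hence the -16..15 window checked above; e.g.
   vec_splat_s8 (5) is accepted while vec_splat_s8 (16) is not.  */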
9807 if (target == 0
9808 || GET_MODE (target) != tmode
9809 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9810 target = gen_reg_rtx (tmode);
9812 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9813 op0 = copy_to_mode_reg (mode0, op0);
9815 pat = GEN_FCN (icode) (target, op0);
9816 if (! pat)
9817 return 0;
9818 emit_insn (pat);
9820 return target;
9823 static rtx
9824 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9826 rtx pat, scratch1, scratch2;
9827 tree arg0 = CALL_EXPR_ARG (exp, 0);
9828 rtx op0 = expand_normal (arg0);
9829 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9830 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9832 /* If we have invalid arguments, bail out before generating bad rtl. */
9833 if (arg0 == error_mark_node)
9834 return const0_rtx;
9836 if (target == 0
9837 || GET_MODE (target) != tmode
9838 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9839 target = gen_reg_rtx (tmode);
9841 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9842 op0 = copy_to_mode_reg (mode0, op0);
9844 scratch1 = gen_reg_rtx (mode0);
9845 scratch2 = gen_reg_rtx (mode0);
9847 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9848 if (! pat)
9849 return 0;
9850 emit_insn (pat);
9852 return target;
9855 static rtx
9856 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
9858 rtx pat;
9859 tree arg0 = CALL_EXPR_ARG (exp, 0);
9860 tree arg1 = CALL_EXPR_ARG (exp, 1);
9861 rtx op0 = expand_normal (arg0);
9862 rtx op1 = expand_normal (arg1);
9863 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9864 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9865 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9867 if (icode == CODE_FOR_nothing)
9868 /* Builtin not supported on this processor. */
9869 return 0;
9871 /* If we got invalid arguments, bail out before generating bad rtl. */
9872 if (arg0 == error_mark_node || arg1 == error_mark_node)
9873 return const0_rtx;
9875 if (icode == CODE_FOR_altivec_vcfux
9876 || icode == CODE_FOR_altivec_vcfsx
9877 || icode == CODE_FOR_altivec_vctsxs
9878 || icode == CODE_FOR_altivec_vctuxs
9879 || icode == CODE_FOR_altivec_vspltb
9880 || icode == CODE_FOR_altivec_vsplth
9881 || icode == CODE_FOR_altivec_vspltw
9882 || icode == CODE_FOR_spe_evaddiw
9883 || icode == CODE_FOR_spe_evldd
9884 || icode == CODE_FOR_spe_evldh
9885 || icode == CODE_FOR_spe_evldw
9886 || icode == CODE_FOR_spe_evlhhesplat
9887 || icode == CODE_FOR_spe_evlhhossplat
9888 || icode == CODE_FOR_spe_evlhhousplat
9889 || icode == CODE_FOR_spe_evlwhe
9890 || icode == CODE_FOR_spe_evlwhos
9891 || icode == CODE_FOR_spe_evlwhou
9892 || icode == CODE_FOR_spe_evlwhsplat
9893 || icode == CODE_FOR_spe_evlwwsplat
9894 || icode == CODE_FOR_spe_evrlwi
9895 || icode == CODE_FOR_spe_evslwi
9896 || icode == CODE_FOR_spe_evsrwis
9897 || icode == CODE_FOR_spe_evsubifw
9898 || icode == CODE_FOR_spe_evsrwiu)
9900 /* Only allow 5-bit unsigned literals. */
9901 STRIP_NOPS (arg1);
9902 if (TREE_CODE (arg1) != INTEGER_CST
9903 || TREE_INT_CST_LOW (arg1) & ~0x1f)
9905 error ("argument 2 must be a 5-bit unsigned literal");
9906 return const0_rtx;
9910 if (target == 0
9911 || GET_MODE (target) != tmode
9912 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9913 target = gen_reg_rtx (tmode);
9915 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9916 op0 = copy_to_mode_reg (mode0, op0);
9917 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9918 op1 = copy_to_mode_reg (mode1, op1);
9920 pat = GEN_FCN (icode) (target, op0, op1);
9921 if (! pat)
9922 return 0;
9923 emit_insn (pat);
9925 return target;
9928 static rtx
9929 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9931 rtx pat, scratch;
9932 tree cr6_form = CALL_EXPR_ARG (exp, 0);
9933 tree arg0 = CALL_EXPR_ARG (exp, 1);
9934 tree arg1 = CALL_EXPR_ARG (exp, 2);
9935 rtx op0 = expand_normal (arg0);
9936 rtx op1 = expand_normal (arg1);
9937 enum machine_mode tmode = SImode;
9938 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9939 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9940 int cr6_form_int;
9942 if (TREE_CODE (cr6_form) != INTEGER_CST)
9944 error ("argument 1 of __builtin_altivec_predicate must be a constant");
9945 return const0_rtx;
9947 else
9948 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
9950 gcc_assert (mode0 == mode1);
9952 /* If we have invalid arguments, bail out before generating bad rtl. */
9953 if (arg0 == error_mark_node || arg1 == error_mark_node)
9954 return const0_rtx;
9956 if (target == 0
9957 || GET_MODE (target) != tmode
9958 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9959 target = gen_reg_rtx (tmode);
9961 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9962 op0 = copy_to_mode_reg (mode0, op0);
9963 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9964 op1 = copy_to_mode_reg (mode1, op1);
9966 scratch = gen_reg_rtx (mode0);
9968 pat = GEN_FCN (icode) (scratch, op0, op1);
9969 if (! pat)
9970 return 0;
9971 emit_insn (pat);
9973 /* The vec_any* and vec_all* predicates use the same opcodes for two
9974 different operations, but the bits in CR6 will be different
9975 depending on what information we want. So we have to play tricks
9976 with CR6 to get the right bits out.
9978 If you think this is disgusting, look at the specs for the
9979 AltiVec predicates. */
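/* Editorial note (mapping as provided by altivec.h, shown here for
   illustration): the four forms are __CR6_EQ (0), __CR6_EQ_REV (1),
   __CR6_LT (2) and __CR6_LT_REV (3).  After a dot-form compare,
   CR6.LT is set when every element compared true and CR6.EQ when
   none did, so for example vec_all_eq tests __CR6_LT while
   vec_any_eq tests __CR6_EQ_REV ("not all-false").  */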
9981 switch (cr6_form_int)
9983 case 0:
9984 emit_insn (gen_cr6_test_for_zero (target));
9985 break;
9986 case 1:
9987 emit_insn (gen_cr6_test_for_zero_reverse (target));
9988 break;
9989 case 2:
9990 emit_insn (gen_cr6_test_for_lt (target));
9991 break;
9992 case 3:
9993 emit_insn (gen_cr6_test_for_lt_reverse (target));
9994 break;
9995 default:
9996 error ("argument 1 of __builtin_altivec_predicate is out of range");
9997 break;
10000 return target;
10003 static rtx
10004 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10006 rtx pat, addr;
10007 tree arg0 = CALL_EXPR_ARG (exp, 0);
10008 tree arg1 = CALL_EXPR_ARG (exp, 1);
10009 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10010 enum machine_mode mode0 = Pmode;
10011 enum machine_mode mode1 = Pmode;
10012 rtx op0 = expand_normal (arg0);
10013 rtx op1 = expand_normal (arg1);
10015 if (icode == CODE_FOR_nothing)
10016 /* Builtin not supported on this processor. */
10017 return 0;
10019 /* If we got invalid arguments, bail out before generating bad rtl. */
10020 if (arg0 == error_mark_node || arg1 == error_mark_node)
10021 return const0_rtx;
10023 if (target == 0
10024 || GET_MODE (target) != tmode
10025 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10026 target = gen_reg_rtx (tmode);
10028 op1 = copy_to_mode_reg (mode1, op1);
10030 if (op0 == const0_rtx)
10032 addr = gen_rtx_MEM (tmode, op1);
10034 else
10036 op0 = copy_to_mode_reg (mode0, op0);
10037 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10040 pat = GEN_FCN (icode) (target, addr);
10042 if (! pat)
10043 return 0;
10044 emit_insn (pat);
10046 return target;
10049 static rtx
10050 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10052 rtx pat, addr;
10053 tree arg0 = CALL_EXPR_ARG (exp, 0);
10054 tree arg1 = CALL_EXPR_ARG (exp, 1);
10055 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10056 enum machine_mode mode0 = Pmode;
10057 enum machine_mode mode1 = Pmode;
10058 rtx op0 = expand_normal (arg0);
10059 rtx op1 = expand_normal (arg1);
10061 if (icode == CODE_FOR_nothing)
10062 /* Builtin not supported on this processor. */
10063 return 0;
10065 /* If we got invalid arguments, bail out before generating bad rtl. */
10066 if (arg0 == error_mark_node || arg1 == error_mark_node)
10067 return const0_rtx;
10069 if (target == 0
10070 || GET_MODE (target) != tmode
10071 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10072 target = gen_reg_rtx (tmode);
10074 op1 = copy_to_mode_reg (mode1, op1);
10076 if (op0 == const0_rtx)
10078 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10080 else
10082 op0 = copy_to_mode_reg (mode0, op0);
10083 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10086 pat = GEN_FCN (icode) (target, addr);
10088 if (! pat)
10089 return 0;
10090 emit_insn (pat);
10092 return target;
10095 static rtx
10096 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10098 tree arg0 = CALL_EXPR_ARG (exp, 0);
10099 tree arg1 = CALL_EXPR_ARG (exp, 1);
10100 tree arg2 = CALL_EXPR_ARG (exp, 2);
10101 rtx op0 = expand_normal (arg0);
10102 rtx op1 = expand_normal (arg1);
10103 rtx op2 = expand_normal (arg2);
10104 rtx pat;
10105 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10106 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10107 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10109 /* Invalid arguments. Bail before doing anything stoopid! */
10110 if (arg0 == error_mark_node
10111 || arg1 == error_mark_node
10112 || arg2 == error_mark_node)
10113 return const0_rtx;
10115 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10116 op0 = copy_to_mode_reg (mode2, op0);
10117 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10118 op1 = copy_to_mode_reg (mode0, op1);
10119 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10120 op2 = copy_to_mode_reg (mode1, op2);
10122 pat = GEN_FCN (icode) (op1, op2, op0);
10123 if (pat)
10124 emit_insn (pat);
10125 return NULL_RTX;
10128 static rtx
10129 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10131 tree arg0 = CALL_EXPR_ARG (exp, 0);
10132 tree arg1 = CALL_EXPR_ARG (exp, 1);
10133 tree arg2 = CALL_EXPR_ARG (exp, 2);
10134 rtx op0 = expand_normal (arg0);
10135 rtx op1 = expand_normal (arg1);
10136 rtx op2 = expand_normal (arg2);
10137 rtx pat, addr;
10138 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10139 enum machine_mode mode1 = Pmode;
10140 enum machine_mode mode2 = Pmode;
10142 /* Invalid arguments. Bail before doing anything stoopid! */
10143 if (arg0 == error_mark_node
10144 || arg1 == error_mark_node
10145 || arg2 == error_mark_node)
10146 return const0_rtx;
10148 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10149 op0 = copy_to_mode_reg (tmode, op0);
10151 op2 = copy_to_mode_reg (mode2, op2);
10153 if (op1 == const0_rtx)
10155 addr = gen_rtx_MEM (tmode, op2);
10157 else
10159 op1 = copy_to_mode_reg (mode1, op1);
10160 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10163 pat = GEN_FCN (icode) (addr, op0);
10164 if (pat)
10165 emit_insn (pat);
10166 return NULL_RTX;
10169 static rtx
10170 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10172 tree arg0 = CALL_EXPR_ARG (exp, 0);
10173 tree arg1 = CALL_EXPR_ARG (exp, 1);
10174 tree arg2 = CALL_EXPR_ARG (exp, 2);
10175 rtx op0 = expand_normal (arg0);
10176 rtx op1 = expand_normal (arg1);
10177 rtx op2 = expand_normal (arg2);
10178 rtx pat, addr;
10179 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10180 enum machine_mode smode = insn_data[icode].operand[1].mode;
10181 enum machine_mode mode1 = Pmode;
10182 enum machine_mode mode2 = Pmode;
10184 /* Invalid arguments. Bail before doing anything stoopid! */
10185 if (arg0 == error_mark_node
10186 || arg1 == error_mark_node
10187 || arg2 == error_mark_node)
10188 return const0_rtx;
10190 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10191 op0 = copy_to_mode_reg (smode, op0);
10193 op2 = copy_to_mode_reg (mode2, op2);
10195 if (op1 == const0_rtx)
10197 addr = gen_rtx_MEM (tmode, op2);
10199 else
10201 op1 = copy_to_mode_reg (mode1, op1);
10202 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10205 pat = GEN_FCN (icode) (addr, op0);
10206 if (pat)
10207 emit_insn (pat);
10208 return NULL_RTX;
10211 static rtx
10212 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10214 rtx pat;
10215 tree arg0 = CALL_EXPR_ARG (exp, 0);
10216 tree arg1 = CALL_EXPR_ARG (exp, 1);
10217 tree arg2 = CALL_EXPR_ARG (exp, 2);
10218 rtx op0 = expand_normal (arg0);
10219 rtx op1 = expand_normal (arg1);
10220 rtx op2 = expand_normal (arg2);
10221 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10222 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10223 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10224 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10226 if (icode == CODE_FOR_nothing)
10227 /* Builtin not supported on this processor. */
10228 return 0;
10230 /* If we got invalid arguments, bail out before generating bad rtl. */
10231 if (arg0 == error_mark_node
10232 || arg1 == error_mark_node
10233 || arg2 == error_mark_node)
10234 return const0_rtx;
10236 /* Check and prepare argument depending on the instruction code.
10238 Note that a switch statement instead of the sequence of tests
10239 would be incorrect, as many of the CODE_FOR values could be
10240 CODE_FOR_nothing and that would yield multiple case labels
10241 with identical values. We would never reach here at runtime
10242 anyway in that case, since CODE_FOR_nothing is rejected above. */
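/* Editorial sketch of the failure mode: if two of the named patterns
   were both compiled out, something like
       switch (icode)
         {
         case CODE_FOR_altivec_vsldoi_v4sf:   (now CODE_FOR_nothing)
         case CODE_FOR_vsx_xxpermdi_v2df:     (also CODE_FOR_nothing)
   would contain duplicate case values and fail to compile.  */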
10243 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10244 || icode == CODE_FOR_altivec_vsldoi_v4si
10245 || icode == CODE_FOR_altivec_vsldoi_v8hi
10246 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10248 /* Only allow 4-bit unsigned literals. */
10249 STRIP_NOPS (arg2);
10250 if (TREE_CODE (arg2) != INTEGER_CST
10251 || TREE_INT_CST_LOW (arg2) & ~0xf)
10253 error ("argument 3 must be a 4-bit unsigned literal");
10254 return const0_rtx;
10257 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10258 || icode == CODE_FOR_vsx_xxpermdi_v2di
10259 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10260 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10261 || icode == CODE_FOR_vsx_xxsldwi_v4si
10262 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10263 || icode == CODE_FOR_vsx_xxsldwi_v2di
10264 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10266 /* Only allow 2-bit unsigned literals. */
10267 STRIP_NOPS (arg2);
10268 if (TREE_CODE (arg2) != INTEGER_CST
10269 || TREE_INT_CST_LOW (arg2) & ~0x3)
10271 error ("argument 3 must be a 2-bit unsigned literal");
10272 return const0_rtx;
10275 else if (icode == CODE_FOR_vsx_set_v2df
10276 || icode == CODE_FOR_vsx_set_v2di)
10278 /* Only allow 1-bit unsigned literals. */
10279 STRIP_NOPS (arg2);
10280 if (TREE_CODE (arg2) != INTEGER_CST
10281 || TREE_INT_CST_LOW (arg2) & ~0x1)
10283 error ("argument 3 must be a 1-bit unsigned literal");
10284 return const0_rtx;
10288 if (target == 0
10289 || GET_MODE (target) != tmode
10290 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10291 target = gen_reg_rtx (tmode);
10293 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10294 op0 = copy_to_mode_reg (mode0, op0);
10295 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10296 op1 = copy_to_mode_reg (mode1, op1);
10297 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10298 op2 = copy_to_mode_reg (mode2, op2);
10300 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10301 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10302 else
10303 pat = GEN_FCN (icode) (target, op0, op1, op2);
10304 if (! pat)
10305 return 0;
10306 emit_insn (pat);
10308 return target;
10311 /* Expand the lvx builtins. */
10312 static rtx
10313 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10315 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10316 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10317 tree arg0;
10318 enum machine_mode tmode, mode0;
10319 rtx pat, op0;
10320 enum insn_code icode;
10322 switch (fcode)
10324 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10325 icode = CODE_FOR_vector_altivec_load_v16qi;
10326 break;
10327 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10328 icode = CODE_FOR_vector_altivec_load_v8hi;
10329 break;
10330 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10331 icode = CODE_FOR_vector_altivec_load_v4si;
10332 break;
10333 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10334 icode = CODE_FOR_vector_altivec_load_v4sf;
10335 break;
10336 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10337 icode = CODE_FOR_vector_altivec_load_v2df;
10338 break;
10339 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10340 icode = CODE_FOR_vector_altivec_load_v2di;
10341 break;
10342 default:
10343 *expandedp = false;
10344 return NULL_RTX;
10347 *expandedp = true;
10349 arg0 = CALL_EXPR_ARG (exp, 0);
10350 op0 = expand_normal (arg0);
10351 tmode = insn_data[icode].operand[0].mode;
10352 mode0 = insn_data[icode].operand[1].mode;
10354 if (target == 0
10355 || GET_MODE (target) != tmode
10356 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10357 target = gen_reg_rtx (tmode);
10359 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10360 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10362 pat = GEN_FCN (icode) (target, op0);
10363 if (! pat)
10364 return 0;
10365 emit_insn (pat);
10366 return target;
10369 /* Expand the stvx builtins. */
10370 static rtx
10371 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10372 bool *expandedp)
10374 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10375 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10376 tree arg0, arg1;
10377 enum machine_mode mode0, mode1;
10378 rtx pat, op0, op1;
10379 enum insn_code icode;
10381 switch (fcode)
10383 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10384 icode = CODE_FOR_vector_altivec_store_v16qi;
10385 break;
10386 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10387 icode = CODE_FOR_vector_altivec_store_v8hi;
10388 break;
10389 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10390 icode = CODE_FOR_vector_altivec_store_v4si;
10391 break;
10392 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10393 icode = CODE_FOR_vector_altivec_store_v4sf;
10394 break;
10395 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10396 icode = CODE_FOR_vector_altivec_store_v2df;
10397 break;
10398 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10399 icode = CODE_FOR_vector_altivec_store_v2di;
10400 break;
10401 default:
10402 *expandedp = false;
10403 return NULL_RTX;
10406 arg0 = CALL_EXPR_ARG (exp, 0);
10407 arg1 = CALL_EXPR_ARG (exp, 1);
10408 op0 = expand_normal (arg0);
10409 op1 = expand_normal (arg1);
10410 mode0 = insn_data[icode].operand[0].mode;
10411 mode1 = insn_data[icode].operand[1].mode;
10413 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10414 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10415 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10416 op1 = copy_to_mode_reg (mode1, op1);
10418 pat = GEN_FCN (icode) (op0, op1);
10419 if (pat)
10420 emit_insn (pat);
10422 *expandedp = true;
10423 return NULL_RTX;
10426 /* Expand the dst builtins. */
10427 static rtx
10428 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10429 bool *expandedp)
10431 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10432 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10433 tree arg0, arg1, arg2;
10434 enum machine_mode mode0, mode1;
10435 rtx pat, op0, op1, op2;
10436 const struct builtin_description *d;
10437 size_t i;
10439 *expandedp = false;
10441 /* Handle DST variants. */
10442 d = bdesc_dst;
10443 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10444 if (d->code == fcode)
10446 arg0 = CALL_EXPR_ARG (exp, 0);
10447 arg1 = CALL_EXPR_ARG (exp, 1);
10448 arg2 = CALL_EXPR_ARG (exp, 2);
10449 op0 = expand_normal (arg0);
10450 op1 = expand_normal (arg1);
10451 op2 = expand_normal (arg2);
10452 mode0 = insn_data[d->icode].operand[0].mode;
10453 mode1 = insn_data[d->icode].operand[1].mode;
10455 /* Invalid arguments; bail out before generating bad rtl. */
10456 if (arg0 == error_mark_node
10457 || arg1 == error_mark_node
10458 || arg2 == error_mark_node)
10459 return const0_rtx;
10461 *expandedp = true;
10462 STRIP_NOPS (arg2);
10463 if (TREE_CODE (arg2) != INTEGER_CST
10464 || TREE_INT_CST_LOW (arg2) & ~0x3)
10466 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10467 return const0_rtx;
10470 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10471 op0 = copy_to_mode_reg (Pmode, op0);
10472 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10473 op1 = copy_to_mode_reg (mode1, op1);
10475 pat = GEN_FCN (d->icode) (op0, op1, op2);
10476 if (pat != 0)
10477 emit_insn (pat);
10479 return NULL_RTX;
10482 return NULL_RTX;
10485 /* Expand vec_init builtin. */
10486 static rtx
10487 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10489 enum machine_mode tmode = TYPE_MODE (type);
10490 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10491 int i, n_elt = GET_MODE_NUNITS (tmode);
10492 rtvec v = rtvec_alloc (n_elt);
10494 gcc_assert (VECTOR_MODE_P (tmode));
10495 gcc_assert (n_elt == call_expr_nargs (exp));
10497 for (i = 0; i < n_elt; ++i)
10499 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10500 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10503 if (!target || !register_operand (target, tmode))
10504 target = gen_reg_rtx (tmode);
10506 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10507 return target;
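/* Editorial sketch (hypothetical call; the user-visible names are
   registered elsewhere in this file): for a four-element init such as
       __builtin_vec_init_v4si (1, 2, 3, 4)
   the loop above expands each argument, collects the results into an
   rtvec wrapped as (parallel [...]), and lets
   rs6000_expand_vector_init choose the splat/merge sequence.  */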
10510 /* Return the integer constant in ARG. Constrain it to be in the range
10511 of the subparts of VEC_TYPE; issue an error if not. */
10513 static int
10514 get_element_number (tree vec_type, tree arg)
10516 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10518 if (!host_integerp (arg, 1)
10519 || (elt = tree_low_cst (arg, 1), elt > max))
10521 error ("selector must be an integer constant in the range 0..%wi", max);
10522 return 0;
10525 return elt;
10528 /* Expand vec_set builtin. */
10529 static rtx
10530 altivec_expand_vec_set_builtin (tree exp)
10532 enum machine_mode tmode, mode1;
10533 tree arg0, arg1, arg2;
10534 int elt;
10535 rtx op0, op1;
10537 arg0 = CALL_EXPR_ARG (exp, 0);
10538 arg1 = CALL_EXPR_ARG (exp, 1);
10539 arg2 = CALL_EXPR_ARG (exp, 2);
10541 tmode = TYPE_MODE (TREE_TYPE (arg0));
10542 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10543 gcc_assert (VECTOR_MODE_P (tmode));
10545 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10546 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10547 elt = get_element_number (TREE_TYPE (arg0), arg2);
10549 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10550 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10552 op0 = force_reg (tmode, op0);
10553 op1 = force_reg (mode1, op1);
10555 rs6000_expand_vector_set (op0, op1, elt);
10557 return op0;
10560 /* Expand vec_ext builtin. */
10561 static rtx
10562 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10564 enum machine_mode tmode, mode0;
10565 tree arg0, arg1;
10566 int elt;
10567 rtx op0;
10569 arg0 = CALL_EXPR_ARG (exp, 0);
10570 arg1 = CALL_EXPR_ARG (exp, 1);
10572 op0 = expand_normal (arg0);
10573 elt = get_element_number (TREE_TYPE (arg0), arg1);
10575 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10576 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10577 gcc_assert (VECTOR_MODE_P (mode0));
10579 op0 = force_reg (mode0, op0);
10581 if (optimize || !target || !register_operand (target, tmode))
10582 target = gen_reg_rtx (tmode);
10584 rs6000_expand_vector_extract (target, op0, elt);
10586 return target;
10589 /* Expand the builtin in EXP and store the result in TARGET. Store
10590 true in *EXPANDEDP if we found a builtin to expand. */
10591 static rtx
10592 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10594 const struct builtin_description *d;
10595 size_t i;
10596 enum insn_code icode;
10597 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10598 tree arg0;
10599 rtx op0, pat;
10600 enum machine_mode tmode, mode0;
10601 enum rs6000_builtins fcode
10602 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10604 if (rs6000_overloaded_builtin_p (fcode))
10606 *expandedp = true;
10607 error ("unresolved overload for Altivec builtin %qF", fndecl);
10609 /* Given it is invalid, just generate a normal call. */
10610 return expand_call (exp, target, false);
10613 target = altivec_expand_ld_builtin (exp, target, expandedp);
10614 if (*expandedp)
10615 return target;
10617 target = altivec_expand_st_builtin (exp, target, expandedp);
10618 if (*expandedp)
10619 return target;
10621 target = altivec_expand_dst_builtin (exp, target, expandedp);
10622 if (*expandedp)
10623 return target;
10625 *expandedp = true;
10627 switch (fcode)
10629 case ALTIVEC_BUILTIN_STVX:
10630 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
10631 case ALTIVEC_BUILTIN_STVEBX:
10632 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10633 case ALTIVEC_BUILTIN_STVEHX:
10634 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10635 case ALTIVEC_BUILTIN_STVEWX:
10636 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10637 case ALTIVEC_BUILTIN_STVXL:
10638 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10640 case ALTIVEC_BUILTIN_STVLX:
10641 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10642 case ALTIVEC_BUILTIN_STVLXL:
10643 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10644 case ALTIVEC_BUILTIN_STVRX:
10645 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10646 case ALTIVEC_BUILTIN_STVRXL:
10647 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10649 case VSX_BUILTIN_STXVD2X_V2DF:
10650 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
10651 case VSX_BUILTIN_STXVD2X_V2DI:
10652 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
10653 case VSX_BUILTIN_STXVW4X_V4SF:
10654 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
10655 case VSX_BUILTIN_STXVW4X_V4SI:
10656 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
10657 case VSX_BUILTIN_STXVW4X_V8HI:
10658 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
10659 case VSX_BUILTIN_STXVW4X_V16QI:
10660 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
10662 case ALTIVEC_BUILTIN_MFVSCR:
10663 icode = CODE_FOR_altivec_mfvscr;
10664 tmode = insn_data[icode].operand[0].mode;
10666 if (target == 0
10667 || GET_MODE (target) != tmode
10668 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10669 target = gen_reg_rtx (tmode);
10671 pat = GEN_FCN (icode) (target);
10672 if (! pat)
10673 return 0;
10674 emit_insn (pat);
10675 return target;
10677 case ALTIVEC_BUILTIN_MTVSCR:
10678 icode = CODE_FOR_altivec_mtvscr;
10679 arg0 = CALL_EXPR_ARG (exp, 0);
10680 op0 = expand_normal (arg0);
10681 mode0 = insn_data[icode].operand[0].mode;
10683 /* If we got invalid arguments, bail out before generating bad rtl. */
10684 if (arg0 == error_mark_node)
10685 return const0_rtx;
10687 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10688 op0 = copy_to_mode_reg (mode0, op0);
10690 pat = GEN_FCN (icode) (op0);
10691 if (pat)
10692 emit_insn (pat);
10693 return NULL_RTX;
10695 case ALTIVEC_BUILTIN_DSSALL:
10696 emit_insn (gen_altivec_dssall ());
10697 return NULL_RTX;
10699 case ALTIVEC_BUILTIN_DSS:
10700 icode = CODE_FOR_altivec_dss;
10701 arg0 = CALL_EXPR_ARG (exp, 0);
10702 STRIP_NOPS (arg0);
10703 op0 = expand_normal (arg0);
10704 mode0 = insn_data[icode].operand[0].mode;
10706 /* If we got invalid arguments, bail out before generating bad rtl. */
10707 if (arg0 == error_mark_node)
10708 return const0_rtx;
10710 if (TREE_CODE (arg0) != INTEGER_CST
10711 || TREE_INT_CST_LOW (arg0) & ~0x3)
10713 error ("argument to dss must be a 2-bit unsigned literal");
10714 return const0_rtx;
10717 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10718 op0 = copy_to_mode_reg (mode0, op0);
10720 emit_insn (gen_altivec_dss (op0));
10721 return NULL_RTX;
10723 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10724 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10725 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10726 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10727 case VSX_BUILTIN_VEC_INIT_V2DF:
10728 case VSX_BUILTIN_VEC_INIT_V2DI:
10729 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10731 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10732 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10733 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10734 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10735 case VSX_BUILTIN_VEC_SET_V2DF:
10736 case VSX_BUILTIN_VEC_SET_V2DI:
10737 return altivec_expand_vec_set_builtin (exp);
10739 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10740 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10741 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10742 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10743 case VSX_BUILTIN_VEC_EXT_V2DF:
10744 case VSX_BUILTIN_VEC_EXT_V2DI:
10745 return altivec_expand_vec_ext_builtin (exp, target);
10747 default:
10748 break;
10749 /* Fall through to the table-driven expanders below. */
10752 /* Expand abs* operations. */
10753 d = bdesc_abs;
10754 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10755 if (d->code == fcode)
10756 return altivec_expand_abs_builtin (d->icode, exp, target);
10758 /* Expand the AltiVec predicates. */
10759 d = bdesc_altivec_preds;
10760 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
10761 if (d->code == fcode)
10762 return altivec_expand_predicate_builtin (d->icode, exp, target);
10764 /* LV* are funky. We initialize them differently. */
10765 switch (fcode)
10767 case ALTIVEC_BUILTIN_LVSL:
10768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10769 exp, target, false);
10770 case ALTIVEC_BUILTIN_LVSR:
10771 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10772 exp, target, false);
10773 case ALTIVEC_BUILTIN_LVEBX:
10774 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10775 exp, target, false);
10776 case ALTIVEC_BUILTIN_LVEHX:
10777 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10778 exp, target, false);
10779 case ALTIVEC_BUILTIN_LVEWX:
10780 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10781 exp, target, false);
10782 case ALTIVEC_BUILTIN_LVXL:
10783 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10784 exp, target, false);
10785 case ALTIVEC_BUILTIN_LVX:
10786 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
10787 exp, target, false);
10788 case ALTIVEC_BUILTIN_LVLX:
10789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10790 exp, target, true);
10791 case ALTIVEC_BUILTIN_LVLXL:
10792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10793 exp, target, true);
10794 case ALTIVEC_BUILTIN_LVRX:
10795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10796 exp, target, true);
10797 case ALTIVEC_BUILTIN_LVRXL:
10798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10799 exp, target, true);
10800 case VSX_BUILTIN_LXVD2X_V2DF:
10801 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
10802 exp, target, false);
10803 case VSX_BUILTIN_LXVD2X_V2DI:
10804 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
10805 exp, target, false);
10806 case VSX_BUILTIN_LXVW4X_V4SF:
10807 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
10808 exp, target, false);
10809 case VSX_BUILTIN_LXVW4X_V4SI:
10810 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
10811 exp, target, false);
10812 case VSX_BUILTIN_LXVW4X_V8HI:
10813 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
10814 exp, target, false);
10815 case VSX_BUILTIN_LXVW4X_V16QI:
10816 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
10817 exp, target, false);
10818 break;
10819 default:
10820 break;
10821 /* Fall through to the not-expanded return below. */
10824 *expandedp = false;
10825 return NULL_RTX;
10828 /* Expand the builtin in EXP and store the result in TARGET. Store
10829 true in *EXPANDEDP if we found a builtin to expand. */
10830 static rtx
10831 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10833 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10834 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10835 const struct builtin_description *d;
10836 size_t i;
10838 *expandedp = true;
10840 switch (fcode)
10842 case PAIRED_BUILTIN_STX:
10843 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10844 case PAIRED_BUILTIN_LX:
10845 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10846 default:
10847 break;
10848 /* Fall through to the PAIRED predicate expanders below. */
10851 /* Expand the paired predicates. */
10852 d = bdesc_paired_preds;
10853 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10854 if (d->code == fcode)
10855 return paired_expand_predicate_builtin (d->icode, exp, target);
10857 *expandedp = false;
10858 return NULL_RTX;
10861 /* Binops that need to be initialized manually, but can be expanded
10862 automagically by rs6000_expand_binop_builtin. */
10863 static const struct builtin_description bdesc_2arg_spe[] =
10865 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
10866 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
10867 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
10868 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
10869 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
10870 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
10871 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
10872 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
10873 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
10874 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
10875 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
10876 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
10877 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
10878 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
10879 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
10880 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
10881 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
10882 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
10883 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
10884 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
10885 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
10886 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
10889 /* Expand the builtin in EXP and store the result in TARGET. Store
10890 true in *EXPANDEDP if we found a builtin to expand.
10892 This expands the SPE builtins that are not simple unary and binary
10893 operations. */
10894 static rtx
10895 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
10897 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10898 tree arg1, arg0;
10899 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10900 enum insn_code icode;
10901 enum machine_mode tmode, mode0;
10902 rtx pat, op0;
10903 const struct builtin_description *d;
10904 size_t i;
10906 *expandedp = true;
10908 /* Syntax check for a 5-bit unsigned immediate. */
10909 switch (fcode)
10911 case SPE_BUILTIN_EVSTDD:
10912 case SPE_BUILTIN_EVSTDH:
10913 case SPE_BUILTIN_EVSTDW:
10914 case SPE_BUILTIN_EVSTWHE:
10915 case SPE_BUILTIN_EVSTWHO:
10916 case SPE_BUILTIN_EVSTWWE:
10917 case SPE_BUILTIN_EVSTWWO:
10918 arg1 = CALL_EXPR_ARG (exp, 2);
10919 if (TREE_CODE (arg1) != INTEGER_CST
10920 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10922 error ("argument 2 must be a 5-bit unsigned literal");
10923 return const0_rtx;
10925 break;
10926 default:
10927 break;
10930 /* The evsplat*i instructions are not quite generic. */
10931 switch (fcode)
10933 case SPE_BUILTIN_EVSPLATFI:
10934 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
10935 exp, target);
10936 case SPE_BUILTIN_EVSPLATI:
10937 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
10938 exp, target);
10939 default:
10940 break;
10943 d = bdesc_2arg_spe;
10944 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
10945 if (d->code == fcode)
10946 return rs6000_expand_binop_builtin (d->icode, exp, target);
10948 d = bdesc_spe_predicates;
10949 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
10950 if (d->code == fcode)
10951 return spe_expand_predicate_builtin (d->icode, exp, target);
10953 d = bdesc_spe_evsel;
10954 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
10955 if (d->code == fcode)
10956 return spe_expand_evsel_builtin (d->icode, exp, target);
10958 switch (fcode)
10960 case SPE_BUILTIN_EVSTDDX:
10961 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
10962 case SPE_BUILTIN_EVSTDHX:
10963 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
10964 case SPE_BUILTIN_EVSTDWX:
10965 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
10966 case SPE_BUILTIN_EVSTWHEX:
10967 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
10968 case SPE_BUILTIN_EVSTWHOX:
10969 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
10970 case SPE_BUILTIN_EVSTWWEX:
10971 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
10972 case SPE_BUILTIN_EVSTWWOX:
10973 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
10974 case SPE_BUILTIN_EVSTDD:
10975 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
10976 case SPE_BUILTIN_EVSTDH:
10977 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
10978 case SPE_BUILTIN_EVSTDW:
10979 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
10980 case SPE_BUILTIN_EVSTWHE:
10981 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
10982 case SPE_BUILTIN_EVSTWHO:
10983 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
10984 case SPE_BUILTIN_EVSTWWE:
10985 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
10986 case SPE_BUILTIN_EVSTWWO:
10987 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
10988 case SPE_BUILTIN_MFSPEFSCR:
10989 icode = CODE_FOR_spe_mfspefscr;
10990 tmode = insn_data[icode].operand[0].mode;
10992 if (target == 0
10993 || GET_MODE (target) != tmode
10994 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10995 target = gen_reg_rtx (tmode);
10997 pat = GEN_FCN (icode) (target);
10998 if (! pat)
10999 return 0;
11000 emit_insn (pat);
11001 return target;
11002 case SPE_BUILTIN_MTSPEFSCR:
11003 icode = CODE_FOR_spe_mtspefscr;
11004 arg0 = CALL_EXPR_ARG (exp, 0);
11005 op0 = expand_normal (arg0);
11006 mode0 = insn_data[icode].operand[0].mode;
11008 if (arg0 == error_mark_node)
11009 return const0_rtx;
11011 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11012 op0 = copy_to_mode_reg (mode0, op0);
11014 pat = GEN_FCN (icode) (op0);
11015 if (pat)
11016 emit_insn (pat);
11017 return NULL_RTX;
11018 default:
11019 break;
11020 }
11022 *expandedp = false;
11023 return NULL_RTX;
11024 }
11026 static rtx
11027 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11028 {
11029 rtx pat, scratch, tmp;
11030 tree form = CALL_EXPR_ARG (exp, 0);
11031 tree arg0 = CALL_EXPR_ARG (exp, 1);
11032 tree arg1 = CALL_EXPR_ARG (exp, 2);
11033 rtx op0 = expand_normal (arg0);
11034 rtx op1 = expand_normal (arg1);
11035 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11036 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11037 int form_int;
11038 enum rtx_code code;
11040 if (TREE_CODE (form) != INTEGER_CST)
11041 {
11042 error ("argument 1 of __builtin_paired_predicate must be a constant");
11043 return const0_rtx;
11044 }
11045 else
11046 form_int = TREE_INT_CST_LOW (form);
11048 gcc_assert (mode0 == mode1);
11050 if (arg0 == error_mark_node || arg1 == error_mark_node)
11051 return const0_rtx;
11053 if (target == 0
11054 || GET_MODE (target) != SImode
11055 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
11056 target = gen_reg_rtx (SImode);
11057 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
11058 op0 = copy_to_mode_reg (mode0, op0);
11059 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
11060 op1 = copy_to_mode_reg (mode1, op1);
11062 scratch = gen_reg_rtx (CCFPmode);
11064 pat = GEN_FCN (icode) (scratch, op0, op1);
11065 if (!pat)
11066 return const0_rtx;
11068 emit_insn (pat);
11070 switch (form_int)
11071 {
11072 /* LT bit. */
11073 case 0:
11074 code = LT;
11075 break;
11076 /* GT bit. */
11077 case 1:
11078 code = GT;
11079 break;
11080 /* EQ bit. */
11081 case 2:
11082 code = EQ;
11083 break;
11084 /* UN bit. */
11085 case 3:
11086 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11087 return target;
11088 default:
11089 error ("argument 1 of __builtin_paired_predicate is out of range");
11090 return const0_rtx;
11091 }
11093 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11094 emit_move_insn (target, tmp);
11095 return target;
11096 }
11098 static rtx
11099 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11100 {
11101 rtx pat, scratch, tmp;
11102 tree form = CALL_EXPR_ARG (exp, 0);
11103 tree arg0 = CALL_EXPR_ARG (exp, 1);
11104 tree arg1 = CALL_EXPR_ARG (exp, 2);
11105 rtx op0 = expand_normal (arg0);
11106 rtx op1 = expand_normal (arg1);
11107 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11108 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11109 int form_int;
11110 enum rtx_code code;
11112 if (TREE_CODE (form) != INTEGER_CST)
11113 {
11114 error ("argument 1 of __builtin_spe_predicate must be a constant");
11115 return const0_rtx;
11116 }
11117 else
11118 form_int = TREE_INT_CST_LOW (form);
11120 gcc_assert (mode0 == mode1);
11122 if (arg0 == error_mark_node || arg1 == error_mark_node)
11123 return const0_rtx;
11125 if (target == 0
11126 || GET_MODE (target) != SImode
11127 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
11128 target = gen_reg_rtx (SImode);
11130 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11131 op0 = copy_to_mode_reg (mode0, op0);
11132 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11133 op1 = copy_to_mode_reg (mode1, op1);
11135 scratch = gen_reg_rtx (CCmode);
11137 pat = GEN_FCN (icode) (scratch, op0, op1);
11138 if (! pat)
11139 return const0_rtx;
11140 emit_insn (pat);
11142 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
11143 _lower_. We use one compare, but look in different bits of the
11144 CR for each variant.
11146 There are 2 elements in each SPE simd type (upper/lower). The CR
11147 bits are set as follows:
11149 BIT0 | BIT 1 | BIT 2 | BIT 3
11150 U | L | (U | L) | (U & L)
11152 So, for an "all" relationship, BIT 3 would be set.
11153 For an "any" relationship, BIT 2 would be set. Etc.
11155 Following traditional nomenclature, these bits map to:
11157 BIT0 | BIT 1 | BIT 2 | BIT 3
11158 LT | GT | EQ | OV
11160 Later, we will generate rtl to look in the OV/EQ/LT/GT bits (for the
11161 all/any/upper/lower variants, respectively). */
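/* For example, form 0 ("all") below reads the OV position (BIT 3,
   U & L), while form 1 ("any") reads the EQ position (BIT 2, U | L).  */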
11163 switch (form_int)
11164 {
11165 /* All variant. OV bit. */
11166 case 0:
11167 /* We need to get to the OV bit, which is the ORDERED bit. We
11168 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11169 that's ugly and will make validate_condition_mode die.
11170 So let's just use another pattern. */
11171 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11172 return target;
11173 /* Any variant. EQ bit. */
11174 case 1:
11175 code = EQ;
11176 break;
11177 /* Upper variant. LT bit. */
11178 case 2:
11179 code = LT;
11180 break;
11181 /* Lower variant. GT bit. */
11182 case 3:
11183 code = GT;
11184 break;
11185 default:
11186 error ("argument 1 of __builtin_spe_predicate is out of range");
11187 return const0_rtx;
11188 }
11190 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11191 emit_move_insn (target, tmp);
11193 return target;
11194 }
11196 /* The evsel builtins look like this:
11198 e = __builtin_spe_evsel_OP (a, b, c, d);
11200 and work like this:
11202 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11203 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
11204 */
11206 static rtx
11207 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11208 {
11209 rtx pat, scratch;
11210 tree arg0 = CALL_EXPR_ARG (exp, 0);
11211 tree arg1 = CALL_EXPR_ARG (exp, 1);
11212 tree arg2 = CALL_EXPR_ARG (exp, 2);
11213 tree arg3 = CALL_EXPR_ARG (exp, 3);
11214 rtx op0 = expand_normal (arg0);
11215 rtx op1 = expand_normal (arg1);
11216 rtx op2 = expand_normal (arg2);
11217 rtx op3 = expand_normal (arg3);
11218 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11219 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11221 gcc_assert (mode0 == mode1);
11223 if (arg0 == error_mark_node || arg1 == error_mark_node
11224 || arg2 == error_mark_node || arg3 == error_mark_node)
11225 return const0_rtx;
11227 if (target == 0
11228 || GET_MODE (target) != mode0
11229 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11230 target = gen_reg_rtx (mode0);
11232 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11233 op0 = copy_to_mode_reg (mode0, op0);
11234 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11235 op1 = copy_to_mode_reg (mode0, op1);
11236 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11237 op2 = copy_to_mode_reg (mode0, op2);
11238 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11239 op3 = copy_to_mode_reg (mode0, op3);
11241 /* Generate the compare. */
11242 scratch = gen_reg_rtx (CCmode);
11243 pat = GEN_FCN (icode) (scratch, op0, op1);
11244 if (! pat)
11245 return const0_rtx;
11246 emit_insn (pat);
11248 if (mode0 == V2SImode)
11249 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11250 else
11251 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11253 return target;
11254 }
11256 /* Raise an error message for a builtin function that is called without the
11257 appropriate target options being set. */
11259 static void
11260 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11261 {
11262 size_t uns_fncode = (size_t)fncode;
11263 const char *name = rs6000_builtin_info[uns_fncode].name;
11264 unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;
11266 gcc_assert (name != NULL);
11267 if ((fnmask & RS6000_BTM_CELL) != 0)
11268 error ("builtin function %s is only valid for the cell processor", name);
11269 else if ((fnmask & RS6000_BTM_VSX) != 0)
11270 error ("builtin function %s requires the -mvsx option", name);
11271 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11272 error ("builtin function %s requires the -maltivec option", name);
11273 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11274 error ("builtin function %s requires the -mpaired option", name);
11275 else if ((fnmask & RS6000_BTM_SPE) != 0)
11276 error ("builtin function %s requires the -mspe option", name);
11277 else
11278 error ("builtin function %s is not supported with the current options",
11279 name);
11280 }
11282 /* Expand an expression EXP that calls a built-in function,
11283 with result going to TARGET if that's convenient
11284 (and in mode MODE if that's convenient).
11285 SUBTARGET may be used as the target for computing one of EXP's operands.
11286 IGNORE is nonzero if the value is to be ignored. */
11288 static rtx
11289 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11290 enum machine_mode mode ATTRIBUTE_UNUSED,
11291 int ignore ATTRIBUTE_UNUSED)
11292 {
11293 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11294 enum rs6000_builtins fcode
11295 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11296 size_t uns_fcode = (size_t)fcode;
11297 const struct builtin_description *d;
11298 size_t i;
11299 rtx ret;
11300 bool success;
11301 unsigned mask = rs6000_builtin_info[uns_fcode].mask;
11302 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
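/* For example, an AltiVec builtin carries RS6000_BTM_ALTIVEC in its
   mask, so func_valid_p only holds if that bit is also set in
   rs6000_builtin_mask for the current options.  */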
11304 if (TARGET_DEBUG_BUILTIN)
11305 {
11306 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11307 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11308 const char *name2 = ((icode != CODE_FOR_nothing)
11309 ? get_insn_name ((int)icode)
11310 : "nothing");
11311 const char *name3;
11313 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11314 {
11315 default: name3 = "unknown"; break;
11316 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11317 case RS6000_BTC_UNARY: name3 = "unary"; break;
11318 case RS6000_BTC_BINARY: name3 = "binary"; break;
11319 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11320 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11321 case RS6000_BTC_ABS: name3 = "abs"; break;
11322 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11323 case RS6000_BTC_DST: name3 = "dst"; break;
11324 }
11327 fprintf (stderr,
11328 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11329 (name1) ? name1 : "---", fcode,
11330 (name2) ? name2 : "---", (int)icode,
11331 name3,
11332 func_valid_p ? "" : ", not valid");
11333 }
11335 if (!func_valid_p)
11336 {
11337 rs6000_invalid_builtin (fcode);
11339 /* Given it is invalid, just generate a normal call. */
11340 return expand_call (exp, target, ignore);
11341 }
11343 switch (fcode)
11344 {
11345 case RS6000_BUILTIN_RECIP:
11346 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11348 case RS6000_BUILTIN_RECIPF:
11349 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11351 case RS6000_BUILTIN_RSQRTF:
11352 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11354 case RS6000_BUILTIN_RSQRT:
11355 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11357 case POWER7_BUILTIN_BPERMD:
11358 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11359 ? CODE_FOR_bpermd_di
11360 : CODE_FOR_bpermd_si), exp, target);
11362 case RS6000_BUILTIN_GET_TB:
11363 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
11364 target);
11366 case RS6000_BUILTIN_MFTB:
11367 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
11368 ? CODE_FOR_rs6000_mftb_di
11369 : CODE_FOR_rs6000_mftb_si),
11370 target);
11372 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11373 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11374 {
11375 int icode = (int) CODE_FOR_altivec_lvsr;
11376 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11377 enum machine_mode mode = insn_data[icode].operand[1].mode;
11378 tree arg;
11379 rtx op, addr, pat;
11381 gcc_assert (TARGET_ALTIVEC);
11383 arg = CALL_EXPR_ARG (exp, 0);
11384 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11385 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11386 addr = memory_address (mode, op);
11387 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11388 op = addr;
11389 else
11390 {
11391 /* For the load case we need to negate the address. */
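/* lvsr of the negated address yields the permute control vector
   that is used to realign the two aligned loads covering the
   misaligned access.  */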
11392 op = gen_reg_rtx (GET_MODE (addr));
11393 emit_insn (gen_rtx_SET (VOIDmode, op,
11394 gen_rtx_NEG (GET_MODE (addr), addr)));
11395 }
11396 op = gen_rtx_MEM (mode, op);
11398 if (target == 0
11399 || GET_MODE (target) != tmode
11400 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11401 target = gen_reg_rtx (tmode);
11403 /*pat = gen_altivec_lvsr (target, op);*/
11404 pat = GEN_FCN (icode) (target, op);
11405 if (!pat)
11406 return 0;
11407 emit_insn (pat);
11409 return target;
11410 }
11412 case ALTIVEC_BUILTIN_VCFUX:
11413 case ALTIVEC_BUILTIN_VCFSX:
11414 case ALTIVEC_BUILTIN_VCTUXS:
11415 case ALTIVEC_BUILTIN_VCTSXS:
11416 /* FIXME: There's got to be a nicer way to handle this case than
11417 constructing a new CALL_EXPR. */
11418 if (call_expr_nargs (exp) == 1)
11419 {
11420 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11421 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11422 }
11423 break;
11425 default:
11426 break;
11427 }
11429 if (TARGET_ALTIVEC)
11430 {
11431 ret = altivec_expand_builtin (exp, target, &success);
11433 if (success)
11434 return ret;
11435 }
11436 if (TARGET_SPE)
11437 {
11438 ret = spe_expand_builtin (exp, target, &success);
11440 if (success)
11441 return ret;
11442 }
11443 if (TARGET_PAIRED_FLOAT)
11444 {
11445 ret = paired_expand_builtin (exp, target, &success);
11447 if (success)
11448 return ret;
11449 }
11451 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11453 /* Handle simple unary operations. */
11454 d = bdesc_1arg;
11455 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11456 if (d->code == fcode)
11457 return rs6000_expand_unop_builtin (d->icode, exp, target);
11459 /* Handle simple binary operations. */
11460 d = bdesc_2arg;
11461 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11462 if (d->code == fcode)
11463 return rs6000_expand_binop_builtin (d->icode, exp, target);
11465 /* Handle simple ternary operations. */
11466 d = bdesc_3arg;
11467 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11468 if (d->code == fcode)
11469 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11471 gcc_unreachable ();
11472 }
11474 static void
11475 rs6000_init_builtins (void)
11476 {
11477 tree tdecl;
11478 tree ftype;
11479 enum machine_mode mode;
11481 if (TARGET_DEBUG_BUILTIN)
11482 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11483 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11484 (TARGET_SPE) ? ", spe" : "",
11485 (TARGET_ALTIVEC) ? ", altivec" : "",
11486 (TARGET_VSX) ? ", vsx" : "");
11488 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11489 V2SF_type_node = build_vector_type (float_type_node, 2);
11490 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11491 V2DF_type_node = build_vector_type (double_type_node, 2);
11492 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11493 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11494 V4SF_type_node = build_vector_type (float_type_node, 4);
11495 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11496 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11498 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11499 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11500 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11501 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11503 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11504 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11505 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11506 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11508 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11509 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11510 'vector unsigned short'. */
11512 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11513 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11514 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11515 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11516 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11518 long_integer_type_internal_node = long_integer_type_node;
11519 long_unsigned_type_internal_node = long_unsigned_type_node;
11520 long_long_integer_type_internal_node = long_long_integer_type_node;
11521 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11522 intQI_type_internal_node = intQI_type_node;
11523 uintQI_type_internal_node = unsigned_intQI_type_node;
11524 intHI_type_internal_node = intHI_type_node;
11525 uintHI_type_internal_node = unsigned_intHI_type_node;
11526 intSI_type_internal_node = intSI_type_node;
11527 uintSI_type_internal_node = unsigned_intSI_type_node;
11528 intDI_type_internal_node = intDI_type_node;
11529 uintDI_type_internal_node = unsigned_intDI_type_node;
11530 float_type_internal_node = float_type_node;
11531 double_type_internal_node = double_type_node;
11532 void_type_internal_node = void_type_node;
11534 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11535 tree type node. */
11536 builtin_mode_to_type[QImode][0] = integer_type_node;
11537 builtin_mode_to_type[HImode][0] = integer_type_node;
11538 builtin_mode_to_type[SImode][0] = intSI_type_node;
11539 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11540 builtin_mode_to_type[DImode][0] = intDI_type_node;
11541 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11542 builtin_mode_to_type[SFmode][0] = float_type_node;
11543 builtin_mode_to_type[DFmode][0] = double_type_node;
11544 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11545 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11546 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11547 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11548 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11549 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11550 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11551 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11552 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11553 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11554 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11555 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11556 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11558 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11559 TYPE_NAME (bool_char_type_node) = tdecl;
11561 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11562 TYPE_NAME (bool_short_type_node) = tdecl;
11564 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11565 TYPE_NAME (bool_int_type_node) = tdecl;
11567 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11568 TYPE_NAME (pixel_type_node) = tdecl;
11570 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11571 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11572 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11573 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11574 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11576 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11577 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11579 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11580 TYPE_NAME (V16QI_type_node) = tdecl;
11582 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11583 TYPE_NAME (bool_V16QI_type_node) = tdecl;
11585 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
11586 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11588 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
11589 TYPE_NAME (V8HI_type_node) = tdecl;
11591 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
11592 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11594 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
11595 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11597 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
11598 TYPE_NAME (V4SI_type_node) = tdecl;
11600 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
11601 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11603 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
11604 TYPE_NAME (V4SF_type_node) = tdecl;
11606 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
11607 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11609 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
11610 TYPE_NAME (V2DF_type_node) = tdecl;
11612 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
11613 TYPE_NAME (V2DI_type_node) = tdecl;
11615 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
11616 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11618 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
11619 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11621 /* Paired and SPE builtins are only available if you build a compiler with
11622 the appropriate options, so only create those builtins with the
11623 appropriate compiler option. Create Altivec and VSX builtins on machines
11624 with at least the general purpose extensions (970 and newer) to allow the
11625 use of the target attribute. */
11626 if (TARGET_PAIRED_FLOAT)
11627 paired_init_builtins ();
11628 if (TARGET_SPE)
11629 spe_init_builtins ();
11630 if (TARGET_EXTRA_BUILTINS)
11631 altivec_init_builtins ();
11632 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
11633 rs6000_common_init_builtins ();
11635 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11636 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
11637 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
11639 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11640 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
11641 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
11643 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
11644 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
11645 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
11647 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11648 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
11649 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
11651 mode = (TARGET_64BIT) ? DImode : SImode;
11652 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11653 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
11654 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
11656 ftype = build_function_type_list (unsigned_intDI_type_node,
11657 NULL_TREE);
11658 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
11660 if (TARGET_64BIT)
11661 ftype = build_function_type_list (unsigned_intDI_type_node,
11662 NULL_TREE);
11663 else
11664 ftype = build_function_type_list (unsigned_intSI_type_node,
11665 NULL_TREE);
11666 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
11668 #if TARGET_XCOFF
11669 /* AIX libm provides clog as __clog. */
11670 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
11671 set_user_assembler_name (tdecl, "__clog");
11672 #endif
11674 #ifdef SUBTARGET_INIT_BUILTINS
11675 SUBTARGET_INIT_BUILTINS;
11676 #endif
11677 }
11679 /* Returns the rs6000 builtin decl for CODE. */
11681 static tree
11682 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11683 {
11684 unsigned fnmask;
11686 if (code >= RS6000_BUILTIN_COUNT)
11687 return error_mark_node;
11689 fnmask = rs6000_builtin_info[code].mask;
11690 if ((fnmask & rs6000_builtin_mask) != fnmask)
11691 {
11692 rs6000_invalid_builtin ((enum rs6000_builtins)code);
11693 return error_mark_node;
11694 }
11696 return rs6000_builtin_decls[code];
11697 }
11699 static void
11700 spe_init_builtins (void)
11701 {
11702 tree puint_type_node = build_pointer_type (unsigned_type_node);
11703 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11704 const struct builtin_description *d;
11705 size_t i;
11707 tree v2si_ftype_4_v2si
11708 = build_function_type_list (opaque_V2SI_type_node,
11709 opaque_V2SI_type_node,
11710 opaque_V2SI_type_node,
11711 opaque_V2SI_type_node,
11712 opaque_V2SI_type_node,
11713 NULL_TREE);
11715 tree v2sf_ftype_4_v2sf
11716 = build_function_type_list (opaque_V2SF_type_node,
11717 opaque_V2SF_type_node,
11718 opaque_V2SF_type_node,
11719 opaque_V2SF_type_node,
11720 opaque_V2SF_type_node,
11721 NULL_TREE);
11723 tree int_ftype_int_v2si_v2si
11724 = build_function_type_list (integer_type_node,
11725 integer_type_node,
11726 opaque_V2SI_type_node,
11727 opaque_V2SI_type_node,
11728 NULL_TREE);
11730 tree int_ftype_int_v2sf_v2sf
11731 = build_function_type_list (integer_type_node,
11732 integer_type_node,
11733 opaque_V2SF_type_node,
11734 opaque_V2SF_type_node,
11735 NULL_TREE);
11737 tree void_ftype_v2si_puint_int
11738 = build_function_type_list (void_type_node,
11739 opaque_V2SI_type_node,
11740 puint_type_node,
11741 integer_type_node,
11742 NULL_TREE);
11744 tree void_ftype_v2si_puint_char
11745 = build_function_type_list (void_type_node,
11746 opaque_V2SI_type_node,
11747 puint_type_node,
11748 char_type_node,
11749 NULL_TREE);
11751 tree void_ftype_v2si_pv2si_int
11752 = build_function_type_list (void_type_node,
11753 opaque_V2SI_type_node,
11754 opaque_p_V2SI_type_node,
11755 integer_type_node,
11756 NULL_TREE);
11758 tree void_ftype_v2si_pv2si_char
11759 = build_function_type_list (void_type_node,
11760 opaque_V2SI_type_node,
11761 opaque_p_V2SI_type_node,
11762 char_type_node,
11763 NULL_TREE);
11765 tree void_ftype_int
11766 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11768 tree int_ftype_void
11769 = build_function_type_list (integer_type_node, NULL_TREE);
11771 tree v2si_ftype_pv2si_int
11772 = build_function_type_list (opaque_V2SI_type_node,
11773 opaque_p_V2SI_type_node,
11774 integer_type_node,
11775 NULL_TREE);
11777 tree v2si_ftype_puint_int
11778 = build_function_type_list (opaque_V2SI_type_node,
11779 puint_type_node,
11780 integer_type_node,
11781 NULL_TREE);
11783 tree v2si_ftype_pushort_int
11784 = build_function_type_list (opaque_V2SI_type_node,
11785 pushort_type_node,
11786 integer_type_node,
11787 NULL_TREE);
11789 tree v2si_ftype_signed_char
11790 = build_function_type_list (opaque_V2SI_type_node,
11791 signed_char_type_node,
11792 NULL_TREE);
11794 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
11796 /* Initialize irregular SPE builtins. */
11798 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11799 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11800 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11801 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11802 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11803 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11804 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11805 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11806 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11807 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11808 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11809 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11810 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11811 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11812 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11813 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11814 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11815 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11817 /* Loads. */
11818 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11819 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11820 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11821 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11822 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11823 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11824 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11825 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11826 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11827 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11828 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11829 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11830 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11831 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11832 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11833 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11834 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11835 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11836 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11837 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11838 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11839 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11841 /* Predicates. */
11842 d = bdesc_spe_predicates;
11843 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11844 {
11845 tree type;
11847 switch (insn_data[d->icode].operand[1].mode)
11848 {
11849 case V2SImode:
11850 type = int_ftype_int_v2si_v2si;
11851 break;
11852 case V2SFmode:
11853 type = int_ftype_int_v2sf_v2sf;
11854 break;
11855 default:
11856 gcc_unreachable ();
11857 }
11859 def_builtin (d->name, type, d->code);
11860 }
11862 /* Evsel predicates. */
11863 d = bdesc_spe_evsel;
11864 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
11865 {
11866 tree type;
11868 switch (insn_data[d->icode].operand[1].mode)
11869 {
11870 case V2SImode:
11871 type = v2si_ftype_4_v2si;
11872 break;
11873 case V2SFmode:
11874 type = v2sf_ftype_4_v2sf;
11875 break;
11876 default:
11877 gcc_unreachable ();
11878 }
11880 def_builtin (d->name, type, d->code);
11881 }
11882 }
11884 static void
11885 paired_init_builtins (void)
11886 {
11887 const struct builtin_description *d;
11888 size_t i;
11890 tree int_ftype_int_v2sf_v2sf
11891 = build_function_type_list (integer_type_node,
11892 integer_type_node,
11893 V2SF_type_node,
11894 V2SF_type_node,
11895 NULL_TREE);
11896 tree pcfloat_type_node =
11897 build_pointer_type (build_qualified_type
11898 (float_type_node, TYPE_QUAL_CONST));
11900 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
11901 long_integer_type_node,
11902 pcfloat_type_node,
11903 NULL_TREE);
11904 tree void_ftype_v2sf_long_pcfloat =
11905 build_function_type_list (void_type_node,
11906 V2SF_type_node,
11907 long_integer_type_node,
11908 pcfloat_type_node,
11909 NULL_TREE);
11912 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
11913 PAIRED_BUILTIN_LX);
11916 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
11917 PAIRED_BUILTIN_STX);
11919 /* Predicates. */
11920 d = bdesc_paired_preds;
11921 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
11922 {
11923 tree type;
11925 if (TARGET_DEBUG_BUILTIN)
11926 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
11927 (int)i, get_insn_name (d->icode), (int)d->icode,
11928 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
11930 switch (insn_data[d->icode].operand[1].mode)
11931 {
11932 case V2SFmode:
11933 type = int_ftype_int_v2sf_v2sf;
11934 break;
11935 default:
11936 gcc_unreachable ();
11937 }
11939 def_builtin (d->name, type, d->code);
11940 }
11941 }
11943 static void
11944 altivec_init_builtins (void)
11945 {
11946 const struct builtin_description *d;
11947 size_t i;
11948 tree ftype;
11949 tree decl;
11951 tree pvoid_type_node = build_pointer_type (void_type_node);
11953 tree pcvoid_type_node
11954 = build_pointer_type (build_qualified_type (void_type_node,
11955 TYPE_QUAL_CONST));
11957 tree int_ftype_opaque
11958 = build_function_type_list (integer_type_node,
11959 opaque_V4SI_type_node, NULL_TREE);
11960 tree opaque_ftype_opaque
11961 = build_function_type_list (integer_type_node, NULL_TREE);
11962 tree opaque_ftype_opaque_int
11963 = build_function_type_list (opaque_V4SI_type_node,
11964 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
11965 tree opaque_ftype_opaque_opaque_int
11966 = build_function_type_list (opaque_V4SI_type_node,
11967 opaque_V4SI_type_node, opaque_V4SI_type_node,
11968 integer_type_node, NULL_TREE);
11969 tree int_ftype_int_opaque_opaque
11970 = build_function_type_list (integer_type_node,
11971 integer_type_node, opaque_V4SI_type_node,
11972 opaque_V4SI_type_node, NULL_TREE);
11973 tree int_ftype_int_v4si_v4si
11974 = build_function_type_list (integer_type_node,
11975 integer_type_node, V4SI_type_node,
11976 V4SI_type_node, NULL_TREE);
11977 tree void_ftype_v4si
11978 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
11979 tree v8hi_ftype_void
11980 = build_function_type_list (V8HI_type_node, NULL_TREE);
11981 tree void_ftype_void
11982 = build_function_type_list (void_type_node, NULL_TREE);
11983 tree void_ftype_int
11984 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11986 tree opaque_ftype_long_pcvoid
11987 = build_function_type_list (opaque_V4SI_type_node,
11988 long_integer_type_node, pcvoid_type_node,
11989 NULL_TREE);
11990 tree v16qi_ftype_long_pcvoid
11991 = build_function_type_list (V16QI_type_node,
11992 long_integer_type_node, pcvoid_type_node,
11993 NULL_TREE);
11994 tree v8hi_ftype_long_pcvoid
11995 = build_function_type_list (V8HI_type_node,
11996 long_integer_type_node, pcvoid_type_node,
11997 NULL_TREE);
11998 tree v4si_ftype_long_pcvoid
11999 = build_function_type_list (V4SI_type_node,
12000 long_integer_type_node, pcvoid_type_node,
12001 NULL_TREE);
12002 tree v4sf_ftype_long_pcvoid
12003 = build_function_type_list (V4SF_type_node,
12004 long_integer_type_node, pcvoid_type_node,
12005 NULL_TREE);
12006 tree v2df_ftype_long_pcvoid
12007 = build_function_type_list (V2DF_type_node,
12008 long_integer_type_node, pcvoid_type_node,
12009 NULL_TREE);
12010 tree v2di_ftype_long_pcvoid
12011 = build_function_type_list (V2DI_type_node,
12012 long_integer_type_node, pcvoid_type_node,
12013 NULL_TREE);
12015 tree void_ftype_opaque_long_pvoid
12016 = build_function_type_list (void_type_node,
12017 opaque_V4SI_type_node, long_integer_type_node,
12018 pvoid_type_node, NULL_TREE);
12019 tree void_ftype_v4si_long_pvoid
12020 = build_function_type_list (void_type_node,
12021 V4SI_type_node, long_integer_type_node,
12022 pvoid_type_node, NULL_TREE);
12023 tree void_ftype_v16qi_long_pvoid
12024 = build_function_type_list (void_type_node,
12025 V16QI_type_node, long_integer_type_node,
12026 pvoid_type_node, NULL_TREE);
12027 tree void_ftype_v8hi_long_pvoid
12028 = build_function_type_list (void_type_node,
12029 V8HI_type_node, long_integer_type_node,
12030 pvoid_type_node, NULL_TREE);
12031 tree void_ftype_v4sf_long_pvoid
12032 = build_function_type_list (void_type_node,
12033 V4SF_type_node, long_integer_type_node,
12034 pvoid_type_node, NULL_TREE);
12035 tree void_ftype_v2df_long_pvoid
12036 = build_function_type_list (void_type_node,
12037 V2DF_type_node, long_integer_type_node,
12038 pvoid_type_node, NULL_TREE);
12039 tree void_ftype_v2di_long_pvoid
12040 = build_function_type_list (void_type_node,
12041 V2DI_type_node, long_integer_type_node,
12042 pvoid_type_node, NULL_TREE);
12043 tree int_ftype_int_v8hi_v8hi
12044 = build_function_type_list (integer_type_node,
12045 integer_type_node, V8HI_type_node,
12046 V8HI_type_node, NULL_TREE);
12047 tree int_ftype_int_v16qi_v16qi
12048 = build_function_type_list (integer_type_node,
12049 integer_type_node, V16QI_type_node,
12050 V16QI_type_node, NULL_TREE);
12051 tree int_ftype_int_v4sf_v4sf
12052 = build_function_type_list (integer_type_node,
12053 integer_type_node, V4SF_type_node,
12054 V4SF_type_node, NULL_TREE);
12055 tree int_ftype_int_v2df_v2df
12056 = build_function_type_list (integer_type_node,
12057 integer_type_node, V2DF_type_node,
12058 V2DF_type_node, NULL_TREE);
12059 tree v4si_ftype_v4si
12060 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12061 tree v8hi_ftype_v8hi
12062 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12063 tree v16qi_ftype_v16qi
12064 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12065 tree v4sf_ftype_v4sf
12066 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12067 tree v2df_ftype_v2df
12068 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12069 tree void_ftype_pcvoid_int_int
12070 = build_function_type_list (void_type_node,
12071 pcvoid_type_node, integer_type_node,
12072 integer_type_node, NULL_TREE);
12074 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12075 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12076 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12077 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12078 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12079 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12080 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12081 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12082 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12083 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12084 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12085 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12086 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12087 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12088 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12089 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12090 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12091 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12092 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12093 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12094 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12095 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12096 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12097 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12098 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12099 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12100 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12101 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12102 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12103 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12105 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12106 VSX_BUILTIN_LXVD2X_V2DF);
12107 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12108 VSX_BUILTIN_LXVD2X_V2DI);
12109 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12110 VSX_BUILTIN_LXVW4X_V4SF);
12111 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12112 VSX_BUILTIN_LXVW4X_V4SI);
12113 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12114 VSX_BUILTIN_LXVW4X_V8HI);
12115 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12116 VSX_BUILTIN_LXVW4X_V16QI);
12117 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12118 VSX_BUILTIN_STXVD2X_V2DF);
12119 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12120 VSX_BUILTIN_STXVD2X_V2DI);
12121 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12122 VSX_BUILTIN_STXVW4X_V4SF);
12123 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12124 VSX_BUILTIN_STXVW4X_V4SI);
12125 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12126 VSX_BUILTIN_STXVW4X_V8HI);
12127 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12128 VSX_BUILTIN_STXVW4X_V16QI);
12129 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12130 VSX_BUILTIN_VEC_LD);
12131 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12132 VSX_BUILTIN_VEC_ST);
12134 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12135 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12136 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12138 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12139 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12140 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12141 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12142 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12143 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12144 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12145 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12146 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12147 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12148 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12149 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
12151 /* Cell builtins. */
12152 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12153 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12154 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12155 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12157 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12158 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12159 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12160 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12162 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12163 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12164 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12165 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12167 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12168 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12169 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12170 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12172 /* Add the DST variants. */
12173 d = bdesc_dst;
12174 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12175 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12177 /* Initialize the predicates. */
12178 d = bdesc_altivec_preds;
12179 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12180 {
12181 enum machine_mode mode1;
12182 tree type;
12184 if (rs6000_overloaded_builtin_p (d->code))
12185 mode1 = VOIDmode;
12186 else
12187 mode1 = insn_data[d->icode].operand[1].mode;
12189 switch (mode1)
12190 {
12191 case VOIDmode:
12192 type = int_ftype_int_opaque_opaque;
12193 break;
12194 case V4SImode:
12195 type = int_ftype_int_v4si_v4si;
12196 break;
12197 case V8HImode:
12198 type = int_ftype_int_v8hi_v8hi;
12199 break;
12200 case V16QImode:
12201 type = int_ftype_int_v16qi_v16qi;
12202 break;
12203 case V4SFmode:
12204 type = int_ftype_int_v4sf_v4sf;
12205 break;
12206 case V2DFmode:
12207 type = int_ftype_int_v2df_v2df;
12208 break;
12209 default:
12210 gcc_unreachable ();
12211 }
12213 def_builtin (d->name, type, d->code);
12214 }
12216 /* Initialize the abs* operators. */
12217 d = bdesc_abs;
12218 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12219 {
12220 enum machine_mode mode0;
12221 tree type;
12223 mode0 = insn_data[d->icode].operand[0].mode;
12225 switch (mode0)
12226 {
12227 case V4SImode:
12228 type = v4si_ftype_v4si;
12229 break;
12230 case V8HImode:
12231 type = v8hi_ftype_v8hi;
12232 break;
12233 case V16QImode:
12234 type = v16qi_ftype_v16qi;
12235 break;
12236 case V4SFmode:
12237 type = v4sf_ftype_v4sf;
12238 break;
12239 case V2DFmode:
12240 type = v2df_ftype_v2df;
12241 break;
12242 default:
12243 gcc_unreachable ();
12244 }
12246 def_builtin (d->name, type, d->code);
12247 }
12249 /* Initialize target builtin that implements
12250 targetm.vectorize.builtin_mask_for_load. */
12252 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12253 v16qi_ftype_long_pcvoid,
12254 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12255 BUILT_IN_MD, NULL, NULL_TREE);
12256 TREE_READONLY (decl) = 1;
12257 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12258 altivec_builtin_mask_for_load = decl;
12260 /* Access to the vec_init patterns. */
12261 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12262 integer_type_node, integer_type_node,
12263 integer_type_node, NULL_TREE);
12264 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12266 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12267 short_integer_type_node,
12268 short_integer_type_node,
12269 short_integer_type_node,
12270 short_integer_type_node,
12271 short_integer_type_node,
12272 short_integer_type_node,
12273 short_integer_type_node, NULL_TREE);
12274 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12276 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12277 char_type_node, char_type_node,
12278 char_type_node, char_type_node,
12279 char_type_node, char_type_node,
12280 char_type_node, char_type_node,
12281 char_type_node, char_type_node,
12282 char_type_node, char_type_node,
12283 char_type_node, char_type_node,
12284 char_type_node, NULL_TREE);
12285 def_builtin ("__builtin_vec_init_v16qi", ftype,
12286 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12288 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12289 float_type_node, float_type_node,
12290 float_type_node, NULL_TREE);
12291 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12293 /* VSX builtins. */
12294 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12295 double_type_node, NULL_TREE);
12296 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12298 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12299 intDI_type_node, NULL_TREE);
12300 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12302 /* Access to the vec_set patterns. */
12303 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12304 intSI_type_node,
12305 integer_type_node, NULL_TREE);
12306 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12308 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12309 intHI_type_node,
12310 integer_type_node, NULL_TREE);
12311 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12313 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12314 intQI_type_node,
12315 integer_type_node, NULL_TREE);
12316 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12318 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12319 float_type_node,
12320 integer_type_node, NULL_TREE);
12321 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12323 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12324 double_type_node,
12325 integer_type_node, NULL_TREE);
12326 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12328 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12329 intDI_type_node,
12330 integer_type_node, NULL_TREE);
12331 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12333 /* Access to the vec_extract patterns. */
12334 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12335 integer_type_node, NULL_TREE);
12336 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12338 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12339 integer_type_node, NULL_TREE);
12340 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12342 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12343 integer_type_node, NULL_TREE);
12344 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12346 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12347 integer_type_node, NULL_TREE);
12348 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12350 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12351 integer_type_node, NULL_TREE);
12352 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12354 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12355 integer_type_node, NULL_TREE);
12356 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12359 /* Hash function for builtin functions with up to 3 arguments and a return
12360 type. */
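/* Two registrations that pass identical (mode, unsignedness) tuples
   hash equal and therefore share a single cached function type.  */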
12361 static unsigned
12362 builtin_hash_function (const void *hash_entry)
12363 {
12364 unsigned ret = 0;
12365 int i;
12366 const struct builtin_hash_struct *bh =
12367 (const struct builtin_hash_struct *) hash_entry;
12369 for (i = 0; i < 4; i++)
12370 {
12371 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12372 ret = (ret * 2) + bh->uns_p[i];
12373 }
12375 return ret;
12376 }
12378 /* Compare builtin hash entries H1 and H2 for equivalence. */
12379 static int
12380 builtin_hash_eq (const void *h1, const void *h2)
12381 {
12382 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12383 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12385 return ((p1->mode[0] == p2->mode[0])
12386 && (p1->mode[1] == p2->mode[1])
12387 && (p1->mode[2] == p2->mode[2])
12388 && (p1->mode[3] == p2->mode[3])
12389 && (p1->uns_p[0] == p2->uns_p[0])
12390 && (p1->uns_p[1] == p2->uns_p[1])
12391 && (p1->uns_p[2] == p2->uns_p[2])
12392 && (p1->uns_p[3] == p2->uns_p[3]));
12393 }
12395 /* Map types for builtin functions with an explicit return type and up to 3
12396 arguments. Functions with fewer than 3 arguments use VOIDmode for the
12397 unused argument slots. */
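/* For example, the __builtin_recipdiv registration in
   rs6000_init_builtins passes (DFmode, DFmode, DFmode, VOIDmode),
   which yields the C type double (*) (double, double).  */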
12398 static tree
12399 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12400 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12401 enum rs6000_builtins builtin, const char *name)
12402 {
12403 struct builtin_hash_struct h;
12404 struct builtin_hash_struct *h2;
12405 void **found;
12406 int num_args = 3;
12407 int i;
12408 tree ret_type = NULL_TREE;
12409 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12411 /* Create builtin_hash_table. */
12412 if (builtin_hash_table == NULL)
12413 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12414 builtin_hash_eq, NULL);
12416 h.type = NULL_TREE;
12417 h.mode[0] = mode_ret;
12418 h.mode[1] = mode_arg0;
12419 h.mode[2] = mode_arg1;
12420 h.mode[3] = mode_arg2;
12421 h.uns_p[0] = 0;
12422 h.uns_p[1] = 0;
12423 h.uns_p[2] = 0;
12424 h.uns_p[3] = 0;
12426 /* If the builtin produces unsigned results or takes unsigned
12427 arguments, and it is returned as a decl for the vectorizer (such as
12428 widening multiplies, permute), make sure the arguments and return value
12429 are correctly typed. */
12430 switch (builtin)
12431 {
12432 /* unsigned 2 argument functions. */
12433 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12434 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12435 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12436 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12437 h.uns_p[0] = 1;
12438 h.uns_p[1] = 1;
12439 h.uns_p[2] = 1;
12440 break;
12442 /* unsigned 3 argument functions. */
12443 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12444 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12445 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12446 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12447 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12448 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12449 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12450 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12451 case VSX_BUILTIN_VPERM_16QI_UNS:
12452 case VSX_BUILTIN_VPERM_8HI_UNS:
12453 case VSX_BUILTIN_VPERM_4SI_UNS:
12454 case VSX_BUILTIN_VPERM_2DI_UNS:
12455 case VSX_BUILTIN_XXSEL_16QI_UNS:
12456 case VSX_BUILTIN_XXSEL_8HI_UNS:
12457 case VSX_BUILTIN_XXSEL_4SI_UNS:
12458 case VSX_BUILTIN_XXSEL_2DI_UNS:
12459 h.uns_p[0] = 1;
12460 h.uns_p[1] = 1;
12461 h.uns_p[2] = 1;
12462 h.uns_p[3] = 1;
12463 break;
12465 /* signed permute functions with unsigned char mask. */
12466 case ALTIVEC_BUILTIN_VPERM_16QI:
12467 case ALTIVEC_BUILTIN_VPERM_8HI:
12468 case ALTIVEC_BUILTIN_VPERM_4SI:
12469 case ALTIVEC_BUILTIN_VPERM_4SF:
12470 case ALTIVEC_BUILTIN_VPERM_2DI:
12471 case ALTIVEC_BUILTIN_VPERM_2DF:
12472 case VSX_BUILTIN_VPERM_16QI:
12473 case VSX_BUILTIN_VPERM_8HI:
12474 case VSX_BUILTIN_VPERM_4SI:
12475 case VSX_BUILTIN_VPERM_4SF:
12476 case VSX_BUILTIN_VPERM_2DI:
12477 case VSX_BUILTIN_VPERM_2DF:
12478 h.uns_p[3] = 1;
12479 break;
12481 /* unsigned args, signed return. */
12482 case VSX_BUILTIN_XVCVUXDDP_UNS:
12483 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12484 h.uns_p[1] = 1;
12485 break;
12487 /* signed args, unsigned return. */
12488 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12489 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12490 h.uns_p[0] = 1;
12491 break;
12493 default:
12494 break;
12497 /* Figure out how many args are present. */
12498 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12499 num_args--;
12501 if (num_args == 0)
12502 fatal_error ("internal error: builtin function %s had no type", name);
12504 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12505 if (!ret_type && h.uns_p[0])
12506 ret_type = builtin_mode_to_type[h.mode[0]][0];
12508 if (!ret_type)
12509 fatal_error ("internal error: builtin function %s had an unexpected "
12510 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12512 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12513 arg_type[i] = NULL_TREE;
12515 for (i = 0; i < num_args; i++)
12517 int m = (int) h.mode[i+1];
12518 int uns_p = h.uns_p[i+1];
12520 arg_type[i] = builtin_mode_to_type[m][uns_p];
12521 if (!arg_type[i] && uns_p)
12522 arg_type[i] = builtin_mode_to_type[m][0];
12524 if (!arg_type[i])
12525 fatal_error ("internal error: builtin function %s, argument %d "
12526 "had unexpected argument type %s", name, i,
12527 GET_MODE_NAME (m));
12530 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12531 if (*found == NULL)
12533 h2 = ggc_alloc_builtin_hash_struct ();
12534 *h2 = h;
12535 *found = (void *)h2;
12537 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12538 arg_type[2], NULL_TREE);
12541 return ((struct builtin_hash_struct *)(*found))->type;
12544 static void
12545 rs6000_common_init_builtins (void)
12547 const struct builtin_description *d;
12548 size_t i;
12550 tree opaque_ftype_opaque = NULL_TREE;
12551 tree opaque_ftype_opaque_opaque = NULL_TREE;
12552 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12553 tree v2si_ftype_qi = NULL_TREE;
12554 tree v2si_ftype_v2si_qi = NULL_TREE;
12555 tree v2si_ftype_int_qi = NULL_TREE;
12556 unsigned builtin_mask = rs6000_builtin_mask;
12558 if (!TARGET_PAIRED_FLOAT)
12560 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12561 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12564 /* Paired and SPE builtins are only available if you build a compiler with
12565 the appropriate options, so only create those builtins with the
12566 appropriate compiler option. Create Altivec and VSX builtins on machines
12567 with at least the general purpose extensions (970 and newer) to allow the
12568 use of the target attribute. */
12570 if (TARGET_EXTRA_BUILTINS)
12571 builtin_mask |= RS6000_BTM_COMMON;
12573 /* Add the ternary operators. */
12574 d = bdesc_3arg;
12575 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12577 tree type;
12578 unsigned mask = d->mask;
12580 if ((mask & builtin_mask) != mask)
12582 if (TARGET_DEBUG_BUILTIN)
12583 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
12584 continue;
12587 if (rs6000_overloaded_builtin_p (d->code))
12589 if (! (type = opaque_ftype_opaque_opaque_opaque))
12590 type = opaque_ftype_opaque_opaque_opaque
12591 = build_function_type_list (opaque_V4SI_type_node,
12592 opaque_V4SI_type_node,
12593 opaque_V4SI_type_node,
12594 opaque_V4SI_type_node,
12595 NULL_TREE);
12597 else
12599 enum insn_code icode = d->icode;
12600 if (d->name == 0 || icode == CODE_FOR_nothing)
12601 continue;
12603 type = builtin_function_type (insn_data[icode].operand[0].mode,
12604 insn_data[icode].operand[1].mode,
12605 insn_data[icode].operand[2].mode,
12606 insn_data[icode].operand[3].mode,
12607 d->code, d->name);
12610 def_builtin (d->name, type, d->code);
12613 /* Add the binary operators. */
12614 d = bdesc_2arg;
12615 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12617 enum machine_mode mode0, mode1, mode2;
12618 tree type;
12619 unsigned mask = d->mask;
12621 if ((mask & builtin_mask) != mask)
12623 if (TARGET_DEBUG_BUILTIN)
12624 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
12625 continue;
12628 if (rs6000_overloaded_builtin_p (d->code))
12630 if (! (type = opaque_ftype_opaque_opaque))
12631 type = opaque_ftype_opaque_opaque
12632 = build_function_type_list (opaque_V4SI_type_node,
12633 opaque_V4SI_type_node,
12634 opaque_V4SI_type_node,
12635 NULL_TREE);
12637 else
12639 enum insn_code icode = d->icode;
12640 if (d->name == 0 || icode == CODE_FOR_nothing)
12641 continue;
12643 mode0 = insn_data[icode].operand[0].mode;
12644 mode1 = insn_data[icode].operand[1].mode;
12645 mode2 = insn_data[icode].operand[2].mode;
12647 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12649 if (! (type = v2si_ftype_v2si_qi))
12650 type = v2si_ftype_v2si_qi
12651 = build_function_type_list (opaque_V2SI_type_node,
12652 opaque_V2SI_type_node,
12653 char_type_node,
12654 NULL_TREE);
12657 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12658 && mode2 == QImode)
12660 if (! (type = v2si_ftype_int_qi))
12661 type = v2si_ftype_int_qi
12662 = build_function_type_list (opaque_V2SI_type_node,
12663 integer_type_node,
12664 char_type_node,
12665 NULL_TREE);
12668 else
12669 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12670 d->code, d->name);
12673 def_builtin (d->name, type, d->code);
12676 /* Add the simple unary operators. */
12677 d = bdesc_1arg;
12678 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12680 enum machine_mode mode0, mode1;
12681 tree type;
12682 unsigned mask = d->mask;
12684 if ((mask & builtin_mask) != mask)
12686 if (TARGET_DEBUG_BUILTIN)
12687 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
12688 continue;
12691 if (rs6000_overloaded_builtin_p (d->code))
12693 if (! (type = opaque_ftype_opaque))
12694 type = opaque_ftype_opaque
12695 = build_function_type_list (opaque_V4SI_type_node,
12696 opaque_V4SI_type_node,
12697 NULL_TREE);
12699 else
12701 enum insn_code icode = d->icode;
12702 if (d->name == 0 || icode == CODE_FOR_nothing)
12703 continue;
12705 mode0 = insn_data[icode].operand[0].mode;
12706 mode1 = insn_data[icode].operand[1].mode;
12708 if (mode0 == V2SImode && mode1 == QImode)
12710 if (! (type = v2si_ftype_qi))
12711 type = v2si_ftype_qi
12712 = build_function_type_list (opaque_V2SI_type_node,
12713 char_type_node,
12714 NULL_TREE);
12717 else
12718 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12719 d->code, d->name);
12722 def_builtin (d->name, type, d->code);
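/* Set up the optab library functions for 128-bit long double arithmetic.
   IBM extended double uses the __gcc_q* routines (or _xlq* with
   -mxl-compat), while IEEE 128-bit uses the 32-bit SVR4 _q_* routines. */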
12726 static void
12727 rs6000_init_libfuncs (void)
12729 if (!TARGET_IEEEQUAD)
12730 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12731 if (!TARGET_XL_COMPAT)
12733 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12734 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12735 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12736 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12738 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12740 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12741 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12742 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12743 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12744 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12745 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12746 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12748 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12749 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12750 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12751 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12752 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12753 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12754 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12755 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12758 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12759 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12761 else
12763 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12764 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12765 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12766 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12768 else
12770 /* 32-bit SVR4 quad floating point routines. */
12772 set_optab_libfunc (add_optab, TFmode, "_q_add");
12773 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12774 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12775 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12776 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12777 if (TARGET_PPC_GPOPT)
12778 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12780 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12781 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12782 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12783 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12784 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12785 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12787 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12788 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12789 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12790 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12791 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12792 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12793 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12794 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
12799 /* Expand a block clear operation, and return 1 if successful. Return 0
12800 if we should let the compiler generate normal code.
12802 operands[0] is the destination
12803 operands[1] is the length
12804 operands[3] is the alignment */
12807 expand_block_clear (rtx operands[])
12809 rtx orig_dest = operands[0];
12810 rtx bytes_rtx = operands[1];
12811 rtx align_rtx = operands[3];
12812 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12813 HOST_WIDE_INT align;
12814 HOST_WIDE_INT bytes;
12815 int offset;
12816 int clear_bytes;
12817 int clear_step;
12819 /* If this is not a fixed size clear, just fall back to calling memset. */
12820 if (! constp)
12821 return 0;
12823 /* This must be a fixed size alignment. */
12824 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12825 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12827 /* Anything to clear? */
12828 bytes = INTVAL (bytes_rtx);
12829 if (bytes <= 0)
12830 return 1;
12832 /* Use the builtin memset after a point, to avoid huge code bloat.
12833 When optimize_size, avoid any significant code bloat; calling
12834 memset is about 4 instructions, so allow for one instruction to
12835 load zero and three to do clearing. */
12836 if (TARGET_ALTIVEC && align >= 128)
12837 clear_step = 16;
12838 else if (TARGET_POWERPC64 && align >= 32)
12839 clear_step = 8;
12840 else if (TARGET_SPE && align >= 64)
12841 clear_step = 8;
12842 else
12843 clear_step = 4;
12845 if (optimize_size && bytes > 3 * clear_step)
12846 return 0;
12847 if (! optimize_size && bytes > 8 * clear_step)
12848 return 0;
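/* For example, at -O2 (not optimizing for size) on a 64-bit target with
   word-aligned data, clear_step is 8, so blocks of up to 64 bytes are
   cleared inline and larger blocks fall back to memset. */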
12850 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12852 enum machine_mode mode = BLKmode;
12853 rtx dest;
12855 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
12857 clear_bytes = 16;
12858 mode = V4SImode;
12860 else if (bytes >= 8 && TARGET_SPE && align >= 64)
12862 clear_bytes = 8;
12863 mode = V2SImode;
12865 else if (bytes >= 8 && TARGET_POWERPC64
12866 /* 64-bit loads and stores require word-aligned
12867 displacements. */
12868 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12870 clear_bytes = 8;
12871 mode = DImode;
12873 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12874 { /* move 4 bytes */
12875 clear_bytes = 4;
12876 mode = SImode;
12878 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12879 { /* move 2 bytes */
12880 clear_bytes = 2;
12881 mode = HImode;
12883 else /* move 1 byte at a time */
12885 clear_bytes = 1;
12886 mode = QImode;
12889 dest = adjust_address (orig_dest, mode, offset);
12891 emit_move_insn (dest, CONST0_RTX (mode));
12894 return 1;
12898 /* Expand a block move operation, and return 1 if successful. Return 0
12899 if we should let the compiler generate normal code.
12901 operands[0] is the destination
12902 operands[1] is the source
12903 operands[2] is the length
12904 operands[3] is the alignment */
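/* For example, a 35-byte copy with 128-bit alignment on an Altivec target
   without -mstring is emitted as two V4SImode moves, one HImode move, and
   one QImode move. */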
12906 #define MAX_MOVE_REG 4
12909 expand_block_move (rtx operands[])
12911 rtx orig_dest = operands[0];
12912 rtx orig_src = operands[1];
12913 rtx bytes_rtx = operands[2];
12914 rtx align_rtx = operands[3];
12915 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
12916 int align;
12917 int bytes;
12918 int offset;
12919 int move_bytes;
12920 rtx stores[MAX_MOVE_REG];
12921 int num_reg = 0;
12923 /* If this is not a fixed size move, just call memcpy */
12924 if (! constp)
12925 return 0;
12927 /* This must be a fixed size alignment. */
12928 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12929 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12931 /* Anything to move? */
12932 bytes = INTVAL (bytes_rtx);
12933 if (bytes <= 0)
12934 return 1;
12936 if (bytes > rs6000_block_move_inline_limit)
12937 return 0;
12939 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
12941 union {
12942 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
12943 rtx (*mov) (rtx, rtx);
12944 } gen_func;
12945 enum machine_mode mode = BLKmode;
12946 rtx src, dest;
12948 /* Altivec first, since it will be faster than a string move
12949 when it applies, and usually not significantly larger. */
12950 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
12952 move_bytes = 16;
12953 mode = V4SImode;
12954 gen_func.mov = gen_movv4si;
12956 else if (TARGET_SPE && bytes >= 8 && align >= 64)
12958 move_bytes = 8;
12959 mode = V2SImode;
12960 gen_func.mov = gen_movv2si;
12962 else if (TARGET_STRING
12963 && bytes > 24 /* move up to 32 bytes at a time */
12964 && ! fixed_regs[5]
12965 && ! fixed_regs[6]
12966 && ! fixed_regs[7]
12967 && ! fixed_regs[8]
12968 && ! fixed_regs[9]
12969 && ! fixed_regs[10]
12970 && ! fixed_regs[11]
12971 && ! fixed_regs[12])
12973 move_bytes = (bytes > 32) ? 32 : bytes;
12974 gen_func.movmemsi = gen_movmemsi_8reg;
12976 else if (TARGET_STRING
12977 && bytes > 16 /* move up to 24 bytes at a time */
12978 && ! fixed_regs[5]
12979 && ! fixed_regs[6]
12980 && ! fixed_regs[7]
12981 && ! fixed_regs[8]
12982 && ! fixed_regs[9]
12983 && ! fixed_regs[10])
12985 move_bytes = (bytes > 24) ? 24 : bytes;
12986 gen_func.movmemsi = gen_movmemsi_6reg;
12988 else if (TARGET_STRING
12989 && bytes > 8 /* move up to 16 bytes at a time */
12990 && ! fixed_regs[5]
12991 && ! fixed_regs[6]
12992 && ! fixed_regs[7]
12993 && ! fixed_regs[8])
12995 move_bytes = (bytes > 16) ? 16 : bytes;
12996 gen_func.movmemsi = gen_movmemsi_4reg;
12998 else if (bytes >= 8 && TARGET_POWERPC64
12999 /* 64-bit loads and stores require word-aligned
13000 displacements. */
13001 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13003 move_bytes = 8;
13004 mode = DImode;
13005 gen_func.mov = gen_movdi;
13007 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13008 { /* move up to 8 bytes at a time */
13009 move_bytes = (bytes > 8) ? 8 : bytes;
13010 gen_func.movmemsi = gen_movmemsi_2reg;
13012 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13013 { /* move 4 bytes */
13014 move_bytes = 4;
13015 mode = SImode;
13016 gen_func.mov = gen_movsi;
13018 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13019 { /* move 2 bytes */
13020 move_bytes = 2;
13021 mode = HImode;
13022 gen_func.mov = gen_movhi;
13024 else if (TARGET_STRING && bytes > 1)
13025 { /* move up to 4 bytes at a time */
13026 move_bytes = (bytes > 4) ? 4 : bytes;
13027 gen_func.movmemsi = gen_movmemsi_1reg;
13029 else /* move 1 byte at a time */
13031 move_bytes = 1;
13032 mode = QImode;
13033 gen_func.mov = gen_movqi;
13036 src = adjust_address (orig_src, mode, offset);
13037 dest = adjust_address (orig_dest, mode, offset);
13039 if (mode != BLKmode)
13041 rtx tmp_reg = gen_reg_rtx (mode);
13043 emit_insn ((*gen_func.mov) (tmp_reg, src));
13044 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13047 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13049 int i;
13050 for (i = 0; i < num_reg; i++)
13051 emit_insn (stores[i]);
13052 num_reg = 0;
13055 if (mode == BLKmode)
13057 /* Move the address into scratch registers. The movmemsi
13058 patterns require zero offset. */
13059 if (!REG_P (XEXP (src, 0)))
13061 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13062 src = replace_equiv_address (src, src_reg);
13064 set_mem_size (src, move_bytes);
13066 if (!REG_P (XEXP (dest, 0)))
13068 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13069 dest = replace_equiv_address (dest, dest_reg);
13071 set_mem_size (dest, move_bytes);
13073 emit_insn ((*gen_func.movmemsi) (dest, src,
13074 GEN_INT (move_bytes & 31),
13075 align_rtx));
13079 return 1;
13083 /* Return a string to perform a load_multiple operation.
13084 operands[0] is the vector.
13085 operands[1] is the source address.
13086 operands[2] is the first destination register. */
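/* For example, a four-word load whose address register is not referenced
   by any output register is emitted as a single lswi; the cases below
   rewrite the sequence when the address register is also a destination. */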
13088 const char *
13089 rs6000_output_load_multiple (rtx operands[3])
13091 /* We have to handle the case where the pseudo used to contain the address
13092 is assigned to one of the output registers. */
13093 int i, j;
13094 int words = XVECLEN (operands[0], 0);
13095 rtx xop[10];
13097 if (XVECLEN (operands[0], 0) == 1)
13098 return "lwz %2,0(%1)";
13100 for (i = 0; i < words; i++)
13101 if (refers_to_regno_p (REGNO (operands[2]) + i,
13102 REGNO (operands[2]) + i + 1, operands[1], 0))
13104 if (i == words-1)
13106 xop[0] = GEN_INT (4 * (words-1));
13107 xop[1] = operands[1];
13108 xop[2] = operands[2];
13109 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13110 return "";
13112 else if (i == 0)
13114 xop[0] = GEN_INT (4 * (words-1));
13115 xop[1] = operands[1];
13116 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13117 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13118 return "";
13120 else
13122 for (j = 0; j < words; j++)
13123 if (j != i)
13125 xop[0] = GEN_INT (j * 4);
13126 xop[1] = operands[1];
13127 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13128 output_asm_insn ("lwz %2,%0(%1)", xop);
13130 xop[0] = GEN_INT (i * 4);
13131 xop[1] = operands[1];
13132 output_asm_insn ("lwz %1,%0(%1)", xop);
13133 return "";
13137 return "lswi %2,%1,%N0";
13141 /* A validation routine: say whether CODE, a condition code, and MODE
13142 match. The other alternatives either don't make sense or should
13143 never be generated. */
13145 void
13146 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13148 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13149 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13150 && GET_MODE_CLASS (mode) == MODE_CC);
13152 /* These don't make sense. */
13153 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13154 || mode != CCUNSmode);
13156 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13157 || mode == CCUNSmode);
13159 gcc_assert (mode == CCFPmode
13160 || (code != ORDERED && code != UNORDERED
13161 && code != UNEQ && code != LTGT
13162 && code != UNGT && code != UNLT
13163 && code != UNGE && code != UNLE));
13165 /* These should never be generated except for
13166 flag_finite_math_only. */
13167 gcc_assert (mode != CCFPmode
13168 || flag_finite_math_only
13169 || (code != LE && code != GE
13170 && code != UNEQ && code != LTGT
13171 && code != UNGT && code != UNLT));
13173 /* These are invalid; the information is not there. */
13174 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
13178 /* Return 1 if ANDOP is a mask that has no bits on that are not in the
13179 mask required to convert the result of a rotate insn into a shift
13180 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
13183 includes_lshift_p (rtx shiftop, rtx andop)
13185 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13187 shift_mask <<= INTVAL (shiftop);
13189 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13192 /* Similar, but for right shift. */
13195 includes_rshift_p (rtx shiftop, rtx andop)
13197 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13199 shift_mask >>= INTVAL (shiftop);
13201 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13204 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13205 to perform a left shift. It must have exactly SHIFTOP least
13206 significant 0's, then one or more 1's, then zero or more 0's. */
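/* For example, with SHIFTOP = 4 the mask 0x0ff0 qualifies (exactly 4 low
   0's, then 1's, then 0's), while 0x0ff1 and 0x1fe0 do not. */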
13209 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13211 if (GET_CODE (andop) == CONST_INT)
13213 HOST_WIDE_INT c, lsb, shift_mask;
13215 c = INTVAL (andop);
13216 if (c == 0 || c == ~0)
13217 return 0;
13219 shift_mask = ~0;
13220 shift_mask <<= INTVAL (shiftop);
13222 /* Find the least significant one bit. */
13223 lsb = c & -c;
13225 /* It must coincide with the LSB of the shift mask. */
13226 if (-lsb != shift_mask)
13227 return 0;
13229 /* Invert to look for the next transition (if any). */
13230 c = ~c;
13232 /* Remove the low group of ones (originally low group of zeros). */
13233 c &= -lsb;
13235 /* Again find the lsb, and check we have all 1's above. */
13236 lsb = c & -c;
13237 return c == -lsb;
13239 else if (GET_CODE (andop) == CONST_DOUBLE
13240 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13242 HOST_WIDE_INT low, high, lsb;
13243 HOST_WIDE_INT shift_mask_low, shift_mask_high;
13245 low = CONST_DOUBLE_LOW (andop);
13246 if (HOST_BITS_PER_WIDE_INT < 64)
13247 high = CONST_DOUBLE_HIGH (andop);
13249 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
13250 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
13251 return 0;
13253 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13255 shift_mask_high = ~0;
13256 if (INTVAL (shiftop) > 32)
13257 shift_mask_high <<= INTVAL (shiftop) - 32;
13259 lsb = high & -high;
13261 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
13262 return 0;
13264 high = ~high;
13265 high &= -lsb;
13267 lsb = high & -high;
13268 return high == -lsb;
13271 shift_mask_low = ~0;
13272 shift_mask_low <<= INTVAL (shiftop);
13274 lsb = low & -low;
13276 if (-lsb != shift_mask_low)
13277 return 0;
13279 if (HOST_BITS_PER_WIDE_INT < 64)
13280 high = ~high;
13281 low = ~low;
13282 low &= -lsb;
13284 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13286 lsb = high & -high;
13287 return high == -lsb;
13290 lsb = low & -low;
13291 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
13293 else
13294 return 0;
13297 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13298 to perform a left shift. It must have SHIFTOP or more least
13299 significant 0's, with the remainder of the word 1's. */
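/* For example, with SHIFTOP = 8 the mask 0xffffffffffffff00 qualifies
   (8 low 0's, all 1's above), while 0x0000000000ffff00 does not. */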
13302 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13304 if (GET_CODE (andop) == CONST_INT)
13306 HOST_WIDE_INT c, lsb, shift_mask;
13308 shift_mask = ~0;
13309 shift_mask <<= INTVAL (shiftop);
13310 c = INTVAL (andop);
13312 /* Find the least significant one bit. */
13313 lsb = c & -c;
13315 /* It must be covered by the shift mask.
13316 This test also rejects c == 0. */
13317 if ((lsb & shift_mask) == 0)
13318 return 0;
13320 /* Check we have all 1's above the transition, and reject all 1's. */
13321 return c == -lsb && lsb != 1;
13323 else if (GET_CODE (andop) == CONST_DOUBLE
13324 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13326 HOST_WIDE_INT low, lsb, shift_mask_low;
13328 low = CONST_DOUBLE_LOW (andop);
13330 if (HOST_BITS_PER_WIDE_INT < 64)
13332 HOST_WIDE_INT high, shift_mask_high;
13334 high = CONST_DOUBLE_HIGH (andop);
13336 if (low == 0)
13338 shift_mask_high = ~0;
13339 if (INTVAL (shiftop) > 32)
13340 shift_mask_high <<= INTVAL (shiftop) - 32;
13342 lsb = high & -high;
13344 if ((lsb & shift_mask_high) == 0)
13345 return 0;
13347 return high == -lsb;
13349 if (high != ~0)
13350 return 0;
13353 shift_mask_low = ~0;
13354 shift_mask_low <<= INTVAL (shiftop);
13356 lsb = low & -low;
13358 if ((lsb & shift_mask_low) == 0)
13359 return 0;
13361 return low == -lsb && lsb != 1;
13363 else
13364 return 0;
13367 /* Return 1 if the operands will generate valid arguments to the rlwimi
13368 instruction for an insert with right shift in 64-bit mode.  The mask may
13369 not start on the first bit or stop on the last bit because the wrap-around
13370 effects of the instruction do not correspond to the semantics of the RTL insn. */
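/* For example, SIZEOP = 16, STARTOP = 40, SHIFTOP = 8 satisfies all of
   the conditions below and so can be done with a single rlwimi. */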
13373 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13375 if (INTVAL (startop) > 32
13376 && INTVAL (startop) < 64
13377 && INTVAL (sizeop) > 1
13378 && INTVAL (sizeop) + INTVAL (startop) < 64
13379 && INTVAL (shiftop) > 0
13380 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13381 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13382 return 1;
13384 return 0;
13387 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
13388 for the lfq and stfq insns, provided both are hard floating-point registers. */
13391 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13393 /* We might have been passed a SUBREG. */
13394 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13395 return 0;
13397 /* We might have been passed non-floating-point registers. */
13398 if (!FP_REGNO_P (REGNO (reg1))
13399 || !FP_REGNO_P (REGNO (reg2)))
13400 return 0;
13402 return (REGNO (reg1) == REGNO (reg2) - 1);
13405 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13406 addr1 and addr2 must be in consecutive memory locations
13407 (addr2 == addr1 + 8). */
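/* For example, the address pair r3+16 and r3+24 qualifies, while r3+16
   and r4+24 does not, because the base registers differ. */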
13410 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13412 rtx addr1, addr2;
13413 unsigned int reg1, reg2;
13414 int offset1, offset2;
13416 /* The mems cannot be volatile. */
13417 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13418 return 0;
13420 addr1 = XEXP (mem1, 0);
13421 addr2 = XEXP (mem2, 0);
13423 /* Extract an offset (if used) from the first addr. */
13424 if (GET_CODE (addr1) == PLUS)
13426 /* If not a REG, return zero. */
13427 if (GET_CODE (XEXP (addr1, 0)) != REG)
13428 return 0;
13429 else
13431 reg1 = REGNO (XEXP (addr1, 0));
13432 /* The offset must be constant! */
13433 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13434 return 0;
13435 offset1 = INTVAL (XEXP (addr1, 1));
13438 else if (GET_CODE (addr1) != REG)
13439 return 0;
13440 else
13442 reg1 = REGNO (addr1);
13443 /* This was a simple (mem (reg)) expression. Offset is 0. */
13444 offset1 = 0;
13447 /* And now for the second addr. */
13448 if (GET_CODE (addr2) == PLUS)
13450 /* If not a REG, return zero. */
13451 if (GET_CODE (XEXP (addr2, 0)) != REG)
13452 return 0;
13453 else
13455 reg2 = REGNO (XEXP (addr2, 0));
13456 /* The offset must be constant. */
13457 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13458 return 0;
13459 offset2 = INTVAL (XEXP (addr2, 1));
13462 else if (GET_CODE (addr2) != REG)
13463 return 0;
13464 else
13466 reg2 = REGNO (addr2);
13467 /* This was a simple (mem (reg)) expression. Offset is 0. */
13468 offset2 = 0;
13471 /* Both of these must have the same base register. */
13472 if (reg1 != reg2)
13473 return 0;
13475 /* The offset for the second addr must be 8 more than the first addr. */
13476 if (offset2 != offset1 + 8)
13477 return 0;
13479 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13480 instructions. */
13481 return 1;
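/* Return a stack slot to use as the secondary memory location needed to
   copy between two register classes (see rs6000_secondary_memory_needed
   below).  SDmode values go through the dedicated slot allocated by
   rs6000_alloc_sdmode_stack_slot. */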
13486 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13488 static bool eliminated = false;
13489 rtx ret;
13491 if (mode != SDmode)
13492 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13493 else
13495 rtx mem = cfun->machine->sdmode_stack_slot;
13496 gcc_assert (mem != NULL_RTX);
13498 if (!eliminated)
13500 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13501 cfun->machine->sdmode_stack_slot = mem;
13502 eliminated = true;
13504 ret = mem;
13507 if (TARGET_DEBUG_ADDR)
13509 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13510 GET_MODE_NAME (mode));
13511 if (!ret)
13512 fprintf (stderr, "\tNULL_RTX\n");
13513 else
13514 debug_rtx (ret);
13517 return ret;
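/* Tree-walking callback used by rs6000_alloc_sdmode_stack_slot below:
   return the node if it references an SDmode value, signalling that the
   function needs the SDmode stack slot. */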
13520 static tree
13521 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13523 /* Don't walk into types. */
13524 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13526 *walk_subtrees = 0;
13527 return NULL_TREE;
13530 switch (TREE_CODE (*tp))
13532 case VAR_DECL:
13533 case PARM_DECL:
13534 case FIELD_DECL:
13535 case RESULT_DECL:
13536 case SSA_NAME:
13537 case REAL_CST:
13538 case MEM_REF:
13539 case VIEW_CONVERT_EXPR:
13540 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13541 return *tp;
13542 break;
13543 default:
13544 break;
13547 return NULL_TREE;
13550 enum reload_reg_type {
13551 GPR_REGISTER_TYPE,
13552 VECTOR_REGISTER_TYPE,
13553 OTHER_REGISTER_TYPE
13556 static enum reload_reg_type
13557 rs6000_reload_register_type (enum reg_class rclass)
13559 switch (rclass)
13561 case GENERAL_REGS:
13562 case BASE_REGS:
13563 return GPR_REGISTER_TYPE;
13565 case FLOAT_REGS:
13566 case ALTIVEC_REGS:
13567 case VSX_REGS:
13568 return VECTOR_REGISTER_TYPE;
13570 default:
13571 return OTHER_REGISTER_TYPE;
13575 /* Inform reload about cases where moving X with a mode MODE to a register in
13576 RCLASS requires an extra scratch or immediate register. Return the class
13577 needed for the immediate register.
13579 For VSX and Altivec, we may need a register to convert sp+offset into
13580 reg+reg addressing.
13582 For misaligned 64-bit gpr loads and stores we need a register to
13583 convert an offset address to indirect. */
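/* For example, (mem:DI (plus:DI (reg:DI 1) (const_int 9))) is not valid
   for ld/std, whose DS-form displacement must be a multiple of 4, so a
   scratch register is needed to form the address. */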
13585 static reg_class_t
13586 rs6000_secondary_reload (bool in_p,
13587 rtx x,
13588 reg_class_t rclass_i,
13589 enum machine_mode mode,
13590 secondary_reload_info *sri)
13592 enum reg_class rclass = (enum reg_class) rclass_i;
13593 reg_class_t ret = ALL_REGS;
13594 enum insn_code icode;
13595 bool default_p = false;
13597 sri->icode = CODE_FOR_nothing;
13599 /* Convert vector loads and stores into gprs to use an additional base
13600 register. */
13601 icode = rs6000_vector_reload[mode][in_p != false];
13602 if (icode != CODE_FOR_nothing)
13604 ret = NO_REGS;
13605 sri->icode = CODE_FOR_nothing;
13606 sri->extra_cost = 0;
13608 if (GET_CODE (x) == MEM)
13610 rtx addr = XEXP (x, 0);
13612 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
13613 an extra register in that case, but it would need an extra
13614 register if the addressing is reg+reg or (reg+reg)&(-16). */
13615 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13617 if (!legitimate_indirect_address_p (addr, false)
13618 && !rs6000_legitimate_offset_address_p (TImode, addr,
13619 false, true))
13621 sri->icode = icode;
13622 /* Account for splitting the loads, and converting the
13623 address from reg+reg to reg. */
13624 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13625 + ((GET_CODE (addr) == AND) ? 1 : 0));
13628 /* Loads to and stores from vector registers can only do reg+reg
13629 addressing. Altivec registers can also do (reg+reg)&(-16). */
13630 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13631 || rclass == FLOAT_REGS || rclass == NO_REGS)
13633 if (!VECTOR_MEM_ALTIVEC_P (mode)
13634 && GET_CODE (addr) == AND
13635 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13636 && INTVAL (XEXP (addr, 1)) == -16
13637 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13638 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13640 sri->icode = icode;
13641 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13642 ? 2 : 1);
13644 else if (!legitimate_indirect_address_p (addr, false)
13645 && (rclass == NO_REGS
13646 || !legitimate_indexed_address_p (addr, false)))
13648 sri->icode = icode;
13649 sri->extra_cost = 1;
13651 else
13652 icode = CODE_FOR_nothing;
13654 /* Any other loads, including to pseudo registers which haven't been
13655 assigned to a register yet, default to require a scratch
13656 register. */
13657 else
13659 sri->icode = icode;
13660 sri->extra_cost = 2;
13663 else if (REG_P (x))
13665 int regno = true_regnum (x);
13667 icode = CODE_FOR_nothing;
13668 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13669 default_p = true;
13670 else
13672 enum reg_class xclass = REGNO_REG_CLASS (regno);
13673 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13674 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13676 /* If memory is needed, use default_secondary_reload to create the
13677 stack slot. */
13678 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13679 default_p = true;
13680 else
13681 ret = NO_REGS;
13684 else
13685 default_p = true;
13687 else if (TARGET_POWERPC64
13688 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13689 && MEM_P (x)
13690 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
13692 rtx off = address_offset (XEXP (x, 0));
13693 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13695 if (off != NULL_RTX
13696 && (INTVAL (off) & 3) != 0
13697 && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
13699 if (in_p)
13700 sri->icode = CODE_FOR_reload_di_load;
13701 else
13702 sri->icode = CODE_FOR_reload_di_store;
13703 sri->extra_cost = 2;
13704 ret = NO_REGS;
13706 else
13707 default_p = true;
13709 else if (!TARGET_POWERPC64
13710 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13711 && MEM_P (x)
13712 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
13714 rtx off = address_offset (XEXP (x, 0));
13715 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13717 /* We need a secondary reload only when our legitimate_address_p
13718 says the address is good (as otherwise the entire address
13719 will be reloaded). So for mode sizes of 8 and 16 this will
13720 be when the offset is in the ranges [0x7ffc,0x7fff] and
13721 [0x7ff4,0x7ff7] respectively. Note that the address we see
13722 here may have been manipulated by legitimize_reload_address. */
13723 if (off != NULL_RTX
13724 && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
13725 < UNITS_PER_WORD))
13727 if (in_p)
13728 sri->icode = CODE_FOR_reload_si_load;
13729 else
13730 sri->icode = CODE_FOR_reload_si_store;
13731 sri->extra_cost = 2;
13732 ret = NO_REGS;
13734 else
13735 default_p = true;
13737 else
13738 default_p = true;
13740 if (default_p)
13741 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13743 gcc_assert (ret != ALL_REGS);
13745 if (TARGET_DEBUG_ADDR)
13747 fprintf (stderr,
13748 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13749 "mode = %s",
13750 reg_class_names[ret],
13751 in_p ? "true" : "false",
13752 reg_class_names[rclass],
13753 GET_MODE_NAME (mode));
13755 if (default_p)
13756 fprintf (stderr, ", default secondary reload");
13758 if (sri->icode != CODE_FOR_nothing)
13759 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13760 insn_data[sri->icode].name, sri->extra_cost);
13761 else
13762 fprintf (stderr, "\n");
13764 debug_rtx (x);
13767 return ret;
13770 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13771 to SP+reg addressing. */
13773 void
13774 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13776 int regno = true_regnum (reg);
13777 enum machine_mode mode = GET_MODE (reg);
13778 enum reg_class rclass;
13779 rtx addr;
13780 rtx and_op2 = NULL_RTX;
13781 rtx addr_op1;
13782 rtx addr_op2;
13783 rtx scratch_or_premodify = scratch;
13784 rtx and_rtx;
13785 rtx cc_clobber;
13787 if (TARGET_DEBUG_ADDR)
13789 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13790 store_p ? "store" : "load");
13791 fprintf (stderr, "reg:\n");
13792 debug_rtx (reg);
13793 fprintf (stderr, "mem:\n");
13794 debug_rtx (mem);
13795 fprintf (stderr, "scratch:\n");
13796 debug_rtx (scratch);
13799 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13800 gcc_assert (GET_CODE (mem) == MEM);
13801 rclass = REGNO_REG_CLASS (regno);
13802 addr = XEXP (mem, 0);
13804 switch (rclass)
13806 /* GPRs can handle reg + small constant, all other addresses need to use
13807 the scratch register. */
13808 case GENERAL_REGS:
13809 case BASE_REGS:
13810 if (GET_CODE (addr) == AND)
13812 and_op2 = XEXP (addr, 1);
13813 addr = XEXP (addr, 0);
13816 if (GET_CODE (addr) == PRE_MODIFY)
13818 scratch_or_premodify = XEXP (addr, 0);
13819 gcc_assert (REG_P (scratch_or_premodify));
13820 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13821 addr = XEXP (addr, 1);
13824 if (GET_CODE (addr) == PLUS
13825 && (and_op2 != NULL_RTX
13826 || !rs6000_legitimate_offset_address_p (TImode, addr,
13827 false, true)))
13829 addr_op1 = XEXP (addr, 0);
13830 addr_op2 = XEXP (addr, 1);
13831 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
13833 if (!REG_P (addr_op2)
13834 && (GET_CODE (addr_op2) != CONST_INT
13835 || !satisfies_constraint_I (addr_op2)))
13837 if (TARGET_DEBUG_ADDR)
13839 fprintf (stderr,
13840 "\nMove plus addr to register %s, mode = %s: ",
13841 rs6000_reg_names[REGNO (scratch)],
13842 GET_MODE_NAME (mode));
13843 debug_rtx (addr_op2);
13845 rs6000_emit_move (scratch, addr_op2, Pmode);
13846 addr_op2 = scratch;
13849 emit_insn (gen_rtx_SET (VOIDmode,
13850 scratch_or_premodify,
13851 gen_rtx_PLUS (Pmode,
13852 addr_op1,
13853 addr_op2)));
13855 addr = scratch_or_premodify;
13856 scratch_or_premodify = scratch;
13858 else if (!legitimate_indirect_address_p (addr, false)
13859 && !rs6000_legitimate_offset_address_p (TImode, addr,
13860 false, true))
13862 if (TARGET_DEBUG_ADDR)
13864 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13865 rs6000_reg_names[REGNO (scratch_or_premodify)],
13866 GET_MODE_NAME (mode));
13867 debug_rtx (addr);
13869 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13870 addr = scratch_or_premodify;
13871 scratch_or_premodify = scratch;
13873 break;
13875 /* Float/Altivec registers can only handle reg+reg addressing. Move
13876 other addresses into a scratch register. */
13877 case FLOAT_REGS:
13878 case VSX_REGS:
13879 case ALTIVEC_REGS:
13881 /* With float regs, we need to handle the AND ourselves, since we can't
13882 use the Altivec instruction with an implicit AND -16. Allow scalar
13883 loads to float registers to use reg+offset even if VSX. */
13884 if (GET_CODE (addr) == AND
13885 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
13886 || GET_CODE (XEXP (addr, 1)) != CONST_INT
13887 || INTVAL (XEXP (addr, 1)) != -16
13888 || !VECTOR_MEM_ALTIVEC_P (mode)))
13890 and_op2 = XEXP (addr, 1);
13891 addr = XEXP (addr, 0);
13894 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13895 as the address later. */
13896 if (GET_CODE (addr) == PRE_MODIFY
13897 && (!VECTOR_MEM_VSX_P (mode)
13898 || and_op2 != NULL_RTX
13899 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
13901 scratch_or_premodify = XEXP (addr, 0);
13902 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
13903 false));
13904 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13905 addr = XEXP (addr, 1);
13908 if (legitimate_indirect_address_p (addr, false) /* reg */
13909 || legitimate_indexed_address_p (addr, false) /* reg+reg */
13910 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
13911 || (GET_CODE (addr) == AND /* Altivec memory */
13912 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13913 && INTVAL (XEXP (addr, 1)) == -16
13914 && VECTOR_MEM_ALTIVEC_P (mode))
13915 || (rclass == FLOAT_REGS /* legacy float mem */
13916 && GET_MODE_SIZE (mode) == 8
13917 && and_op2 == NULL_RTX
13918 && scratch_or_premodify == scratch
13919 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
13922 else if (GET_CODE (addr) == PLUS)
13924 addr_op1 = XEXP (addr, 0);
13925 addr_op2 = XEXP (addr, 1);
13926 gcc_assert (REG_P (addr_op1));
13928 if (TARGET_DEBUG_ADDR)
13930 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
13931 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13932 debug_rtx (addr_op2);
13934 rs6000_emit_move (scratch, addr_op2, Pmode);
13935 emit_insn (gen_rtx_SET (VOIDmode,
13936 scratch_or_premodify,
13937 gen_rtx_PLUS (Pmode,
13938 addr_op1,
13939 scratch)));
13940 addr = scratch_or_premodify;
13941 scratch_or_premodify = scratch;
13944 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
13945 || GET_CODE (addr) == CONST_INT || REG_P (addr))
13947 if (TARGET_DEBUG_ADDR)
13949 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13950 rs6000_reg_names[REGNO (scratch_or_premodify)],
13951 GET_MODE_NAME (mode));
13952 debug_rtx (addr);
13955 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13956 addr = scratch_or_premodify;
13957 scratch_or_premodify = scratch;
13960 else
13961 gcc_unreachable ();
13963 break;
13965 default:
13966 gcc_unreachable ();
13969 /* If the original address involved a pre-modify for which we couldn't use
13970 the VSX memory instruction with update, and we haven't already taken care
13971 of it, store the address in the pre-modify register and use that as the
13972 address. */
13973 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
13975 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
13976 addr = scratch_or_premodify;
13979 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13980 memory instruction, recreate the AND now, including the clobber which is
13981 generated by the general ANDSI3/ANDDI3 patterns for the
13982 andi. instruction. */
13983 if (and_op2 != NULL_RTX)
13985 if (! legitimate_indirect_address_p (addr, false))
13987 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
13988 addr = scratch;
13991 if (TARGET_DEBUG_ADDR)
13993 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
13994 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13995 debug_rtx (and_op2);
13998 and_rtx = gen_rtx_SET (VOIDmode,
13999 scratch,
14000 gen_rtx_AND (Pmode,
14001 addr,
14002 and_op2));
14004 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
14005 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14006 gen_rtvec (2, and_rtx, cc_clobber)));
14007 addr = scratch;
14010 /* Adjust the address if it changed. */
14011 if (addr != XEXP (mem, 0))
14013 mem = change_address (mem, mode, addr);
14014 if (TARGET_DEBUG_ADDR)
14015 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14018 /* Now create the move. */
14019 if (store_p)
14020 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14021 else
14022 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14024 return;
14027 /* Convert reloads involving 64-bit gprs and misaligned offset
14028 addressing, or multiple 32-bit gprs and offsets that are too large,
14029 to use indirect addressing. */
14031 void
14032 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
14034 int regno = true_regnum (reg);
14035 enum reg_class rclass;
14036 rtx addr;
14037 rtx scratch_or_premodify = scratch;
14039 if (TARGET_DEBUG_ADDR)
14041 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
14042 store_p ? "store" : "load");
14043 fprintf (stderr, "reg:\n");
14044 debug_rtx (reg);
14045 fprintf (stderr, "mem:\n");
14046 debug_rtx (mem);
14047 fprintf (stderr, "scratch:\n");
14048 debug_rtx (scratch);
14051 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
14052 gcc_assert (GET_CODE (mem) == MEM);
14053 rclass = REGNO_REG_CLASS (regno);
14054 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
14055 addr = XEXP (mem, 0);
14057 if (GET_CODE (addr) == PRE_MODIFY)
14059 scratch_or_premodify = XEXP (addr, 0);
14060 gcc_assert (REG_P (scratch_or_premodify));
14061 addr = XEXP (addr, 1);
14063 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
14065 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14067 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
14069 /* Now create the move. */
14070 if (store_p)
14071 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14072 else
14073 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14075 return;
14078 /* Allocate a 64-bit stack slot to be used for copying SDmode
14079 values through if this function has any SDmode references. */
14081 static void
14082 rs6000_alloc_sdmode_stack_slot (void)
14084 tree t;
14085 basic_block bb;
14086 gimple_stmt_iterator gsi;
14088 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
14090 FOR_EACH_BB (bb)
14091 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
14093 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
14094 if (ret)
14096 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14097 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14098 SDmode, 0);
14099 return;
14103 /* Check for any SDmode parameters of the function. */
14104 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
14106 if (TREE_TYPE (t) == error_mark_node)
14107 continue;
14109 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
14110 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
14112 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14113 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14114 SDmode, 0);
14115 return;
14120 static void
14121 rs6000_instantiate_decls (void)
14123 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
14124 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
14127 /* Given an rtx X being reloaded into a reg required to be
14128 in class CLASS, return the class of reg to actually use.
14129 In general this is just CLASS; but on some machines
14130 in some cases it is preferable to use a more restrictive class.
14132 On the RS/6000, we have to return NO_REGS when we want to reload a
14133 floating-point CONST_DOUBLE to force it to be copied to memory.
14135 We also don't want to reload integer values into floating-point
14136 registers if we can at all help it. In fact, this can
14137 cause reload to die, if it tries to generate a reload of CTR
14138 into a FP register and discovers it doesn't have the memory location
14139 required.
14141 ??? Would it be a good idea to have reload do the converse, that is
14142 try to reload floating modes into FP registers if possible?
14145 static enum reg_class
14146 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
14148 enum machine_mode mode = GET_MODE (x);
14150 if (VECTOR_UNIT_VSX_P (mode)
14151 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
14152 return rclass;
14154 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
14155 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
14156 && easy_vector_constant (x, mode))
14157 return ALTIVEC_REGS;
14159 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
14160 return NO_REGS;
14162 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
14163 return GENERAL_REGS;
14165 /* For VSX, prefer the traditional registers for 64-bit values because we can
14166 use the non-VSX loads. Prefer the Altivec registers if Altivec is
14167 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
14168 prefer Altivec loads. */
14169 if (rclass == VSX_REGS)
14171 if (GET_MODE_SIZE (mode) <= 8)
14172 return FLOAT_REGS;
14174 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
14175 return ALTIVEC_REGS;
14177 return rclass;
14180 return rclass;
14183 /* Debug version of rs6000_preferred_reload_class. */
14184 static enum reg_class
14185 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
14187 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
14189 fprintf (stderr,
14190 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
14191 "mode = %s, x:\n",
14192 reg_class_names[ret], reg_class_names[rclass],
14193 GET_MODE_NAME (GET_MODE (x)));
14194 debug_rtx (x);
14196 return ret;
14199 /* If we are copying between FP or AltiVec registers and anything else, we need
14200 a memory location. The exception is when we are targeting ppc64 and the
14201 move to/from fpr to gpr instructions are available. Also, under VSX, you
14202 can copy vector registers from the FP register set to the Altivec register
14203 set and vice versa. */
14205 static bool
14206 rs6000_secondary_memory_needed (enum reg_class class1,
14207 enum reg_class class2,
14208 enum machine_mode mode)
14210 if (class1 == class2)
14211 return false;
14213 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14214 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14215 between these classes. But we need memory for other things that can go in
14216 FLOAT_REGS like SFmode. */
14217 if (TARGET_VSX
14218 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14219 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14220 || class1 == FLOAT_REGS))
14221 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14222 && class2 != FLOAT_REGS);
14224 if (class1 == VSX_REGS || class2 == VSX_REGS)
14225 return true;
14227 if (class1 == FLOAT_REGS
14228 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14229 || ((mode != DFmode)
14230 && (mode != DDmode)
14231 && (mode != DImode))))
14232 return true;
14234 if (class2 == FLOAT_REGS
14235 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14236 || ((mode != DFmode)
14237 && (mode != DDmode)
14238 && (mode != DImode))))
14239 return true;
14241 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14242 return true;
14244 return false;
14247 /* Debug version of rs6000_secondary_memory_needed. */
14248 static bool
14249 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14250 enum reg_class class2,
14251 enum machine_mode mode)
14253 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14255 fprintf (stderr,
14256 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14257 "class2 = %s, mode = %s\n",
14258 ret ? "true" : "false", reg_class_names[class1],
14259 reg_class_names[class2], GET_MODE_NAME (mode));
14261 return ret;
14264 /* Return the register class of a scratch register needed to copy IN into
14265 or out of a register in RCLASS in MODE. If it can be done directly,
14266 NO_REGS is returned. */
14268 static enum reg_class
14269 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14270 rtx in)
14272 int regno;
14274 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14275 #if TARGET_MACHO
14276 && MACHOPIC_INDIRECT
14277 #endif
14280 /* We cannot copy a symbolic operand directly into anything
14281 other than BASE_REGS for TARGET_ELF. So indicate that a
14282 register from BASE_REGS is needed as an intermediate
14283 register.
14285 On Darwin, pic addresses require a load from memory, which
14286 needs a base register. */
14287 if (rclass != BASE_REGS
14288 && (GET_CODE (in) == SYMBOL_REF
14289 || GET_CODE (in) == HIGH
14290 || GET_CODE (in) == LABEL_REF
14291 || GET_CODE (in) == CONST))
14292 return BASE_REGS;
14295 if (GET_CODE (in) == REG)
14297 regno = REGNO (in);
14298 if (regno >= FIRST_PSEUDO_REGISTER)
14300 regno = true_regnum (in);
14301 if (regno >= FIRST_PSEUDO_REGISTER)
14302 regno = -1;
14305 else if (GET_CODE (in) == SUBREG)
14307 regno = true_regnum (in);
14308 if (regno >= FIRST_PSEUDO_REGISTER)
14309 regno = -1;
14311 else
14312 regno = -1;
14314 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14315 into anything. */
14316 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14317 || (regno >= 0 && INT_REGNO_P (regno)))
14318 return NO_REGS;
14320 /* Constants, memory, and FP registers can go into FP registers. */
14321 if ((regno == -1 || FP_REGNO_P (regno))
14322 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14323 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14325 /* Memory and FP/AltiVec registers can go into FP/AltiVec registers under
14326 VSX. */
14327 if (TARGET_VSX
14328 && (regno == -1 || VSX_REGNO_P (regno))
14329 && VSX_REG_CLASS_P (rclass))
14330 return NO_REGS;
14332 /* Memory and AltiVec registers can go into AltiVec registers. */
14333 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14334 && rclass == ALTIVEC_REGS)
14335 return NO_REGS;
14337 /* We can copy among the CR registers. */
14338 if ((rclass == CR_REGS || rclass == CR0_REGS)
14339 && regno >= 0 && CR_REGNO_P (regno))
14340 return NO_REGS;
14342 /* Otherwise, we need GENERAL_REGS. */
14343 return GENERAL_REGS;
14346 /* Debug version of rs6000_secondary_reload_class. */
14347 static enum reg_class
14348 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14349 enum machine_mode mode, rtx in)
14351 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14352 fprintf (stderr,
14353 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14354 "mode = %s, input rtx:\n",
14355 reg_class_names[ret], reg_class_names[rclass],
14356 GET_MODE_NAME (mode));
14357 debug_rtx (in);
14359 return ret;
14362 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14364 static bool
14365 rs6000_cannot_change_mode_class (enum machine_mode from,
14366 enum machine_mode to,
14367 enum reg_class rclass)
14369 unsigned from_size = GET_MODE_SIZE (from);
14370 unsigned to_size = GET_MODE_SIZE (to);
14372 if (from_size != to_size)
14374 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14375 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14376 && reg_classes_intersect_p (xclass, rclass));
14379 if (TARGET_E500_DOUBLE
14380 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14381 || (((to) == TFmode) + ((from) == TFmode)) == 1
14382 || (((to) == DDmode) + ((from) == DDmode)) == 1
14383 || (((to) == TDmode) + ((from) == TDmode)) == 1
14384 || (((to) == DImode) + ((from) == DImode)) == 1))
14385 return true;
14387 /* Since the VSX register set includes traditional floating point registers
14388 and altivec registers, just check for the size being different instead of
14389 trying to check whether the modes are vector modes. Otherwise it won't
14390 allow say DF and DI to change classes. */
14391 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14392 return (from_size != 8 && from_size != 16);
14394 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14395 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14396 return true;
14398 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14399 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14400 return true;
14402 return false;
14405 /* Debug version of rs6000_cannot_change_mode_class. */
14406 static bool
14407 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14408 enum machine_mode to,
14409 enum reg_class rclass)
14411 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14413 fprintf (stderr,
14414 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14415 "to = %s, rclass = %s\n",
14416 ret ? "true" : "false",
14417 GET_MODE_NAME (from), GET_MODE_NAME (to),
14418 reg_class_names[rclass]);
14420 return ret;
14423 /* Given a comparison operation, return the bit number in CCR to test. We
14424 know this is a valid comparison.
14426 SCC_P is 1 if this is for an scc. That means that %D will have been
14427 used instead of %C, so the bits will be in different places.
14429 Return -1 if OP isn't a valid comparison for some reason. */
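/* For example, a GT test of CR field 2 (with SCC_P zero) tests
   bit 4*2 + 1 == 9. */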
14432 ccr_bit (rtx op, int scc_p)
14434 enum rtx_code code = GET_CODE (op);
14435 enum machine_mode cc_mode;
14436 int cc_regnum;
14437 int base_bit;
14438 rtx reg;
14440 if (!COMPARISON_P (op))
14441 return -1;
14443 reg = XEXP (op, 0);
14445 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14447 cc_mode = GET_MODE (reg);
14448 cc_regnum = REGNO (reg);
14449 base_bit = 4 * (cc_regnum - CR0_REGNO);
14451 validate_condition_mode (code, cc_mode);
14453 /* When generating a sCOND operation, only positive conditions are
14454 allowed. */
14455 gcc_assert (!scc_p
14456 || code == EQ || code == GT || code == LT || code == UNORDERED
14457 || code == GTU || code == LTU);
14459 switch (code)
14461 case NE:
14462 return scc_p ? base_bit + 3 : base_bit + 2;
14463 case EQ:
14464 return base_bit + 2;
14465 case GT: case GTU: case UNLE:
14466 return base_bit + 1;
14467 case LT: case LTU: case UNGE:
14468 return base_bit;
14469 case ORDERED: case UNORDERED:
14470 return base_bit + 3;
14472 case GE: case GEU:
14473 /* If scc, we will have done a cror to put the bit in the
14474 unordered position. So test that bit. For integer, this is ! LT
14475 unless this is an scc insn. */
14476 return scc_p ? base_bit + 3 : base_bit;
14478 case LE: case LEU:
14479 return scc_p ? base_bit + 3 : base_bit + 1;
14481 default:
14482 gcc_unreachable ();
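/* As a concrete instance of the mapping above: a comparison held in
   cr2 has base_bit 8, so its LT, GT, EQ and SO/UN bits are CCR bits
   8, 9, 10 and 11 respectively. */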
14486 /* Return the GOT register. */
14488 rtx
14489 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14491 /* The second flow pass currently (June 1999) can't update
14492 regs_ever_live without disturbing other parts of the compiler, so
14493 update it here to make the prolog/epilogue code happy. */
14494 if (!can_create_pseudo_p ()
14495 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14496 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14498 crtl->uses_pic_offset_table = 1;
14500 return pic_offset_table_rtx;
14503 static rs6000_stack_t stack_info;
14505 /* Function to init struct machine_function.
14506 This will be called, via a pointer variable,
14507 from push_function_context. */
14509 static struct machine_function *
14510 rs6000_init_machine_status (void)
14512 stack_info.reload_completed = 0;
14513 return ggc_alloc_cleared_machine_function ();
14516 /* These macros test for integers and extract the low-order bits. */
14517 #define INT_P(X) \
14518 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
14519 && GET_MODE (X) == VOIDmode)
14521 #define INT_LOWPART(X) \
14522 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
14524 int
14525 extract_MB (rtx op)
14527 int i;
14528 unsigned long val = INT_LOWPART (op);
14530 /* If the high bit is zero, the value is the first 1 bit we find
14531 from the left. */
14532 if ((val & 0x80000000) == 0)
14534 gcc_assert (val & 0xffffffff);
14536 i = 1;
14537 while (((val <<= 1) & 0x80000000) == 0)
14538 ++i;
14539 return i;
14542 /* If the high bit is set and the low bit is not, or the mask is all
14543 1's, the value is zero. */
14544 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14545 return 0;
14547 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14548 from the right. */
14549 i = 31;
14550 while (((val >>= 1) & 1) != 0)
14551 --i;
14553 return i;
14556 int
14557 extract_ME (rtx op)
14559 int i;
14560 unsigned long val = INT_LOWPART (op);
14562 /* If the low bit is zero, the value is the first 1 bit we find from
14563 the right. */
14564 if ((val & 1) == 0)
14566 gcc_assert (val & 0xffffffff);
14568 i = 30;
14569 while (((val >>= 1) & 1) == 0)
14570 --i;
14572 return i;
14575 /* If the low bit is set and the high bit is not, or the mask is all
14576 1's, the value is 31. */
14577 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14578 return 31;
14580 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14581 from the left. */
14582 i = 0;
14583 while (((val <<= 1) & 0x80000000) != 0)
14584 ++i;
14586 return i;
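/* Worked example for the two extractors: the rlwinm mask 0x00ffff00
   yields MB = 8 (position of the first 1 bit, counting from the MSB)
   and ME = 23 (position of the last 1 bit); the wrap-around mask
   0xff0000ff yields MB = 24 and ME = 7. */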
14589 /* Locate some local-dynamic symbol still in use by this function
14590 so that we can print its name in some tls_ld pattern. */
14592 static const char *
14593 rs6000_get_some_local_dynamic_name (void)
14595 rtx insn;
14597 if (cfun->machine->some_ld_name)
14598 return cfun->machine->some_ld_name;
14600 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
14601 if (INSN_P (insn)
14602 && for_each_rtx (&PATTERN (insn),
14603 rs6000_get_some_local_dynamic_name_1, 0))
14604 return cfun->machine->some_ld_name;
14606 gcc_unreachable ();
14609 /* Helper function for rs6000_get_some_local_dynamic_name. */
14611 static int
14612 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14614 rtx x = *px;
14616 if (GET_CODE (x) == SYMBOL_REF)
14618 const char *str = XSTR (x, 0);
14619 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14621 cfun->machine->some_ld_name = str;
14622 return 1;
14626 return 0;
14629 /* Write out a function code label. */
14631 void
14632 rs6000_output_function_entry (FILE *file, const char *fname)
14634 if (fname[0] != '.')
14636 switch (DEFAULT_ABI)
14638 default:
14639 gcc_unreachable ();
14641 case ABI_AIX:
14642 if (DOT_SYMBOLS)
14643 putc ('.', file);
14644 else
14645 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14646 break;
14648 case ABI_V4:
14649 case ABI_DARWIN:
14650 break;
14654 RS6000_OUTPUT_BASENAME (file, fname);
14657 /* Print an operand. Recognize special options, documented below. */
14659 #if TARGET_ELF
14660 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14661 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14662 #else
14663 #define SMALL_DATA_RELOC "sda21"
14664 #define SMALL_DATA_REG 0
14665 #endif
14667 void
14668 print_operand (FILE *file, rtx x, int code)
14670 int i;
14671 unsigned HOST_WIDE_INT uval;
14673 switch (code)
14675 /* %a is output_address. */
14677 case 'A':
14678 /* If X is a constant integer whose low-order 5 bits are zero,
14679 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
14680 in the AIX assembler where "sri" with a zero shift count
14681 writes a trash instruction. */
14682 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
14683 putc ('l', file);
14684 else
14685 putc ('r', file);
14686 return;
14688 case 'b':
14689 /* If constant, low-order 16 bits of constant, unsigned.
14690 Otherwise, write normally. */
14691 if (INT_P (x))
14692 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14693 else
14694 print_operand (file, x, 0);
14695 return;
14697 case 'B':
14698 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14699 for 64-bit mask direction. */
14700 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14701 return;
14703 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14704 output_operand. */
14706 case 'D':
14707 /* Like 'J' but get to the GT bit only. */
14708 gcc_assert (REG_P (x));
14710 /* Bit 1 is GT bit. */
14711 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14713 /* Add one for shift count in rlinm for scc. */
14714 fprintf (file, "%d", i + 1);
14715 return;
14717 case 'E':
14718 /* X is a CR register. Print the number of the EQ bit of the CR. */
14719 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14720 output_operand_lossage ("invalid %%E value");
14721 else
14722 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14723 return;
14725 case 'f':
14726 /* X is a CR register. Print the shift count needed to move it
14727 to the high-order four bits. */
14728 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14729 output_operand_lossage ("invalid %%f value");
14730 else
14731 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14732 return;
14734 case 'F':
14735 /* Similar, but print the count for the rotate in the opposite
14736 direction. */
14737 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14738 output_operand_lossage ("invalid %%F value");
14739 else
14740 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14741 return;
14743 case 'G':
14744 /* X is a constant integer. If it is negative, print "m",
14745 otherwise print "z". This is to make an aze or ame insn. */
14746 if (GET_CODE (x) != CONST_INT)
14747 output_operand_lossage ("invalid %%G value");
14748 else if (INTVAL (x) >= 0)
14749 putc ('z', file);
14750 else
14751 putc ('m', file);
14752 return;
14754 case 'h':
14755 /* If constant, output low-order five bits. Otherwise, write
14756 normally. */
14757 if (INT_P (x))
14758 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14759 else
14760 print_operand (file, x, 0);
14761 return;
14763 case 'H':
14764 /* If constant, output low-order six bits. Otherwise, write
14765 normally. */
14766 if (INT_P (x))
14767 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14768 else
14769 print_operand (file, x, 0);
14770 return;
14772 case 'I':
14773 /* Print `i' if this is a constant, else nothing. */
14774 if (INT_P (x))
14775 putc ('i', file);
14776 return;
14778 case 'j':
14779 /* Write the bit number in CCR for jump. */
14780 i = ccr_bit (x, 0);
14781 if (i == -1)
14782 output_operand_lossage ("invalid %%j code");
14783 else
14784 fprintf (file, "%d", i);
14785 return;
14787 case 'J':
14788 /* Similar, but add one for shift count in rlinm for scc and pass
14789 scc flag to `ccr_bit'. */
14790 i = ccr_bit (x, 1);
14791 if (i == -1)
14792 output_operand_lossage ("invalid %%J code");
14793 else
14794 /* If we want bit 31, write a shift count of zero, not 32. */
14795 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14796 return;
14798 case 'k':
14799 /* X must be a constant. Write the 1's complement of the
14800 constant. */
14801 if (! INT_P (x))
14802 output_operand_lossage ("invalid %%k value");
14803 else
14804 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14805 return;
14807 case 'K':
14808 /* X must be a symbolic constant on ELF. Write an
14809 expression suitable for an 'addi' that adds in the low 16
14810 bits of the MEM. */
14811 if (GET_CODE (x) == CONST)
14813 if (GET_CODE (XEXP (x, 0)) != PLUS
14814 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14815 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14816 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14817 output_operand_lossage ("invalid %%K value");
14819 print_operand_address (file, x);
14820 fputs ("@l", file);
14821 return;
14823 /* %l is output_asm_label. */
14825 case 'L':
14826 /* Write second word of DImode or DFmode reference. Works on register
14827 or non-indexed memory only. */
14828 if (REG_P (x))
14829 fputs (reg_names[REGNO (x) + 1], file);
14830 else if (MEM_P (x))
14832 /* Handle possible auto-increment. Since it is pre-increment and
14833 we have already done it, we can just use an offset of one word. */
14834 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14835 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14836 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14837 UNITS_PER_WORD));
14838 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14839 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14840 UNITS_PER_WORD));
14841 else
14842 output_address (XEXP (adjust_address_nv (x, SImode,
14843 UNITS_PER_WORD),
14844 0));
14846 if (small_data_operand (x, GET_MODE (x)))
14847 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14848 reg_names[SMALL_DATA_REG]);
14850 return;
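/* For instance, with the default numeric register names a DImode value
   held in r3/r4 makes %L print "4", the second register of the pair;
   for a memory operand it prints the address displaced by one word. */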
14852 case 'm':
14853 /* MB value for a mask operand. */
14854 if (! mask_operand (x, SImode))
14855 output_operand_lossage ("invalid %%m value");
14857 fprintf (file, "%d", extract_MB (x));
14858 return;
14860 case 'M':
14861 /* ME value for a mask operand. */
14862 if (! mask_operand (x, SImode))
14863 output_operand_lossage ("invalid %%M value");
14865 fprintf (file, "%d", extract_ME (x));
14866 return;
14868 /* %n outputs the negative of its operand. */
14870 case 'N':
14871 /* Write the number of elements in the vector times 4. */
14872 if (GET_CODE (x) != PARALLEL)
14873 output_operand_lossage ("invalid %%N value");
14874 else
14875 fprintf (file, "%d", XVECLEN (x, 0) * 4);
14876 return;
14878 case 'O':
14879 /* Similar, but subtract 1 first. */
14880 if (GET_CODE (x) != PARALLEL)
14881 output_operand_lossage ("invalid %%O value");
14882 else
14883 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
14884 return;
14886 case 'p':
14887 /* X is a CONST_INT that is a power of two. Output the logarithm. */
14888 if (! INT_P (x)
14889 || INT_LOWPART (x) < 0
14890 || (i = exact_log2 (INT_LOWPART (x))) < 0)
14891 output_operand_lossage ("invalid %%p value");
14892 else
14893 fprintf (file, "%d", i);
14894 return;
14896 case 'P':
14897 /* The operand must be an indirect memory reference. The result
14898 is the register name. */
14899 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
14900 || REGNO (XEXP (x, 0)) >= 32)
14901 output_operand_lossage ("invalid %%P value");
14902 else
14903 fputs (reg_names[REGNO (XEXP (x, 0))], file);
14904 return;
14906 case 'q':
14907 /* This outputs the logical code corresponding to a boolean
14908 expression. The expression may have one or both operands
14909 negated (if one, only the first one). For condition register
14910 logical operations, it will also treat the negated
14911 CR codes as NOTs, but not handle NOTs of them. */
14913 const char *const *t = 0;
14914 const char *s;
14915 enum rtx_code code = GET_CODE (x);
14916 static const char * const tbl[3][3] = {
14917 { "and", "andc", "nor" },
14918 { "or", "orc", "nand" },
14919 { "xor", "eqv", "xor" } };
14921 if (code == AND)
14922 t = tbl[0];
14923 else if (code == IOR)
14924 t = tbl[1];
14925 else if (code == XOR)
14926 t = tbl[2];
14927 else
14928 output_operand_lossage ("invalid %%q value");
14930 if (GET_CODE (XEXP (x, 0)) != NOT)
14931 s = t[0];
14932 else
14934 if (GET_CODE (XEXP (x, 1)) == NOT)
14935 s = t[2];
14936 else
14937 s = t[1];
14940 fputs (s, file);
14942 return;
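/* E.g., (and (not a) b) selects "andc" from the table above, and
   (ior (not a) (not b)) selects "nand". */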
14944 case 'Q':
14945 if (TARGET_MFCRF)
14946 fputc (',', file);
14947 /* FALLTHRU */
14948 else
14949 return;
14951 case 'R':
14952 /* X is a CR register. Print the mask for `mtcrf'. */
14953 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14954 output_operand_lossage ("invalid %%R value");
14955 else
14956 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
14957 return;
14959 case 's':
14960 /* Low 5 bits of 32 - value. */
14961 if (! INT_P (x))
14962 output_operand_lossage ("invalid %%s value");
14963 else
14964 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
14965 return;
14967 case 'S':
14968 /* PowerPC64 mask position. All 0's is excluded.
14969 CONST_INT 32-bit mask is considered sign-extended so any
14970 transition must occur within the CONST_INT, not on the boundary. */
14971 if (! mask64_operand (x, DImode))
14972 output_operand_lossage ("invalid %%S value");
14974 uval = INT_LOWPART (x);
14976 if (uval & 1) /* Clear Left */
14978 #if HOST_BITS_PER_WIDE_INT > 64
14979 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14980 #endif
14981 i = 64;
14983 else /* Clear Right */
14985 uval = ~uval;
14986 #if HOST_BITS_PER_WIDE_INT > 64
14987 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14988 #endif
14989 i = 63;
14991 while (uval != 0)
14992 --i, uval >>= 1;
14993 gcc_assert (i >= 0);
14994 fprintf (file, "%d", i);
14995 return;
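/* Example: the mask 0x00000000ffffffff takes the clear-left path and
   prints 32 (an rldicl-style clear of the 32 high-order bits), while
   0xffffffff00000000 takes clear-right and prints 31. */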
14997 case 't':
14998 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
14999 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
15001 /* Bit 3 is OV bit. */
15002 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
15004 /* If we want bit 31, write a shift count of zero, not 32. */
15005 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15006 return;
15008 case 'T':
15009 /* Print the symbolic name of a branch target register. */
15010 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
15011 && REGNO (x) != CTR_REGNO))
15012 output_operand_lossage ("invalid %%T value");
15013 else if (REGNO (x) == LR_REGNO)
15014 fputs ("lr", file);
15015 else
15016 fputs ("ctr", file);
15017 return;
15019 case 'u':
15020 /* High-order 16 bits of constant for use in unsigned operand. */
15021 if (! INT_P (x))
15022 output_operand_lossage ("invalid %%u value");
15023 else
15024 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15025 (INT_LOWPART (x) >> 16) & 0xffff);
15026 return;
15028 case 'v':
15029 /* High-order 16 bits of constant for use in signed operand. */
15030 if (! INT_P (x))
15031 output_operand_lossage ("invalid %%v value");
15032 else
15033 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15034 (INT_LOWPART (x) >> 16) & 0xffff);
15035 return;
15037 case 'U':
15038 /* Print `u' if this has an auto-increment or auto-decrement. */
15039 if (MEM_P (x)
15040 && (GET_CODE (XEXP (x, 0)) == PRE_INC
15041 || GET_CODE (XEXP (x, 0)) == PRE_DEC
15042 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
15043 putc ('u', file);
15044 return;
15046 case 'V':
15047 /* Print the trap code for this operand. */
15048 switch (GET_CODE (x))
15050 case EQ:
15051 fputs ("eq", file); /* 4 */
15052 break;
15053 case NE:
15054 fputs ("ne", file); /* 24 */
15055 break;
15056 case LT:
15057 fputs ("lt", file); /* 16 */
15058 break;
15059 case LE:
15060 fputs ("le", file); /* 20 */
15061 break;
15062 case GT:
15063 fputs ("gt", file); /* 8 */
15064 break;
15065 case GE:
15066 fputs ("ge", file); /* 12 */
15067 break;
15068 case LTU:
15069 fputs ("llt", file); /* 2 */
15070 break;
15071 case LEU:
15072 fputs ("lle", file); /* 6 */
15073 break;
15074 case GTU:
15075 fputs ("lgt", file); /* 1 */
15076 break;
15077 case GEU:
15078 fputs ("lge", file); /* 5 */
15079 break;
15080 default:
15081 gcc_unreachable ();
15083 break;
15085 case 'w':
15086 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
15087 normally. */
15088 if (INT_P (x))
15089 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
15090 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
15091 else
15092 print_operand (file, x, 0);
15093 return;
15095 case 'W':
15096 /* MB value for a PowerPC64 rldic operand. */
15097 i = clz_hwi (GET_CODE (x) == CONST_INT
15098 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
15100 #if HOST_BITS_PER_WIDE_INT == 32
15101 if (GET_CODE (x) == CONST_INT && i > 0)
15102 i += 32; /* zero-extend high-part was all 0's */
15103 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
15104 i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
15105 #endif
15107 fprintf (file, "%d", i);
15108 return;
15110 case 'x':
15111 /* X is a FPR or Altivec register used in a VSX context. */
15112 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
15113 output_operand_lossage ("invalid %%x value");
15114 else
15116 int reg = REGNO (x);
15117 int vsx_reg = (FP_REGNO_P (reg)
15118 ? reg - 32
15119 : reg - FIRST_ALTIVEC_REGNO + 32);
15121 #ifdef TARGET_REGNAMES
15122 if (TARGET_REGNAMES)
15123 fprintf (file, "%%vs%d", vsx_reg);
15124 else
15125 #endif
15126 fprintf (file, "%d", vsx_reg);
15128 return;
15130 case 'X':
15131 if (MEM_P (x)
15132 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
15133 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
15134 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
15135 putc ('x', file);
15136 return;
15138 case 'Y':
15139 /* Like 'L', for third word of TImode. */
15140 if (REG_P (x))
15141 fputs (reg_names[REGNO (x) + 2], file);
15142 else if (MEM_P (x))
15144 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15145 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15146 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15147 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15148 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15149 else
15150 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
15151 if (small_data_operand (x, GET_MODE (x)))
15152 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15153 reg_names[SMALL_DATA_REG]);
15155 return;
15157 case 'z':
15158 /* X is a SYMBOL_REF. Write out the name preceded by a
15159 period and without any trailing data in brackets. Used for function
15160 names. If we are configured for System V (or the embedded ABI) on
15161 the PowerPC, do not emit the period, since those systems do not use
15162 TOCs and the like. */
15163 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15165 /* Mark the decl as referenced so that cgraph will output the
15166 function. */
15167 if (SYMBOL_REF_DECL (x))
15168 mark_decl_referenced (SYMBOL_REF_DECL (x));
15170 /* For macho, check to see if we need a stub. */
15171 if (TARGET_MACHO)
15173 const char *name = XSTR (x, 0);
15174 #if TARGET_MACHO
15175 if (darwin_emit_branch_islands
15176 && MACHOPIC_INDIRECT
15177 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
15178 name = machopic_indirection_name (x, /*stub_p=*/true);
15179 #endif
15180 assemble_name (file, name);
15182 else if (!DOT_SYMBOLS)
15183 assemble_name (file, XSTR (x, 0));
15184 else
15185 rs6000_output_function_entry (file, XSTR (x, 0));
15186 return;
15188 case 'Z':
15189 /* Like 'L', for last word of TImode. */
15190 if (REG_P (x))
15191 fputs (reg_names[REGNO (x) + 3], file);
15192 else if (MEM_P (x))
15194 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15195 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15196 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15197 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15198 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15199 else
15200 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15201 if (small_data_operand (x, GET_MODE (x)))
15202 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15203 reg_names[SMALL_DATA_REG]);
15205 return;
15207 /* Print AltiVec or SPE memory operand. */
15208 case 'y':
15210 rtx tmp;
15212 gcc_assert (MEM_P (x));
15214 tmp = XEXP (x, 0);
15216 /* Ugly hack because %y is overloaded. */
15217 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15218 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15219 || GET_MODE (x) == TFmode
15220 || GET_MODE (x) == TImode))
15222 /* Handle [reg]. */
15223 if (REG_P (tmp))
15225 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15226 break;
15228 /* Handle [reg+UIMM]. */
15229 else if (GET_CODE (tmp) == PLUS &&
15230 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15232 int x;
15234 gcc_assert (REG_P (XEXP (tmp, 0)));
15236 x = INTVAL (XEXP (tmp, 1));
15237 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
15238 break;
15241 /* Fall through. Must be [reg+reg]. */
15243 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15244 && GET_CODE (tmp) == AND
15245 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15246 && INTVAL (XEXP (tmp, 1)) == -16)
15247 tmp = XEXP (tmp, 0);
15248 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15249 && GET_CODE (tmp) == PRE_MODIFY)
15250 tmp = XEXP (tmp, 1);
15251 if (REG_P (tmp))
15252 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15253 else
15255 if (GET_CODE (tmp) != PLUS
15256 || !REG_P (XEXP (tmp, 0))
15257 || !REG_P (XEXP (tmp, 1)))
15259 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15260 break;
15263 if (REGNO (XEXP (tmp, 0)) == 0)
15264 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15265 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15266 else
15267 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15268 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15270 break;
15273 case 0:
15274 if (REG_P (x))
15275 fprintf (file, "%s", reg_names[REGNO (x)]);
15276 else if (MEM_P (x))
15278 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15279 know the width from the mode. */
15280 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15281 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15282 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15283 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15284 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15285 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15286 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15287 output_address (XEXP (XEXP (x, 0), 1));
15288 else
15289 output_address (XEXP (x, 0));
15291 else
15293 if (toc_relative_expr_p (x, false))
15294 /* This hack along with a corresponding hack in
15295 rs6000_output_addr_const_extra arranges to output addends
15296 where the assembler expects to find them. eg.
15297 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15298 without this hack would be output as "x@toc+4". We
15299 want "x+4@toc". */
15300 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15301 else
15302 output_addr_const (file, x);
15304 return;
15306 case '&':
15307 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15308 return;
15310 default:
15311 output_operand_lossage ("invalid %%xn code");
15315 /* Print the address of an operand. */
15317 void
15318 print_operand_address (FILE *file, rtx x)
15320 if (REG_P (x))
15321 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15322 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15323 || GET_CODE (x) == LABEL_REF)
15325 output_addr_const (file, x);
15326 if (small_data_operand (x, GET_MODE (x)))
15327 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15328 reg_names[SMALL_DATA_REG]);
15329 else
15330 gcc_assert (!TARGET_TOC);
15332 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15333 && REG_P (XEXP (x, 1)))
15335 if (REGNO (XEXP (x, 0)) == 0)
15336 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15337 reg_names[ REGNO (XEXP (x, 0)) ]);
15338 else
15339 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15340 reg_names[ REGNO (XEXP (x, 1)) ]);
15342 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15343 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15344 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15345 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15346 #if TARGET_MACHO
15347 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15348 && CONSTANT_P (XEXP (x, 1)))
15350 fprintf (file, "lo16(");
15351 output_addr_const (file, XEXP (x, 1));
15352 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15354 #endif
15355 #if TARGET_ELF
15356 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15357 && CONSTANT_P (XEXP (x, 1)))
15359 output_addr_const (file, XEXP (x, 1));
15360 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15362 #endif
15363 else if (toc_relative_expr_p (x, false))
15365 /* This hack along with a corresponding hack in
15366 rs6000_output_addr_const_extra arranges to output addends
15367 where the assembler expects to find them. eg.
15368 (lo_sum (reg 9)
15369 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15370 without this hack would be output as "x@toc+8@l(9)". We
15371 want "x+8@toc@l(9)". */
15372 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15373 if (GET_CODE (x) == LO_SUM)
15374 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15375 else
15376 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15378 else
15379 gcc_unreachable ();
15382 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15384 static bool
15385 rs6000_output_addr_const_extra (FILE *file, rtx x)
15387 if (GET_CODE (x) == UNSPEC)
15388 switch (XINT (x, 1))
15390 case UNSPEC_TOCREL:
15391 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15392 && REG_P (XVECEXP (x, 0, 1))
15393 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15394 output_addr_const (file, XVECEXP (x, 0, 0));
15395 if (x == tocrel_base && tocrel_offset != const0_rtx)
15397 if (INTVAL (tocrel_offset) >= 0)
15398 fprintf (file, "+");
15399 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15401 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15403 putc ('-', file);
15404 assemble_name (file, toc_label_name);
15406 else if (TARGET_ELF)
15407 fputs ("@toc", file);
15408 return true;
15410 #if TARGET_MACHO
15411 case UNSPEC_MACHOPIC_OFFSET:
15412 output_addr_const (file, XVECEXP (x, 0, 0));
15413 putc ('-', file);
15414 machopic_output_function_base_name (file);
15415 return true;
15416 #endif
15418 return false;
15421 /* Target hook for assembling integer objects. The PowerPC version has
15422 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15423 is defined. It also needs to handle DI-mode objects on 64-bit
15424 targets. */
15426 static bool
15427 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15429 #ifdef RELOCATABLE_NEEDS_FIXUP
15430 /* Special handling for SI values. */
15431 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15433 static int recurse = 0;
15435 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15436 the .fixup section. Since the TOC section is already relocated, we
15437 don't need to mark it here. We used to skip the text section, but it
15438 should never be valid for relocated addresses to be placed in the text
15439 section. */
15440 if (TARGET_RELOCATABLE
15441 && in_section != toc_section
15442 && !recurse
15443 && GET_CODE (x) != CONST_INT
15444 && GET_CODE (x) != CONST_DOUBLE
15445 && CONSTANT_P (x))
15447 char buf[256];
15449 recurse = 1;
15450 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15451 fixuplabelno++;
15452 ASM_OUTPUT_LABEL (asm_out_file, buf);
15453 fprintf (asm_out_file, "\t.long\t(");
15454 output_addr_const (asm_out_file, x);
15455 fprintf (asm_out_file, ")@fixup\n");
15456 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15457 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15458 fprintf (asm_out_file, "\t.long\t");
15459 assemble_name (asm_out_file, buf);
15460 fprintf (asm_out_file, "\n\t.previous\n");
15461 recurse = 0;
15462 return true;
15464 /* Remove initial .'s to turn a -mcall-aixdesc function
15465 address into the address of the descriptor, not the function
15466 itself. */
15467 else if (GET_CODE (x) == SYMBOL_REF
15468 && XSTR (x, 0)[0] == '.'
15469 && DEFAULT_ABI == ABI_AIX)
15471 const char *name = XSTR (x, 0);
15472 while (*name == '.')
15473 name++;
15475 fprintf (asm_out_file, "\t.long\t%s\n", name);
15476 return true;
15479 #endif /* RELOCATABLE_NEEDS_FIXUP */
15480 return default_assemble_integer (x, size, aligned_p);
15483 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15484 /* Emit an assembler directive to set symbol visibility for DECL to
15485 VISIBILITY_TYPE. */
15487 static void
15488 rs6000_assemble_visibility (tree decl, int vis)
15490 /* Functions need to have their entry point symbol visibility set as
15491 well as their descriptor symbol visibility. */
15492 if (DEFAULT_ABI == ABI_AIX
15493 && DOT_SYMBOLS
15494 && TREE_CODE (decl) == FUNCTION_DECL)
15496 static const char * const visibility_types[] = {
15497 NULL, "internal", "hidden", "protected"
15500 const char *name, *type;
15502 name = ((* targetm.strip_name_encoding)
15503 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15504 type = visibility_types[vis];
15506 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15507 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15509 else
15510 default_assemble_visibility (decl, vis);
15512 #endif
15514 enum rtx_code
15515 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15517 /* Reversal of FP compares takes care -- an ordered compare
15518 becomes an unordered compare and vice versa. */
15519 if (mode == CCFPmode
15520 && (!flag_finite_math_only
15521 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15522 || code == UNEQ || code == LTGT))
15523 return reverse_condition_maybe_unordered (code);
15524 else
15525 return reverse_condition (code);
15528 /* Generate a compare for CODE. Return a brand-new rtx that
15529 represents the result of the compare. */
15531 static rtx
15532 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15534 enum machine_mode comp_mode;
15535 rtx compare_result;
15536 enum rtx_code code = GET_CODE (cmp);
15537 rtx op0 = XEXP (cmp, 0);
15538 rtx op1 = XEXP (cmp, 1);
15540 if (FLOAT_MODE_P (mode))
15541 comp_mode = CCFPmode;
15542 else if (code == GTU || code == LTU
15543 || code == GEU || code == LEU)
15544 comp_mode = CCUNSmode;
15545 else if ((code == EQ || code == NE)
15546 && unsigned_reg_p (op0)
15547 && (unsigned_reg_p (op1)
15548 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
15549 /* These are unsigned values, perhaps there will be a later
15550 ordering compare that can be shared with this one. */
15551 comp_mode = CCUNSmode;
15552 else
15553 comp_mode = CCmode;
15555 /* If we have an unsigned compare, make sure we don't have a signed value as
15556 an immediate. */
15557 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
15558 && INTVAL (op1) < 0)
15560 op0 = copy_rtx_if_shared (op0);
15561 op1 = force_reg (GET_MODE (op0), op1);
15562 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
15565 /* First, the compare. */
15566 compare_result = gen_reg_rtx (comp_mode);
15568 /* E500 FP compare instructions on the GPRs. Yuck! */
15569 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15570 && FLOAT_MODE_P (mode))
15572 rtx cmp, or_result, compare_result2;
15573 enum machine_mode op_mode = GET_MODE (op0);
15575 if (op_mode == VOIDmode)
15576 op_mode = GET_MODE (op1);
15578 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15579 This explains the following mess. */
15581 switch (code)
15583 case EQ: case UNEQ: case NE: case LTGT:
15584 switch (op_mode)
15586 case SFmode:
15587 cmp = (flag_finite_math_only && !flag_trapping_math)
15588 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15589 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15590 break;
15592 case DFmode:
15593 cmp = (flag_finite_math_only && !flag_trapping_math)
15594 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15595 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15596 break;
15598 case TFmode:
15599 cmp = (flag_finite_math_only && !flag_trapping_math)
15600 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15601 : gen_cmptfeq_gpr (compare_result, op0, op1);
15602 break;
15604 default:
15605 gcc_unreachable ();
15607 break;
15609 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15610 switch (op_mode)
15612 case SFmode:
15613 cmp = (flag_finite_math_only && !flag_trapping_math)
15614 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15615 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15616 break;
15618 case DFmode:
15619 cmp = (flag_finite_math_only && !flag_trapping_math)
15620 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15621 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15622 break;
15624 case TFmode:
15625 cmp = (flag_finite_math_only && !flag_trapping_math)
15626 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15627 : gen_cmptfgt_gpr (compare_result, op0, op1);
15628 break;
15630 default:
15631 gcc_unreachable ();
15633 break;
15635 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15636 switch (op_mode)
15638 case SFmode:
15639 cmp = (flag_finite_math_only && !flag_trapping_math)
15640 ? gen_tstsflt_gpr (compare_result, op0, op1)
15641 : gen_cmpsflt_gpr (compare_result, op0, op1);
15642 break;
15644 case DFmode:
15645 cmp = (flag_finite_math_only && !flag_trapping_math)
15646 ? gen_tstdflt_gpr (compare_result, op0, op1)
15647 : gen_cmpdflt_gpr (compare_result, op0, op1);
15648 break;
15650 case TFmode:
15651 cmp = (flag_finite_math_only && !flag_trapping_math)
15652 ? gen_tsttflt_gpr (compare_result, op0, op1)
15653 : gen_cmptflt_gpr (compare_result, op0, op1);
15654 break;
15656 default:
15657 gcc_unreachable ();
15659 break;
15660 default:
15661 gcc_unreachable ();
15664 /* Synthesize LE and GE from LT/GT || EQ. */
15665 if (code == LE || code == GE || code == LEU || code == GEU)
15667 emit_insn (cmp);
15669 switch (code)
15671 case LE: code = LT; break;
15672 case GE: code = GT; break;
15673 case LEU: code = LT; break;
15674 case GEU: code = GT; break;
15675 default: gcc_unreachable ();
15678 compare_result2 = gen_reg_rtx (CCFPmode);
15680 /* Do the EQ. */
15681 switch (op_mode)
15683 case SFmode:
15684 cmp = (flag_finite_math_only && !flag_trapping_math)
15685 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15686 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15687 break;
15689 case DFmode:
15690 cmp = (flag_finite_math_only && !flag_trapping_math)
15691 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15692 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15693 break;
15695 case TFmode:
15696 cmp = (flag_finite_math_only && !flag_trapping_math)
15697 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15698 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15699 break;
15701 default:
15702 gcc_unreachable ();
15704 emit_insn (cmp);
15706 /* OR them together. */
15707 or_result = gen_reg_rtx (CCFPmode);
15708 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15709 compare_result2);
15710 compare_result = or_result;
15711 code = EQ;
15713 else
15715 if (code == NE || code == LTGT)
15716 code = NE;
15717 else
15718 code = EQ;
15721 emit_insn (cmp);
15723 else
15725 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15726 CLOBBERs to match cmptf_internal2 pattern. */
15727 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15728 && GET_MODE (op0) == TFmode
15729 && !TARGET_IEEEQUAD
15730 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15731 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15732 gen_rtvec (10,
15733 gen_rtx_SET (VOIDmode,
15734 compare_result,
15735 gen_rtx_COMPARE (comp_mode, op0, op1)),
15736 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15737 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15738 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15739 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15740 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15741 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15742 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15743 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15744 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
15745 else if (GET_CODE (op1) == UNSPEC
15746 && XINT (op1, 1) == UNSPEC_SP_TEST)
15748 rtx op1b = XVECEXP (op1, 0, 0);
15749 comp_mode = CCEQmode;
15750 compare_result = gen_reg_rtx (CCEQmode);
15751 if (TARGET_64BIT)
15752 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15753 else
15754 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15756 else
15757 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15758 gen_rtx_COMPARE (comp_mode, op0, op1)));
15761 /* Some kinds of FP comparisons need an OR operation;
15762 under flag_finite_math_only we don't bother. */
15763 if (FLOAT_MODE_P (mode)
15764 && !flag_finite_math_only
15765 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15766 && (code == LE || code == GE
15767 || code == UNEQ || code == LTGT
15768 || code == UNGT || code == UNLT))
15770 enum rtx_code or1, or2;
15771 rtx or1_rtx, or2_rtx, compare2_rtx;
15772 rtx or_result = gen_reg_rtx (CCEQmode);
15774 switch (code)
15776 case LE: or1 = LT; or2 = EQ; break;
15777 case GE: or1 = GT; or2 = EQ; break;
15778 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15779 case LTGT: or1 = LT; or2 = GT; break;
15780 case UNGT: or1 = UNORDERED; or2 = GT; break;
15781 case UNLT: or1 = UNORDERED; or2 = LT; break;
15782 default: gcc_unreachable ();
15784 validate_condition_mode (or1, comp_mode);
15785 validate_condition_mode (or2, comp_mode);
15786 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15787 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15788 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15789 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15790 const_true_rtx);
15791 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15793 compare_result = or_result;
15794 code = EQ;
15797 validate_condition_mode (code, GET_MODE (compare_result));
15799 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
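/* Summing up the OR lowering above: e.g. a NaN-honoring "a <= b"
   becomes a cror of the LT and EQ condition-register bits, and the
   final result is then tested as a single CCEQ-mode equality. */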
15803 /* Emit the RTL for an sISEL pattern. */
15805 void
15806 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
15808 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
15811 void
15812 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15814 rtx condition_rtx;
15815 enum machine_mode op_mode;
15816 enum rtx_code cond_code;
15817 rtx result = operands[0];
15819 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15821 rs6000_emit_sISEL (mode, operands);
15822 return;
15825 condition_rtx = rs6000_generate_compare (operands[1], mode);
15826 cond_code = GET_CODE (condition_rtx);
15828 if (FLOAT_MODE_P (mode)
15829 && !TARGET_FPRS && TARGET_HARD_FLOAT)
15831 rtx t;
15833 PUT_MODE (condition_rtx, SImode);
15834 t = XEXP (condition_rtx, 0);
15836 gcc_assert (cond_code == NE || cond_code == EQ);
15838 if (cond_code == NE)
15839 emit_insn (gen_e500_flip_gt_bit (t, t));
15841 emit_insn (gen_move_from_CR_gt_bit (result, t));
15842 return;
15845 if (cond_code == NE
15846 || cond_code == GE || cond_code == LE
15847 || cond_code == GEU || cond_code == LEU
15848 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
15850 rtx not_result = gen_reg_rtx (CCEQmode);
15851 rtx not_op, rev_cond_rtx;
15852 enum machine_mode cc_mode;
15854 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
15856 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
15857 SImode, XEXP (condition_rtx, 0), const0_rtx);
15858 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
15859 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
15860 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
15863 op_mode = GET_MODE (XEXP (operands[1], 0));
15864 if (op_mode == VOIDmode)
15865 op_mode = GET_MODE (XEXP (operands[1], 1));
15867 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
15869 PUT_MODE (condition_rtx, DImode);
15870 convert_move (result, condition_rtx, 0);
15872 else
15874 PUT_MODE (condition_rtx, SImode);
15875 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
15879 /* Emit a conditional branch: operands[0] is the comparison and
15880 operands[3] the target label. */
15881 void
15882 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
15884 rtx condition_rtx, loc_ref;
15886 condition_rtx = rs6000_generate_compare (operands[0], mode);
15887 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
15888 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
15889 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
15890 loc_ref, pc_rtx)));
15893 /* Return the string to output a conditional branch to LABEL, which is
15894 the operand number of the label, or -1 if the branch is really a
15895 conditional return.
15897 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
15898 condition code register and its mode specifies what kind of
15899 comparison we made.
15901 REVERSED is nonzero if we should reverse the sense of the comparison.
15903 INSN is the insn. */
15905 char *
15906 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
15908 static char string[64];
15909 enum rtx_code code = GET_CODE (op);
15910 rtx cc_reg = XEXP (op, 0);
15911 enum machine_mode mode = GET_MODE (cc_reg);
15912 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
15913 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
15914 int really_reversed = reversed ^ need_longbranch;
15915 char *s = string;
15916 const char *ccode;
15917 const char *pred;
15918 rtx note;
15920 validate_condition_mode (code, mode);
15922 /* Work out which way this really branches. We could use
15923 reverse_condition_maybe_unordered here always but this
15924 makes the resulting assembler clearer. */
15925 if (really_reversed)
15927 /* Reversal of FP compares takes care -- an ordered compare
15928 becomes an unordered compare and vice versa. */
15929 if (mode == CCFPmode)
15930 code = reverse_condition_maybe_unordered (code);
15931 else
15932 code = reverse_condition (code);
15935 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
15937 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
15938 to the GT bit. */
15939 switch (code)
15941 case EQ:
15942 /* Opposite of GT. */
15943 code = GT;
15944 break;
15946 case NE:
15947 code = UNLE;
15948 break;
15950 default:
15951 gcc_unreachable ();
15955 switch (code)
15957 /* Not all of these are actually distinct opcodes, but
15958 we distinguish them for clarity of the resulting assembler. */
15959 case NE: case LTGT:
15960 ccode = "ne"; break;
15961 case EQ: case UNEQ:
15962 ccode = "eq"; break;
15963 case GE: case GEU:
15964 ccode = "ge"; break;
15965 case GT: case GTU: case UNGT:
15966 ccode = "gt"; break;
15967 case LE: case LEU:
15968 ccode = "le"; break;
15969 case LT: case LTU: case UNLT:
15970 ccode = "lt"; break;
15971 case UNORDERED: ccode = "un"; break;
15972 case ORDERED: ccode = "nu"; break;
15973 case UNGE: ccode = "nl"; break;
15974 case UNLE: ccode = "ng"; break;
15975 default:
15976 gcc_unreachable ();
15979 /* Maybe we have a guess as to how likely the branch is. */
15980 pred = "";
15981 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
15982 if (note != NULL_RTX)
15984 /* PROB is the difference from 50%. */
15985 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
15987 /* Only hint for highly probable/improbable branches on newer
15988 cpus as static prediction overrides processor dynamic
15989 prediction. For older cpus we may as well always hint, but
15990 assume not taken for branches that are very close to 50% as a
15991 mispredicted taken branch is more expensive than a
15992 mispredicted not-taken branch. */
15993 if (rs6000_always_hint
15994 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
15995 && br_prob_note_reliable_p (note)))
15997 if (abs (prob) > REG_BR_PROB_BASE / 20
15998 && ((prob > 0) ^ need_longbranch))
15999 pred = "+";
16000 else
16001 pred = "-";
16005 if (label == NULL)
16006 s += sprintf (s, "b%slr%s ", ccode, pred);
16007 else
16008 s += sprintf (s, "b%s%s ", ccode, pred);
16010 /* We need to escape any '%' characters in the reg_names string.
16011 Assume they'd only be the first character.... */
16012 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
16013 *s++ = '%';
16014 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
16016 if (label != NULL)
16018 /* If the branch distance was too far, we may have to use an
16019 unconditional branch to go the distance. */
16020 if (need_longbranch)
16021 s += sprintf (s, ",$+8\n\tb %s", label);
16022 else
16023 s += sprintf (s, ",%s", label);
16026 return string;
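/* With the default (numeric) condition register names, a predicted-
   taken equality branch on cr0 comes out roughly as "beq+ 0,.L2"; a
   too-distant branch instead becomes "bne 0,$+8" followed by
   "b .L2". */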
16029 /* Return the string to flip the GT bit on a CR. */
16030 char *
16031 output_e500_flip_gt_bit (rtx dst, rtx src)
16033 static char string[64];
16034 int a, b;
16036 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
16037 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
16039 /* GT bit. */
16040 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
16041 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
16043 sprintf (string, "crnot %d,%d", a, b);
16044 return string;
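/* E.g., with dst in cr2 and src in cr3 this returns "crnot 9,13",
   i.e. CR bit 9 (cr2's GT bit) gets the complement of bit 13. */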
16047 /* Return insn for VSX or Altivec comparisons. */
16049 static rtx
16050 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
16052 rtx mask;
16053 enum machine_mode mode = GET_MODE (op0);
16055 switch (code)
16057 default:
16058 break;
16060 case GE:
16061 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16062 return NULL_RTX;
16064 case EQ:
16065 case GT:
16066 case GTU:
16067 case ORDERED:
16068 case UNORDERED:
16069 case UNEQ:
16070 case LTGT:
16071 mask = gen_reg_rtx (mode);
16072 emit_insn (gen_rtx_SET (VOIDmode,
16073 mask,
16074 gen_rtx_fmt_ee (code, mode, op0, op1)));
16075 return mask;
16078 return NULL_RTX;
16081 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
16082 DMODE is expected destination mode. This is a recursive function. */
16084 static rtx
16085 rs6000_emit_vector_compare (enum rtx_code rcode,
16086 rtx op0, rtx op1,
16087 enum machine_mode dmode)
16089 rtx mask;
16090 bool swap_operands = false;
16091 bool try_again = false;
16093 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
16094 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
16096 /* See if the comparison works as is. */
16097 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16098 if (mask)
16099 return mask;
16101 switch (rcode)
16103 case LT:
16104 rcode = GT;
16105 swap_operands = true;
16106 try_again = true;
16107 break;
16108 case LTU:
16109 rcode = GTU;
16110 swap_operands = true;
16111 try_again = true;
16112 break;
16113 case NE:
16114 case UNLE:
16115 case UNLT:
16116 case UNGE:
16117 case UNGT:
16118 /* Invert condition and try again.
16119 e.g., A != B becomes ~(A==B). */
16121 enum rtx_code rev_code;
16122 enum insn_code nor_code;
16123 rtx mask2;
16125 rev_code = reverse_condition_maybe_unordered (rcode);
16126 if (rev_code == UNKNOWN)
16127 return NULL_RTX;
16129 nor_code = optab_handler (one_cmpl_optab, dmode);
16130 if (nor_code == CODE_FOR_nothing)
16131 return NULL_RTX;
16133 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
16134 if (!mask2)
16135 return NULL_RTX;
16137 mask = gen_reg_rtx (dmode);
16138 emit_insn (GEN_FCN (nor_code) (mask, mask2));
16139 return mask;
16141 break;
16142 case GE:
16143 case GEU:
16144 case LE:
16145 case LEU:
16146 /* Try GT/GTU/LT/LTU OR EQ */
16148 rtx c_rtx, eq_rtx;
16149 enum insn_code ior_code;
16150 enum rtx_code new_code;
16152 switch (rcode)
16154 case GE:
16155 new_code = GT;
16156 break;
16158 case GEU:
16159 new_code = GTU;
16160 break;
16162 case LE:
16163 new_code = LT;
16164 break;
16166 case LEU:
16167 new_code = LTU;
16168 break;
16170 default:
16171 gcc_unreachable ();
16174 ior_code = optab_handler (ior_optab, dmode);
16175 if (ior_code == CODE_FOR_nothing)
16176 return NULL_RTX;
16178 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
16179 if (!c_rtx)
16180 return NULL_RTX;
16182 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
16183 if (!eq_rtx)
16184 return NULL_RTX;
16186 mask = gen_reg_rtx (dmode);
16187 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
16188 return mask;
16190 break;
16191 default:
16192 return NULL_RTX;
16195 if (try_again)
16197 if (swap_operands)
16199 rtx tmp;
16200 tmp = op0;
16201 op0 = op1;
16202 op1 = tmp;
16205 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16206 if (mask)
16207 return mask;
16210 /* You only get two chances. */
16211 return NULL_RTX;
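/* Tracing the rewrites above: a V4SI LE is built as (LT | EQ), the LT
   in turn becomes GT with the operands swapped, and NE is synthesized
   as the one's complement of EQ. */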
16214 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16215 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16216 operands for the relation operation COND. */
16218 int
16219 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16220 rtx cond, rtx cc_op0, rtx cc_op1)
16222 enum machine_mode dest_mode = GET_MODE (dest);
16223 enum machine_mode mask_mode = GET_MODE (cc_op0);
16224 enum rtx_code rcode = GET_CODE (cond);
16225 enum machine_mode cc_mode = CCmode;
16226 rtx mask;
16227 rtx cond2;
16228 rtx tmp;
16229 bool invert_move = false;
16231 if (VECTOR_UNIT_NONE_P (dest_mode))
16232 return 0;
16234 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16235 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16237 switch (rcode)
16239 /* Swap operands if we can, and fall back to doing the operation as
16240 specified, and doing a NOR to invert the test. */
16241 case NE:
16242 case UNLE:
16243 case UNLT:
16244 case UNGE:
16245 case UNGT:
16246 /* Invert condition and try again.
16247 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16248 invert_move = true;
16249 rcode = reverse_condition_maybe_unordered (rcode);
16250 if (rcode == UNKNOWN)
16251 return 0;
16252 break;
16254 /* Mark unsigned tests with CCUNSmode. */
16255 case GTU:
16256 case GEU:
16257 case LTU:
16258 case LEU:
16259 cc_mode = CCUNSmode;
16260 break;
16262 default:
16263 break;
16266 /* Get the vector mask for the given relational operations. */
16267 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16269 if (!mask)
16270 return 0;
16272 if (invert_move)
16274 tmp = op_true;
16275 op_true = op_false;
16276 op_false = tmp;
16279 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16280 CONST0_RTX (dest_mode));
16281 emit_insn (gen_rtx_SET (VOIDmode,
16282 dest,
16283 gen_rtx_IF_THEN_ELSE (dest_mode,
16284 cond2,
16285 op_true,
16286 op_false)));
16287 return 1;
16290 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
16291 operands of the last comparison is nonzero/true, FALSE_COND if it
16292 is zero/false. Return 0 if the hardware has no such operation. */
16294 int
16295 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16297 enum rtx_code code = GET_CODE (op);
16298 rtx op0 = XEXP (op, 0);
16299 rtx op1 = XEXP (op, 1);
16300 REAL_VALUE_TYPE c1;
16301 enum machine_mode compare_mode = GET_MODE (op0);
16302 enum machine_mode result_mode = GET_MODE (dest);
16303 rtx temp;
16304 bool is_against_zero;
16306 /* These modes should always match. */
16307 if (GET_MODE (op1) != compare_mode
16308 /* In the isel case however, we can use a compare immediate, so
16309 op1 may be a small constant. */
16310 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16311 return 0;
16312 if (GET_MODE (true_cond) != result_mode)
16313 return 0;
16314 if (GET_MODE (false_cond) != result_mode)
16315 return 0;
16317 /* Don't allow using floating point comparisons for integer results for
16318 now. */
16319 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
16320 return 0;
16322 /* First, work out if the hardware can do this at all, or
16323 if it's too slow.... */
16324 if (!FLOAT_MODE_P (compare_mode))
16326 if (TARGET_ISEL)
16327 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16328 return 0;
16330 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16331 && SCALAR_FLOAT_MODE_P (compare_mode))
16332 return 0;
16334 is_against_zero = op1 == CONST0_RTX (compare_mode);
16336 /* A floating-point subtract might overflow, underflow, or produce
16337 an inexact result, thus changing the floating-point flags, so it
16338 can't be generated if we care about that. It's safe if one side
16339 of the construct is zero, since then no subtract will be
16340 generated. */
16341 if (SCALAR_FLOAT_MODE_P (compare_mode)
16342 && flag_trapping_math && ! is_against_zero)
16343 return 0;
16345 /* Eliminate half of the comparisons by switching operands, this
16346 makes the remaining code simpler. */
16347 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16348 || code == LTGT || code == LT || code == UNLE)
16350 code = reverse_condition_maybe_unordered (code);
16351 temp = true_cond;
16352 true_cond = false_cond;
16353 false_cond = temp;
16356 /* UNEQ and LTGT take four instructions for a comparison with zero,
16357 so it'll probably be faster to use a branch here too. */
16358 if (code == UNEQ && HONOR_NANS (compare_mode))
16359 return 0;
16361 if (GET_CODE (op1) == CONST_DOUBLE)
16362 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16364 /* We're going to try to implement comparisons by performing
16365 a subtract, then comparing against zero. Unfortunately,
16366 Inf - Inf is NaN which is not zero, and so if we don't
16367 know that the operand is finite and the comparison
16368 would treat EQ different to UNORDERED, we can't do it. */
16369 if (HONOR_INFINITIES (compare_mode)
16370 && code != GT && code != UNGE
16371 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16372 /* Constructs of the form (a OP b ? a : b) are safe. */
16373 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16374 || (! rtx_equal_p (op0, true_cond)
16375 && ! rtx_equal_p (op1, true_cond))))
16376 return 0;
16378 /* At this point we know we can use fsel. */
16380 /* Reduce the comparison to a comparison against zero. */
16381 if (! is_against_zero)
16383 temp = gen_reg_rtx (compare_mode);
16384 emit_insn (gen_rtx_SET (VOIDmode, temp,
16385 gen_rtx_MINUS (compare_mode, op0, op1)));
16386 op0 = temp;
16387 op1 = CONST0_RTX (compare_mode);
16390 /* If we don't care about NaNs we can reduce some of the comparisons
16391 down to faster ones. */
16392 if (! HONOR_NANS (compare_mode))
16393 switch (code)
16395 case GT:
16396 code = LE;
16397 temp = true_cond;
16398 true_cond = false_cond;
16399 false_cond = temp;
16400 break;
16401 case UNGE:
16402 code = GE;
16403 break;
16404 case UNEQ:
16405 code = EQ;
16406 break;
16407 default:
16408 break;
16411 /* Now, reduce everything down to a GE. */
16412 switch (code)
16414 case GE:
16415 break;
16417 case LE:
16418 temp = gen_reg_rtx (compare_mode);
16419 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16420 op0 = temp;
16421 break;
16423 case ORDERED:
16424 temp = gen_reg_rtx (compare_mode);
16425 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16426 op0 = temp;
16427 break;
16429 case EQ:
16430 temp = gen_reg_rtx (compare_mode);
16431 emit_insn (gen_rtx_SET (VOIDmode, temp,
16432 gen_rtx_NEG (compare_mode,
16433 gen_rtx_ABS (compare_mode, op0))));
16434 op0 = temp;
16435 break;
16437 case UNGE:
16438 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16439 temp = gen_reg_rtx (result_mode);
16440 emit_insn (gen_rtx_SET (VOIDmode, temp,
16441 gen_rtx_IF_THEN_ELSE (result_mode,
16442 gen_rtx_GE (VOIDmode,
16443 op0, op1),
16444 true_cond, false_cond)));
16445 false_cond = true_cond;
16446 true_cond = temp;
16448 temp = gen_reg_rtx (compare_mode);
16449 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16450 op0 = temp;
16451 break;
16453 case GT:
16454 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16455 temp = gen_reg_rtx (result_mode);
16456 emit_insn (gen_rtx_SET (VOIDmode, temp,
16457 gen_rtx_IF_THEN_ELSE (result_mode,
16458 gen_rtx_GE (VOIDmode,
16459 op0, op1),
16460 true_cond, false_cond)));
16461 true_cond = false_cond;
16462 false_cond = temp;
16464 temp = gen_reg_rtx (compare_mode);
16465 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16466 op0 = temp;
16467 break;
16469 default:
16470 gcc_unreachable ();
16473 emit_insn (gen_rtx_SET (VOIDmode, dest,
16474 gen_rtx_IF_THEN_ELSE (result_mode,
16475 gen_rtx_GE (VOIDmode,
16476 op0, op1),
16477 true_cond, false_cond)));
16478 return 1;
16481 /* Same as above, but for ints (isel). */
16483 static int
16484 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16486 rtx condition_rtx, cr;
16487 enum machine_mode mode = GET_MODE (dest);
16488 enum rtx_code cond_code;
16489 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
16490 bool signedp;
16492 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16493 return 0;
16495 /* We still have to do the compare, because isel doesn't do a
16496 compare, it just looks at the CRx bits set by a previous compare
16497 instruction. */
16498 condition_rtx = rs6000_generate_compare (op, mode);
16499 cond_code = GET_CODE (condition_rtx);
16500 cr = XEXP (condition_rtx, 0);
16501 signedp = GET_MODE (cr) == CCmode;
16503 isel_func = (mode == SImode
16504 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
16505 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
16507 switch (cond_code)
16509 case LT: case GT: case LTU: case GTU: case EQ:
16510 /* isel handles these directly. */
16511 break;
16513 default:
16514 /* We need to swap the sense of the comparison. */
16516 rtx t = true_cond;
16517 true_cond = false_cond;
16518 false_cond = t;
16519 PUT_CODE (condition_rtx, reverse_condition (cond_code));
16521 break;
16524 false_cond = force_reg (mode, false_cond);
16525 if (true_cond != const0_rtx)
16526 true_cond = force_reg (mode, true_cond);
16528 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
16530 return 1;
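/* For example (registers illustrative), an SImode r = (a < b) ? x : y
   is expected to become one compare plus one isel:

	cmpw	cr0,ra,rb
	isel	rd,rx,ry,lt	# rd = cr0.lt ? x : y

   Codes isel cannot test directly are handled above by reversing the
   condition and swapping the two data operands.  */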
16533 const char *
16534 output_isel (rtx *operands)
16536 enum rtx_code code;
16538 code = GET_CODE (operands[1]);
16540 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16542 gcc_assert (GET_CODE (operands[2]) == REG
16543 && GET_CODE (operands[3]) == REG);
16544 PUT_CODE (operands[1], reverse_condition (code));
16545 return "isel %0,%3,%2,%j1";
16548 return "isel %0,%2,%3,%j1";
16551 void
16552 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16554 enum machine_mode mode = GET_MODE (op0);
16555 enum rtx_code c;
16556 rtx target;
16558 /* VSX/altivec have direct min/max insns. */
16559 if ((code == SMAX || code == SMIN)
16560 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16561 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
16563 emit_insn (gen_rtx_SET (VOIDmode,
16564 dest,
16565 gen_rtx_fmt_ee (code, mode, op0, op1)));
16566 return;
16569 if (code == SMAX || code == SMIN)
16570 c = GE;
16571 else
16572 c = GEU;
16574 if (code == SMAX || code == UMAX)
16575 target = emit_conditional_move (dest, c, op0, op1, mode,
16576 op0, op1, mode, 0);
16577 else
16578 target = emit_conditional_move (dest, c, op0, op1, mode,
16579 op1, op0, mode, 0);
16580 gcc_assert (target);
16581 if (target != dest)
16582 emit_move_insn (dest, target);
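/* E.g. rs6000_emit_minmax (dest, SMAX, a, b) on scalar DFmode without
   VSX reduces to the conditional move a >= b ? a : b, which then goes
   through the fsel path sketched earlier.  */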
16585 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16586 COND is true. Mark the jump as unlikely to be taken. */
16588 static void
16589 emit_unlikely_jump (rtx cond, rtx label)
16591 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16592 rtx x;
16594 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16595 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16596 add_reg_note (x, REG_BR_PROB, very_unlikely);
16599 /* A subroutine of the atomic operation splitters. Emit a load-locked
16600 instruction in MODE. */
16602 static void
16603 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16605 rtx (*fn) (rtx, rtx) = NULL;
16607 switch (mode)
16609 case SImode:
16610 fn = gen_load_lockedsi;
16611 break;
16612 case DImode:
16613 fn = gen_load_lockeddi;
16614 break;
16615 default:
16616 gcc_unreachable ();
16618 emit_insn (fn (reg, mem));
16621 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16622 instruction in MODE. */
16624 static void
16625 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16627 rtx (*fn) (rtx, rtx, rtx) = NULL;
16629 switch (mode)
16631 case SImode:
16632 fn = gen_store_conditionalsi;
16633 break;
16634 case DImode:
16635 fn = gen_store_conditionaldi;
16636 break;
16637 default:
16638 gcc_unreachable ();
16641 /* Emit sync before stwcx. to address PPC405 Erratum. */
16642 if (PPC405_ERRATUM77)
16643 emit_insn (gen_hwsync ());
16645 emit_insn (fn (res, mem, val));
16648 /* Expand barriers before and after a load_locked/store_cond sequence. */
16650 static rtx
16651 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
16653 rtx addr = XEXP (mem, 0);
16654 int strict_p = (reload_in_progress || reload_completed);
16656 if (!legitimate_indirect_address_p (addr, strict_p)
16657 && !legitimate_indexed_address_p (addr, strict_p))
16659 addr = force_reg (Pmode, addr);
16660 mem = replace_equiv_address_nv (mem, addr);
16663 switch (model)
16665 case MEMMODEL_RELAXED:
16666 case MEMMODEL_CONSUME:
16667 case MEMMODEL_ACQUIRE:
16668 break;
16669 case MEMMODEL_RELEASE:
16670 case MEMMODEL_ACQ_REL:
16671 emit_insn (gen_lwsync ());
16672 break;
16673 case MEMMODEL_SEQ_CST:
16674 emit_insn (gen_hwsync ());
16675 break;
16676 default:
16677 gcc_unreachable ();
16679 return mem;
16682 static void
16683 rs6000_post_atomic_barrier (enum memmodel model)
16685 switch (model)
16687 case MEMMODEL_RELAXED:
16688 case MEMMODEL_CONSUME:
16689 case MEMMODEL_RELEASE:
16690 break;
16691 case MEMMODEL_ACQUIRE:
16692 case MEMMODEL_ACQ_REL:
16693 case MEMMODEL_SEQ_CST:
16694 emit_insn (gen_isync ());
16695 break;
16696 default:
16697 gcc_unreachable ();
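/* Taken together, the two barrier helpers implement the usual PowerPC
   mapping: acquire orderings get an isync after the load-locked loop,
   release orderings an lwsync before it, and seq_cst a full hwsync
   before plus an isync after.  */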
16701 /* A subroutine of the various atomic expanders. For sub-word operations,
16702 we must adjust things to operate on SImode. Given the original MEM,
16703 return a new aligned memory. Also build and return the quantities by
16704 which to shift and mask. */
16706 static rtx
16707 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
16709 rtx addr, align, shift, mask, mem;
16710 HOST_WIDE_INT shift_mask;
16711 enum machine_mode mode = GET_MODE (orig_mem);
16713 /* For smaller modes, we have to implement this via SImode. */
16714 shift_mask = (mode == QImode ? 0x18 : 0x10);
16716 addr = XEXP (orig_mem, 0);
16717 addr = force_reg (GET_MODE (addr), addr);
16719 /* Aligned memory containing subword. Generate a new memory. We
16720 do not want any of the existing MEM_ATTR data, as we're now
16721 accessing memory outside the original object. */
16722 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
16723 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16724 mem = gen_rtx_MEM (SImode, align);
16725 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
16726 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
16727 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
16729 /* Shift amount for subword relative to aligned word. */
16730 shift = gen_reg_rtx (SImode);
16731 addr = gen_lowpart (SImode, addr);
16732 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
16733 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
16734 shift, 1, OPTAB_LIB_WIDEN);
16735 *pshift = shift;
16737 /* Mask for insertion. */
16738 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
16739 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
16740 *pmask = mask;
16742 return mem;
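/* In C terms, for a big-endian HImode access the code above computes
   roughly

	aligned = addr & ~3;
	shift = ((addr << 3) & 0x10) ^ 0x10;
	mask = 0xffff << shift;

   so SHIFT is the bit offset of the halfword within the aligned word;
   QImode uses 0x18 and 0xff instead.  This is a sketch of the values,
   not of the emitted RTL.  */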
16745 /* A subroutine of the various atomic expanders. For sub-word operands,
16746 combine OLDVAL and NEWVAL via MASK, i.e. compute (OLDVAL & ~MASK) | NEWVAL; NEWVAL must already be confined to the field. Returns a new pseudo. */
16748 static rtx
16749 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
16751 rtx x;
16753 x = gen_reg_rtx (SImode);
16754 emit_insn (gen_rtx_SET (VOIDmode, x,
16755 gen_rtx_AND (SImode,
16756 gen_rtx_NOT (SImode, mask),
16757 oldval)));
16759 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
16761 return x;
16764 /* A subroutine of the various atomic expanders. For sub-word operands,
16765 extract WIDE to NARROW via SHIFT. */
16767 static void
16768 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
16770 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
16771 wide, 1, OPTAB_LIB_WIDEN);
16772 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
16775 /* Expand an atomic compare and swap operation. */
16777 void
16778 rs6000_expand_atomic_compare_and_swap (rtx operands[])
16780 rtx boolval, retval, mem, oldval, newval, cond;
16781 rtx label1, label2, x, mask, shift;
16782 enum machine_mode mode;
16783 enum memmodel mod_s, mod_f;
16784 bool is_weak;
16786 boolval = operands[0];
16787 retval = operands[1];
16788 mem = operands[2];
16789 oldval = operands[3];
16790 newval = operands[4];
16791 is_weak = (INTVAL (operands[5]) != 0);
16792 mod_s = (enum memmodel) INTVAL (operands[6]);
16793 mod_f = (enum memmodel) INTVAL (operands[7]);
16794 mode = GET_MODE (mem);
16796 mask = shift = NULL_RTX;
16797 if (mode == QImode || mode == HImode)
16799 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16801 /* Shift and mask OLDVAL into position within the word. */
16802 oldval = convert_modes (SImode, mode, oldval, 1);
16803 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
16804 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16806 /* Shift and mask NEWVAL into position within the word. */
16807 newval = convert_modes (SImode, mode, newval, 1);
16808 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
16809 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16811 /* Prepare to adjust the return value. */
16812 retval = gen_reg_rtx (SImode);
16813 mode = SImode;
16815 else if (reg_overlap_mentioned_p (retval, oldval))
16816 oldval = copy_to_reg (oldval);
16818 mem = rs6000_pre_atomic_barrier (mem, mod_s);
16820 label1 = NULL_RTX;
16821 if (!is_weak)
16823 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16824 emit_label (XEXP (label1, 0));
16826 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16828 emit_load_locked (mode, retval, mem);
16830 x = retval;
16831 if (mask)
16833 x = expand_simple_binop (SImode, AND, retval, mask,
16834 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16837 cond = gen_reg_rtx (CCmode);
16838 x = gen_rtx_COMPARE (CCmode, x, oldval);
16839 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16841 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16842 emit_unlikely_jump (x, label2);
16844 x = newval;
16845 if (mask)
16846 x = rs6000_mask_atomic_subword (retval, newval, mask);
16848 emit_store_conditional (mode, cond, mem, x);
16850 if (!is_weak)
16852 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16853 emit_unlikely_jump (x, label1);
16856 if (mod_f != MEMMODEL_RELAXED)
16857 emit_label (XEXP (label2, 0));
16859 rs6000_post_atomic_barrier (mod_s);
16861 if (mod_f == MEMMODEL_RELAXED)
16862 emit_label (XEXP (label2, 0));
16864 if (shift)
16865 rs6000_finish_atomic_subword (operands[1], retval, shift);
16867 /* In all cases, CR0 contains EQ on success, and NE on failure. */
16868 x = gen_rtx_EQ (SImode, cond, const0_rtx);
16869 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
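/* Illustratively, a strong SImode compare-and-swap with SEQ_CST
   ordering is expected to expand to something like (register numbers
   arbitrary):

	hwsync
     1:	lwarx	r9,0,rmem
	cmpw	cr0,r9,rold
	bne-	cr0,2f
	stwcx.	rnew,0,rmem
	bne-	cr0,1b
     2:	isync

   with r9 holding the returned old value.  */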
16872 /* Expand an atomic exchange operation. */
16874 void
16875 rs6000_expand_atomic_exchange (rtx operands[])
16877 rtx retval, mem, val, cond;
16878 enum machine_mode mode;
16879 enum memmodel model;
16880 rtx label, x, mask, shift;
16882 retval = operands[0];
16883 mem = operands[1];
16884 val = operands[2];
16885 model = (enum memmodel) INTVAL (operands[3]);
16886 mode = GET_MODE (mem);
16888 mask = shift = NULL_RTX;
16889 if (mode == QImode || mode == HImode)
16891 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16893 /* Shift and mask VAL into position within the word. */
16894 val = convert_modes (SImode, mode, val, 1);
16895 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16896 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16898 /* Prepare to adjust the return value. */
16899 retval = gen_reg_rtx (SImode);
16900 mode = SImode;
16903 mem = rs6000_pre_atomic_barrier (mem, model);
16905 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16906 emit_label (XEXP (label, 0));
16908 emit_load_locked (mode, retval, mem);
16910 x = val;
16911 if (mask)
16912 x = rs6000_mask_atomic_subword (retval, val, mask);
16914 cond = gen_reg_rtx (CCmode);
16915 emit_store_conditional (mode, cond, mem, x);
16917 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16918 emit_unlikely_jump (x, label);
16920 rs6000_post_atomic_barrier (model);
16922 if (shift)
16923 rs6000_finish_atomic_subword (operands[0], retval, shift);
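/* The loop generated here has the same shape as the compare-and-swap
   loop above, minus the compare: load-locked, optional subword
   insertion, store-conditional, branch back on failure.  */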
16926 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
16927 to perform. MEM is the memory on which to operate. VAL is the second
16928 operand of the binary operator. BEFORE and AFTER are optional locations to
16929 return the value of MEM either before or after the operation. MODEL_RTX
16930 is a CONST_INT containing the memory model to use. */
16932 void
16933 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
16934 rtx orig_before, rtx orig_after, rtx model_rtx)
16936 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
16937 enum machine_mode mode = GET_MODE (mem);
16938 rtx label, x, cond, mask, shift;
16939 rtx before = orig_before, after = orig_after;
16941 mask = shift = NULL_RTX;
16942 if (mode == QImode || mode == HImode)
16944 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16946 /* Shift and mask VAL into position within the word. */
16947 val = convert_modes (SImode, mode, val, 1);
16948 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16949 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16951 switch (code)
16953 case IOR:
16954 case XOR:
16955 /* We've already zero-extended VAL. That is sufficient to
16956 make certain that it does not affect other bits. */
16957 mask = NULL;
16958 break;
16960 case AND:
16961 /* If we make certain that all of the other bits in VAL are
16962 set, that will be sufficient to not affect other bits. */
16963 x = gen_rtx_NOT (SImode, mask);
16964 x = gen_rtx_IOR (SImode, x, val);
16965 emit_insn (gen_rtx_SET (VOIDmode, val, x));
16966 mask = NULL;
16967 break;
16969 case NOT:
16970 case PLUS:
16971 case MINUS:
16972 /* These will all affect bits outside the field and need
16973 adjustment via MASK within the loop. */
16974 break;
16976 default:
16977 gcc_unreachable ();
16980 /* Prepare to adjust the return value. */
16981 before = gen_reg_rtx (SImode);
16982 if (after)
16983 after = gen_reg_rtx (SImode);
16984 mode = SImode;
16987 mem = rs6000_pre_atomic_barrier (mem, model);
16989 label = gen_label_rtx ();
16990 emit_label (label);
16991 label = gen_rtx_LABEL_REF (VOIDmode, label);
16993 if (before == NULL_RTX)
16994 before = gen_reg_rtx (mode);
16996 emit_load_locked (mode, before, mem);
16998 if (code == NOT)
17000 x = expand_simple_binop (mode, AND, before, val,
17001 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17002 after = expand_simple_unop (mode, NOT, x, after, 1);
17004 else
17006 after = expand_simple_binop (mode, code, before, val,
17007 after, 1, OPTAB_LIB_WIDEN);
17010 x = after;
17011 if (mask)
17013 x = expand_simple_binop (SImode, AND, after, mask,
17014 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17015 x = rs6000_mask_atomic_subword (before, x, mask);
17018 cond = gen_reg_rtx (CCmode);
17019 emit_store_conditional (mode, cond, mem, x);
17021 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17022 emit_unlikely_jump (x, label);
17024 rs6000_post_atomic_barrier (model);
17026 if (shift)
17028 if (orig_before)
17029 rs6000_finish_atomic_subword (orig_before, before, shift);
17030 if (orig_after)
17031 rs6000_finish_atomic_subword (orig_after, after, shift);
17033 else if (orig_after && after != orig_after)
17034 emit_move_insn (orig_after, after);
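/* For instance, a word-sized __atomic_fetch_add with relaxed ordering
   should come out roughly as (registers illustrative):

     1:	lwarx	r9,0,rmem
	add	r10,r9,rval
	stwcx.	r10,0,rmem
	bne-	cr0,1b

   with BEFORE in r9 and AFTER in r10; no barriers are emitted for
   MEMMODEL_RELAXED.  */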
17037 /* Emit instructions to move SRC to DST. Called by splitters for
17038 multi-register moves. It will emit at most one instruction for
17039 each register that is accessed; that is, it won't emit li/lis pairs
17040 (or equivalent for 64-bit code). One of SRC or DST must be a hard
17041 register. */
17043 void
17044 rs6000_split_multireg_move (rtx dst, rtx src)
17046 /* The register number of the first register being moved. */
17047 int reg;
17048 /* The mode that is to be moved. */
17049 enum machine_mode mode;
17050 /* The mode that the move is being done in, and its size. */
17051 enum machine_mode reg_mode;
17052 int reg_mode_size;
17053 /* The number of registers that will be moved. */
17054 int nregs;
17056 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
17057 mode = GET_MODE (dst);
17058 nregs = hard_regno_nregs[reg][mode];
17059 if (FP_REGNO_P (reg))
17060 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
17061 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
17062 else if (ALTIVEC_REGNO_P (reg))
17063 reg_mode = V16QImode;
17064 else if (TARGET_E500_DOUBLE && mode == TFmode)
17065 reg_mode = DFmode;
17066 else
17067 reg_mode = word_mode;
17068 reg_mode_size = GET_MODE_SIZE (reg_mode);
17070 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
17072 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
17074 /* Move the register range backwards if we might have destructive
17075 overlap. */
17076 int i;
17077 for (i = nregs - 1; i >= 0; i--)
17078 emit_insn (gen_rtx_SET (VOIDmode,
17079 simplify_gen_subreg (reg_mode, dst, mode,
17080 i * reg_mode_size),
17081 simplify_gen_subreg (reg_mode, src, mode,
17082 i * reg_mode_size)));
17084 else
17086 int i;
17087 int j = -1;
17088 bool used_update = false;
17089 rtx restore_basereg = NULL_RTX;
17091 if (MEM_P (src) && INT_REGNO_P (reg))
17093 rtx breg;
17095 if (GET_CODE (XEXP (src, 0)) == PRE_INC
17096 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
17098 rtx delta_rtx;
17099 breg = XEXP (XEXP (src, 0), 0);
17100 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
17101 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
17102 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
17103 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17104 src = replace_equiv_address (src, breg);
17106 else if (! rs6000_offsettable_memref_p (src, reg_mode))
17108 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
17110 rtx basereg = XEXP (XEXP (src, 0), 0);
17111 if (TARGET_UPDATE)
17113 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
17114 emit_insn (gen_rtx_SET (VOIDmode, ndst,
17115 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
17116 used_update = true;
17118 else
17119 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17120 XEXP (XEXP (src, 0), 1)));
17121 src = replace_equiv_address (src, basereg);
17123 else
17125 rtx basereg = gen_rtx_REG (Pmode, reg);
17126 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
17127 src = replace_equiv_address (src, basereg);
17131 breg = XEXP (src, 0);
17132 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
17133 breg = XEXP (breg, 0);
17135 /* If the base register we are using to address memory is
17136 also a destination reg, then change that register last. */
17137 if (REG_P (breg)
17138 && REGNO (breg) >= REGNO (dst)
17139 && REGNO (breg) < REGNO (dst) + nregs)
17140 j = REGNO (breg) - REGNO (dst);
17142 else if (MEM_P (dst) && INT_REGNO_P (reg))
17144 rtx breg;
17146 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
17147 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
17149 rtx delta_rtx;
17150 breg = XEXP (XEXP (dst, 0), 0);
17151 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
17152 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
17153 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
17155 /* We have to update the breg before doing the store.
17156 Use store with update, if available. */
17158 if (TARGET_UPDATE)
17160 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17161 emit_insn (TARGET_32BIT
17162 ? (TARGET_POWERPC64
17163 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
17164 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
17165 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
17166 used_update = true;
17168 else
17169 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17170 dst = replace_equiv_address (dst, breg);
17172 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
17173 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
17175 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
17177 rtx basereg = XEXP (XEXP (dst, 0), 0);
17178 if (TARGET_UPDATE)
17180 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17181 emit_insn (gen_rtx_SET (VOIDmode,
17182 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
17183 used_update = true;
17185 else
17186 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17187 XEXP (XEXP (dst, 0), 1)));
17188 dst = replace_equiv_address (dst, basereg);
17190 else
17192 rtx basereg = XEXP (XEXP (dst, 0), 0);
17193 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
17194 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
17195 && REG_P (basereg)
17196 && REG_P (offsetreg)
17197 && REGNO (basereg) != REGNO (offsetreg));
17198 if (REGNO (basereg) == 0)
17200 rtx tmp = offsetreg;
17201 offsetreg = basereg;
17202 basereg = tmp;
17204 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
17205 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
17206 dst = replace_equiv_address (dst, basereg);
17209 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
17210 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
17213 for (i = 0; i < nregs; i++)
17215 /* Calculate index to next subword. */
17216 ++j;
17217 if (j == nregs)
17218 j = 0;
17220 /* If the compiler has already emitted the move of the first word
17221 via a store with update, there is no need to do anything. */
17222 if (j == 0 && used_update)
17223 continue;
17225 emit_insn (gen_rtx_SET (VOIDmode,
17226 simplify_gen_subreg (reg_mode, dst, mode,
17227 j * reg_mode_size),
17228 simplify_gen_subreg (reg_mode, src, mode,
17229 j * reg_mode_size)));
17231 if (restore_basereg != NULL_RTX)
17232 emit_insn (restore_basereg);
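/* As a concrete (hypothetical) instance: a TImode register-to-register
   move on 32-bit code is split into four SImode moves, emitted
   last-register-first when REGNO (src) < REGNO (dst) so that a partial
   overlap cannot clobber words that have not been copied yet.  */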
17237 /* This page contains routines that are used to determine what the
17238 function prologue and epilogue code will do and write them out. */
17240 static inline bool
17241 save_reg_p (int r)
17243 return !call_used_regs[r] && df_regs_ever_live_p (r);
17246 /* Return the first fixed-point register that is required to be
17247 saved. 32 if none. */
17249 int
17250 first_reg_to_save (void)
17252 int first_reg;
17254 /* Find lowest numbered live register. */
17255 for (first_reg = 13; first_reg <= 31; first_reg++)
17256 if (save_reg_p (first_reg))
17257 break;
17259 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17260 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17261 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17262 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17263 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17264 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17266 #if TARGET_MACHO
17267 if (flag_pic
17268 && crtl->uses_pic_offset_table
17269 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17270 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17271 #endif
17273 return first_reg;
17276 /* Similar, for FP regs. */
17278 int
17279 first_fp_reg_to_save (void)
17281 int first_reg;
17283 /* Find lowest numbered live register. */
17284 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17285 if (save_reg_p (first_reg))
17286 break;
17288 return first_reg;
17291 /* Similar, for AltiVec regs. */
17293 static int
17294 first_altivec_reg_to_save (void)
17296 int i;
17298 /* The stack frame remains as-is unless we are using the AltiVec ABI. */
17299 if (! TARGET_ALTIVEC_ABI)
17300 return LAST_ALTIVEC_REGNO + 1;
17302 /* On Darwin, the unwind routines are compiled without
17303 TARGET_ALTIVEC, and use save_world to save/restore the
17304 altivec registers when necessary. */
17305 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17306 && ! TARGET_ALTIVEC)
17307 return FIRST_ALTIVEC_REGNO + 20;
17309 /* Find lowest numbered live register. */
17310 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17311 if (save_reg_p (i))
17312 break;
17314 return i;
17317 /* Return a 32-bit mask of the AltiVec registers we need to set in
17318 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
17319 numbered from the MSB, so the MSB of the 32-bit word corresponds to V0. */
17321 static unsigned int
17322 compute_vrsave_mask (void)
17324 unsigned int i, mask = 0;
17326 /* On Darwin, the unwind routines are compiled without
17327 TARGET_ALTIVEC, and use save_world to save/restore the
17328 call-saved altivec registers when necessary. */
17329 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17330 && ! TARGET_ALTIVEC)
17331 mask |= 0xFFF;
17333 /* First, find out if we use _any_ altivec registers. */
17334 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17335 if (df_regs_ever_live_p (i))
17336 mask |= ALTIVEC_REG_BIT (i);
17338 if (mask == 0)
17339 return mask;
17341 /* Next, remove the argument registers from the set. These must
17342 be in the VRSAVE mask set by the caller, so we don't need to add
17343 them in again. More importantly, the mask we compute here is
17344 used to generate CLOBBERs in the set_vrsave insn, and we do not
17345 wish the argument registers to die. */
17346 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17347 mask &= ~ALTIVEC_REG_BIT (i);
17349 /* Similarly, remove the return value from the set. */
17351 bool yes = false;
17352 diddle_return_value (is_altivec_return_reg, &yes);
17353 if (yes)
17354 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17357 return mask;
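/* As an example of the logic above: a function whose body uses only
   v20 and v21 and returns a vector ends up with just the bits for v20
   and v21 set; the return register is initially live in the mask but
   is removed again by the diddle_return_value call.  */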
17360 /* For a very restricted set of circumstances, we can cut down the
17361 size of prologues/epilogues by calling our own save/restore-the-world
17362 routines. */
17364 static void
17365 compute_save_world_info (rs6000_stack_t *info_ptr)
17367 info_ptr->world_save_p = 1;
17368 info_ptr->world_save_p
17369 = (WORLD_SAVE_P (info_ptr)
17370 && DEFAULT_ABI == ABI_DARWIN
17371 && !cfun->has_nonlocal_label
17372 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17373 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17374 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17375 && info_ptr->cr_save_p);
17377 /* This will not work in conjunction with sibcalls. Make sure there
17378 are none. (This check is expensive, but seldom executed.) */
17379 if (WORLD_SAVE_P (info_ptr))
17381 rtx insn;
17382 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17383 if ( GET_CODE (insn) == CALL_INSN
17384 && SIBLING_CALL_P (insn))
17386 info_ptr->world_save_p = 0;
17387 break;
17391 if (WORLD_SAVE_P (info_ptr))
17393 /* Even if we're not touching VRsave, make sure there's room on the
17394 stack for it, if it looks like we're calling SAVE_WORLD, which
17395 will attempt to save it. */
17396 info_ptr->vrsave_size = 4;
17398 /* If we are going to save the world, we need to save the link register too. */
17399 info_ptr->lr_save_p = 1;
17401 /* "Save" the VRsave register too if we're saving the world. */
17402 if (info_ptr->vrsave_mask == 0)
17403 info_ptr->vrsave_mask = compute_vrsave_mask ();
17405 /* Because the Darwin register save/restore routines only handle
17406 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17407 check. */
17408 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17409 && (info_ptr->first_altivec_reg_save
17410 >= FIRST_SAVED_ALTIVEC_REGNO));
17412 return;
17416 static void
17417 is_altivec_return_reg (rtx reg, void *xyes)
17419 bool *yes = (bool *) xyes;
17420 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17421 *yes = true;
17425 /* Look for user-defined global regs in the range FIRST to LAST-1.
17426 We should not restore these, and so cannot use lmw or out-of-line
17427 restore functions if there are any. We also can't save them
17428 (well, emit frame notes for them), because frame unwinding during
17429 exception handling will restore saved registers. */
17431 static bool
17432 global_regs_p (unsigned first, unsigned last)
17434 while (first < last)
17435 if (global_regs[first++])
17436 return true;
17437 return false;
17440 /* Determine the strategy for saving/restoring registers. */
17442 enum {
17443 SAVRES_MULTIPLE = 0x1,
17444 SAVE_INLINE_FPRS = 0x2,
17445 SAVE_INLINE_GPRS = 0x4,
17446 REST_INLINE_FPRS = 0x8,
17447 REST_INLINE_GPRS = 0x10,
17448 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17449 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17450 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17451 SAVE_INLINE_VRS = 0x100,
17452 REST_INLINE_VRS = 0x200
17455 static int
17456 rs6000_savres_strategy (rs6000_stack_t *info,
17457 bool using_static_chain_p)
17459 int strategy = 0;
17460 bool lr_save_p;
17462 if (TARGET_MULTIPLE
17463 && !TARGET_POWERPC64
17464 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17465 && info->first_gp_reg_save < 31
17466 && !global_regs_p (info->first_gp_reg_save, 32))
17467 strategy |= SAVRES_MULTIPLE;
17469 if (crtl->calls_eh_return
17470 || cfun->machine->ra_need_lr)
17471 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17472 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17473 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17475 if (info->first_fp_reg_save == 64
17476 /* The out-of-line FP routines use double-precision stores;
17477 we can't use those routines if we don't have such stores. */
17478 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17479 || global_regs_p (info->first_fp_reg_save, 64))
17480 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17482 if (info->first_gp_reg_save == 32
17483 || (!(strategy & SAVRES_MULTIPLE)
17484 && global_regs_p (info->first_gp_reg_save, 32)))
17485 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17487 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
17488 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
17489 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17491 /* Define cutoff for using out-of-line functions to save registers. */
17492 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
17494 if (!optimize_size)
17496 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17497 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17498 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17500 else
17502 /* Prefer out-of-line restore if it will exit. */
17503 if (info->first_fp_reg_save > 61)
17504 strategy |= SAVE_INLINE_FPRS;
17505 if (info->first_gp_reg_save > 29)
17507 if (info->first_fp_reg_save == 64)
17508 strategy |= SAVE_INLINE_GPRS;
17509 else
17510 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17512 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
17513 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17516 else if (DEFAULT_ABI == ABI_DARWIN)
17518 if (info->first_fp_reg_save > 60)
17519 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17520 if (info->first_gp_reg_save > 29)
17521 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17522 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17524 else
17526 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
17527 if (info->first_fp_reg_save > 61)
17528 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17529 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17530 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17533 /* Don't bother to try to save things out-of-line if r11 is occupied
17534 by the static chain. It would require too much fiddling and the
17535 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
17536 pointer on Darwin, and AIX uses r1 or r12. */
17537 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
17538 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
17539 | SAVE_INLINE_GPRS
17540 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17542 /* We can only use the out-of-line routines to restore if we've
17543 saved all the registers from first_fp_reg_save in the prologue.
17544 Otherwise, we risk loading garbage. */
17545 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
17547 int i;
17549 for (i = info->first_fp_reg_save; i < 64; i++)
17550 if (!save_reg_p (i))
17552 strategy |= REST_INLINE_FPRS;
17553 break;
17557 /* If we are going to use store multiple, then don't even bother
17558 with the out-of-line routines, since the store-multiple
17559 instruction will always be smaller. */
17560 if ((strategy & SAVRES_MULTIPLE))
17561 strategy |= SAVE_INLINE_GPRS;
17563 /* info->lr_save_p isn't yet set if the only reason lr needs to be
17564 saved is an out-of-line save or restore. Set up the value for
17565 the next test (excluding out-of-line gpr restore). */
17566 lr_save_p = (info->lr_save_p
17567 || !(strategy & SAVE_INLINE_GPRS)
17568 || !(strategy & SAVE_INLINE_FPRS)
17569 || !(strategy & SAVE_INLINE_VRS)
17570 || !(strategy & REST_INLINE_FPRS)
17571 || !(strategy & REST_INLINE_VRS));
17573 /* The situation is more complicated with load multiple. We'd
17574 prefer to use the out-of-line routines for restores, since the
17575 "exit" out-of-line routines can handle the restore of LR and the
17576 frame teardown. However, it doesn't make sense to use the
17577 out-of-line routine if that is the only reason we'd need to save
17578 LR, and we can't use the "exit" out-of-line GPR restore if we
17579 have saved some FPRs; in those cases it is advantageous to use
17580 load multiple when available. */
17581 if ((strategy & SAVRES_MULTIPLE)
17582 && (!lr_save_p
17583 || info->first_fp_reg_save != 64))
17584 strategy |= REST_INLINE_GPRS;
17586 /* Saving CR interferes with the exit routines used on the SPE, so
17587 just punt here. */
17588 if (TARGET_SPE_ABI
17589 && info->spe_64bit_regs_used
17590 && info->cr_save_p)
17591 strategy |= REST_INLINE_GPRS;
17593 /* We can only use load multiple or the out-of-line routines to
17594 restore if we've used store multiple or out-of-line routines
17595 in the prologue, i.e. if we've saved all the registers from
17596 first_gp_reg_save. Otherwise, we risk loading garbage. */
17597 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
17598 == SAVE_INLINE_GPRS)
17600 int i;
17602 for (i = info->first_gp_reg_save; i < 32; i++)
17603 if (!save_reg_p (i))
17605 strategy |= REST_INLINE_GPRS;
17606 break;
17610 if (TARGET_ELF && TARGET_64BIT)
17612 if (!(strategy & SAVE_INLINE_FPRS))
17613 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17614 else if (!(strategy & SAVE_INLINE_GPRS)
17615 && info->first_fp_reg_save == 64)
17616 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
17618 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
17619 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
17621 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
17622 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17624 return strategy;
17627 /* Calculate the stack information for the current function. This is
17628 complicated by having two separate calling sequences, the AIX calling
17629 sequence and the V.4 calling sequence.
17631 AIX (and Darwin/Mac OS X) stack frames look like:
17632 32-bit 64-bit
17633 SP----> +---------------------------------------+
17634 | back chain to caller | 0 0
17635 +---------------------------------------+
17636 | saved CR | 4 8 (8-11)
17637 +---------------------------------------+
17638 | saved LR | 8 16
17639 +---------------------------------------+
17640 | reserved for compilers | 12 24
17641 +---------------------------------------+
17642 | reserved for binders | 16 32
17643 +---------------------------------------+
17644 | saved TOC pointer | 20 40
17645 +---------------------------------------+
17646 | Parameter save area (P) | 24 48
17647 +---------------------------------------+
17648 | Alloca space (A) | 24+P etc.
17649 +---------------------------------------+
17650 | Local variable space (L) | 24+P+A
17651 +---------------------------------------+
17652 | Float/int conversion temporary (X) | 24+P+A+L
17653 +---------------------------------------+
17654 | Save area for AltiVec registers (W) | 24+P+A+L+X
17655 +---------------------------------------+
17656 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17657 +---------------------------------------+
17658 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17659 +---------------------------------------+
17660 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17661 +---------------------------------------+
17662 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17663 +---------------------------------------+
17664 old SP->| back chain to caller's caller |
17665 +---------------------------------------+
17667 The required alignment for AIX configurations is two words (i.e., 8
17668 or 16 bytes).
17671 V.4 stack frames look like:
17673 SP----> +---------------------------------------+
17674 | back chain to caller | 0
17675 +---------------------------------------+
17676 | caller's saved LR | 4
17677 +---------------------------------------+
17678 | Parameter save area (P) | 8
17679 +---------------------------------------+
17680 | Alloca space (A) | 8+P
17681 +---------------------------------------+
17682 | Varargs save area (V) | 8+P+A
17683 +---------------------------------------+
17684 | Local variable space (L) | 8+P+A+V
17685 +---------------------------------------+
17686 | Float/int conversion temporary (X) | 8+P+A+V+L
17687 +---------------------------------------+
17688 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17689 +---------------------------------------+
17690 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17691 +---------------------------------------+
17692 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17693 +---------------------------------------+
17694 | SPE: area for 64-bit GP registers |
17695 +---------------------------------------+
17696 | SPE alignment padding |
17697 +---------------------------------------+
17698 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17699 +---------------------------------------+
17700 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17701 +---------------------------------------+
17702 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17703 +---------------------------------------+
17704 old SP->| back chain to caller's caller |
17705 +---------------------------------------+
17707 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17708 given. (But note below and in sysv4.h that we require only 8 and
17709 may round up the size of our stack frame anyway. The historical
17710 reason is early versions of powerpc-linux which didn't properly
17711 align the stack at program startup. A happy side-effect is that
17712 -mno-eabi libraries can be used with -meabi programs.)
17714 The EABI configuration defaults to the V.4 layout. However,
17715 the stack alignment requirements may differ. If -mno-eabi is not
17716 given, the required stack alignment is 8 bytes; if -mno-eabi is
17717 given, the required alignment is 16 bytes. (But see V.4 comment
17718 above.) */
17720 #ifndef ABI_STACK_BOUNDARY
17721 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17722 #endif
17724 static rs6000_stack_t *
17725 rs6000_stack_info (void)
17727 rs6000_stack_t *info_ptr = &stack_info;
17728 int reg_size = TARGET_32BIT ? 4 : 8;
17729 int ehrd_size;
17730 int save_align;
17731 int first_gp;
17732 HOST_WIDE_INT non_fixed_size;
17733 bool using_static_chain_p;
17735 if (reload_completed && info_ptr->reload_completed)
17736 return info_ptr;
17738 memset (info_ptr, 0, sizeof (*info_ptr));
17739 info_ptr->reload_completed = reload_completed;
17741 if (TARGET_SPE)
17743 /* Cache value so we don't rescan instruction chain over and over. */
17744 if (cfun->machine->insn_chain_scanned_p == 0)
17745 cfun->machine->insn_chain_scanned_p
17746 = spe_func_has_64bit_regs_p () + 1;
17747 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17750 /* Select which calling sequence. */
17751 info_ptr->abi = DEFAULT_ABI;
17753 /* Calculate which registers need to be saved & save area size. */
17754 info_ptr->first_gp_reg_save = first_reg_to_save ();
17755 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17756 even if it currently looks like we won't. Reload may need it to
17757 get at a constant; if so, it will have already created a constant
17758 pool entry for it. */
17759 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17760 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17761 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17762 && crtl->uses_const_pool
17763 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17764 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17765 else
17766 first_gp = info_ptr->first_gp_reg_save;
17768 info_ptr->gp_size = reg_size * (32 - first_gp);
17770 /* For the SPE, we have an additional upper 32-bits on each GPR.
17771 Ideally we should save the entire 64-bits only when the upper
17772 half is used in SIMD instructions. Since we only record
17773 registers live (not the size they are used in), this proves
17774 difficult because we'd have to traverse the instruction chain at
17775 the right time, taking reload into account. This is a real pain,
17776 so we opt to always save the GPRs in 64 bits if even one register
17777 is used in 64 bits. Otherwise, all the registers in the frame
17778 get saved in 32 bits.
17780 Note that when we save all GPRs (except the SP) in 64 bits, the
17781 traditional GP save area will be empty. */
17782 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17783 info_ptr->gp_size = 0;
17785 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17786 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17788 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17789 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17790 - info_ptr->first_altivec_reg_save);
17792 /* Does this function call anything? */
17793 info_ptr->calls_p = (! crtl->is_leaf
17794 || cfun->machine->ra_needs_full_frame);
17796 /* Determine if we need to save the condition code registers. */
17797 if (df_regs_ever_live_p (CR2_REGNO)
17798 || df_regs_ever_live_p (CR3_REGNO)
17799 || df_regs_ever_live_p (CR4_REGNO))
17801 info_ptr->cr_save_p = 1;
17802 if (DEFAULT_ABI == ABI_V4)
17803 info_ptr->cr_size = reg_size;
17806 /* If the current function calls __builtin_eh_return, then we need
17807 to allocate stack space for registers that will hold data for
17808 the exception handler. */
17809 if (crtl->calls_eh_return)
17811 unsigned int i;
17812 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17813 continue;
17815 /* SPE saves EH registers in 64-bits. */
17816 ehrd_size = i * (TARGET_SPE_ABI
17817 && info_ptr->spe_64bit_regs_used != 0
17818 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17820 else
17821 ehrd_size = 0;
17823 /* Determine various sizes. */
17824 info_ptr->reg_size = reg_size;
17825 info_ptr->fixed_size = RS6000_SAVE_AREA;
17826 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17827 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17828 TARGET_ALTIVEC ? 16 : 8);
17829 if (FRAME_GROWS_DOWNWARD)
17830 info_ptr->vars_size
17831 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
17832 + info_ptr->parm_size,
17833 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
17834 - (info_ptr->fixed_size + info_ptr->vars_size
17835 + info_ptr->parm_size);
17837 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17838 info_ptr->spe_gp_size = 8 * (32 - first_gp);
17839 else
17840 info_ptr->spe_gp_size = 0;
17842 /* Set VRSAVE register if it is saved and restored. */
17843 if (TARGET_ALTIVEC_ABI && TARGET_ALTIVEC_VRSAVE)
17844 info_ptr->vrsave_mask = compute_vrsave_mask ();
17845 else
17846 info_ptr->vrsave_mask = 0;
17848 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
17849 info_ptr->vrsave_size = 4;
17850 else
17851 info_ptr->vrsave_size = 0;
17853 compute_save_world_info (info_ptr);
17855 /* Calculate the offsets. */
17856 switch (DEFAULT_ABI)
17858 case ABI_NONE:
17859 default:
17860 gcc_unreachable ();
17862 case ABI_AIX:
17863 case ABI_DARWIN:
17864 info_ptr->fp_save_offset = - info_ptr->fp_size;
17865 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17867 if (TARGET_ALTIVEC_ABI)
17869 info_ptr->vrsave_save_offset
17870 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
17872 /* Align stack so vector save area is on a quadword boundary.
17873 The padding goes above the vectors. */
17874 if (info_ptr->altivec_size != 0)
17875 info_ptr->altivec_padding_size
17876 = info_ptr->vrsave_save_offset & 0xF;
17877 else
17878 info_ptr->altivec_padding_size = 0;
17880 info_ptr->altivec_save_offset
17881 = info_ptr->vrsave_save_offset
17882 - info_ptr->altivec_padding_size
17883 - info_ptr->altivec_size;
17884 gcc_assert (info_ptr->altivec_size == 0
17885 || info_ptr->altivec_save_offset % 16 == 0);
17887 /* Adjust for AltiVec case. */
17888 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
17890 else
17891 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
17892 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
17893 info_ptr->lr_save_offset = 2*reg_size;
17894 break;
17896 case ABI_V4:
17897 info_ptr->fp_save_offset = - info_ptr->fp_size;
17898 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17899 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
17901 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17903 /* Align stack so SPE GPR save area is aligned on a
17904 double-word boundary. */
17905 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
17906 info_ptr->spe_padding_size
17907 = 8 - (-info_ptr->cr_save_offset % 8);
17908 else
17909 info_ptr->spe_padding_size = 0;
17911 info_ptr->spe_gp_save_offset
17912 = info_ptr->cr_save_offset
17913 - info_ptr->spe_padding_size
17914 - info_ptr->spe_gp_size;
17916 /* Adjust for SPE case. */
17917 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
17919 else if (TARGET_ALTIVEC_ABI)
17921 info_ptr->vrsave_save_offset
17922 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
17924 /* Align stack so vector save area is on a quadword boundary. */
17925 if (info_ptr->altivec_size != 0)
17926 info_ptr->altivec_padding_size
17927 = 16 - (-info_ptr->vrsave_save_offset % 16);
17928 else
17929 info_ptr->altivec_padding_size = 0;
17931 info_ptr->altivec_save_offset
17932 = info_ptr->vrsave_save_offset
17933 - info_ptr->altivec_padding_size
17934 - info_ptr->altivec_size;
17936 /* Adjust for AltiVec case. */
17937 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
17939 else
17940 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
17941 info_ptr->ehrd_offset -= ehrd_size;
17942 info_ptr->lr_save_offset = reg_size;
17943 break;
17946 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
17947 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
17948 + info_ptr->gp_size
17949 + info_ptr->altivec_size
17950 + info_ptr->altivec_padding_size
17951 + info_ptr->spe_gp_size
17952 + info_ptr->spe_padding_size
17953 + ehrd_size
17954 + info_ptr->cr_size
17955 + info_ptr->vrsave_size,
17956 save_align);
17958 non_fixed_size = (info_ptr->vars_size
17959 + info_ptr->parm_size
17960 + info_ptr->save_size);
17962 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
17963 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
17965 /* Determine if we need to save the link register. */
17966 if (info_ptr->calls_p
17967 || (DEFAULT_ABI == ABI_AIX
17968 && crtl->profile
17969 && !TARGET_PROFILE_KERNEL)
17970 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
17971 #ifdef TARGET_RELOCATABLE
17972 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
17973 #endif
17974 || rs6000_ra_ever_killed ())
17975 info_ptr->lr_save_p = 1;
17977 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
17978 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
17979 && call_used_regs[STATIC_CHAIN_REGNUM]);
17980 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
17981 using_static_chain_p);
17983 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
17984 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
17985 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
17986 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
17987 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
17988 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
17989 info_ptr->lr_save_p = 1;
17991 if (info_ptr->lr_save_p)
17992 df_set_regs_ever_live (LR_REGNO, true);
17994 /* Determine if we need to allocate any stack frame:
17996 For AIX we need to push the stack if a frame pointer is needed
17997 (because the stack might be dynamically adjusted), if we are
17998 debugging, if we make calls, or if the sum of fp_save, gp_save,
17999 and local variables is more than the space needed to save all
18000 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
18001 + 18*8 = 288 (GPR13 reserved).
18003 For V.4 we don't have the stack cushion that AIX uses, but assume
18004 that the debugger can handle stackless frames. */
18006 if (info_ptr->calls_p)
18007 info_ptr->push_p = 1;
18009 else if (DEFAULT_ABI == ABI_V4)
18010 info_ptr->push_p = non_fixed_size != 0;
18012 else if (frame_pointer_needed)
18013 info_ptr->push_p = 1;
18015 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
18016 info_ptr->push_p = 1;
18018 else
18019 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
18021 /* Zero offsets if we're not saving those registers. */
18022 if (info_ptr->fp_size == 0)
18023 info_ptr->fp_save_offset = 0;
18025 if (info_ptr->gp_size == 0)
18026 info_ptr->gp_save_offset = 0;
18028 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
18029 info_ptr->altivec_save_offset = 0;
18031 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
18032 info_ptr->vrsave_save_offset = 0;
18034 if (! TARGET_SPE_ABI
18035 || info_ptr->spe_64bit_regs_used == 0
18036 || info_ptr->spe_gp_size == 0)
18037 info_ptr->spe_gp_save_offset = 0;
18039 if (! info_ptr->lr_save_p)
18040 info_ptr->lr_save_offset = 0;
18042 if (! info_ptr->cr_save_p)
18043 info_ptr->cr_save_offset = 0;
18045 return info_ptr;
18048 /* Return true if the current function uses any GPRs in 64-bit SIMD
18049 mode. */
18051 static bool
18052 spe_func_has_64bit_regs_p (void)
18054 rtx insns, insn;
18056 /* Functions that save and restore all the call-saved registers will
18057 need to save/restore the registers in 64-bits. */
18058 if (crtl->calls_eh_return
18059 || cfun->calls_setjmp
18060 || crtl->has_nonlocal_goto)
18061 return true;
18063 insns = get_insns ();
18065 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
18067 if (INSN_P (insn))
18069 rtx i;
18071 /* FIXME: This should be implemented with attributes, e.g.
18073 (set_attr "spe64" "true") ... then
18074 if (get_spe64 (insn)) return true;
18076 That's the only reliable way to do the check below. */
18078 i = PATTERN (insn);
18079 if (GET_CODE (i) == SET)
18081 enum machine_mode mode = GET_MODE (SET_SRC (i));
18083 if (SPE_VECTOR_MODE (mode))
18084 return true;
18085 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
18086 return true;
18091 return false;
18094 static void
18095 debug_stack_info (rs6000_stack_t *info)
18097 const char *abi_string;
18099 if (! info)
18100 info = rs6000_stack_info ();
18102 fprintf (stderr, "\nStack information for function %s:\n",
18103 ((current_function_decl && DECL_NAME (current_function_decl))
18104 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
18105 : "<unknown>"));
18107 switch (info->abi)
18109 default: abi_string = "Unknown"; break;
18110 case ABI_NONE: abi_string = "NONE"; break;
18111 case ABI_AIX: abi_string = "AIX"; break;
18112 case ABI_DARWIN: abi_string = "Darwin"; break;
18113 case ABI_V4: abi_string = "V.4"; break;
18116 fprintf (stderr, "\tABI = %5s\n", abi_string);
18118 if (TARGET_ALTIVEC_ABI)
18119 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
18121 if (TARGET_SPE_ABI)
18122 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
18124 if (info->first_gp_reg_save != 32)
18125 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
18127 if (info->first_fp_reg_save != 64)
18128 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
18130 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
18131 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
18132 info->first_altivec_reg_save);
18134 if (info->lr_save_p)
18135 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
18137 if (info->cr_save_p)
18138 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
18140 if (info->vrsave_mask)
18141 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
18143 if (info->push_p)
18144 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
18146 if (info->calls_p)
18147 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
18149 if (info->gp_save_offset)
18150 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
18152 if (info->fp_save_offset)
18153 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
18155 if (info->altivec_save_offset)
18156 fprintf (stderr, "\taltivec_save_offset = %5d\n",
18157 info->altivec_save_offset);
18159 if (info->spe_gp_save_offset)
18160 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
18161 info->spe_gp_save_offset);
18163 if (info->vrsave_save_offset)
18164 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
18165 info->vrsave_save_offset);
18167 if (info->lr_save_offset)
18168 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
18170 if (info->cr_save_offset)
18171 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
18173 if (info->varargs_save_offset)
18174 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
18176 if (info->total_size)
18177 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18178 info->total_size);
18180 if (info->vars_size)
18181 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18182 info->vars_size);
18184 if (info->parm_size)
18185 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
18187 if (info->fixed_size)
18188 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
18190 if (info->gp_size)
18191 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
18193 if (info->spe_gp_size)
18194 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
18196 if (info->fp_size)
18197 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
18199 if (info->altivec_size)
18200 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
18202 if (info->vrsave_size)
18203 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
18205 if (info->altivec_padding_size)
18206 fprintf (stderr, "\taltivec_padding_size= %5d\n",
18207 info->altivec_padding_size);
18209 if (info->spe_padding_size)
18210 fprintf (stderr, "\tspe_padding_size = %5d\n",
18211 info->spe_padding_size);
18213 if (info->cr_size)
18214 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
18216 if (info->save_size)
18217 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18219 if (info->reg_size != 4)
18220 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18222 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18224 fprintf (stderr, "\n");
18227 rtx
18228 rs6000_return_addr (int count, rtx frame)
18230 /* Currently we don't optimize very well between prolog and body
18231 code, and for PIC code the result can actually be quite bad, so
18232 don't try to be too clever here. */
18233 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18235 cfun->machine->ra_needs_full_frame = 1;
18237 return
18238 gen_rtx_MEM
18239 (Pmode,
18240 memory_address
18241 (Pmode,
18242 plus_constant (Pmode,
18243 copy_to_reg
18244 (gen_rtx_MEM (Pmode,
18245 memory_address (Pmode, frame))),
18246 RETURN_ADDRESS_OFFSET)));
18249 cfun->machine->ra_need_lr = 1;
18250 return get_hard_reg_initial_val (Pmode, LR_REGNO);
18253 /* Say whether a function is a candidate for sibcall handling or not. */
18255 static bool
18256 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18258 tree fntype;
18260 if (decl)
18261 fntype = TREE_TYPE (decl);
18262 else
18263 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18265 /* We can't do it if the called function has more vector parameters
18266 than the current function; there's nowhere to put the VRsave code. */
18267 if (TARGET_ALTIVEC_ABI
18268 && TARGET_ALTIVEC_VRSAVE
18269 && !(decl && decl == current_function_decl))
18271 function_args_iterator args_iter;
18272 tree type;
18273 int nvreg = 0;
18275 /* Functions with vector parameters are required to have a
18276 prototype, so the argument type info must be available
18277 here. */
18278 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18279 if (TREE_CODE (type) == VECTOR_TYPE
18280 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18281 nvreg++;
18283 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18284 if (TREE_CODE (type) == VECTOR_TYPE
18285 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18286 nvreg--;
18288 if (nvreg > 0)
18289 return false;
18292 /* Under the AIX ABI we can't allow calls to non-local functions,
18293 because the callee may have a different TOC pointer from the
18294 caller's, and there's no way to ensure we restore the TOC when we
18295 return. With the secure-plt SYSV ABI we can't make non-local
18296 calls when -fpic/PIC because the plt call stubs use r30. */
18297 if (DEFAULT_ABI == ABI_DARWIN
18298 || (DEFAULT_ABI == ABI_AIX
18299 && decl
18300 && !DECL_EXTERNAL (decl)
18301 && (*targetm.binds_local_p) (decl))
18302 || (DEFAULT_ABI == ABI_V4
18303 && (!TARGET_SECURE_PLT
18304 || !flag_pic
18305 || (decl
18306 && (*targetm.binds_local_p) (decl)))))
18308 tree attr_list = TYPE_ATTRIBUTES (fntype);
18310 if (!lookup_attribute ("longcall", attr_list)
18311 || lookup_attribute ("shortcall", attr_list))
18312 return true;
18315 return false;
18318 /* Return NULL if INSN is valid within a low-overhead loop.
18319 Otherwise return a string describing why doloop cannot be applied.
18320 PowerPC uses the COUNT register for branch on table instructions. */
18322 static const char *
18323 rs6000_invalid_within_doloop (const_rtx insn)
18325 if (CALL_P (insn))
18326 return "Function call in the loop.";
18328 if (JUMP_P (insn)
18329 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
18330 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
18331 return "Computed branch in the loop.";
18333 return NULL;
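/* Both restrictions exist because low-overhead loops on PowerPC run off
   the CTR register (e.g. via bdnz): a call can itself be compiled to
   bctrl and may clobber CTR, and switch tables branch through
   mtctr/bctr, so neither may appear inside a doloop body.
   (Explanatory note; the table case is the "branch on table" use the
   comment above refers to.)  */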
18336 static int
18337 rs6000_ra_ever_killed (void)
18339 rtx top;
18340 rtx reg;
18341 rtx insn;
18343 if (cfun->is_thunk)
18344 return 0;
18346 if (cfun->machine->lr_save_state)
18347 return cfun->machine->lr_save_state - 1;
18349 /* regs_ever_live has LR marked as used if any sibcalls are present,
18350 but this should not force saving and restoring in the
18351 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18352 clobbers LR, so that is inappropriate. */
18354 /* Also, the prologue can generate a store into LR that
18355 doesn't really count, like this:
18357 move LR->R0
18358 bcl to set PIC register
18359 move LR->R31
18360 move R0->LR
18362 When we're called from the epilogue, we need to avoid counting
18363 this as a store. */
18365 push_topmost_sequence ();
18366 top = get_insns ();
18367 pop_topmost_sequence ();
18368 reg = gen_rtx_REG (Pmode, LR_REGNO);
18370 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18372 if (INSN_P (insn))
18374 if (CALL_P (insn))
18376 if (!SIBLING_CALL_P (insn))
18377 return 1;
18379 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18380 return 1;
18381 else if (set_of (reg, insn) != NULL_RTX
18382 && !prologue_epilogue_contains (insn))
18383 return 1;
18386 return 0;
18389 /* Emit instructions needed to load the TOC register.
18390 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
18391 and there is a constant pool, or for SVR4 -fpic. */
18393 void
18394 rs6000_emit_load_toc_table (int fromprolog)
18396 rtx dest;
18397 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18399 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18401 char buf[30];
18402 rtx lab, tmp1, tmp2, got;
18404 lab = gen_label_rtx ();
18405 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18406 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18407 if (flag_pic == 2)
18408 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18409 else
18410 got = rs6000_got_sym ();
18411 tmp1 = tmp2 = dest;
18412 if (!fromprolog)
18414 tmp1 = gen_reg_rtx (Pmode);
18415 tmp2 = gen_reg_rtx (Pmode);
18417 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18418 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18419 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18420 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18422 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18424 emit_insn (gen_load_toc_v4_pic_si ());
18425 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18427 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18429 char buf[30];
18430 rtx temp0 = (fromprolog
18431 ? gen_rtx_REG (Pmode, 0)
18432 : gen_reg_rtx (Pmode));
18434 if (fromprolog)
18436 rtx symF, symL;
18438 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18439 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18441 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18442 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18444 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18445 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18446 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18448 else
18450 rtx tocsym, lab;
18452 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18453 lab = gen_label_rtx ();
18454 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18455 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18456 if (TARGET_LINK_STACK)
18457 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18458 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18460 emit_insn (gen_addsi3 (dest, temp0, dest));
18462 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18464 /* This is for AIX code running in non-PIC ELF32. */
18465 char buf[30];
18466 rtx realsym;
18467 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18468 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18470 emit_insn (gen_elf_high (dest, realsym));
18471 emit_insn (gen_elf_low (dest, dest, realsym));
18473 else
18475 gcc_assert (DEFAULT_ABI == ABI_AIX);
18477 if (TARGET_32BIT)
18478 emit_insn (gen_load_toc_aix_si (dest));
18479 else
18480 emit_insn (gen_load_toc_aix_di (dest));
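/* For reference, a sketch of the 32-bit SVR4 -fPIC sequence emitted via
   the load_toc_v4_PIC_* patterns above (register numbers illustrative):

       bcl 20,31,.LCF0          # set LR = address of .LCF0
   .LCF0:
       mflr 30                  # recover that address
       addis 30,30,.LCTOC1-.LCF0@ha
       addi 30,30,.LCTOC1-.LCF0@l

   while the non-PIC -mminimal-toc case is an absolute address load:

       lis 30,.LCTOC1@ha
       la 30,.LCTOC1@l(30)

   This is an illustrative sketch; the exact operands come from the
   corresponding patterns in rs6000.md.  */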
18484 /* Emit instructions to restore the link register after determining where
18485 its value has been stored. */
18487 void
18488 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18490 rs6000_stack_t *info = rs6000_stack_info ();
18491 rtx operands[2];
18493 operands[0] = source;
18494 operands[1] = scratch;
18496 if (info->lr_save_p)
18498 rtx frame_rtx = stack_pointer_rtx;
18499 HOST_WIDE_INT sp_offset = 0;
18500 rtx tmp;
18502 if (frame_pointer_needed
18503 || cfun->calls_alloca
18504 || info->total_size > 32767)
18506 tmp = gen_frame_mem (Pmode, frame_rtx);
18507 emit_move_insn (operands[1], tmp);
18508 frame_rtx = operands[1];
18510 else if (info->push_p)
18511 sp_offset = info->total_size;
18513 tmp = plus_constant (Pmode, frame_rtx,
18514 info->lr_save_offset + sp_offset);
18515 tmp = gen_frame_mem (Pmode, tmp);
18516 emit_move_insn (tmp, operands[0]);
18518 else
18519 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
18521 /* Freeze lr_save_p. We've just emitted rtl that depends on the
18522 state of lr_save_p so any change from here on would be a bug. In
18523 particular, stop rs6000_ra_ever_killed from considering the SET
18524 of lr we may have added just above. */
18525 cfun->machine->lr_save_state = info->lr_save_p + 1;
18528 static GTY(()) alias_set_type set = -1;
18530 alias_set_type
18531 get_TOC_alias_set (void)
18533 if (set == -1)
18534 set = new_alias_set ();
18535 return set;
18538 /* This returns nonzero if the current function uses the TOC. This is
18539 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
18540 is generated by the ABI_V4 load_toc_* patterns. */
18541 #if TARGET_ELF
18542 static int
18543 uses_TOC (void)
18545 rtx insn;
18547 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18548 if (INSN_P (insn))
18550 rtx pat = PATTERN (insn);
18551 int i;
18553 if (GET_CODE (pat) == PARALLEL)
18554 for (i = 0; i < XVECLEN (pat, 0); i++)
18556 rtx sub = XVECEXP (pat, 0, i);
18557 if (GET_CODE (sub) == USE)
18559 sub = XEXP (sub, 0);
18560 if (GET_CODE (sub) == UNSPEC
18561 && XINT (sub, 1) == UNSPEC_TOC)
18562 return 1;
18566 return 0;
18568 #endif
18571 create_TOC_reference (rtx symbol, rtx largetoc_reg)
18573 rtx tocrel, tocreg, hi;
18575 if (TARGET_DEBUG_ADDR)
18577 if (GET_CODE (symbol) == SYMBOL_REF)
18578 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
18579 XSTR (symbol, 0));
18580 else
18582 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
18583 GET_RTX_NAME (GET_CODE (symbol)));
18584 debug_rtx (symbol);
18588 if (!can_create_pseudo_p ())
18589 df_set_regs_ever_live (TOC_REGISTER, true);
18591 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
18592 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
18593 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
18594 return tocrel;
18596 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
18597 if (largetoc_reg != NULL)
18599 emit_move_insn (largetoc_reg, hi);
18600 hi = largetoc_reg;
18602 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
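/* So for -mcmodel=small the UNSPEC_TOCREL is usable directly as a
   16-bit displacement from r2, while for the larger code models it is
   split into a HIGH/LO_SUM pair that the md patterns render roughly as

       addis 9,2,sym@toc@ha
       ld 9,sym@toc@l(9)

   (illustrative registers; LARGETOC_REG, when supplied, holds the
   addis result).  */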
18605 /* Issue assembly directives that create a reference to the given DWARF
18606 FRAME_TABLE_LABEL from the current function section. */
18607 void
18608 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
18610 fprintf (asm_out_file, "\t.ref %s\n",
18611 (* targetm.strip_name_encoding) (frame_table_label));
18614 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18615 and the change to the stack pointer. */
18617 static void
18618 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
18620 rtvec p;
18621 int i;
18622 rtx regs[3];
18624 i = 0;
18625 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18626 if (hard_frame_needed)
18627 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
18628 if (!(REGNO (fp) == STACK_POINTER_REGNUM
18629 || (hard_frame_needed
18630 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
18631 regs[i++] = fp;
18633 p = rtvec_alloc (i);
18634 while (--i >= 0)
18636 rtx mem = gen_frame_mem (BLKmode, regs[i]);
18637 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
18640 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
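/* The tie emitted above is a pattern along the lines of

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                (set (mem:BLK (reg 31)) (const_int 0))])

   i.e. a wildcard store to all frame memory through each pointer, which
   keeps the scheduler and alias analysis from moving frame accesses
   across the stack/frame pointer update.  (Illustrative shape.)  */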
18643 /* Emit the correct code for allocating stack space, as insns.
18644 If COPY_REG, make sure a copy of the old stack pointer is left in it.
18645 The generated code may use hard register 0 as a temporary. */
18647 static void
18648 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
18650 rtx insn;
18651 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18652 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18653 rtx todec = gen_int_mode (-size, Pmode);
18654 rtx par, set, mem;
18656 if (INTVAL (todec) != -size)
18658 warning (0, "stack frame too large");
18659 emit_insn (gen_trap ());
18660 return;
18663 if (crtl->limit_stack)
18665 if (REG_P (stack_limit_rtx)
18666 && REGNO (stack_limit_rtx) > 1
18667 && REGNO (stack_limit_rtx) <= 31)
18669 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18670 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18671 const0_rtx));
18673 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18674 && TARGET_32BIT
18675 && DEFAULT_ABI == ABI_V4)
18677 rtx toload = gen_rtx_CONST (VOIDmode,
18678 gen_rtx_PLUS (Pmode,
18679 stack_limit_rtx,
18680 GEN_INT (size)));
18682 emit_insn (gen_elf_high (tmp_reg, toload));
18683 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18684 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18685 const0_rtx));
18687 else
18688 warning (0, "stack limit expression is not supported");
18691 if (copy_reg)
18693 if (copy_off != 0)
18694 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
18695 else
18696 emit_move_insn (copy_reg, stack_reg);
18699 if (size > 32767)
18701 /* Need a note here so that try_split doesn't get confused. */
18702 if (get_last_insn () == NULL_RTX)
18703 emit_note (NOTE_INSN_DELETED);
18704 insn = emit_move_insn (tmp_reg, todec);
18705 try_split (PATTERN (insn), insn, 0);
18706 todec = tmp_reg;
18709 insn = emit_insn (TARGET_32BIT
18710 ? gen_movsi_update_stack (stack_reg, stack_reg,
18711 todec, stack_reg)
18712 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18713 todec, stack_reg));
18714 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18715 it now and set the alias set/attributes. The above gen_*_update
18716 calls will generate a PARALLEL with the MEM set being the first
18717 operation. */
18718 par = PATTERN (insn);
18719 gcc_assert (GET_CODE (par) == PARALLEL);
18720 set = XVECEXP (par, 0, 0);
18721 gcc_assert (GET_CODE (set) == SET);
18722 mem = SET_DEST (set);
18723 gcc_assert (MEM_P (mem));
18724 MEM_NOTRAP_P (mem) = 1;
18725 set_mem_alias_set (mem, get_frame_alias_set ());
18727 RTX_FRAME_RELATED_P (insn) = 1;
18728 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18729 gen_rtx_SET (VOIDmode, stack_reg,
18730 gen_rtx_PLUS (Pmode, stack_reg,
18731 GEN_INT (-size))));
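/* The allocation itself is a single atomic decrement-and-store of the
   back chain, e.g. on 32-bit roughly

       stwu 1,-SIZE(1)          # SIZE representable in 16 bits

   or, for larger frames, with -SIZE materialized in r0 first and an
   indexed "stwux 1,1,0" used instead.  (Sketch; the real templates are
   the gen_mov*_update_stack patterns used above.)  */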
18734 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
18736 #if PROBE_INTERVAL > 32768
18737 #error Cannot use indexed addressing mode for stack probing
18738 #endif
18740 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
18741 inclusive. These are offsets from the current stack pointer. */
18743 static void
18744 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
18746 /* See if we have a constant small number of probes to generate. If so,
18747 that's the easy case. */
18748 if (first + size <= 32768)
18750 HOST_WIDE_INT i;
18752 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
18753 it exceeds SIZE. If only one probe is needed, this will not
18754 generate any code. Then probe at FIRST + SIZE. */
18755 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
18756 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18757 -(first + i)));
18759 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18760 -(first + size)));
18763 /* Otherwise, do the same as above, but in a loop. Note that we must be
18764 extra careful with variables wrapping around because we might be at
18765 the very top (or the very bottom) of the address space and we have
18766 to be able to handle this case properly; in particular, we use an
18767 equality test for the loop condition. */
18768 else
18770 HOST_WIDE_INT rounded_size;
18771 rtx r12 = gen_rtx_REG (Pmode, 12);
18772 rtx r0 = gen_rtx_REG (Pmode, 0);
18774 /* Sanity check for the addressing mode we're going to use. */
18775 gcc_assert (first <= 32768);
18777 /* Step 1: round SIZE to the previous multiple of the interval. */
18779 rounded_size = size & -PROBE_INTERVAL;
18782 /* Step 2: compute initial and final value of the loop counter. */
18784 /* TEST_ADDR = SP + FIRST. */
18785 emit_insn (gen_rtx_SET (VOIDmode, r12,
18786 plus_constant (Pmode, stack_pointer_rtx,
18787 -first)));
18789 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
18790 if (rounded_size > 32768)
18792 emit_move_insn (r0, GEN_INT (-rounded_size));
18793 emit_insn (gen_rtx_SET (VOIDmode, r0,
18794 gen_rtx_PLUS (Pmode, r12, r0)));
18796 else
18797 emit_insn (gen_rtx_SET (VOIDmode, r0,
18798 plus_constant (Pmode, r12, -rounded_size)));
18801 /* Step 3: the loop
18803 while (TEST_ADDR != LAST_ADDR)
18805 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
18806 probe at TEST_ADDR
18809 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
18810 until it is equal to ROUNDED_SIZE. */
18812 if (TARGET_64BIT)
18813 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
18814 else
18815 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
18818 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
18819 that SIZE is equal to ROUNDED_SIZE. */
18821 if (size != rounded_size)
18822 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
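/* Worked example for the constant case, assuming the usual 4096-byte
   PROBE_INTERVAL: with FIRST = 16384 and SIZE = 12000, the loop above
   probes at sp-20480 and sp-24576, and the final statement probes the
   exact end of the range at sp-28384.  */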
18826 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
18827 absolute addresses. */
18829 const char *
18830 output_probe_stack_range (rtx reg1, rtx reg2)
18832 static int labelno = 0;
18833 char loop_lab[32], end_lab[32];
18834 rtx xops[2];
18836 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
18837 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
18839 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
18841 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
18842 xops[0] = reg1;
18843 xops[1] = reg2;
18844 if (TARGET_64BIT)
18845 output_asm_insn ("cmpd 0,%0,%1", xops);
18846 else
18847 output_asm_insn ("cmpw 0,%0,%1", xops);
18849 fputs ("\tbeq 0,", asm_out_file);
18850 assemble_name_raw (asm_out_file, end_lab);
18851 fputc ('\n', asm_out_file);
18853 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
18854 xops[1] = GEN_INT (-PROBE_INTERVAL);
18855 output_asm_insn ("addi %0,%0,%1", xops);
18857 /* Probe at TEST_ADDR and branch. */
18858 xops[1] = gen_rtx_REG (Pmode, 0);
18859 output_asm_insn ("stw %1,0(%0)", xops);
18860 fprintf (asm_out_file, "\tb ");
18861 assemble_name_raw (asm_out_file, loop_lab);
18862 fputc ('\n', asm_out_file);
18864 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
18866 return "";
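/* The loop emitted above looks like this for 32-bit (r12 = test
   address, r0 = last address, 4096-byte probe interval assumed):

   .LPSRL0:
       cmpw 0,12,0
       beq 0,.LPSRE0
       addi 12,12,-4096
       stw 0,0(12)
       b .LPSRL0
   .LPSRE0:

   (Illustrative; labels and interval come from the code above.)  */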
18869 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
18870 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
18871 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
18872 deduce these equivalences by itself so it wasn't necessary to hold
18873 its hand so much. Don't be tempted to always supply d2_f_d_e with
18874 the actual cfa register, i.e. r31 when we are using a hard frame
18875 pointer. That fails when saving regs off r1, and sched moves the
18876 r31 setup past the reg saves. */
18878 static rtx
18879 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
18880 rtx reg2, rtx rreg)
18882 rtx real, temp;
18884 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
18886 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
18887 int i;
18889 gcc_checking_assert (val == 0);
18890 real = PATTERN (insn);
18891 if (GET_CODE (real) == PARALLEL)
18892 for (i = 0; i < XVECLEN (real, 0); i++)
18893 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18895 rtx set = XVECEXP (real, 0, i);
18897 RTX_FRAME_RELATED_P (set) = 1;
18899 RTX_FRAME_RELATED_P (insn) = 1;
18900 return insn;
18903 /* copy_rtx will not make unique copies of registers, so we need to
18904 ensure we don't have unwanted sharing here. */
18905 if (reg == reg2)
18906 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18908 if (reg == rreg)
18909 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18911 real = copy_rtx (PATTERN (insn));
18913 if (reg2 != NULL_RTX)
18914 real = replace_rtx (real, reg2, rreg);
18916 if (REGNO (reg) == STACK_POINTER_REGNUM)
18917 gcc_checking_assert (val == 0);
18918 else
18919 real = replace_rtx (real, reg,
18920 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
18921 STACK_POINTER_REGNUM),
18922 GEN_INT (val)));
18924 /* We expect that 'real' is either a SET or a PARALLEL containing
18925 SETs (and possibly other stuff). In a PARALLEL, all the SETs
18926 are important so they all have to be marked RTX_FRAME_RELATED_P. */
18928 if (GET_CODE (real) == SET)
18930 rtx set = real;
18932 temp = simplify_rtx (SET_SRC (set));
18933 if (temp)
18934 SET_SRC (set) = temp;
18935 temp = simplify_rtx (SET_DEST (set));
18936 if (temp)
18937 SET_DEST (set) = temp;
18938 if (GET_CODE (SET_DEST (set)) == MEM)
18940 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18941 if (temp)
18942 XEXP (SET_DEST (set), 0) = temp;
18945 else
18947 int i;
18949 gcc_assert (GET_CODE (real) == PARALLEL);
18950 for (i = 0; i < XVECLEN (real, 0); i++)
18951 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18953 rtx set = XVECEXP (real, 0, i);
18955 temp = simplify_rtx (SET_SRC (set));
18956 if (temp)
18957 SET_SRC (set) = temp;
18958 temp = simplify_rtx (SET_DEST (set));
18959 if (temp)
18960 SET_DEST (set) = temp;
18961 if (GET_CODE (SET_DEST (set)) == MEM)
18963 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18964 if (temp)
18965 XEXP (SET_DEST (set), 0) = temp;
18967 RTX_FRAME_RELATED_P (set) = 1;
18971 RTX_FRAME_RELATED_P (insn) = 1;
18972 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
18974 return insn;
18977 /* Returns an insn that has a vrsave set operation with the
18978 appropriate CLOBBERs. */
18980 static rtx
18981 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
18983 int nclobs, i;
18984 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
18985 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
18987 clobs[0]
18988 = gen_rtx_SET (VOIDmode,
18989 vrsave,
18990 gen_rtx_UNSPEC_VOLATILE (SImode,
18991 gen_rtvec (2, reg, vrsave),
18992 UNSPECV_SET_VRSAVE));
18994 nclobs = 1;
18996 /* We need to clobber the registers in the mask so the scheduler
18997 does not move sets to VRSAVE before sets of AltiVec registers.
18999 However, if the function receives nonlocal gotos, reload will set
19000 all call saved registers live. We will end up with:
19002 (set (reg 999) (mem))
19003 (parallel [ (set (reg vrsave) (unspec blah))
19004 (clobber (reg 999))])
19006 The clobber will cause the store into reg 999 to be dead, and
19007 flow will attempt to delete an epilogue insn. In this case, we
19008 need an unspec use/set of the register. */
19010 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19011 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19013 if (!epiloguep || call_used_regs [i])
19014 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
19015 gen_rtx_REG (V4SImode, i));
19016 else
19018 rtx reg = gen_rtx_REG (V4SImode, i);
19020 clobs[nclobs++]
19021 = gen_rtx_SET (VOIDmode,
19022 reg,
19023 gen_rtx_UNSPEC (V4SImode,
19024 gen_rtvec (1, reg), 27));
19028 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
19030 for (i = 0; i < nclobs; ++i)
19031 XVECEXP (insn, 0, i) = clobs[i];
19033 return insn;
19036 static rtx
19037 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
19039 rtx addr, mem;
19041 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
19042 mem = gen_frame_mem (GET_MODE (reg), addr);
19043 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
19046 static rtx
19047 gen_frame_load (rtx reg, rtx frame_reg, int offset)
19049 return gen_frame_set (reg, frame_reg, offset, false);
19052 static rtx
19053 gen_frame_store (rtx reg, rtx frame_reg, int offset)
19055 return gen_frame_set (reg, frame_reg, offset, true);
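/* E.g. gen_frame_store (gen_rtx_REG (SImode, 30), r1, 8) builds

     (set (mem/c:SI (plus:SI (reg:SI 1) (const_int 8)))
          (reg:SI 30))

   and gen_frame_load the mirror-image SET.  (Illustrative mode and
   registers.)  */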
19058 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
19059 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
19061 static rtx
19062 emit_frame_save (rtx frame_reg, enum machine_mode mode,
19063 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
19065 rtx reg, insn;
19067 /* Check that we aren't handed a case that needs register indexed addressing. */
19068 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
19069 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
19070 || (TARGET_E500_DOUBLE && mode == DFmode)
19071 || (TARGET_SPE_ABI
19072 && SPE_VECTOR_MODE (mode)
19073 && !SPE_CONST_OFFSET_OK (offset))));
19075 reg = gen_rtx_REG (mode, regno);
19076 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
19077 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
19078 NULL_RTX, NULL_RTX);
19081 /* Emit an offset memory reference suitable for a frame store, while
19082 converting to a valid addressing mode. */
19084 static rtx
19085 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
19087 rtx int_rtx, offset_rtx;
19089 int_rtx = GEN_INT (offset);
19091 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
19092 || (TARGET_E500_DOUBLE && mode == DFmode))
19094 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
19095 emit_move_insn (offset_rtx, int_rtx);
19097 else
19098 offset_rtx = int_rtx;
19100 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
19103 #ifndef TARGET_FIX_AND_CONTINUE
19104 #define TARGET_FIX_AND_CONTINUE 0
19105 #endif
19107 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
19108 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
19109 #define LAST_SAVRES_REGISTER 31
19110 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
19112 enum {
19113 SAVRES_LR = 0x1,
19114 SAVRES_SAVE = 0x2,
19115 SAVRES_REG = 0x0c,
19116 SAVRES_GPR = 0,
19117 SAVRES_FPR = 4,
19118 SAVRES_VR = 8
19121 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
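/* A SEL value indexes the second dimension directly; e.g. saving FPRs
   together with LR is SAVRES_SAVE | SAVRES_FPR | SAVRES_LR == 0x7.
   The largest encodable value is SAVRES_VR | SAVRES_SAVE | SAVRES_LR
   == 0xb, hence the 12 slots per starting register.  */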
19123 /* Temporary holding space for an out-of-line register save/restore
19124 routine name. */
19125 static char savres_routine_name[30];
19127 /* Return the name for an out-of-line register save/restore routine.
19128 SEL encodes the register class and save vs. restore via the SAVRES_* flags. */
19130 static char *
19131 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
19133 const char *prefix = "";
19134 const char *suffix = "";
19136 /* Different targets are supposed to define
19137 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
19138 routine name could be defined with:
19140 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
19142 This is a nice idea in theory, but in reality, things are
19143 complicated in several ways:
19145 - ELF targets have save/restore routines for GPRs.
19147 - SPE targets use different prefixes for 32/64-bit registers, and
19148 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
19150 - PPC64 ELF targets have routines for save/restore of GPRs that
19151 differ in what they do with the link register, so having a set
19152 prefix doesn't work. (We only use one of the save routines at
19153 the moment, though.)
19155 - PPC32 ELF targets have "exit" versions of the restore routines
19156 that restore the link register and can save some extra space.
19157 These require an extra suffix. (There are also "tail" versions
19158 of the restore routines and "GOT" versions of the save routines,
19159 but we don't generate those at present. Same problems apply,
19160 though.)
19162 We deal with all this by synthesizing our own prefix/suffix and
19163 using that for the simple sprintf call shown above. */
19164 if (TARGET_SPE)
19166 /* No floating point saves on the SPE. */
19167 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
19169 if ((sel & SAVRES_SAVE))
19170 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
19171 else
19172 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
19174 if ((sel & SAVRES_LR))
19175 suffix = "_x";
19177 else if (DEFAULT_ABI == ABI_V4)
19179 if (TARGET_64BIT)
19180 goto aix_names;
19182 if ((sel & SAVRES_REG) == SAVRES_GPR)
19183 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
19184 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19185 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
19186 else if ((sel & SAVRES_REG) == SAVRES_VR)
19187 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19188 else
19189 abort ();
19191 if ((sel & SAVRES_LR))
19192 suffix = "_x";
19194 else if (DEFAULT_ABI == ABI_AIX)
19196 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
19197 /* No out-of-line save/restore routines for GPRs on AIX. */
19198 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
19199 #endif
19201 aix_names:
19202 if ((sel & SAVRES_REG) == SAVRES_GPR)
19203 prefix = ((sel & SAVRES_SAVE)
19204 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
19205 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
19206 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19208 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
19209 if ((sel & SAVRES_LR))
19210 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
19211 else
19212 #endif
19214 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
19215 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
19218 else if ((sel & SAVRES_REG) == SAVRES_VR)
19219 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19220 else
19221 abort ();
19224 if (DEFAULT_ABI == ABI_DARWIN)
19226 /* The Darwin approach is (slightly) different, in order to be
19227 compatible with code generated by the system toolchain. There is a
19228 single symbol for the start of the save sequence, and the code here
19229 embeds an offset into that code on the basis of the first register
19230 to be saved. */
19231 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
19232 if ((sel & SAVRES_REG) == SAVRES_GPR)
19233 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
19234 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
19235 (regno - 13) * 4, prefix, regno);
19236 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19237 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
19238 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
19239 else if ((sel & SAVRES_REG) == SAVRES_VR)
19240 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
19241 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
19242 else
19243 abort ();
19245 else
19246 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
19248 return savres_routine_name;
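/* Examples of names produced here (using first register 29): 32-bit
   SVR4 "_savegpr_29", or "_restgpr_29_x" for the exit variant that
   also restores LR; AIX/PPC64 "_savegpr0_29" (saves LR) versus
   "_savegpr1_29"; Darwin "*saveGPR+64 ; save r29-r31".  */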
19251 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
19252 SEL encodes the register class and save vs. restore via the SAVRES_* flags. */
19254 static rtx
19255 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
19257 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
19258 ? info->first_gp_reg_save
19259 : (sel & SAVRES_REG) == SAVRES_FPR
19260 ? info->first_fp_reg_save - 32
19261 : (sel & SAVRES_REG) == SAVRES_VR
19262 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
19263 : -1);
19264 rtx sym;
19265 int select = sel;
19267 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
19268 versions of the gpr routines. */
19269 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
19270 && info->spe_64bit_regs_used)
19271 select ^= SAVRES_FPR ^ SAVRES_GPR;
19273 /* Don't generate bogus routine names. */
19274 gcc_assert (FIRST_SAVRES_REGISTER <= regno
19275 && regno <= LAST_SAVRES_REGISTER
19276 && select >= 0 && select <= 12);
19278 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
19280 if (sym == NULL)
19282 char *name;
19284 name = rs6000_savres_routine_name (info, regno, sel);
19286 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
19287 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
19288 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
19291 return sym;
19294 /* Emit a sequence of insns, including a stack tie if needed, for
19295 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
19296 reset the stack pointer, but move the base of the frame into
19297 reg UPDT_REGNO for use by out-of-line register restore routines. */
19299 static rtx
19300 rs6000_emit_stack_reset (rs6000_stack_t *info,
19301 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
19302 unsigned updt_regno)
19304 rtx updt_reg_rtx;
19306 /* This blockage is needed so that sched doesn't decide to move
19307 the sp change before the register restores. */
19308 if (DEFAULT_ABI == ABI_V4
19309 || (TARGET_SPE_ABI
19310 && info->spe_64bit_regs_used != 0
19311 && info->first_gp_reg_save != 32))
19312 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
19314 /* If we are restoring registers out-of-line, we will be using the
19315 "exit" variants of the restore routines, which will reset the
19316 stack for us. But we do need to point updt_reg into the
19317 right place for those routines. */
19318 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
19320 if (frame_off != 0)
19321 return emit_insn (gen_add3_insn (updt_reg_rtx,
19322 frame_reg_rtx, GEN_INT (frame_off)));
19323 else if (REGNO (frame_reg_rtx) != updt_regno)
19324 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
19326 return NULL_RTX;
19329 /* Return the register number used as a pointer by out-of-line
19330 save/restore functions. */
19332 static inline unsigned
19333 ptr_regno_for_savres (int sel)
19335 if (DEFAULT_ABI == ABI_AIX)
19336 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
19337 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
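/* I.e. under AIX the FPR routines and the LR-saving variants expect
   their pointer in r1 and everything else uses r12; elsewhere only
   Darwin's FPR routines use r1, with r11 for the rest.  */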
19340 /* Construct a parallel rtx describing the effect of a call to an
19341 out-of-line register save/restore routine, and emit the insn
19342 or jump_insn as appropriate. */
19344 static rtx
19345 rs6000_emit_savres_rtx (rs6000_stack_t *info,
19346 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
19347 enum machine_mode reg_mode, int sel)
19349 int i;
19350 int offset, start_reg, end_reg, n_regs, use_reg;
19351 int reg_size = GET_MODE_SIZE (reg_mode);
19352 rtx sym;
19353 rtvec p;
19354 rtx par, insn;
19356 offset = 0;
19357 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19358 ? info->first_gp_reg_save
19359 : (sel & SAVRES_REG) == SAVRES_FPR
19360 ? info->first_fp_reg_save
19361 : (sel & SAVRES_REG) == SAVRES_VR
19362 ? info->first_altivec_reg_save
19363 : -1);
19364 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19365 ? 32
19366 : (sel & SAVRES_REG) == SAVRES_FPR
19367 ? 64
19368 : (sel & SAVRES_REG) == SAVRES_VR
19369 ? LAST_ALTIVEC_REGNO + 1
19370 : -1);
19371 n_regs = end_reg - start_reg;
19372 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
19373 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
19374 + n_regs);
19376 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19377 RTVEC_ELT (p, offset++) = ret_rtx;
19379 RTVEC_ELT (p, offset++)
19380 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
19382 sym = rs6000_savres_routine_sym (info, sel);
19383 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
19385 use_reg = ptr_regno_for_savres (sel);
19386 if ((sel & SAVRES_REG) == SAVRES_VR)
19388 /* Vector regs are saved/restored using [reg+reg] addressing. */
19389 RTVEC_ELT (p, offset++)
19390 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19391 RTVEC_ELT (p, offset++)
19392 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
19394 else
19395 RTVEC_ELT (p, offset++)
19396 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19398 for (i = 0; i < end_reg - start_reg; i++)
19399 RTVEC_ELT (p, i + offset)
19400 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
19401 frame_reg_rtx, save_area_offset + reg_size * i,
19402 (sel & SAVRES_SAVE) != 0);
19404 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19405 RTVEC_ELT (p, i + offset)
19406 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
19408 par = gen_rtx_PARALLEL (VOIDmode, p);
19410 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19412 insn = emit_jump_insn (par);
19413 JUMP_LABEL (insn) = ret_rtx;
19415 else
19416 insn = emit_insn (par);
19417 return insn;
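/* The PARALLEL built here has a shape like (restore-with-LR case,
   illustrative registers and routine name):

     (parallel [(return)
                (clobber (reg:SI 65))               ; LR
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg:SI 11))                   ; pointer reg
                (set (reg:SI 29) (mem ...))
                ...
                (set (reg:SI 31) (mem ...))])

   and is emitted as a jump_insn, since the exit routine's blr ends
   the function.  */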
19420 /* Determine whether GP register REG is really used. */
19422 static bool
19423 rs6000_reg_live_or_pic_offset_p (int reg)
19425 /* If the function calls eh_return, claim as used all the registers that
19426 would otherwise be checked for liveness. This is required for the PIC offset
19427 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
19428 register allocation purposes in this case. */
19430 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
19431 && (!call_used_regs[reg]
19432 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19433 && !TARGET_SINGLE_PIC_BASE
19434 && TARGET_TOC && TARGET_MINIMAL_TOC)))
19435 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19436 && !TARGET_SINGLE_PIC_BASE
19437 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19438 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
19441 /* Emit function prologue as insns. */
19443 void
19444 rs6000_emit_prologue (void)
19446 rs6000_stack_t *info = rs6000_stack_info ();
19447 enum machine_mode reg_mode = Pmode;
19448 int reg_size = TARGET_32BIT ? 4 : 8;
19449 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19450 rtx frame_reg_rtx = sp_reg_rtx;
19451 unsigned int cr_save_regno;
19452 rtx cr_save_rtx = NULL_RTX;
19453 rtx insn;
19454 int strategy;
19455 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19456 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19457 && call_used_regs[STATIC_CHAIN_REGNUM]);
19458 /* Offset to top of frame for frame_reg and sp respectively. */
19459 HOST_WIDE_INT frame_off = 0;
19460 HOST_WIDE_INT sp_off = 0;
19462 #ifdef ENABLE_CHECKING
19463 /* Track and check usage of r0, r11, r12. */
19464 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
19465 #define START_USE(R) do \
19467 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19468 reg_inuse |= 1 << (R); \
19469 } while (0)
19470 #define END_USE(R) do \
19472 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19473 reg_inuse &= ~(1 << (R)); \
19474 } while (0)
19475 #define NOT_INUSE(R) do \
19477 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19478 } while (0)
19479 #else
19480 #define START_USE(R) do {} while (0)
19481 #define END_USE(R) do {} while (0)
19482 #define NOT_INUSE(R) do {} while (0)
19483 #endif
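/* Typical usage of the checking macros above:

     START_USE (0);
     ... emit insns that use r0 ...
     END_USE (0);

   With checking enabled, overlapping claims on r0, r11 or r12 in this
   function then trip a gcc_assert.  */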
19485 if (flag_stack_usage_info)
19486 current_function_static_stack_size = info->total_size;
19488 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
19489 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
19491 if (TARGET_FIX_AND_CONTINUE)
19493 /* gdb on darwin arranges to forward a function from the old
19494 address by modifying the first 5 instructions of the function
19495 to branch to the overriding function. This is necessary to
19496 permit function pointers that point to the old function to
19497 actually forward to the new function. */
19498 emit_insn (gen_nop ());
19499 emit_insn (gen_nop ());
19500 emit_insn (gen_nop ());
19501 emit_insn (gen_nop ());
19502 emit_insn (gen_nop ());
19505 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
19507 reg_mode = V2SImode;
19508 reg_size = 8;
19511 /* Handle world saves specially here. */
19512 if (WORLD_SAVE_P (info))
19514 int i, j, sz;
19515 rtx treg;
19516 rtvec p;
19517 rtx reg0;
19519 /* save_world expects lr in r0. */
19520 reg0 = gen_rtx_REG (Pmode, 0);
19521 if (info->lr_save_p)
19523 insn = emit_move_insn (reg0,
19524 gen_rtx_REG (Pmode, LR_REGNO));
19525 RTX_FRAME_RELATED_P (insn) = 1;
19528 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
19529 assumptions about the offsets of various bits of the stack
19530 frame. */
19531 gcc_assert (info->gp_save_offset == -220
19532 && info->fp_save_offset == -144
19533 && info->lr_save_offset == 8
19534 && info->cr_save_offset == 4
19535 && info->push_p
19536 && info->lr_save_p
19537 && (!crtl->calls_eh_return
19538 || info->ehrd_offset == -432)
19539 && info->vrsave_save_offset == -224
19540 && info->altivec_save_offset == -416);
19542 treg = gen_rtx_REG (SImode, 11);
19543 emit_move_insn (treg, GEN_INT (-info->total_size));
19545 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
19546 in R11. It also clobbers R12, so beware! */
19548 /* Preserve CR2 for save_world prologues. */
19549 sz = 5;
19550 sz += 32 - info->first_gp_reg_save;
19551 sz += 64 - info->first_fp_reg_save;
19552 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
19553 p = rtvec_alloc (sz);
19554 j = 0;
19555 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
19556 gen_rtx_REG (SImode,
19557 LR_REGNO));
19558 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
19559 gen_rtx_SYMBOL_REF (Pmode,
19560 "*save_world"));
19561 /* We do floats first so that the instruction pattern matches
19562 properly. */
19563 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19564 RTVEC_ELT (p, j++)
19565 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19566 ? DFmode : SFmode,
19567 info->first_fp_reg_save + i),
19568 frame_reg_rtx,
19569 info->fp_save_offset + frame_off + 8 * i);
19570 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
19571 RTVEC_ELT (p, j++)
19572 = gen_frame_store (gen_rtx_REG (V4SImode,
19573 info->first_altivec_reg_save + i),
19574 frame_reg_rtx,
19575 info->altivec_save_offset + frame_off + 16 * i);
19576 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19577 RTVEC_ELT (p, j++)
19578 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19579 frame_reg_rtx,
19580 info->gp_save_offset + frame_off + reg_size * i);
19582 /* CR register traditionally saved as CR2. */
19583 RTVEC_ELT (p, j++)
19584 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
19585 frame_reg_rtx, info->cr_save_offset + frame_off);
19586 /* Explain about use of R0. */
19587 if (info->lr_save_p)
19588 RTVEC_ELT (p, j++)
19589 = gen_frame_store (reg0,
19590 frame_reg_rtx, info->lr_save_offset + frame_off);
19591 /* Explain what happens to the stack pointer. */
19593 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
19594 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
19597 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19598 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19599 treg, GEN_INT (-info->total_size));
19600 sp_off = frame_off = info->total_size;
19603 strategy = info->savres_strategy;
19605 /* For V.4, update stack before we do any saving and set back pointer. */
19606 if (! WORLD_SAVE_P (info)
19607 && info->push_p
19608 && (DEFAULT_ABI == ABI_V4
19609 || crtl->calls_eh_return))
19611 bool need_r11 = (TARGET_SPE
19612 ? (!(strategy & SAVE_INLINE_GPRS)
19613 && info->spe_64bit_regs_used == 0)
19614 : (!(strategy & SAVE_INLINE_FPRS)
19615 || !(strategy & SAVE_INLINE_GPRS)
19616 || !(strategy & SAVE_INLINE_VRS)));
19617 int ptr_regno = -1;
19618 rtx ptr_reg = NULL_RTX;
19619 int ptr_off = 0;
19621 if (info->total_size < 32767)
19622 frame_off = info->total_size;
19623 else if (need_r11)
19624 ptr_regno = 11;
19625 else if (info->cr_save_p
19626 || info->lr_save_p
19627 || info->first_fp_reg_save < 64
19628 || info->first_gp_reg_save < 32
19629 || info->altivec_size != 0
19630 || info->vrsave_mask != 0
19631 || crtl->calls_eh_return)
19632 ptr_regno = 12;
19633 else
19635 /* The prologue won't be saving any regs so there is no need
19636 to set up a frame register to access any frame save area.
19637 We also won't be using frame_off anywhere below, but set
19638 the correct value anyway to protect against future
19639 changes to this function. */
19640 frame_off = info->total_size;
19642 if (ptr_regno != -1)
19644 /* Set up the frame offset to that needed by the first
19645 out-of-line save function. */
19646 START_USE (ptr_regno);
19647 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19648 frame_reg_rtx = ptr_reg;
19649 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
19650 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
19651 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
19652 ptr_off = info->gp_save_offset + info->gp_size;
19653 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
19654 ptr_off = info->altivec_save_offset + info->altivec_size;
19655 frame_off = -ptr_off;
19657 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
19658 sp_off = info->total_size;
19659 if (frame_reg_rtx != sp_reg_rtx)
19660 rs6000_emit_stack_tie (frame_reg_rtx, false);
19663 /* If we use the link register, get it into r0. */
19664 if (!WORLD_SAVE_P (info) && info->lr_save_p)
19666 rtx addr, reg, mem;
19668 reg = gen_rtx_REG (Pmode, 0);
19669 START_USE (0);
19670 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
19671 RTX_FRAME_RELATED_P (insn) = 1;
19673 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
19674 | SAVE_NOINLINE_FPRS_SAVES_LR)))
19676 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19677 GEN_INT (info->lr_save_offset + frame_off));
19678 mem = gen_rtx_MEM (Pmode, addr);
19679 /* This should not be of rs6000_sr_alias_set, because of
19680 __builtin_return_address. */
19682 insn = emit_move_insn (mem, reg);
19683 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19684 NULL_RTX, NULL_RTX);
19685 END_USE (0);
19689 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
19690 r12 will be needed by out-of-line gpr save. */
19691 cr_save_regno = (DEFAULT_ABI == ABI_AIX
19692 && !(strategy & (SAVE_INLINE_GPRS
19693 | SAVE_NOINLINE_GPRS_SAVES_LR))
19694 ? 11 : 12);
19695 if (!WORLD_SAVE_P (info)
19696 && info->cr_save_p
19697 && REGNO (frame_reg_rtx) != cr_save_regno
19698 && !(using_static_chain_p && cr_save_regno == 11))
19700 rtx set;
19702 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
19703 START_USE (cr_save_regno);
19704 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19705 RTX_FRAME_RELATED_P (insn) = 1;
19706 /* Now, there's no way that dwarf2out_frame_debug_expr is going
19707 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
19708 But that's OK. All we have to do is specify that _one_ condition
19709 code register is saved in this stack slot. The thrower's epilogue
19710 will then restore all the call-saved registers.
19711 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
19712 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
19713 gen_rtx_REG (SImode, CR2_REGNO));
19714 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19717 /* Do any required saving of fpr's. If only one or two to save, do
19718 it ourselves. Otherwise, call function. */
19719 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
19721 int i;
19722 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19723 if (save_reg_p (info->first_fp_reg_save + i))
19724 emit_frame_save (frame_reg_rtx,
19725 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19726 ? DFmode : SFmode),
19727 info->first_fp_reg_save + i,
19728 info->fp_save_offset + frame_off + 8 * i,
19729 sp_off - frame_off);
19731 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
19733 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
19734 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
19735 unsigned ptr_regno = ptr_regno_for_savres (sel);
19736 rtx ptr_reg = frame_reg_rtx;
19738 if (REGNO (frame_reg_rtx) == ptr_regno)
19739 gcc_checking_assert (frame_off == 0);
19740 else
19742 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19743 NOT_INUSE (ptr_regno);
19744 emit_insn (gen_add3_insn (ptr_reg,
19745 frame_reg_rtx, GEN_INT (frame_off)));
19747 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19748 info->fp_save_offset,
19749 info->lr_save_offset,
19750 DFmode, sel);
19751 rs6000_frame_related (insn, ptr_reg, sp_off,
19752 NULL_RTX, NULL_RTX);
19753 if (lr)
19754 END_USE (0);
19757 /* Save GPRs. This is done as a PARALLEL if we are using
19758 the store-multiple instructions. */
19759 if (!WORLD_SAVE_P (info)
19760 && TARGET_SPE_ABI
19761 && info->spe_64bit_regs_used != 0
19762 && info->first_gp_reg_save != 32)
19764 int i;
19765 rtx spe_save_area_ptr;
19766 HOST_WIDE_INT save_off;
19767 int ool_adjust = 0;
19769 /* Determine whether we can address all of the registers that need
19770 to be saved with an offset from frame_reg_rtx that fits in
19771 the small const field for SPE memory instructions. */
19772 int spe_regs_addressable
19773 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
19774 + reg_size * (32 - info->first_gp_reg_save - 1))
19775 && (strategy & SAVE_INLINE_GPRS));
19777 if (spe_regs_addressable)
19779 spe_save_area_ptr = frame_reg_rtx;
19780 save_off = frame_off;
19782 else
19784 /* Make r11 point to the start of the SPE save area. We need
19785 to be careful here if r11 is holding the static chain. If
19786 it is, then temporarily save it in r0. */
19787 HOST_WIDE_INT offset;
19789 if (!(strategy & SAVE_INLINE_GPRS))
19790 ool_adjust = 8 * (info->first_gp_reg_save
19791 - (FIRST_SAVRES_REGISTER + 1));
19792 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
19793 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
19794 save_off = frame_off - offset;
19796 if (using_static_chain_p)
19798 rtx r0 = gen_rtx_REG (Pmode, 0);
19800 START_USE (0);
19801 gcc_assert (info->first_gp_reg_save > 11);
19803 emit_move_insn (r0, spe_save_area_ptr);
19805 else if (REGNO (frame_reg_rtx) != 11)
19806 START_USE (11);
19808 emit_insn (gen_addsi3 (spe_save_area_ptr,
19809 frame_reg_rtx, GEN_INT (offset)));
19810 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
19811 frame_off = -info->spe_gp_save_offset + ool_adjust;
19814 if ((strategy & SAVE_INLINE_GPRS))
19816 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19817 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19818 emit_frame_save (spe_save_area_ptr, reg_mode,
19819 info->first_gp_reg_save + i,
19820 (info->spe_gp_save_offset + save_off
19821 + reg_size * i),
19822 sp_off - save_off);
19824 else
19826 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
19827 info->spe_gp_save_offset + save_off,
19828 0, reg_mode,
19829 SAVRES_SAVE | SAVRES_GPR);
19831 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
19832 NULL_RTX, NULL_RTX);
19835 /* Move the static chain pointer back. */
19836 if (!spe_regs_addressable)
19838 if (using_static_chain_p)
19840 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
19841 END_USE (0);
19843 else if (REGNO (frame_reg_rtx) != 11)
19844 END_USE (11);
19847 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
19849 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
19850 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
19851 unsigned ptr_regno = ptr_regno_for_savres (sel);
19852 rtx ptr_reg = frame_reg_rtx;
19853 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
19854 int end_save = info->gp_save_offset + info->gp_size;
19855 int ptr_off;
19857 if (!ptr_set_up)
19858 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19860 /* Need to adjust r11 (r12) if we saved any FPRs. */
19861 if (end_save + frame_off != 0)
19863 rtx offset = GEN_INT (end_save + frame_off);
19865 if (ptr_set_up)
19866 frame_off = -end_save;
19867 else
19868 NOT_INUSE (ptr_regno);
19869 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
19871 else if (!ptr_set_up)
19873 NOT_INUSE (ptr_regno);
19874 emit_move_insn (ptr_reg, frame_reg_rtx);
19876 ptr_off = -end_save;
19877 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19878 info->gp_save_offset + ptr_off,
19879 info->lr_save_offset + ptr_off,
19880 reg_mode, sel);
19881 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
19882 NULL_RTX, NULL_RTX);
19883 if (lr)
19884 END_USE (0);
19886 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
19888 rtvec p;
19889 int i;
19890 p = rtvec_alloc (32 - info->first_gp_reg_save);
19891 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19892 RTVEC_ELT (p, i)
19893 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19894 frame_reg_rtx,
19895 info->gp_save_offset + frame_off + reg_size * i);
19896 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19897 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19898 NULL_RTX, NULL_RTX);
19900 else if (!WORLD_SAVE_P (info))
19902 int i;
19903 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19904 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19905 emit_frame_save (frame_reg_rtx, reg_mode,
19906 info->first_gp_reg_save + i,
19907 info->gp_save_offset + frame_off + reg_size * i,
19908 sp_off - frame_off);
19911 if (crtl->calls_eh_return)
19913 unsigned int i;
19914 rtvec p;
19916 for (i = 0; ; ++i)
19918 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19919 if (regno == INVALID_REGNUM)
19920 break;
19923 p = rtvec_alloc (i);
19925 for (i = 0; ; ++i)
19927 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19928 if (regno == INVALID_REGNUM)
19929 break;
19931 insn
19932 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
19933 sp_reg_rtx,
19934 info->ehrd_offset + sp_off + reg_size * (int) i);
19935 RTVEC_ELT (p, i) = insn;
19936 RTX_FRAME_RELATED_P (insn) = 1;
19939 insn = emit_insn (gen_blockage ());
19940 RTX_FRAME_RELATED_P (insn) = 1;
19941 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
19944 /* In the AIX ABI we need to make sure r2 is really saved. */
19945 if (TARGET_AIX && crtl->calls_eh_return)
19947 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
19948 rtx save_insn, join_insn, note;
19949 long toc_restore_insn;
19951 tmp_reg = gen_rtx_REG (Pmode, 11);
19952 tmp_reg_si = gen_rtx_REG (SImode, 11);
19953 if (using_static_chain_p)
19955 START_USE (0);
19956 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
19958 else
19959 START_USE (11);
19960 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
19961 /* Peek at the instruction to which this function returns. If it's
19962 restoring r2, then we know we've already saved r2. We can't
19963 unconditionally save r2 because the value we have will already
19964 be updated if we arrived at this function via a plt call or
19965 toc adjusting stub. */
19966 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
19967 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
19968 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
19969 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
19970 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
19971 validate_condition_mode (EQ, CCUNSmode);
19972 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
19973 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19974 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
19975 toc_save_done = gen_label_rtx ();
19976 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
19977 gen_rtx_EQ (VOIDmode, compare_result,
19978 const0_rtx),
19979 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
19980 pc_rtx);
19981 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
19982 JUMP_LABEL (jump) = toc_save_done;
19983 LABEL_NUSES (toc_save_done) += 1;
19985 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
19986 TOC_REGNUM, frame_off + 5 * reg_size,
19987 sp_off - frame_off);
19989 emit_label (toc_save_done);
19991 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
19992 have a CFG that has different saves along different paths.
19993 Move the note to a dummy blockage insn, which describes that
19994 R2 is unconditionally saved after the label. */
19995 /* ??? An alternate representation might be a special insn pattern
19996 containing both the branch and the store. That might give the
19997 code that minimizes the number of DW_CFA_advance opcodes more
19998 freedom in placing the annotations. */
19999 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
20000 if (note)
20001 remove_note (save_insn, note);
20002 else
20003 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
20004 copy_rtx (PATTERN (save_insn)), NULL_RTX);
20005 RTX_FRAME_RELATED_P (save_insn) = 0;
20007 join_insn = emit_insn (gen_blockage ());
20008 REG_NOTES (join_insn) = note;
20009 RTX_FRAME_RELATED_P (join_insn) = 1;
20011 if (using_static_chain_p)
20013 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
20014 END_USE (0);
20016 else
20017 END_USE (11);
20020 /* Save CR if we use any that must be preserved. */
20021 if (!WORLD_SAVE_P (info) && info->cr_save_p)
20023 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20024 GEN_INT (info->cr_save_offset + frame_off));
20025 rtx mem = gen_frame_mem (SImode, addr);
20026 /* See the large comment above about why CR2_REGNO is used. */
20027 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
20029 /* If we didn't copy cr before, do so now using r0. */
20030 if (cr_save_rtx == NULL_RTX)
20032 rtx set;
20034 START_USE (0);
20035 cr_save_rtx = gen_rtx_REG (SImode, 0);
20036 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20037 RTX_FRAME_RELATED_P (insn) = 1;
20038 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
20039 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20041 insn = emit_move_insn (mem, cr_save_rtx);
20042 END_USE (REGNO (cr_save_rtx));
20044 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20045 NULL_RTX, NULL_RTX);
20048 /* Update stack and set back pointer unless this is V.4,
20049 for which it was done previously. */
20050 if (!WORLD_SAVE_P (info) && info->push_p
20051 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
20053 rtx ptr_reg = NULL;
20054 int ptr_off = 0;
20056 /* If saving altivec regs we need to be able to address all save
20057 locations using a 16-bit offset. */
20058 if ((strategy & SAVE_INLINE_VRS) == 0
20059 || (info->altivec_size != 0
20060 && (info->altivec_save_offset + info->altivec_size - 16
20061 + info->total_size - frame_off) > 32767)
20062 || (info->vrsave_mask != 0
20063 && (info->vrsave_save_offset
20064 + info->total_size - frame_off) > 32767))
20066 int sel = SAVRES_SAVE | SAVRES_VR;
20067 unsigned ptr_regno = ptr_regno_for_savres (sel);
20069 if (using_static_chain_p
20070 && ptr_regno == STATIC_CHAIN_REGNUM)
20071 ptr_regno = 12;
20072 if (REGNO (frame_reg_rtx) != ptr_regno)
20073 START_USE (ptr_regno);
20074 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20075 frame_reg_rtx = ptr_reg;
20076 ptr_off = info->altivec_save_offset + info->altivec_size;
20077 frame_off = -ptr_off;
20079 else if (REGNO (frame_reg_rtx) == 1)
20080 frame_off = info->total_size;
20081 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20082 sp_off = info->total_size;
20083 if (frame_reg_rtx != sp_reg_rtx)
20084 rs6000_emit_stack_tie (frame_reg_rtx, false);
20087 /* Set frame pointer, if needed. */
20088 if (frame_pointer_needed)
20090 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
20091 sp_reg_rtx);
20092 RTX_FRAME_RELATED_P (insn) = 1;
20095 /* Save AltiVec registers if needed. Save here because the red zone does
20096 not always include AltiVec registers. */
20097 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20098 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
20100 int end_save = info->altivec_save_offset + info->altivec_size;
20101 int ptr_off;
20102 /* Oddly, the vector save/restore functions point r0 at the end
20103 of the save area, then use r11 or r12 to load offsets for
20104 [reg+reg] addressing. */
20105 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20106 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
20107 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20109 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20110 NOT_INUSE (0);
20111 if (end_save + frame_off != 0)
20113 rtx offset = GEN_INT (end_save + frame_off);
20115 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20117 else
20118 emit_move_insn (ptr_reg, frame_reg_rtx);
20120 ptr_off = -end_save;
20121 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20122 info->altivec_save_offset + ptr_off,
20123 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
20124 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
20125 NULL_RTX, NULL_RTX);
20126 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20128 /* The oddity mentioned above clobbered our frame reg. */
20129 emit_move_insn (frame_reg_rtx, ptr_reg);
20130 frame_off = ptr_off;
20133 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20134 && info->altivec_size != 0)
20136 int i;
20138 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20139 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20141 rtx areg, savereg, mem;
20142 int offset;
20144 offset = (info->altivec_save_offset + frame_off
20145 + 16 * (i - info->first_altivec_reg_save));
20147 savereg = gen_rtx_REG (V4SImode, i);
20149 NOT_INUSE (0);
20150 areg = gen_rtx_REG (Pmode, 0);
20151 emit_move_insn (areg, GEN_INT (offset));
20153 /* AltiVec addressing mode is [reg+reg]. */
20154 mem = gen_frame_mem (V4SImode,
20155 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
20157 insn = emit_move_insn (mem, savereg);
20159 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20160 areg, GEN_INT (offset));
20164 /* VRSAVE is a bit vector representing which AltiVec registers
20165 are used. The OS uses this to determine which vector
20166 registers to save on a context switch. We need to save
20167 VRSAVE on the stack frame, add whatever AltiVec registers we
20168 used in this function, and do the corresponding magic in the
20169 epilogue. */
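/* For example, given ALTIVEC_REG_BIT's numbering (v0 maps to the most
   significant bit), a function using only v20 ORs 0x80000000 >> 20,
   i.e. 0x800, into the saved VRSAVE value below.  */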
20171 if (!WORLD_SAVE_P (info)
20172 && TARGET_ALTIVEC
20173 && TARGET_ALTIVEC_VRSAVE
20174 && info->vrsave_mask != 0)
20176 rtx reg, vrsave;
20177 int offset;
20178 int save_regno;
20180 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
20181 be using r12 as frame_reg_rtx and r11 as the static chain
20182 pointer for nested functions. */
20183 save_regno = 12;
20184 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
20185 save_regno = 11;
20186 else if (REGNO (frame_reg_rtx) == 12)
20188 save_regno = 11;
20189 if (using_static_chain_p)
20190 save_regno = 0;
20193 NOT_INUSE (save_regno);
20194 reg = gen_rtx_REG (SImode, save_regno);
20195 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20196 if (TARGET_MACHO)
20197 emit_insn (gen_get_vrsave_internal (reg));
20198 else
20199 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
20201 /* Save VRSAVE. */
20202 offset = info->vrsave_save_offset + frame_off;
20203 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
20205 /* Include the registers in the mask. */
20206 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
20208 insn = emit_insn (generate_set_vrsave (reg, info, 0));
20211 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
20212 if (!TARGET_SINGLE_PIC_BASE
20213 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
20214 || (DEFAULT_ABI == ABI_V4
20215 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
20216 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
20218 /* If emit_load_toc_table will use the link register, we need to save
20219 it. We use R12 for this purpose because emit_load_toc_table
20220 can use register 0. This allows us to use a plain 'blr' to return
20221 from the procedure more often. */
20222 int save_LR_around_toc_setup = (TARGET_ELF
20223 && DEFAULT_ABI != ABI_AIX
20224 && flag_pic
20225 && ! info->lr_save_p
20226 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
20227 if (save_LR_around_toc_setup)
20229 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20230 rtx tmp = gen_rtx_REG (Pmode, 12);
20232 insn = emit_move_insn (tmp, lr);
20233 RTX_FRAME_RELATED_P (insn) = 1;
20235 rs6000_emit_load_toc_table (TRUE);
20237 insn = emit_move_insn (lr, tmp);
20238 add_reg_note (insn, REG_CFA_RESTORE, lr);
20239 RTX_FRAME_RELATED_P (insn) = 1;
20241 else
20242 rs6000_emit_load_toc_table (TRUE);
20245 #if TARGET_MACHO
20246 if (!TARGET_SINGLE_PIC_BASE
20247 && DEFAULT_ABI == ABI_DARWIN
20248 && flag_pic && crtl->uses_pic_offset_table)
20250 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20251 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
20253 /* Save and restore LR locally around this call (in R0). */
20254 if (!info->lr_save_p)
20255 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
20257 emit_insn (gen_load_macho_picbase (src));
20259 emit_move_insn (gen_rtx_REG (Pmode,
20260 RS6000_PIC_OFFSET_TABLE_REGNUM),
20261 lr);
20263 if (!info->lr_save_p)
20264 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
20266 #endif
20268 /* If we need to, save the TOC register after doing the stack setup.
20269 Do not emit eh frame info for this save. The unwinder wants info,
20270 conceptually attached to instructions in this function, about
20271 register values in the caller of this function. This R2 may have
20272 already been changed from the value in the caller.
20273 We don't attempt to write accurate DWARF EH frame info for R2
20274 because code emitted by gcc for a (non-pointer) function call
20275 doesn't save and restore R2. Instead, R2 is managed out-of-line
20276 by a linker generated plt call stub when the function resides in
20277 a shared library. This behaviour is costly to describe in DWARF,
20278 both in terms of the size of DWARF info and the time taken in the
20279 unwinder to interpret it. R2 changes, apart from the
20280 calls_eh_return case earlier in this function, are handled by
20281 linux-unwind.h frob_update_context. */
20282 if (rs6000_save_toc_in_prologue_p ())
20284 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
20285 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
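/* Note the fixed location: 5 * reg_size places the TOC save word at
   20(r1) for -m32 and 40(r1) for -m64, the slot the AIX-style ABIs
   reserve for R2.  */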
20289 /* Write function prologue. */
20291 static void
20292 rs6000_output_function_prologue (FILE *file,
20293 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20295 rs6000_stack_t *info = rs6000_stack_info ();
20297 if (TARGET_DEBUG_STACK)
20298 debug_stack_info (info);
20300 /* Write .extern for any function we will call to save and restore
20301 fp values. */
20302 if (info->first_fp_reg_save < 64
20303 && !TARGET_MACHO
20304 && !TARGET_ELF)
20306 char *name;
20307 int regno = info->first_fp_reg_save - 32;
20309 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
20311 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20312 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20313 name = rs6000_savres_routine_name (info, regno, sel);
20314 fprintf (file, "\t.extern %s\n", name);
20316 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
20318 bool lr = (info->savres_strategy
20319 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20320 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20321 name = rs6000_savres_routine_name (info, regno, sel);
20322 fprintf (file, "\t.extern %s\n", name);
20326 rs6000_pic_labelno++;
20329 /* Non-zero if vmx regs are restored before the frame pop, zero if
20330 we restore after the pop when possible. */
20331 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
20333 /* Restoring cr is a two step process: loading a reg from the frame
20334 save, then moving the reg to cr. For ABI_V4 we must let the
20335 unwinder know that the stack location is no longer valid at or
20336 before the stack deallocation, but we can't emit a cfa_restore for
20337 cr at the stack deallocation like we do for other registers.
20338 The trouble is that it is possible for the move to cr to be
20339 scheduled after the stack deallocation. So say exactly where cr
20340 is located on each of the two insns. */
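/* A minimal sketch of the hazard for a V.4 frame of size SIZE:
       lwz rN,cr_save_offset(r1)   # load_cr_save; slot still valid
       addi 1,1,SIZE               # stack pop invalidates the slot
       mtcrf 0xff,rN               # restore may be scheduled down here
   hence the explicit notes on both insns below.  */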
20342 static rtx
20343 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
20345 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
20346 rtx reg = gen_rtx_REG (SImode, regno);
20347 rtx insn = emit_move_insn (reg, mem);
20349 if (!exit_func && DEFAULT_ABI == ABI_V4)
20351 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20352 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
20354 add_reg_note (insn, REG_CFA_REGISTER, set);
20355 RTX_FRAME_RELATED_P (insn) = 1;
20357 return reg;
20360 /* Reload CR from REG. */
20362 static void
20363 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
20365 int count = 0;
20366 int i;
20368 if (using_mfcr_multiple)
20370 for (i = 0; i < 8; i++)
20371 if (save_reg_p (CR0_REGNO + i))
20372 count++;
20373 gcc_assert (count);
20376 if (using_mfcr_multiple && count > 1)
20378 rtvec p;
20379 int ndx;
20381 p = rtvec_alloc (count);
20383 ndx = 0;
20384 for (i = 0; i < 8; i++)
20385 if (save_reg_p (CR0_REGNO + i))
20387 rtvec r = rtvec_alloc (2);
20388 RTVEC_ELT (r, 0) = reg;
20389 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
20390 RTVEC_ELT (p, ndx) =
20391 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
20392 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
20393 ndx++;
20395 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20396 gcc_assert (ndx == count);
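/* For instance, if only cr2 and cr3 were saved, this PARALLEL becomes,
   in effect, a single mtcrf 0x30,REG: the field masks are
   1 << (7-2) == 0x20 and 1 << (7-3) == 0x10.  */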
20398 else
20399 for (i = 0; i < 8; i++)
20400 if (save_reg_p (CR0_REGNO + i))
20401 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
20402 reg));
20404 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20406 rtx insn = get_last_insn ();
20407 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20409 add_reg_note (insn, REG_CFA_RESTORE, cr);
20410 RTX_FRAME_RELATED_P (insn) = 1;
20414 /* Like cr, the move to lr instruction can be scheduled after the
20415 stack deallocation, but unlike cr, its stack frame save is still
20416 valid. So we only need to emit the cfa_restore on the correct
20417 instruction. */
20419 static void
20420 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
20422 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
20423 rtx reg = gen_rtx_REG (Pmode, regno);
20425 emit_move_insn (reg, mem);
20428 static void
20429 restore_saved_lr (int regno, bool exit_func)
20431 rtx reg = gen_rtx_REG (Pmode, regno);
20432 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20433 rtx insn = emit_move_insn (lr, reg);
20435 if (!exit_func && flag_shrink_wrap)
20437 add_reg_note (insn, REG_CFA_RESTORE, lr);
20438 RTX_FRAME_RELATED_P (insn) = 1;
20442 static rtx
20443 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
20445 if (info->cr_save_p)
20446 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20447 gen_rtx_REG (SImode, CR2_REGNO),
20448 cfa_restores);
20449 if (info->lr_save_p)
20450 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20451 gen_rtx_REG (Pmode, LR_REGNO),
20452 cfa_restores);
20453 return cfa_restores;
20456 /* Return true if OFFSET from stack pointer can be clobbered by signals.
20457 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288
20458 bytes below the stack pointer not clobbered by signals. */
20460 static inline bool
20461 offset_below_red_zone_p (HOST_WIDE_INT offset)
20463 return offset < (DEFAULT_ABI == ABI_V4
20464 ? 0
20465 : TARGET_32BIT ? -220 : -288);
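/* Worked example: under ABI_V4 even offset -1 is clobberable, while
   for the 64-bit AIX-style ABIs -288 is still protected and only
   offsets of -289 and below return true.  */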
20468 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
20470 static void
20471 emit_cfa_restores (rtx cfa_restores)
20473 rtx insn = get_last_insn ();
20474 rtx *loc = &REG_NOTES (insn);
20476 while (*loc)
20477 loc = &XEXP (*loc, 1);
20478 *loc = cfa_restores;
20479 RTX_FRAME_RELATED_P (insn) = 1;
20482 /* Emit function epilogue as insns. */
20484 void
20485 rs6000_emit_epilogue (int sibcall)
20487 rs6000_stack_t *info;
20488 int restoring_GPRs_inline;
20489 int restoring_FPRs_inline;
20490 int using_load_multiple;
20491 int using_mtcr_multiple;
20492 int use_backchain_to_restore_sp;
20493 int restore_lr;
20494 int strategy;
20495 HOST_WIDE_INT frame_off = 0;
20496 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
20497 rtx frame_reg_rtx = sp_reg_rtx;
20498 rtx cfa_restores = NULL_RTX;
20499 rtx insn;
20500 rtx cr_save_reg = NULL_RTX;
20501 enum machine_mode reg_mode = Pmode;
20502 int reg_size = TARGET_32BIT ? 4 : 8;
20503 int i;
20504 bool exit_func;
20505 unsigned ptr_regno;
20507 info = rs6000_stack_info ();
20509 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20511 reg_mode = V2SImode;
20512 reg_size = 8;
20515 strategy = info->savres_strategy;
20516 using_load_multiple = strategy & SAVRES_MULTIPLE;
20517 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
20518 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
20519 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
20520 || rs6000_cpu == PROCESSOR_PPC603
20521 || rs6000_cpu == PROCESSOR_PPC750
20522 || optimize_size);
20523 /* Restore via the backchain when we have a large frame, since this
20524 is more efficient than an addis, addi pair. The second condition
20525 here will not trigger at the moment; we don't actually need a
20526 frame pointer for alloca, but the generic parts of the compiler
20527 give us one anyway. */
20528 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
20529 || (cfun->calls_alloca
20530 && !frame_pointer_needed));
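/* For example, with a 70000-byte frame the epilogue can reload the
   old stack pointer with a single lwz/ld from the backchain word at
   0(r1), instead of materializing 70000 with addis/addi.  */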
20531 restore_lr = (info->lr_save_p
20532 && (restoring_FPRs_inline
20533 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
20534 && (restoring_GPRs_inline
20535 || info->first_fp_reg_save < 64));
20537 if (WORLD_SAVE_P (info))
20539 int i, j;
20540 char rname[30];
20541 const char *alloc_rname;
20542 rtvec p;
20544 /* eh_rest_world_r10 will return to the location saved in the LR
20545 stack slot (which is not likely to be our caller).
20546 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
20547 rest_world is similar, except any R10 parameter is ignored.
20548 The exception-handling stuff that was here in 2.95 is no
20549 longer necessary. */
20551 p = rtvec_alloc (9
20553 + 32 - info->first_gp_reg_save
20554 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
20555 + 63 + 1 - info->first_fp_reg_save);
20557 strcpy (rname, ((crtl->calls_eh_return) ?
20558 "*eh_rest_world_r10" : "*rest_world"));
20559 alloc_rname = ggc_strdup (rname);
20561 j = 0;
20562 RTVEC_ELT (p, j++) = ret_rtx;
20563 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20564 gen_rtx_REG (Pmode,
20565 LR_REGNO));
20566 RTVEC_ELT (p, j++)
20567 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
20568 /* The instruction pattern requires a clobber here;
20569 it is shared with the restVEC helper. */
20570 RTVEC_ELT (p, j++)
20571 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
20574 /* CR register traditionally saved as CR2. */
20575 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
20576 RTVEC_ELT (p, j++)
20577 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
20578 if (flag_shrink_wrap)
20580 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20581 gen_rtx_REG (Pmode, LR_REGNO),
20582 cfa_restores);
20583 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20587 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20589 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20590 RTVEC_ELT (p, j++)
20591 = gen_frame_load (reg,
20592 frame_reg_rtx, info->gp_save_offset + reg_size * i);
20593 if (flag_shrink_wrap)
20594 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20596 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20598 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
20599 RTVEC_ELT (p, j++)
20600 = gen_frame_load (reg,
20601 frame_reg_rtx, info->altivec_save_offset + 16 * i);
20602 if (flag_shrink_wrap)
20603 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20605 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
20607 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20608 ? DFmode : SFmode),
20609 info->first_fp_reg_save + i);
20610 RTVEC_ELT (p, j++)
20611 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
20612 if (flag_shrink_wrap)
20613 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20615 RTVEC_ELT (p, j++)
20616 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
20617 RTVEC_ELT (p, j++)
20618 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
20619 RTVEC_ELT (p, j++)
20620 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
20621 RTVEC_ELT (p, j++)
20622 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
20623 RTVEC_ELT (p, j++)
20624 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
20625 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
20627 if (flag_shrink_wrap)
20629 REG_NOTES (insn) = cfa_restores;
20630 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20631 RTX_FRAME_RELATED_P (insn) = 1;
20633 return;
20636 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
20637 if (info->push_p)
20638 frame_off = info->total_size;
20640 /* Restore AltiVec registers if we must do so before adjusting the
20641 stack. */
20642 if (TARGET_ALTIVEC_ABI
20643 && info->altivec_size != 0
20644 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20645 || (DEFAULT_ABI != ABI_V4
20646 && offset_below_red_zone_p (info->altivec_save_offset))))
20648 int i;
20649 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20651 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20652 if (use_backchain_to_restore_sp)
20654 int frame_regno = 11;
20656 if ((strategy & REST_INLINE_VRS) == 0)
20658 /* Of r11 and r12, select the one not clobbered by an
20659 out-of-line restore function for the frame register. */
20660 frame_regno = 11 + 12 - scratch_regno;
20662 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
20663 emit_move_insn (frame_reg_rtx,
20664 gen_rtx_MEM (Pmode, sp_reg_rtx));
20665 frame_off = 0;
20667 else if (frame_pointer_needed)
20668 frame_reg_rtx = hard_frame_pointer_rtx;
20670 if ((strategy & REST_INLINE_VRS) == 0)
20672 int end_save = info->altivec_save_offset + info->altivec_size;
20673 int ptr_off;
20674 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20675 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20677 if (end_save + frame_off != 0)
20679 rtx offset = GEN_INT (end_save + frame_off);
20681 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20683 else
20684 emit_move_insn (ptr_reg, frame_reg_rtx);
20686 ptr_off = -end_save;
20687 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20688 info->altivec_save_offset + ptr_off,
20689 0, V4SImode, SAVRES_VR);
20691 else
20693 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20694 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20696 rtx addr, areg, mem, reg;
20698 areg = gen_rtx_REG (Pmode, 0);
20699 emit_move_insn
20700 (areg, GEN_INT (info->altivec_save_offset
20701 + frame_off
20702 + 16 * (i - info->first_altivec_reg_save)));
20704 /* AltiVec addressing mode is [reg+reg]. */
20705 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20706 mem = gen_frame_mem (V4SImode, addr);
20708 reg = gen_rtx_REG (V4SImode, i);
20709 emit_move_insn (reg, mem);
20713 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20714 if (((strategy & REST_INLINE_VRS) == 0
20715 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20716 && (flag_shrink_wrap
20717 || (offset_below_red_zone_p
20718 (info->altivec_save_offset
20719 + 16 * (i - info->first_altivec_reg_save)))))
20721 rtx reg = gen_rtx_REG (V4SImode, i);
20722 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20726 /* Restore VRSAVE if we must do so before adjusting the stack. */
20727 if (TARGET_ALTIVEC
20728 && TARGET_ALTIVEC_VRSAVE
20729 && info->vrsave_mask != 0
20730 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20731 || (DEFAULT_ABI != ABI_V4
20732 && offset_below_red_zone_p (info->vrsave_save_offset))))
20734 rtx reg;
20736 if (frame_reg_rtx == sp_reg_rtx)
20738 if (use_backchain_to_restore_sp)
20740 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20741 emit_move_insn (frame_reg_rtx,
20742 gen_rtx_MEM (Pmode, sp_reg_rtx));
20743 frame_off = 0;
20745 else if (frame_pointer_needed)
20746 frame_reg_rtx = hard_frame_pointer_rtx;
20749 reg = gen_rtx_REG (SImode, 12);
20750 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20751 info->vrsave_save_offset + frame_off));
20753 emit_insn (generate_set_vrsave (reg, info, 1));
20756 insn = NULL_RTX;
20757 /* If we have a large stack frame, restore the old stack pointer
20758 using the backchain. */
20759 if (use_backchain_to_restore_sp)
20761 if (frame_reg_rtx == sp_reg_rtx)
20763 /* Under V.4, don't reset the stack pointer until after we're done
20764 loading the saved registers. */
20765 if (DEFAULT_ABI == ABI_V4)
20766 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20768 insn = emit_move_insn (frame_reg_rtx,
20769 gen_rtx_MEM (Pmode, sp_reg_rtx));
20770 frame_off = 0;
20772 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20773 && DEFAULT_ABI == ABI_V4)
20774 /* frame_reg_rtx has been set up by the altivec restore. */
20776 else
20778 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
20779 frame_reg_rtx = sp_reg_rtx;
20782 /* If we have a frame pointer, we can restore the old stack pointer
20783 from it. */
20784 else if (frame_pointer_needed)
20786 frame_reg_rtx = sp_reg_rtx;
20787 if (DEFAULT_ABI == ABI_V4)
20788 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20789 /* Prevent reordering memory accesses against stack pointer restore. */
20790 else if (cfun->calls_alloca
20791 || offset_below_red_zone_p (-info->total_size))
20792 rs6000_emit_stack_tie (frame_reg_rtx, true);
20794 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
20795 GEN_INT (info->total_size)));
20796 frame_off = 0;
20798 else if (info->push_p
20799 && DEFAULT_ABI != ABI_V4
20800 && !crtl->calls_eh_return)
20802 /* Prevent reordering memory accesses against stack pointer restore. */
20803 if (cfun->calls_alloca
20804 || offset_below_red_zone_p (-info->total_size))
20805 rs6000_emit_stack_tie (frame_reg_rtx, false);
20806 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
20807 GEN_INT (info->total_size)));
20808 frame_off = 0;
20810 if (insn && frame_reg_rtx == sp_reg_rtx)
20812 if (cfa_restores)
20814 REG_NOTES (insn) = cfa_restores;
20815 cfa_restores = NULL_RTX;
20817 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20818 RTX_FRAME_RELATED_P (insn) = 1;
20821 /* Restore AltiVec registers if we have not done so already. */
20822 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20823 && TARGET_ALTIVEC_ABI
20824 && info->altivec_size != 0
20825 && (DEFAULT_ABI == ABI_V4
20826 || !offset_below_red_zone_p (info->altivec_save_offset)))
20828 int i;
20830 if ((strategy & REST_INLINE_VRS) == 0)
20832 int end_save = info->altivec_save_offset + info->altivec_size;
20833 int ptr_off;
20834 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20835 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20836 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20838 if (end_save + frame_off != 0)
20840 rtx offset = GEN_INT (end_save + frame_off);
20842 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20844 else
20845 emit_move_insn (ptr_reg, frame_reg_rtx);
20847 ptr_off = -end_save;
20848 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20849 info->altivec_save_offset + ptr_off,
20850 0, V4SImode, SAVRES_VR);
20851 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20853 /* Frame reg was clobbered by out-of-line save. Restore it
20854 from ptr_reg, and if we are calling out-of-line gpr or
20855 fpr restore set up the correct pointer and offset. */
20856 unsigned newptr_regno = 1;
20857 if (!restoring_GPRs_inline)
20859 bool lr = info->gp_save_offset + info->gp_size == 0;
20860 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20861 newptr_regno = ptr_regno_for_savres (sel);
20862 end_save = info->gp_save_offset + info->gp_size;
20864 else if (!restoring_FPRs_inline)
20866 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
20867 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20868 newptr_regno = ptr_regno_for_savres (sel);
20869 end_save = info->fp_save_offset + info->fp_size;
20872 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
20873 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
20875 if (end_save + ptr_off != 0)
20877 rtx offset = GEN_INT (end_save + ptr_off);
20879 frame_off = -end_save;
20880 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
20882 else
20884 frame_off = ptr_off;
20885 emit_move_insn (frame_reg_rtx, ptr_reg);
20889 else
20891 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20892 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20894 rtx addr, areg, mem, reg;
20896 areg = gen_rtx_REG (Pmode, 0);
20897 emit_move_insn
20898 (areg, GEN_INT (info->altivec_save_offset
20899 + frame_off
20900 + 16 * (i - info->first_altivec_reg_save)));
20902 /* AltiVec addressing mode is [reg+reg]. */
20903 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20904 mem = gen_frame_mem (V4SImode, addr);
20906 reg = gen_rtx_REG (V4SImode, i);
20907 emit_move_insn (reg, mem);
20911 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20912 if (((strategy & REST_INLINE_VRS) == 0
20913 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20914 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20916 rtx reg = gen_rtx_REG (V4SImode, i);
20917 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20921 /* Restore VRSAVE if we have not done so already. */
20922 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20923 && TARGET_ALTIVEC
20924 && TARGET_ALTIVEC_VRSAVE
20925 && info->vrsave_mask != 0
20926 && (DEFAULT_ABI == ABI_V4
20927 || !offset_below_red_zone_p (info->vrsave_save_offset)))
20929 rtx reg;
20931 reg = gen_rtx_REG (SImode, 12);
20932 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20933 info->vrsave_save_offset + frame_off));
20935 emit_insn (generate_set_vrsave (reg, info, 1));
20938 /* If we exit by an out-of-line restore function on ABI_V4 then that
20939 function will deallocate the stack, so we don't need to worry
20940 about the unwinder restoring cr from an invalid stack frame
20941 location. */
20942 exit_func = (!restoring_FPRs_inline
20943 || (!restoring_GPRs_inline
20944 && info->first_fp_reg_save == 64));
20946 /* Get the old lr if we saved it. If we are restoring registers
20947 out-of-line, then the out-of-line routines can do this for us. */
20948 if (restore_lr && restoring_GPRs_inline)
20949 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
20951 /* Get the old cr if we saved it. */
20952 if (info->cr_save_p)
20954 unsigned cr_save_regno = 12;
20956 if (!restoring_GPRs_inline)
20958 /* Ensure we don't use the register used by the out-of-line
20959 gpr register restore below. */
20960 bool lr = info->gp_save_offset + info->gp_size == 0;
20961 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20962 int gpr_ptr_regno = ptr_regno_for_savres (sel);
20964 if (gpr_ptr_regno == 12)
20965 cr_save_regno = 11;
20966 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
20968 else if (REGNO (frame_reg_rtx) == 12)
20969 cr_save_regno = 11;
20971 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
20972 info->cr_save_offset + frame_off,
20973 exit_func);
20976 /* Set LR here to try to overlap restores below. */
20977 if (restore_lr && restoring_GPRs_inline)
20978 restore_saved_lr (0, exit_func);
20980 /* Load exception handler data registers, if needed. */
20981 if (crtl->calls_eh_return)
20983 unsigned int i, regno;
20985 if (TARGET_AIX)
20987 rtx reg = gen_rtx_REG (reg_mode, 2);
20988 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20989 frame_off + 5 * reg_size));
20992 for (i = 0; ; ++i)
20994 rtx mem;
20996 regno = EH_RETURN_DATA_REGNO (i);
20997 if (regno == INVALID_REGNUM)
20998 break;
21000 /* Note: possible use of r0 here to address SPE regs. */
21001 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
21002 info->ehrd_offset + frame_off
21003 + reg_size * (int) i);
21005 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
21009 /* Restore GPRs. This is done as a PARALLEL if we are using
21010 the load-multiple instructions. */
21011 if (TARGET_SPE_ABI
21012 && info->spe_64bit_regs_used
21013 && info->first_gp_reg_save != 32)
21015 /* Determine whether we can address all of the registers that need
21016 to be saved with an offset from frame_reg_rtx that fits in
21017 the small const field for SPE memory instructions. */
21018 int spe_regs_addressable
21019 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21020 + reg_size * (32 - info->first_gp_reg_save - 1))
21021 && restoring_GPRs_inline);
21023 if (!spe_regs_addressable)
21025 int ool_adjust = 0;
21026 rtx old_frame_reg_rtx = frame_reg_rtx;
21027 /* Make r11 point to the start of the SPE save area. We worried about
21028 not clobbering it when we were saving registers in the prologue.
21029 There's no need to worry here because the static chain is passed
21030 anew to every function. */
21032 if (!restoring_GPRs_inline)
21033 ool_adjust = 8 * (info->first_gp_reg_save
21034 - (FIRST_SAVRES_REGISTER + 1));
21035 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21036 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
21037 GEN_INT (info->spe_gp_save_offset
21038 + frame_off
21039 - ool_adjust)));
21040 /* Keep the invariant that frame_reg_rtx + frame_off points
21041 at the top of the stack frame. */
21042 frame_off = -info->spe_gp_save_offset + ool_adjust;
21045 if (restoring_GPRs_inline)
21047 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
21049 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21050 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21052 rtx offset, addr, mem, reg;
21054 /* We're doing all this to ensure that the immediate offset
21055 fits into the immediate field of 'evldd'. */
21056 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
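/* evldd's displacement field is 5 bits scaled by 8, so only offsets
   0..248 that are multiples of 8 satisfy SPE_CONST_OFFSET_OK; larger
   frames were rebased onto r11 above.  */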
21058 offset = GEN_INT (spe_offset + reg_size * i);
21059 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
21060 mem = gen_rtx_MEM (V2SImode, addr);
21061 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21063 emit_move_insn (reg, mem);
21066 else
21067 rs6000_emit_savres_rtx (info, frame_reg_rtx,
21068 info->spe_gp_save_offset + frame_off,
21069 info->lr_save_offset + frame_off,
21070 reg_mode,
21071 SAVRES_GPR | SAVRES_LR);
21073 else if (!restoring_GPRs_inline)
21075 /* We are jumping to an out-of-line function. */
21076 rtx ptr_reg;
21077 int end_save = info->gp_save_offset + info->gp_size;
21078 bool can_use_exit = end_save == 0;
21079 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
21080 int ptr_off;
21082 /* Emit stack reset code if we need it. */
21083 ptr_regno = ptr_regno_for_savres (sel);
21084 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21085 if (can_use_exit)
21086 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21087 else if (end_save + frame_off != 0)
21088 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
21089 GEN_INT (end_save + frame_off)));
21090 else if (REGNO (frame_reg_rtx) != ptr_regno)
21091 emit_move_insn (ptr_reg, frame_reg_rtx);
21092 if (REGNO (frame_reg_rtx) == ptr_regno)
21093 frame_off = -end_save;
21095 if (can_use_exit && info->cr_save_p)
21096 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
21098 ptr_off = -end_save;
21099 rs6000_emit_savres_rtx (info, ptr_reg,
21100 info->gp_save_offset + ptr_off,
21101 info->lr_save_offset + ptr_off,
21102 reg_mode, sel);
21104 else if (using_load_multiple)
21106 rtvec p;
21107 p = rtvec_alloc (32 - info->first_gp_reg_save);
21108 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21109 RTVEC_ELT (p, i)
21110 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21111 frame_reg_rtx,
21112 info->gp_save_offset + frame_off + reg_size * i);
21113 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
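/* On 32-bit targets this PARALLEL matches the load-multiple pattern,
   e.g. a single "lmw 13,gp_save_offset(FRAME_REG)" restoring r13..r31
   when first_gp_reg_save is 13.  */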
21115 else
21117 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21118 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21119 emit_insn (gen_frame_load
21120 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21121 frame_reg_rtx,
21122 info->gp_save_offset + frame_off + reg_size * i));
21125 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21127 /* If the frame pointer was used then we can't delay emitting
21128 a REG_CFA_DEF_CFA note. This must happen on the insn that
21129 restores the frame pointer, r31. We may have already emitted
21130 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
21131 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
21132 be harmless if emitted. */
21133 if (frame_pointer_needed)
21135 insn = get_last_insn ();
21136 add_reg_note (insn, REG_CFA_DEF_CFA,
21137 plus_constant (Pmode, frame_reg_rtx, frame_off));
21138 RTX_FRAME_RELATED_P (insn) = 1;
21141 /* Set up cfa_restores. We always need these when
21142 shrink-wrapping. If not shrink-wrapping then we only need
21143 the cfa_restore when the stack location is no longer valid.
21144 The cfa_restores must be emitted on or before the insn that
21145 invalidates the stack, and of course must not be emitted
21146 before the insn that actually does the restore. The latter
21147 is why it is a bad idea to emit the cfa_restores as a group
21148 on the last instruction here that actually does a restore:
21149 That insn may be reordered with respect to others doing
21150 restores. */
21151 if (flag_shrink_wrap
21152 && !restoring_GPRs_inline
21153 && info->first_fp_reg_save == 64)
21154 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21156 for (i = info->first_gp_reg_save; i < 32; i++)
21157 if (!restoring_GPRs_inline
21158 || using_load_multiple
21159 || rs6000_reg_live_or_pic_offset_p (i))
21161 rtx reg = gen_rtx_REG (reg_mode, i);
21163 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21167 if (!restoring_GPRs_inline
21168 && info->first_fp_reg_save == 64)
21170 /* We are jumping to an out-of-line function. */
21171 if (cfa_restores)
21172 emit_cfa_restores (cfa_restores);
21173 return;
21176 if (restore_lr && !restoring_GPRs_inline)
21178 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21179 restore_saved_lr (0, exit_func);
21182 /* Restore fpr's if we need to do it without calling a function. */
21183 if (restoring_FPRs_inline)
21184 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21185 if (save_reg_p (info->first_fp_reg_save + i))
21187 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21188 ? DFmode : SFmode),
21189 info->first_fp_reg_save + i);
21190 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21191 info->fp_save_offset + frame_off + 8 * i));
21192 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21193 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21196 /* If we saved cr, restore it here. Just those that were used. */
21197 if (info->cr_save_p)
21198 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
21200 /* If this is V.4, unwind the stack pointer after all of the loads
21201 have been done, or set up r11 if we are restoring fp out of line. */
21202 ptr_regno = 1;
21203 if (!restoring_FPRs_inline)
21205 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21206 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21207 ptr_regno = ptr_regno_for_savres (sel);
21210 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21211 if (REGNO (frame_reg_rtx) == ptr_regno)
21212 frame_off = 0;
21214 if (insn && restoring_FPRs_inline)
21216 if (cfa_restores)
21218 REG_NOTES (insn) = cfa_restores;
21219 cfa_restores = NULL_RTX;
21221 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21222 RTX_FRAME_RELATED_P (insn) = 1;
21225 if (crtl->calls_eh_return)
21227 rtx sa = EH_RETURN_STACKADJ_RTX;
21228 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21231 if (!sibcall)
21233 rtvec p;
21234 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21235 if (! restoring_FPRs_inline)
21237 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21238 RTVEC_ELT (p, 0) = ret_rtx;
21240 else
21242 if (cfa_restores)
21244 /* We can't hang the cfa_restores off a simple return,
21245 since the shrink-wrap code sometimes uses an existing
21246 return. This means there might be a path from
21247 pre-prologue code to this return, and dwarf2cfi code
21248 wants the eh_frame unwinder state to be the same on
21249 all paths to any point. So we need to emit the
21250 cfa_restores before the return. For -m64 we really
21251 don't need epilogue cfa_restores at all, except for
21253 this irritating dwarf2cfi-with-shrink-wrap
21254 requirement; the stack red-zone means eh_frame info
21254 from the prologue telling the unwinder to restore
21255 from the stack is perfectly good right to the end of
21256 the function. */
21257 emit_insn (gen_blockage ());
21258 emit_cfa_restores (cfa_restores);
21259 cfa_restores = NULL_RTX;
21261 p = rtvec_alloc (2);
21262 RTVEC_ELT (p, 0) = simple_return_rtx;
21265 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21266 ? gen_rtx_USE (VOIDmode,
21267 gen_rtx_REG (Pmode, LR_REGNO))
21268 : gen_rtx_CLOBBER (VOIDmode,
21269 gen_rtx_REG (Pmode, LR_REGNO)));
21271 /* If we have to restore more than two FP registers, branch to the
21272 restore function. It will return to our caller. */
21273 if (! restoring_FPRs_inline)
21275 int i;
21276 rtx sym;
21278 if (flag_shrink_wrap)
21279 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21281 sym = rs6000_savres_routine_sym (info,
21282 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21283 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21284 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21285 gen_rtx_REG (Pmode,
21286 DEFAULT_ABI == ABI_AIX
21287 ? 1 : 11));
21288 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21290 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21292 RTVEC_ELT (p, i + 4)
21293 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21294 if (flag_shrink_wrap)
21295 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21296 cfa_restores);
21300 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21303 if (cfa_restores)
21305 if (sibcall)
21306 /* Ensure the cfa_restores are hung off an insn that won't
21307 be reordered above other restores. */
21308 emit_insn (gen_blockage ());
21310 emit_cfa_restores (cfa_restores);
21314 /* Write function epilogue. */
21316 static void
21317 rs6000_output_function_epilogue (FILE *file,
21318 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21320 #if TARGET_MACHO
21321 macho_branch_islands ();
21322 /* Mach-O doesn't support labels at the end of objects, so if
21323 it looks like we might want one, insert a NOP. */
21325 rtx insn = get_last_insn ();
21326 rtx deleted_debug_label = NULL_RTX;
21327 while (insn
21328 && NOTE_P (insn)
21329 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21331 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
21332 notes only, instead set their CODE_LABEL_NUMBER to -1,
21333 otherwise there would be code generation differences
21334 in between -g and -g0. */
21335 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21336 deleted_debug_label = insn;
21337 insn = PREV_INSN (insn);
21339 if (insn
21340 && (LABEL_P (insn)
21341 || (NOTE_P (insn)
21342 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21343 fputs ("\tnop\n", file);
21344 else if (deleted_debug_label)
21345 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21346 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21347 CODE_LABEL_NUMBER (insn) = -1;
21349 #endif
21351 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21352 on its format.
21354 We don't output a traceback table if -finhibit-size-directive was
21355 used. The documentation for -finhibit-size-directive reads
21356 ``don't output a @code{.size} assembler directive, or anything
21357 else that would cause trouble if the function is split in the
21358 middle, and the two halves are placed at locations far apart in
21359 memory.'' The traceback table has this property, since it
21360 includes the offset from the start of the function to the
21361 traceback table itself.
21363 System V.4 PowerPC (and the embedded ABI derived from it) uses a
21364 different traceback table. */
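/* Sketch of the fixed part emitted below for a plain C function
   (all values except the marker and language code vary):
       LT..foo:
       .long 0                    # all-zero word marking the tbtab
       .byte 0,0,...              # format 0, language C, flag bytes
   with the optional variable-length fields appended afterwards.  */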
21365 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21366 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21368 const char *fname = NULL;
21369 const char *language_string = lang_hooks.name;
21370 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21371 int i;
21372 int optional_tbtab;
21373 rs6000_stack_t *info = rs6000_stack_info ();
21375 if (rs6000_traceback == traceback_full)
21376 optional_tbtab = 1;
21377 else if (rs6000_traceback == traceback_part)
21378 optional_tbtab = 0;
21379 else
21380 optional_tbtab = !optimize_size && !TARGET_ELF;
21382 if (optional_tbtab)
21384 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21385 while (*fname == '.') /* V.4 encodes . in the name */
21386 fname++;
21388 /* Need label immediately before tbtab, so we can compute
21389 its offset from the function start. */
21390 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21391 ASM_OUTPUT_LABEL (file, fname);
21394 /* The .tbtab pseudo-op can only be used for the first eight
21395 expressions, since it can't handle the possibly variable
21396 length fields that follow. However, if you omit the optional
21397 fields, the assembler outputs zeros for all optional fields
21398 anyway, giving each variable length field its minimum length
21399 (as defined in sys/debug.h). Thus we cannot use the .tbtab
21400 pseudo-op at all. */
21402 /* An all-zero word flags the start of the tbtab, for debuggers
21403 that have to find it by searching forward from the entry
21404 point or from the current pc. */
21405 fputs ("\t.long 0\n", file);
21407 /* Tbtab format type. Use format type 0. */
21408 fputs ("\t.byte 0,", file);
21410 /* Language type. Unfortunately, there does not seem to be any
21411 official way to discover the language being compiled, so we
21412 use language_string.
21413 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21414 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21415 a number, so for now use 9. LTO and Go aren't assigned numbers
21416 either, so for now use 0. */
21417 if (! strcmp (language_string, "GNU C")
21418 || ! strcmp (language_string, "GNU GIMPLE")
21419 || ! strcmp (language_string, "GNU Go"))
21420 i = 0;
21421 else if (! strcmp (language_string, "GNU F77")
21422 || ! strcmp (language_string, "GNU Fortran"))
21423 i = 1;
21424 else if (! strcmp (language_string, "GNU Pascal"))
21425 i = 2;
21426 else if (! strcmp (language_string, "GNU Ada"))
21427 i = 3;
21428 else if (! strcmp (language_string, "GNU C++")
21429 || ! strcmp (language_string, "GNU Objective-C++"))
21430 i = 9;
21431 else if (! strcmp (language_string, "GNU Java"))
21432 i = 13;
21433 else if (! strcmp (language_string, "GNU Objective-C"))
21434 i = 14;
21435 else
21436 gcc_unreachable ();
21437 fprintf (file, "%d,", i);
21439 /* 8 single bit fields: global linkage (not set for C extern linkage,
21440 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21441 from start of procedure stored in tbtab, internal function, function
21442 has controlled storage, function has no toc, function uses fp,
21443 function logs/aborts fp operations. */
21444 /* Assume that fp operations are used if any fp reg must be saved. */
21445 fprintf (file, "%d,",
21446 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
21448 /* 6 bitfields: function is interrupt handler, name present in
21449 proc table, function calls alloca, on condition directives
21450 (controls stack walks, 3 bits), saves condition reg, saves
21451 link reg. */
21452 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21453 set up as a frame pointer, even when there is no alloca call. */
21454 fprintf (file, "%d,",
21455 ((optional_tbtab << 6)
21456 | ((optional_tbtab & frame_pointer_needed) << 5)
21457 | (info->cr_save_p << 1)
21458 | (info->lr_save_p)));
21460 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21461 (6 bits). */
21462 fprintf (file, "%d,",
21463 (info->push_p << 7) | (64 - info->first_fp_reg_save));
21465 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21466 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21468 if (optional_tbtab)
21470 /* Compute the parameter info from the function decl argument
21471 list. */
21472 tree decl;
21473 int next_parm_info_bit = 31;
21475 for (decl = DECL_ARGUMENTS (current_function_decl);
21476 decl; decl = DECL_CHAIN (decl))
21478 rtx parameter = DECL_INCOMING_RTL (decl);
21479 enum machine_mode mode = GET_MODE (parameter);
21481 if (GET_CODE (parameter) == REG)
21483 if (SCALAR_FLOAT_MODE_P (mode))
21485 int bits;
21487 float_parms++;
21489 switch (mode)
21491 case SFmode:
21492 case SDmode:
21493 bits = 0x2;
21494 break;
21496 case DFmode:
21497 case DDmode:
21498 case TFmode:
21499 case TDmode:
21500 bits = 0x3;
21501 break;
21503 default:
21504 gcc_unreachable ();
21507 /* If only one bit will fit, don't or in this entry. */
21508 if (next_parm_info_bit > 0)
21509 parm_info |= (bits << (next_parm_info_bit - 1));
21510 next_parm_info_bit -= 2;
21512 else
21514 fixed_parms += ((GET_MODE_SIZE (mode)
21515 + (UNITS_PER_WORD - 1))
21516 / UNITS_PER_WORD);
21517 next_parm_info_bit -= 1;
21523 /* Number of fixed point parameters. */
21524 /* This is actually the number of words of fixed point parameters; thus
21525 an 8-byte struct counts as 2, and the maximum value is 8. */
21526 fprintf (file, "%d,", fixed_parms);
21528 /* 2 bitfields: number of floating point parameters (7 bits), parameters
21529 all on stack. */
21530 /* This is actually the number of fp registers that hold parameters;
21531 and thus the maximum value is 13. */
21532 /* Set parameters on stack bit if parameters are not in their original
21533 registers, regardless of whether they are on the stack? Xlc
21534 seems to set the bit when not optimizing. */
21535 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
21537 if (! optional_tbtab)
21538 return;
21540 /* Optional fields follow. Some are variable length. */
21542 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
21543 11 double float. */
21544 /* There is an entry for each parameter in a register, in the order that
21545 they occur in the parameter list. Any intervening arguments on the
21546 stack are ignored. If the list overflows a long (max possible length
21547 34 bits) then completely leave off all elements that don't fit. */
21548 /* Only emit this long if there was at least one parameter. */
21549 if (fixed_parms || float_parms)
21550 fprintf (file, "\t.long %d\n", parm_info);
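/* Worked example: for foo (int, double, float) the loop above packs,
   from bit 31 down, 0 (fixed), 11 (double), 10 (single), so parm_info
   is 0x70000000 and the directive reads ".long 1879048192".  */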
21552 /* Offset from start of code to tb table. */
21553 fputs ("\t.long ", file);
21554 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21555 RS6000_OUTPUT_BASENAME (file, fname);
21556 putc ('-', file);
21557 rs6000_output_function_entry (file, fname);
21558 putc ('\n', file);
21560 /* Interrupt handler mask. */
21561 /* Omit this long, since we never set the interrupt handler bit
21562 above. */
21564 /* Number of CTL (controlled storage) anchors. */
21565 /* Omit this long, since the has_ctl bit is never set above. */
21567 /* Displacement into stack of each CTL anchor. */
21568 /* Omit this list of longs, because there are no CTL anchors. */
21570 /* Length of function name. */
21571 if (*fname == '*')
21572 ++fname;
21573 fprintf (file, "\t.short %d\n", (int) strlen (fname));
21575 /* Function name. */
21576 assemble_string (fname, strlen (fname));
21578 /* Register for alloca automatic storage; this is always reg 31.
21579 Only emit this if the alloca bit was set above. */
21580 if (frame_pointer_needed)
21581 fputs ("\t.byte 31\n", file);
21583 fputs ("\t.align 2\n", file);
21587 /* A C compound statement that outputs the assembler code for a thunk
21588 function, used to implement C++ virtual function calls with
21589 multiple inheritance. The thunk acts as a wrapper around a virtual
21590 function, adjusting the implicit object parameter before handing
21591 control off to the real function.
21593 First, emit code to add the integer DELTA to the location that
21594 contains the incoming first argument. Assume that this argument
21595 contains a pointer, and is the one used to pass the `this' pointer
21596 in C++. This is the incoming argument *before* the function
21597 prologue, e.g. `%o0' on a sparc. The addition must preserve the
21598 values of all other incoming arguments.
21600 After the addition, emit code to jump to FUNCTION, which is a
21601 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
21602 not touch the return address. Hence returning from FUNCTION will
21603 return to whoever called the current `thunk'.
21605 The effect must be as if FUNCTION had been called directly with the
21606 adjusted first argument. This macro is responsible for emitting
21607 all of the code for a thunk function; output_function_prologue()
21608 and output_function_epilogue() are not invoked.
21610 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
21611 been extracted from it.) It might possibly be useful on some
21612 targets, but probably not.
21614 If you do not define this macro, the target-independent code in the
21615 C++ frontend will generate a less efficient heavyweight thunk that
21616 calls FUNCTION instead of jumping to it. The generic approach does
21617 not support varargs. */
21619 static void
21620 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
21621 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
21622 tree function)
21624 rtx this_rtx, insn, funexp;
21626 reload_completed = 1;
21627 epilogue_completed = 1;
21629 /* Mark the end of the (empty) prologue. */
21630 emit_note (NOTE_INSN_PROLOGUE_END);
21632 /* Find the "this" pointer. If the function returns a structure,
21633 the structure return pointer is in r3. */
21634 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
21635 this_rtx = gen_rtx_REG (Pmode, 4);
21636 else
21637 this_rtx = gen_rtx_REG (Pmode, 3);
21639 /* Apply the constant offset, if required. */
21640 if (delta)
21641 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
21643 /* Apply the offset from the vtable, if required. */
21644 if (vcall_offset)
21646 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
21647 rtx tmp = gen_rtx_REG (Pmode, 12);
21649 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
21650 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
21652 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
21653 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
21655 else
21657 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
21659 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
21661 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
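/* Net effect of the code above, in pseudo-C:
       this += delta;
       tmp = *(char **) this;                   // vtable pointer
       tmp = *(char **) (tmp + vcall_offset);   // adjustment entry
       this += (intptr_t) tmp;
   the large-offset branch only splits the vcall_offset addition.  */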
21664 /* Generate a tail call to the target function. */
21665 if (!TREE_USED (function))
21667 assemble_external (function);
21668 TREE_USED (function) = 1;
21670 funexp = XEXP (DECL_RTL (function), 0);
21671 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
21673 #if TARGET_MACHO
21674 if (MACHOPIC_INDIRECT)
21675 funexp = machopic_indirect_call_target (funexp);
21676 #endif
21678 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
21679 generate sibcall RTL explicitly. */
21680 insn = emit_call_insn (
21681 gen_rtx_PARALLEL (VOIDmode,
21682 gen_rtvec (4,
21683 gen_rtx_CALL (VOIDmode,
21684 funexp, const0_rtx),
21685 gen_rtx_USE (VOIDmode, const0_rtx),
21686 gen_rtx_USE (VOIDmode,
21687 gen_rtx_REG (SImode,
21688 LR_REGNO)),
21689 simple_return_rtx)));
21690 SIBLING_CALL_P (insn) = 1;
21691 emit_barrier ();
21693 /* Run just enough of rest_of_compilation to get the insns emitted.
21694 There's not really enough bulk here to make other passes such as
21695 instruction scheduling worthwhile. Note that use_thunk calls
21696 assemble_start_function and assemble_end_function. */
21697 insn = get_insns ();
21698 shorten_branches (insn);
21699 final_start_function (insn, file, 1);
21700 final (insn, file, 1);
21701 final_end_function ();
21703 reload_completed = 0;
21704 epilogue_completed = 0;
21707 /* A quick summary of the various types of 'constant-pool tables'
21708 under PowerPC:
21710 Target Flags Name One table per
21711 AIX (none) AIX TOC object file
21712 AIX -mfull-toc AIX TOC object file
21713 AIX -mminimal-toc AIX minimal TOC translation unit
21714 SVR4/EABI (none) SVR4 SDATA object file
21715 SVR4/EABI -fpic SVR4 pic object file
21716 SVR4/EABI -fPIC SVR4 PIC translation unit
21717 SVR4/EABI -mrelocatable EABI TOC function
21718 SVR4/EABI -maix AIX TOC object file
21719 SVR4/EABI -maix -mminimal-toc
21720 AIX minimal TOC translation unit
21722 Name Reg. Set by entries contains:
21723 made by addrs? fp? sum?
21725 AIX TOC 2 crt0 as Y option option
21726 AIX minimal TOC 30 prolog gcc Y Y option
21727 SVR4 SDATA 13 crt0 gcc N Y N
21728 SVR4 pic 30 prolog ld Y not yet N
21729 SVR4 PIC 30 prolog gcc Y option option
21730 EABI TOC 30 prolog gcc Y option option
21734 /* Hash functions for the hash table. */
21736 static unsigned
21737 rs6000_hash_constant (rtx k)
21739 enum rtx_code code = GET_CODE (k);
21740 enum machine_mode mode = GET_MODE (k);
21741 unsigned result = (code << 3) ^ mode;
21742 const char *format;
21743 int flen, fidx;
21745 format = GET_RTX_FORMAT (code);
21746 flen = strlen (format);
21747 fidx = 0;
21749 switch (code)
21751 case LABEL_REF:
21752 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
21754 case CONST_DOUBLE:
21755 if (mode != VOIDmode)
21756 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
21757 flen = 2;
21758 break;
21760 case CODE_LABEL:
21761 fidx = 3;
21762 break;
21764 default:
21765 break;
21768 for (; fidx < flen; fidx++)
21769 switch (format[fidx])
21771 case 's':
21773 unsigned i, len;
21774 const char *str = XSTR (k, fidx);
21775 len = strlen (str);
21776 result = result * 613 + len;
21777 for (i = 0; i < len; i++)
21778 result = result * 613 + (unsigned) str[i];
21779 break;
21781 case 'u':
21782 case 'e':
21783 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
21784 break;
21785 case 'i':
21786 case 'n':
21787 result = result * 613 + (unsigned) XINT (k, fidx);
21788 break;
21789 case 'w':
21790 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
21791 result = result * 613 + (unsigned) XWINT (k, fidx);
21792 else
21794 size_t i;
21795 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
21796 result = result * 613 + (unsigned) (XWINT (k, fidx)
21797 >> CHAR_BIT * i);
21799 break;
21800 case '0':
21801 break;
21802 default:
21803 gcc_unreachable ();
21806 return result;
21809 static unsigned
21810 toc_hash_function (const void *hash_entry)
21812 const struct toc_hash_struct *thc =
21813 (const struct toc_hash_struct *) hash_entry;
21814 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
21817 /* Compare H1 and H2 for equivalence. */
21819 static int
21820 toc_hash_eq (const void *h1, const void *h2)
21822 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
21823 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
21825 if (((const struct toc_hash_struct *) h1)->key_mode
21826 != ((const struct toc_hash_struct *) h2)->key_mode)
21827 return 0;
21829 return rtx_equal_p (r1, r2);
21832 /* These are the names given by the C++ front-end to vtables, and
21833 vtable-like objects. Ideally, this logic should not be here;
21834 instead, there should be some programmatic way of inquiring as
21835 to whether or not an object is a vtable. */
21837 #define VTABLE_NAME_P(NAME) \
21838 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
21839 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
21840 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
21841 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
21842 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
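/* For instance, the mangled names "_ZTV3Foo" (vtable for Foo) and
   "_ZTI3Foo" (typeinfo for Foo) match, while an ordinary function
   symbol such as "_Z3barv" does not.  */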
21844 #ifdef NO_DOLLAR_IN_LABEL
21845 /* Return a GGC-allocated character string translating dollar signs in
21846 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
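/* For example, "f$g$h" comes back as "f_g_h"; a name whose first
   character is '$' (q == name below) is returned unchanged.  */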
21848 const char *
21849 rs6000_xcoff_strip_dollar (const char *name)
21851 char *strip, *p;
21852 const char *q;
21853 size_t len;
21855 q = (const char *) strchr (name, '$');
21857 if (q == 0 || q == name)
21858 return name;
21860 len = strlen (name);
21861 strip = XALLOCAVEC (char, len + 1);
21862 strcpy (strip, name);
21863 p = strip + (q - name);
21864 while (p)
21866 *p = '_';
21867 p = strchr (p + 1, '$');
21870 return ggc_alloc_string (strip, len);
21872 #endif
21874 void
21875 rs6000_output_symbol_ref (FILE *file, rtx x)
21877 /* Currently C++ toc references to vtables can be emitted before it
21878 is decided whether the vtable is public or private. If this is
21879 the case, then the linker will eventually complain that there is
21880 a reference to an unknown section. Thus, for vtables only,
21881 we emit the TOC reference to reference the symbol and not the
21882 section. */
21883 const char *name = XSTR (x, 0);
21885 if (VTABLE_NAME_P (name))
21887 RS6000_OUTPUT_BASENAME (file, name);
21889 else
21890 assemble_name (file, name);
21893 /* Output a TOC entry. We derive the entry name from what is being
21894 written. */
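/* As an illustrative trace of the CONST_DOUBLE handling below (not
   captured from a real compilation): on a 64-bit target without
   TARGET_MINIMAL_TOC, the DFmode constant 1.0, whose target image is
   0x3ff00000/0x00000000, would be emitted as

       .tc FD_3ff00000_0[TC],0x3ff0000000000000                     */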
21896 void
21897 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
21899 char buf[256];
21900 const char *name = buf;
21901 rtx base = x;
21902 HOST_WIDE_INT offset = 0;
21904 gcc_assert (!TARGET_NO_TOC);
21906 /* When the linker won't eliminate them, don't output duplicate
21907 TOC entries (this happens on AIX if there is any kind of TOC,
21908 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
21909 CODE_LABELs. */
21910 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
21912 struct toc_hash_struct *h;
21913 void * * found;
21915 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
21916 time because GGC is not initialized at that point. */
21917 if (toc_hash_table == NULL)
21918 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
21919 toc_hash_eq, NULL);
21921 h = ggc_alloc_toc_hash_struct ();
21922 h->key = x;
21923 h->key_mode = mode;
21924 h->labelno = labelno;
21926 found = htab_find_slot (toc_hash_table, h, INSERT);
21927 if (*found == NULL)
21928 *found = h;
21929 else /* This is indeed a duplicate.
21930 Set this label equal to that label. */
21932 fputs ("\t.set ", file);
21933 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21934 fprintf (file, "%d,", labelno);
21935 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21936 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
21937 found)->labelno));
21938 return;
21942 /* If we're going to put a double constant in the TOC, make sure it's
21943 aligned properly when strict alignment is on. */
21944 if (GET_CODE (x) == CONST_DOUBLE
21945 && STRICT_ALIGNMENT
21946 && GET_MODE_BITSIZE (mode) >= 64
21947 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
21948 ASM_OUTPUT_ALIGN (file, 3);
21951 (*targetm.asm_out.internal_label) (file, "LC", labelno);
21953 /* Handle FP constants specially. Note that if we have a minimal
21954 TOC, things we put here aren't actually in the TOC, so we can allow
21955 FP constants. */
21956 if (GET_CODE (x) == CONST_DOUBLE
21957 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
21959 REAL_VALUE_TYPE rv;
21960 long k[4];
21962 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21963 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21964 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
21965 else
21966 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
21968 if (TARGET_64BIT)
21970 if (TARGET_MINIMAL_TOC)
21971 fputs (DOUBLE_INT_ASM_OP, file);
21972 else
21973 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21974 k[0] & 0xffffffff, k[1] & 0xffffffff,
21975 k[2] & 0xffffffff, k[3] & 0xffffffff);
21976 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
21977 k[0] & 0xffffffff, k[1] & 0xffffffff,
21978 k[2] & 0xffffffff, k[3] & 0xffffffff);
21979 return;
21981 else
21983 if (TARGET_MINIMAL_TOC)
21984 fputs ("\t.long ", file);
21985 else
21986 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21987 k[0] & 0xffffffff, k[1] & 0xffffffff,
21988 k[2] & 0xffffffff, k[3] & 0xffffffff);
21989 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
21990 k[0] & 0xffffffff, k[1] & 0xffffffff,
21991 k[2] & 0xffffffff, k[3] & 0xffffffff);
21992 return;
21995 else if (GET_CODE (x) == CONST_DOUBLE
21996 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
21998 REAL_VALUE_TYPE rv;
21999 long k[2];
22001 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22003 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22004 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
22005 else
22006 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
22008 if (TARGET_64BIT)
22010 if (TARGET_MINIMAL_TOC)
22011 fputs (DOUBLE_INT_ASM_OP, file);
22012 else
22013 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22014 k[0] & 0xffffffff, k[1] & 0xffffffff);
22015 fprintf (file, "0x%lx%08lx\n",
22016 k[0] & 0xffffffff, k[1] & 0xffffffff);
22017 return;
22019 else
22021 if (TARGET_MINIMAL_TOC)
22022 fputs ("\t.long ", file);
22023 else
22024 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22025 k[0] & 0xffffffff, k[1] & 0xffffffff);
22026 fprintf (file, "0x%lx,0x%lx\n",
22027 k[0] & 0xffffffff, k[1] & 0xffffffff);
22028 return;
22031 else if (GET_CODE (x) == CONST_DOUBLE
22032 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
22034 REAL_VALUE_TYPE rv;
22035 long l;
22037 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22038 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22039 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
22040 else
22041 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
22043 if (TARGET_64BIT)
22045 if (TARGET_MINIMAL_TOC)
22046 fputs (DOUBLE_INT_ASM_OP, file);
22047 else
22048 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22049 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
22050 return;
22052 else
22054 if (TARGET_MINIMAL_TOC)
22055 fputs ("\t.long ", file);
22056 else
22057 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22058 fprintf (file, "0x%lx\n", l & 0xffffffff);
22059 return;
22062 else if (GET_MODE (x) == VOIDmode
22063 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
22065 unsigned HOST_WIDE_INT low;
22066 HOST_WIDE_INT high;
22068 if (GET_CODE (x) == CONST_DOUBLE)
22070 low = CONST_DOUBLE_LOW (x);
22071 high = CONST_DOUBLE_HIGH (x);
22073 else
22074 #if HOST_BITS_PER_WIDE_INT == 32
22076 low = INTVAL (x);
22077 high = (low & 0x80000000) ? ~0 : 0;
22079 #else
22081 low = INTVAL (x) & 0xffffffff;
22082 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
22084 #endif
22086 /* TOC entries are always Pmode-sized, but since this
22087 is a big-endian machine, if we're putting smaller
22088 integer constants in the TOC we have to pad them.
22089 (This is still a win over putting the constants in
22090 a separate constant pool, because then we'd have
22091 to have both a TOC entry _and_ the actual constant.)
22093 For a 32-bit target, CONST_INT values are loaded and shifted
22094 entirely within `low' and can be stored in one TOC entry. */
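/* For example (illustrative values): an SImode CONST_INT 0x12345678
   placed in a 64-bit TOC is shifted into the high half by the code
   below and written out as the doubleword 0x1234567800000000. */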
22096 /* It would be easy to make this work, but it doesn't now. */
22097 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
22099 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
22101 #if HOST_BITS_PER_WIDE_INT == 32
22102 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
22103 POINTER_SIZE, &low, &high, 0);
22104 #else
22105 low |= high << 32;
22106 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
22107 high = (HOST_WIDE_INT) low >> 32;
22108 low &= 0xffffffff;
22109 #endif
22112 if (TARGET_64BIT)
22114 if (TARGET_MINIMAL_TOC)
22115 fputs (DOUBLE_INT_ASM_OP, file);
22116 else
22117 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22118 (long) high & 0xffffffff, (long) low & 0xffffffff);
22119 fprintf (file, "0x%lx%08lx\n",
22120 (long) high & 0xffffffff, (long) low & 0xffffffff);
22121 return;
22123 else
22125 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
22127 if (TARGET_MINIMAL_TOC)
22128 fputs ("\t.long ", file);
22129 else
22130 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22131 (long) high & 0xffffffff, (long) low & 0xffffffff);
22132 fprintf (file, "0x%lx,0x%lx\n",
22133 (long) high & 0xffffffff, (long) low & 0xffffffff);
22135 else
22137 if (TARGET_MINIMAL_TOC)
22138 fputs ("\t.long ", file);
22139 else
22140 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
22141 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
22143 return;
22147 if (GET_CODE (x) == CONST)
22149 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
22150 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
22152 base = XEXP (XEXP (x, 0), 0);
22153 offset = INTVAL (XEXP (XEXP (x, 0), 1));
22156 switch (GET_CODE (base))
22158 case SYMBOL_REF:
22159 name = XSTR (base, 0);
22160 break;
22162 case LABEL_REF:
22163 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
22164 CODE_LABEL_NUMBER (XEXP (base, 0)));
22165 break;
22167 case CODE_LABEL:
22168 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
22169 break;
22171 default:
22172 gcc_unreachable ();
22175 if (TARGET_MINIMAL_TOC)
22176 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
22177 else
22179 fputs ("\t.tc ", file);
22180 RS6000_OUTPUT_BASENAME (file, name);
22182 if (offset < 0)
22183 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
22184 else if (offset)
22185 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
22187 fputs ("[TC],", file);
22190 /* Currently C++ TOC references to vtables can be emitted before it
22191 is decided whether the vtable is public or private. If this is
22192 the case, then the linker will eventually complain that there is
22193 a TOC reference to an unknown section. Thus, for vtables only,
22194 we emit the TOC reference against the symbol itself rather
22195 than its section. */
22196 if (VTABLE_NAME_P (name))
22198 RS6000_OUTPUT_BASENAME (file, name);
22199 if (offset < 0)
22200 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
22201 else if (offset > 0)
22202 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
22204 else
22205 output_addr_const (file, x);
22206 putc ('\n', file);
22209 /* Output an assembler pseudo-op to write an ASCII string of N characters
22210 starting at P to FILE.
22212 On the RS/6000, we have to do this using the .byte operation and
22213 write out special characters outside the quoted string.
22214 Also, the assembler is broken; very long strings are truncated,
22215 so we must artificially break them up early. */
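/* For instance, the five input bytes 'a' 'b' '"' 'c' '\n' would be
   emitted as

       .byte "ab""c"
       .byte 10

   with the quote doubled inside the quoted string and the newline
   written as a decimal .byte value (illustrative output, derived
   from the logic below). */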
22217 void
22218 output_ascii (FILE *file, const char *p, int n)
22220 char c;
22221 int i, count_string;
22222 const char *for_string = "\t.byte \"";
22223 const char *for_decimal = "\t.byte ";
22224 const char *to_close = NULL;
22226 count_string = 0;
22227 for (i = 0; i < n; i++)
22229 c = *p++;
22230 if (c >= ' ' && c < 0177)
22232 if (for_string)
22233 fputs (for_string, file);
22234 putc (c, file);
22236 /* Write two quotes to get one. */
22237 if (c == '"')
22239 putc (c, file);
22240 ++count_string;
22243 for_string = NULL;
22244 for_decimal = "\"\n\t.byte ";
22245 to_close = "\"\n";
22246 ++count_string;
22248 if (count_string >= 512)
22250 fputs (to_close, file);
22252 for_string = "\t.byte \"";
22253 for_decimal = "\t.byte ";
22254 to_close = NULL;
22255 count_string = 0;
22258 else
22260 if (for_decimal)
22261 fputs (for_decimal, file);
22262 fprintf (file, "%d", c);
22264 for_string = "\n\t.byte \"";
22265 for_decimal = ", ";
22266 to_close = "\n";
22267 count_string = 0;
22271 /* Now close the string if we have written one. Then end the line. */
22272 if (to_close)
22273 fputs (to_close, file);
22276 /* Generate a unique section name for FILENAME for a section type
22277 represented by SECTION_DESC. Output goes into BUF.
22279 SECTION_DESC can be any string, as long as it is different for each
22280 possible section type.
22282 We name the section in the same manner as xlc. The name begins with an
22283 underscore followed by the filename (after stripping any leading directory
22284 names) with the last period replaced by the string SECTION_DESC. If
22285 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22286 the name. */
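/* A worked example with hypothetical inputs: FILENAME "src/my-file.c"
   and SECTION_DESC "ro_" yield "_myfilero_". The directory prefix
   and the non-alphanumeric '-' are dropped, and everything from the
   last period onward is replaced by SECTION_DESC. */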
22288 void
22289 rs6000_gen_section_name (char **buf, const char *filename,
22290 const char *section_desc)
22292 const char *q, *after_last_slash, *last_period = 0;
22293 char *p;
22294 int len;
22296 after_last_slash = filename;
22297 for (q = filename; *q; q++)
22299 if (*q == '/')
22300 after_last_slash = q + 1;
22301 else if (*q == '.')
22302 last_period = q;
22305 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22306 *buf = (char *) xmalloc (len);
22308 p = *buf;
22309 *p++ = '_';
22311 for (q = after_last_slash; *q; q++)
22313 if (q == last_period)
22315 strcpy (p, section_desc);
22316 p += strlen (section_desc);
22317 break;
22320 else if (ISALNUM (*q))
22321 *p++ = *q;
22324 if (last_period == 0)
22325 strcpy (p, section_desc);
22326 else
22327 *p = '\0';
22330 /* Emit profile function. */
22332 void
22333 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22335 /* Non-standard profiling for kernels, which just saves LR then calls
22336 _mcount without worrying about arg saves. The idea is to change
22337 the function prologue as little as possible as it isn't easy to
22338 account for arg save/restore code added just for _mcount. */
22339 if (TARGET_PROFILE_KERNEL)
22340 return;
22342 if (DEFAULT_ABI == ABI_AIX)
22344 #ifndef NO_PROFILE_COUNTERS
22345 # define NO_PROFILE_COUNTERS 0
22346 #endif
22347 if (NO_PROFILE_COUNTERS)
22348 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22349 LCT_NORMAL, VOIDmode, 0);
22350 else
22352 char buf[30];
22353 const char *label_name;
22354 rtx fun;
22356 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22357 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22358 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22360 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22361 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22364 else if (DEFAULT_ABI == ABI_DARWIN)
22366 const char *mcount_name = RS6000_MCOUNT;
22367 int caller_addr_regno = LR_REGNO;
22369 /* Be conservative and always set this, at least for now. */
22370 crtl->uses_pic_offset_table = 1;
22372 #if TARGET_MACHO
22373 /* For PIC code, set up a stub and collect the caller's address
22374 from r0, which is where the prologue puts it. */
22375 if (MACHOPIC_INDIRECT
22376 && crtl->uses_pic_offset_table)
22377 caller_addr_regno = 0;
22378 #endif
22379 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22380 LCT_NORMAL, VOIDmode, 1,
22381 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22385 /* Write function profiler code. */
22387 void
22388 output_function_profiler (FILE *file, int labelno)
22390 char buf[100];
22392 switch (DEFAULT_ABI)
22394 default:
22395 gcc_unreachable ();
22397 case ABI_V4:
22398 if (!TARGET_32BIT)
22400 warning (0, "no profiling of 64-bit code for this ABI");
22401 return;
22403 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22404 fprintf (file, "\tmflr %s\n", reg_names[0]);
22405 if (NO_PROFILE_COUNTERS)
22407 asm_fprintf (file, "\tstw %s,4(%s)\n",
22408 reg_names[0], reg_names[1]);
22410 else if (TARGET_SECURE_PLT && flag_pic)
22412 if (TARGET_LINK_STACK)
22414 char name[32];
22415 get_ppc476_thunk_name (name);
22416 asm_fprintf (file, "\tbl %s\n", name);
22418 else
22419 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22420 asm_fprintf (file, "\tstw %s,4(%s)\n",
22421 reg_names[0], reg_names[1]);
22422 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22423 asm_fprintf (file, "\taddis %s,%s,",
22424 reg_names[12], reg_names[12]);
22425 assemble_name (file, buf);
22426 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
22427 assemble_name (file, buf);
22428 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22430 else if (flag_pic == 1)
22432 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22433 asm_fprintf (file, "\tstw %s,4(%s)\n",
22434 reg_names[0], reg_names[1]);
22435 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22436 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
22437 assemble_name (file, buf);
22438 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22440 else if (flag_pic > 1)
22442 asm_fprintf (file, "\tstw %s,4(%s)\n",
22443 reg_names[0], reg_names[1]);
22444 /* Now, we need to get the address of the label. */
22445 if (TARGET_LINK_STACK)
22447 char name[32];
22448 get_ppc476_thunk_name (name);
22449 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22450 assemble_name (file, buf);
22451 fputs ("-.\n1:", file);
22452 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22453 asm_fprintf (file, "\taddi %s,%s,4\n",
22454 reg_names[11], reg_names[11]);
22456 else
22458 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22459 assemble_name (file, buf);
22460 fputs ("-.\n1:", file);
22461 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22463 asm_fprintf (file, "\tlwz %s,0(%s)\n",
22464 reg_names[0], reg_names[11]);
22465 asm_fprintf (file, "\tadd %s,%s,%s\n",
22466 reg_names[0], reg_names[0], reg_names[11]);
22468 else
22470 asm_fprintf (file, "\tlis %s,", reg_names[12]);
22471 assemble_name (file, buf);
22472 fputs ("@ha\n", file);
22473 asm_fprintf (file, "\tstw %s,4(%s)\n",
22474 reg_names[0], reg_names[1]);
22475 asm_fprintf (file, "\tla %s,", reg_names[0]);
22476 assemble_name (file, buf);
22477 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
22480 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
22481 fprintf (file, "\tbl %s%s\n",
22482 RS6000_MCOUNT, flag_pic ? "@plt" : "");
22483 break;
22485 case ABI_AIX:
22486 case ABI_DARWIN:
22487 if (!TARGET_PROFILE_KERNEL)
22489 /* Don't do anything, done in output_profile_hook (). */
22491 else
22493 gcc_assert (!TARGET_32BIT);
22495 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
22496 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
22498 if (cfun->static_chain_decl != NULL)
22500 asm_fprintf (file, "\tstd %s,24(%s)\n",
22501 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22502 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22503 asm_fprintf (file, "\tld %s,24(%s)\n",
22504 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22506 else
22507 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22509 break;
22515 /* The following variable holds the last issued insn. */
22517 static rtx last_scheduled_insn;
22519 /* The following variable helps to balance issuing of load and
22520 store instructions. */
22522 static int load_store_pendulum;
22524 /* Power4 load update and store update instructions are cracked into a
22525 load or store and an integer insn which are executed in the same cycle.
22526 Branches have their own dispatch slot which does not count against the
22527 GCC issue rate, but it changes the program flow so there are no other
22528 instructions to issue in this cycle. */
22530 static int
22531 rs6000_variable_issue_1 (rtx insn, int more)
22533 last_scheduled_insn = insn;
22534 if (GET_CODE (PATTERN (insn)) == USE
22535 || GET_CODE (PATTERN (insn)) == CLOBBER)
22537 cached_can_issue_more = more;
22538 return cached_can_issue_more;
22541 if (insn_terminates_group_p (insn, current_group))
22543 cached_can_issue_more = 0;
22544 return cached_can_issue_more;
22547 /* If the insn has no reservation (it is not recognized), leave the issue count unchanged. */
22548 if (recog_memoized (insn) < 0)
22549 return more;
22551 if (rs6000_sched_groups)
22553 if (is_microcoded_insn (insn))
22554 cached_can_issue_more = 0;
22555 else if (is_cracked_insn (insn))
22556 cached_can_issue_more = more > 2 ? more - 2 : 0;
22557 else
22558 cached_can_issue_more = more - 1;
22560 return cached_can_issue_more;
22563 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
22564 return 0;
22566 cached_can_issue_more = more - 1;
22567 return cached_can_issue_more;
22570 static int
22571 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
22573 int r = rs6000_variable_issue_1 (insn, more);
22574 if (verbose)
22575 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
22576 return r;
22579 /* Adjust the cost of a scheduling dependency. Return the new cost of
22580 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
22582 static int
22583 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22585 enum attr_type attr_type;
22587 if (! recog_memoized (insn))
22588 return 0;
22590 switch (REG_NOTE_KIND (link))
22592 case REG_DEP_TRUE:
22594 /* Data dependency; DEP_INSN writes a register that INSN reads
22595 some cycles later. */
22597 /* Separate a load from a narrower, dependent store. */
22598 if (rs6000_sched_groups
22599 && GET_CODE (PATTERN (insn)) == SET
22600 && GET_CODE (PATTERN (dep_insn)) == SET
22601 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
22602 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
22603 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
22604 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
22605 return cost + 14;
22607 attr_type = get_attr_type (insn);
22609 switch (attr_type)
22611 case TYPE_JMPREG:
22612 /* Tell the first scheduling pass about the latency between
22613 a mtctr and bctr (and mtlr and br/blr). The first
22614 scheduling pass will not know about this latency since
22615 the mtctr instruction, which has the latency associated
22616 with it, will be generated by reload. */
22617 return 4;
22618 case TYPE_BRANCH:
22619 /* Leave some extra cycles between a compare and its
22620 dependent branch, to inhibit expensive mispredicts. */
22621 if ((rs6000_cpu_attr == CPU_PPC603
22622 || rs6000_cpu_attr == CPU_PPC604
22623 || rs6000_cpu_attr == CPU_PPC604E
22624 || rs6000_cpu_attr == CPU_PPC620
22625 || rs6000_cpu_attr == CPU_PPC630
22626 || rs6000_cpu_attr == CPU_PPC750
22627 || rs6000_cpu_attr == CPU_PPC7400
22628 || rs6000_cpu_attr == CPU_PPC7450
22629 || rs6000_cpu_attr == CPU_PPCE5500
22630 || rs6000_cpu_attr == CPU_PPCE6500
22631 || rs6000_cpu_attr == CPU_POWER4
22632 || rs6000_cpu_attr == CPU_POWER5
22633 || rs6000_cpu_attr == CPU_POWER7
22634 || rs6000_cpu_attr == CPU_CELL)
22635 && recog_memoized (dep_insn)
22636 && (INSN_CODE (dep_insn) >= 0))
22638 switch (get_attr_type (dep_insn))
22640 case TYPE_CMP:
22641 case TYPE_COMPARE:
22642 case TYPE_DELAYED_COMPARE:
22643 case TYPE_IMUL_COMPARE:
22644 case TYPE_LMUL_COMPARE:
22645 case TYPE_FPCOMPARE:
22646 case TYPE_CR_LOGICAL:
22647 case TYPE_DELAYED_CR:
22648 return cost + 2;
22649 default:
22650 break;
22652 break;
22654 case TYPE_STORE:
22655 case TYPE_STORE_U:
22656 case TYPE_STORE_UX:
22657 case TYPE_FPSTORE:
22658 case TYPE_FPSTORE_U:
22659 case TYPE_FPSTORE_UX:
22660 if ((rs6000_cpu == PROCESSOR_POWER6)
22661 && recog_memoized (dep_insn)
22662 && (INSN_CODE (dep_insn) >= 0))
22665 if (GET_CODE (PATTERN (insn)) != SET)
22666 /* If this happens, we have to extend this to schedule
22667 optimally. Return default for now. */
22668 return cost;
22670 /* Adjust the cost for the case where the value written
22671 by a fixed point operation is used as the address
22672 gen value on a store. */
22673 switch (get_attr_type (dep_insn))
22675 case TYPE_LOAD:
22676 case TYPE_LOAD_U:
22677 case TYPE_LOAD_UX:
22678 case TYPE_CNTLZ:
22680 if (! store_data_bypass_p (dep_insn, insn))
22681 return 4;
22682 break;
22684 case TYPE_LOAD_EXT:
22685 case TYPE_LOAD_EXT_U:
22686 case TYPE_LOAD_EXT_UX:
22687 case TYPE_VAR_SHIFT_ROTATE:
22688 case TYPE_VAR_DELAYED_COMPARE:
22690 if (! store_data_bypass_p (dep_insn, insn))
22691 return 6;
22692 break;
22694 case TYPE_INTEGER:
22695 case TYPE_COMPARE:
22696 case TYPE_FAST_COMPARE:
22697 case TYPE_EXTS:
22698 case TYPE_SHIFT:
22699 case TYPE_INSERT_WORD:
22700 case TYPE_INSERT_DWORD:
22701 case TYPE_FPLOAD_U:
22702 case TYPE_FPLOAD_UX:
22703 case TYPE_STORE_U:
22704 case TYPE_STORE_UX:
22705 case TYPE_FPSTORE_U:
22706 case TYPE_FPSTORE_UX:
22708 if (! store_data_bypass_p (dep_insn, insn))
22709 return 3;
22710 break;
22712 case TYPE_IMUL:
22713 case TYPE_IMUL2:
22714 case TYPE_IMUL3:
22715 case TYPE_LMUL:
22716 case TYPE_IMUL_COMPARE:
22717 case TYPE_LMUL_COMPARE:
22719 if (! store_data_bypass_p (dep_insn, insn))
22720 return 17;
22721 break;
22723 case TYPE_IDIV:
22725 if (! store_data_bypass_p (dep_insn, insn))
22726 return 45;
22727 break;
22729 case TYPE_LDIV:
22731 if (! store_data_bypass_p (dep_insn, insn))
22732 return 57;
22733 break;
22735 default:
22736 break;
22739 break;
22741 case TYPE_LOAD:
22742 case TYPE_LOAD_U:
22743 case TYPE_LOAD_UX:
22744 case TYPE_LOAD_EXT:
22745 case TYPE_LOAD_EXT_U:
22746 case TYPE_LOAD_EXT_UX:
22747 if ((rs6000_cpu == PROCESSOR_POWER6)
22748 && recog_memoized (dep_insn)
22749 && (INSN_CODE (dep_insn) >= 0))
22752 /* Adjust the cost for the case where the value written
22753 by a fixed point instruction is used within the address
22754 gen portion of a subsequent load(u)(x). */
22755 switch (get_attr_type (dep_insn))
22757 case TYPE_LOAD:
22758 case TYPE_LOAD_U:
22759 case TYPE_LOAD_UX:
22760 case TYPE_CNTLZ:
22762 if (set_to_load_agen (dep_insn, insn))
22763 return 4;
22764 break;
22766 case TYPE_LOAD_EXT:
22767 case TYPE_LOAD_EXT_U:
22768 case TYPE_LOAD_EXT_UX:
22769 case TYPE_VAR_SHIFT_ROTATE:
22770 case TYPE_VAR_DELAYED_COMPARE:
22772 if (set_to_load_agen (dep_insn, insn))
22773 return 6;
22774 break;
22776 case TYPE_INTEGER:
22777 case TYPE_COMPARE:
22778 case TYPE_FAST_COMPARE:
22779 case TYPE_EXTS:
22780 case TYPE_SHIFT:
22781 case TYPE_INSERT_WORD:
22782 case TYPE_INSERT_DWORD:
22783 case TYPE_FPLOAD_U:
22784 case TYPE_FPLOAD_UX:
22785 case TYPE_STORE_U:
22786 case TYPE_STORE_UX:
22787 case TYPE_FPSTORE_U:
22788 case TYPE_FPSTORE_UX:
22790 if (set_to_load_agen (dep_insn, insn))
22791 return 3;
22792 break;
22794 case TYPE_IMUL:
22795 case TYPE_IMUL2:
22796 case TYPE_IMUL3:
22797 case TYPE_LMUL:
22798 case TYPE_IMUL_COMPARE:
22799 case TYPE_LMUL_COMPARE:
22801 if (set_to_load_agen (dep_insn, insn))
22802 return 17;
22803 break;
22805 case TYPE_IDIV:
22807 if (set_to_load_agen (dep_insn, insn))
22808 return 45;
22809 break;
22811 case TYPE_LDIV:
22813 if (set_to_load_agen (dep_insn, insn))
22814 return 57;
22815 break;
22817 default:
22818 break;
22821 break;
22823 case TYPE_FPLOAD:
22824 if ((rs6000_cpu == PROCESSOR_POWER6)
22825 && recog_memoized (dep_insn)
22826 && (INSN_CODE (dep_insn) >= 0)
22827 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
22828 return 2;
22830 default:
22831 break;
22834 /* Fall out to return default cost. */
22836 break;
22838 case REG_DEP_OUTPUT:
22839 /* Output dependency; DEP_INSN writes a register that INSN writes some
22840 cycles later. */
22841 if ((rs6000_cpu == PROCESSOR_POWER6)
22842 && recog_memoized (dep_insn)
22843 && (INSN_CODE (dep_insn) >= 0))
22845 attr_type = get_attr_type (insn);
22847 switch (attr_type)
22849 case TYPE_FP:
22850 if (get_attr_type (dep_insn) == TYPE_FP)
22851 return 1;
22852 break;
22853 case TYPE_FPLOAD:
22854 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
22855 return 2;
22856 break;
22857 default:
22858 break;
22861 case REG_DEP_ANTI:
22862 /* Anti dependency; DEP_INSN reads a register that INSN writes some
22863 cycles later. */
22864 return 0;
22866 default:
22867 gcc_unreachable ();
22870 return cost;
22873 /* Debug version of rs6000_adjust_cost. */
22875 static int
22876 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22878 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
22880 if (ret != cost)
22882 const char *dep;
22884 switch (REG_NOTE_KIND (link))
22886 default: dep = "unknown dependency"; break;
22887 case REG_DEP_TRUE: dep = "data dependency"; break;
22888 case REG_DEP_OUTPUT: dep = "output dependency"; break;
22889 case REG_DEP_ANTI: dep = "anti dependency"; break;
22892 fprintf (stderr,
22893 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
22894 "%s, insn:\n", ret, cost, dep);
22896 debug_rtx (insn);
22899 return ret;
22902 /* Return true if INSN is microcoded,
22903 false otherwise. */
22905 static bool
22906 is_microcoded_insn (rtx insn)
22908 if (!insn || !NONDEBUG_INSN_P (insn)
22909 || GET_CODE (PATTERN (insn)) == USE
22910 || GET_CODE (PATTERN (insn)) == CLOBBER)
22911 return false;
22913 if (rs6000_cpu_attr == CPU_CELL)
22914 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
22916 if (rs6000_sched_groups)
22918 enum attr_type type = get_attr_type (insn);
22919 if (type == TYPE_LOAD_EXT_U
22920 || type == TYPE_LOAD_EXT_UX
22921 || type == TYPE_LOAD_UX
22922 || type == TYPE_STORE_UX
22923 || type == TYPE_MFCR)
22924 return true;
22927 return false;
22930 /* The function returns true if INSN is cracked into 2 instructions
22931 by the processor (and therefore occupies 2 issue slots). */
22933 static bool
22934 is_cracked_insn (rtx insn)
22936 if (!insn || !NONDEBUG_INSN_P (insn)
22937 || GET_CODE (PATTERN (insn)) == USE
22938 || GET_CODE (PATTERN (insn)) == CLOBBER)
22939 return false;
22941 if (rs6000_sched_groups)
22943 enum attr_type type = get_attr_type (insn);
22944 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
22945 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
22946 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
22947 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
22948 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
22949 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
22950 || type == TYPE_IDIV || type == TYPE_LDIV
22951 || type == TYPE_INSERT_WORD)
22952 return true;
22955 return false;
22958 /* The function returns true if INSN can be issued only from
22959 the branch slot. */
22961 static bool
22962 is_branch_slot_insn (rtx insn)
22964 if (!insn || !NONDEBUG_INSN_P (insn)
22965 || GET_CODE (PATTERN (insn)) == USE
22966 || GET_CODE (PATTERN (insn)) == CLOBBER)
22967 return false;
22969 if (rs6000_sched_groups)
22971 enum attr_type type = get_attr_type (insn);
22972 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
22973 return true;
22974 return false;
22977 return false;
22980 /* Return true if OUT_INSN sets a value that is
22981 used in the address generation computation of IN_INSN. */
22982 static bool
22983 set_to_load_agen (rtx out_insn, rtx in_insn)
22985 rtx out_set, in_set;
22987 /* For performance reasons, only handle the simple case where
22988 both loads are a single_set. */
22989 out_set = single_set (out_insn);
22990 if (out_set)
22992 in_set = single_set (in_insn);
22993 if (in_set)
22994 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
22997 return false;
23000 /* Try to determine base/offset/size parts of the given MEM.
23001 Return true if successful, false if any of the values could not
23002 be determined.
23004 This function only looks for REG or REG+CONST address forms.
23005 REG+REG address form will return false. */
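/* Illustrative address forms (register numbers hypothetical):
     (mem:DI (reg 9))                         base r9, offset 0
     (mem:DI (plus (reg 9) (const_int 16)))   base r9, offset 16
     (mem:DI (plus (reg 9) (reg 10)))         rejected (REG+REG)
   In every case the MEM must also satisfy MEM_SIZE_KNOWN_P for
   *SIZE to be filled in. */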
23007 static bool
23008 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
23009 HOST_WIDE_INT *size)
23011 rtx addr_rtx;
23012 if (MEM_SIZE_KNOWN_P (mem))
23013 *size = MEM_SIZE (mem);
23014 else
23015 return false;
23017 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
23018 addr_rtx = XEXP (XEXP (mem, 0), 1);
23019 else
23020 addr_rtx = (XEXP (mem, 0));
23022 if (GET_CODE (addr_rtx) == REG)
23024 *base = addr_rtx;
23025 *offset = 0;
23027 else if (GET_CODE (addr_rtx) == PLUS
23028 && CONST_INT_P (XEXP (addr_rtx, 1)))
23030 *base = XEXP (addr_rtx, 0);
23031 *offset = INTVAL (XEXP (addr_rtx, 1));
23033 else
23034 return false;
23036 return true;
23039 /* Return true if the target storage location of MEM1 is adjacent
23040 to the target storage location of MEM2. */
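/* For example (hypothetical RTL): two 4-byte accesses through
   (plus (reg 9) (const_int 0)) and (plus (reg 9) (const_int 4)) are
   adjacent, since off1 + size1 == off2. The same accesses through
   different base registers are not considered adjacent, even if the
   runtime addresses happen to touch. */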
23043 static bool
23044 adjacent_mem_locations (rtx mem1, rtx mem2)
23046 rtx reg1, reg2;
23047 HOST_WIDE_INT off1, size1, off2, size2;
23049 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23050 && get_memref_parts (mem2, &reg2, &off2, &size2))
23051 return ((REGNO (reg1) == REGNO (reg2))
23052 && ((off1 + size1 == off2)
23053 || (off2 + size2 == off1)));
23055 return false;
23058 /* This function returns true if it can be determined that the two MEM
23059 locations overlap by at least 1 byte based on base reg/offset/size. */
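/* E.g. (register number hypothetical) an 8-byte access at
   (reg 9)+0 overlaps a 4-byte access at (reg 9)+4, because
   off1 <= off2 && off1 + size1 > off2; accesses at (reg 9)+0 and
   (reg 9)+8 of sizes 8 and 4 do not overlap. */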
23061 static bool
23062 mem_locations_overlap (rtx mem1, rtx mem2)
23064 rtx reg1, reg2;
23065 HOST_WIDE_INT off1, size1, off2, size2;
23067 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23068 && get_memref_parts (mem2, &reg2, &off2, &size2))
23069 return ((REGNO (reg1) == REGNO (reg2))
23070 && (((off1 <= off2) && (off1 + size1 > off2))
23071 || ((off2 <= off1) && (off2 + size2 > off1))));
23073 return false;
23076 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
23077 Increase the priority to execute INSN earlier, reduce the priority
23078 to execute INSN later. Do not define this hook if you do not need
23079 to adjust the scheduling priorities of insns. */
23082 static int
23083 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
23085 rtx load_mem, str_mem;
23086 /* On machines (like the 750) which have asymmetric integer units,
23087 where one integer unit can do multiply and divides and the other
23088 can't, reduce the priority of multiply/divide so it is scheduled
23089 before other integer operations. */
23091 #if 0
23092 if (! INSN_P (insn))
23093 return priority;
23095 if (GET_CODE (PATTERN (insn)) == USE)
23096 return priority;
23098 switch (rs6000_cpu_attr) {
23099 case CPU_PPC750:
23100 switch (get_attr_type (insn))
23102 default:
23103 break;
23105 case TYPE_IMUL:
23106 case TYPE_IDIV:
23107 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
23108 priority, priority);
23109 if (priority >= 0 && priority < 0x01000000)
23110 priority >>= 3;
23111 break;
23114 #endif
23116 if (insn_must_be_first_in_group (insn)
23117 && reload_completed
23118 && current_sched_info->sched_max_insns_priority
23119 && rs6000_sched_restricted_insns_priority)
23122 /* Prioritize insns that can be dispatched only in the first
23123 dispatch slot. */
23124 if (rs6000_sched_restricted_insns_priority == 1)
23125 /* Attach highest priority to insn. This means that in
23126 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
23127 precede 'priority' (critical path) considerations. */
23128 return current_sched_info->sched_max_insns_priority;
23129 else if (rs6000_sched_restricted_insns_priority == 2)
23130 /* Increase priority of insn by a minimal amount. This means that in
23131 haifa-sched.c:ready_sort(), only 'priority' (critical path)
23132 considerations precede dispatch-slot restriction considerations. */
23133 return (priority + 1);
23136 if (rs6000_cpu == PROCESSOR_POWER6
23137 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
23138 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
23139 /* Attach highest priority to insn if the scheduler has just issued two
23140 stores and this instruction is a load, or two loads and this instruction
23141 is a store. Power6 wants loads and stores scheduled alternately
23142 when possible */
23143 return current_sched_info->sched_max_insns_priority;
23145 return priority;
23148 /* Return true if the instruction is nonpipelined on the Cell. */
23149 static bool
23150 is_nonpipeline_insn (rtx insn)
23152 enum attr_type type;
23153 if (!insn || !NONDEBUG_INSN_P (insn)
23154 || GET_CODE (PATTERN (insn)) == USE
23155 || GET_CODE (PATTERN (insn)) == CLOBBER)
23156 return false;
23158 type = get_attr_type (insn);
23159 if (type == TYPE_IMUL
23160 || type == TYPE_IMUL2
23161 || type == TYPE_IMUL3
23162 || type == TYPE_LMUL
23163 || type == TYPE_IDIV
23164 || type == TYPE_LDIV
23165 || type == TYPE_SDIV
23166 || type == TYPE_DDIV
23167 || type == TYPE_SSQRT
23168 || type == TYPE_DSQRT
23169 || type == TYPE_MFCR
23170 || type == TYPE_MFCRF
23171 || type == TYPE_MFJMPR)
23173 return true;
23175 return false;
23179 /* Return how many instructions the machine can issue per cycle. */
23181 static int
23182 rs6000_issue_rate (void)
23184 /* Unless scheduling for register pressure, use an issue rate of 1 for
23185 the first scheduling pass to decrease degradation. */
23186 if (!reload_completed && !flag_sched_pressure)
23187 return 1;
23189 switch (rs6000_cpu_attr) {
23190 case CPU_RS64A:
23191 case CPU_PPC601: /* ? */
23192 case CPU_PPC7450:
23193 return 3;
23194 case CPU_PPC440:
23195 case CPU_PPC603:
23196 case CPU_PPC750:
23197 case CPU_PPC7400:
23198 case CPU_PPC8540:
23199 case CPU_PPC8548:
23200 case CPU_CELL:
23201 case CPU_PPCE300C2:
23202 case CPU_PPCE300C3:
23203 case CPU_PPCE500MC:
23204 case CPU_PPCE500MC64:
23205 case CPU_PPCE5500:
23206 case CPU_PPCE6500:
23207 case CPU_TITAN:
23208 return 2;
23209 case CPU_PPC476:
23210 case CPU_PPC604:
23211 case CPU_PPC604E:
23212 case CPU_PPC620:
23213 case CPU_PPC630:
23214 return 4;
23215 case CPU_POWER4:
23216 case CPU_POWER5:
23217 case CPU_POWER6:
23218 case CPU_POWER7:
23219 return 5;
23220 default:
23221 return 1;
23225 /* Return how many instructions to look ahead for better insn
23226 scheduling. */
23228 static int
23229 rs6000_use_sched_lookahead (void)
23231 switch (rs6000_cpu_attr)
23233 case CPU_PPC8540:
23234 case CPU_PPC8548:
23235 return 4;
23237 case CPU_CELL:
23238 return (reload_completed ? 8 : 0);
23240 default:
23241 return 0;
23245 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
23246 static int
23247 rs6000_use_sched_lookahead_guard (rtx insn)
23249 if (rs6000_cpu_attr != CPU_CELL)
23250 return 1;
23252 if (insn == NULL_RTX || !INSN_P (insn))
23253 abort ();
23255 if (!reload_completed
23256 || is_nonpipeline_insn (insn)
23257 || is_microcoded_insn (insn))
23258 return 0;
23260 return 1;
23263 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23264 and return true. */
23266 static bool
23267 find_mem_ref (rtx pat, rtx *mem_ref)
23269 const char * fmt;
23270 int i, j;
23272 /* stack_tie does not produce any real memory traffic. */
23273 if (tie_operand (pat, VOIDmode))
23274 return false;
23276 if (GET_CODE (pat) == MEM)
23278 *mem_ref = pat;
23279 return true;
23282 /* Recursively process the pattern. */
23283 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23285 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23287 if (fmt[i] == 'e')
23289 if (find_mem_ref (XEXP (pat, i), mem_ref))
23290 return true;
23292 else if (fmt[i] == 'E')
23293 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23295 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23296 return true;
23300 return false;
23303 /* Determine if PAT is a PATTERN of a load insn. */
23305 static bool
23306 is_load_insn1 (rtx pat, rtx *load_mem)
23308 if (!pat)
23309 return false;
23311 if (GET_CODE (pat) == SET)
23312 return find_mem_ref (SET_SRC (pat), load_mem);
23314 if (GET_CODE (pat) == PARALLEL)
23316 int i;
23318 for (i = 0; i < XVECLEN (pat, 0); i++)
23319 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23320 return true;
23323 return false;
23326 /* Determine if INSN loads from memory. */
23328 static bool
23329 is_load_insn (rtx insn, rtx *load_mem)
23331 if (!insn || !INSN_P (insn))
23332 return false;
23334 if (GET_CODE (insn) == CALL_INSN)
23335 return false;
23337 return is_load_insn1 (PATTERN (insn), load_mem);
23340 /* Determine if PAT is a PATTERN of a store insn. */
23342 static bool
23343 is_store_insn1 (rtx pat, rtx *str_mem)
23345 if (!pat)
23346 return false;
23348 if (GET_CODE (pat) == SET)
23349 return find_mem_ref (SET_DEST (pat), str_mem);
23351 if (GET_CODE (pat) == PARALLEL)
23353 int i;
23355 for (i = 0; i < XVECLEN (pat, 0); i++)
23356 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23357 return true;
23360 return false;
23363 /* Determine if INSN stores to memory. */
23365 static bool
23366 is_store_insn (rtx insn, rtx *str_mem)
23368 if (!insn || !INSN_P (insn))
23369 return false;
23371 return is_store_insn1 (PATTERN (insn), str_mem);
23374 /* Returns whether the dependence between INSN and NEXT is considered
23375 costly by the given target. */
23377 static bool
23378 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23380 rtx insn;
23381 rtx next;
23382 rtx load_mem, str_mem;
23384 /* If the flag is not enabled, no dependence is considered costly;
23385 allow all dependent insns in the same group.
23386 This is the most aggressive option. */
23387 if (rs6000_sched_costly_dep == no_dep_costly)
23388 return false;
23390 /* If the flag is set to 1, a dependence is always considered costly;
23391 do not allow dependent instructions in the same group.
23392 This is the most conservative option. */
23393 if (rs6000_sched_costly_dep == all_deps_costly)
23394 return true;
23396 insn = DEP_PRO (dep);
23397 next = DEP_CON (dep);
23399 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23400 && is_load_insn (next, &load_mem)
23401 && is_store_insn (insn, &str_mem))
23402 /* Prevent load after store in the same group. */
23403 return true;
23405 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23406 && is_load_insn (next, &load_mem)
23407 && is_store_insn (insn, &str_mem)
23408 && DEP_TYPE (dep) == REG_DEP_TRUE
23409 && mem_locations_overlap (str_mem, load_mem))
23410 /* Prevent load after store in the same group if it is a true
23411 dependence. */
23412 return true;
23414 /* The flag is set to X; dependences with latency >= X are considered costly,
23415 and will not be scheduled in the same group. */
23416 if (rs6000_sched_costly_dep <= max_dep_latency
23417 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23418 return true;
23420 return false;
23423 /* Return the next insn after INSN that is found before TAIL is reached,
23424 skipping any "non-active" insns - insns that will not actually occupy
23425 an issue slot. Return NULL_RTX if such an insn is not found. */
23427 static rtx
23428 get_next_active_insn (rtx insn, rtx tail)
23430 if (insn == NULL_RTX || insn == tail)
23431 return NULL_RTX;
23433 while (1)
23435 insn = NEXT_INSN (insn);
23436 if (insn == NULL_RTX || insn == tail)
23437 return NULL_RTX;
23439 if (CALL_P (insn)
23440 || JUMP_P (insn)
23441 || (NONJUMP_INSN_P (insn)
23442 && GET_CODE (PATTERN (insn)) != USE
23443 && GET_CODE (PATTERN (insn)) != CLOBBER
23444 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23445 break;
23447 return insn;
23450 /* We are about to begin issuing insns for this clock cycle. */
23452 static int
23453 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23454 rtx *ready ATTRIBUTE_UNUSED,
23455 int *pn_ready ATTRIBUTE_UNUSED,
23456 int clock_var ATTRIBUTE_UNUSED)
23458 int n_ready = *pn_ready;
23460 if (sched_verbose)
23461 fprintf (dump, "// rs6000_sched_reorder :\n");
23463 /* Reorder the ready list, if the second-to-last ready insn
23464 is a non-pipelined insn. */
23465 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23467 if (is_nonpipeline_insn (ready[n_ready - 1])
23468 && (recog_memoized (ready[n_ready - 2]) > 0))
23469 /* Simply swap first two insns. */
23471 rtx tmp = ready[n_ready - 1];
23472 ready[n_ready - 1] = ready[n_ready - 2];
23473 ready[n_ready - 2] = tmp;
23477 if (rs6000_cpu == PROCESSOR_POWER6)
23478 load_store_pendulum = 0;
23480 return rs6000_issue_rate ();
23483 /* Like rs6000_sched_reorder, but called after issuing each insn. */
23485 static int
23486 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
23487 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
23489 if (sched_verbose)
23490 fprintf (dump, "// rs6000_sched_reorder2 :\n");
23492 /* For Power6, we need to handle some special cases to try and keep the
23493 store queue from overflowing and triggering expensive flushes.
23495 This code monitors how load and store instructions are being issued
23496 and skews the ready list one way or the other to increase the likelihood
23497 that a desired instruction is issued at the proper time.
23499 A couple of things are done. First, we maintain a "load_store_pendulum"
23500 to track the current state of load/store issue.
23502 - If the pendulum is at zero, then no loads or stores have been
23503 issued in the current cycle so we do nothing.
23505 - If the pendulum is 1, then a single load has been issued in this
23506 cycle and we attempt to locate another load in the ready list to
23507 issue with it.
23509 - If the pendulum is -2, then two stores have already been
23510 issued in this cycle, so we increase the priority of the first load
23511 in the ready list to increase its likelihood of being chosen first
23512 in the next cycle.
23514 - If the pendulum is -1, then a single store has been issued in this
23515 cycle and we attempt to locate another store in the ready list to
23516 issue with it, preferring a store to an adjacent memory location to
23517 facilitate store pairing in the store queue.
23519 - If the pendulum is 2, then two loads have already been
23520 issued in this cycle, so we increase the priority of the first store
23521 in the ready list to increase its likelihood of being chosen first
23522 in the next cycle.
23524 - If the pendulum < -2 or > 2, then do nothing.
23526 Note: This code covers the most common scenarios. There exist non
23527 load/store instructions which make use of the LSU and which
23528 would need to be accounted for to strictly model the behavior
23529 of the machine. Those instructions are currently unaccounted
23530 for to help minimize compile time overhead of this code. */
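/* A sketch of the pendulum over one cycle (an assumed issue
   sequence, for illustration): start at 0; a load is issued, so the
   pendulum moves to 1 and we try to pair a second load with it; a
   second load is issued, so the pendulum moves to 2 and the first
   ready store has its priority bumped so that the next cycle is
   likely to begin with a store. The mirror-image sequence applies
   to stores and the negative values. */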
23532 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
23534 int pos;
23535 int i;
23536 rtx tmp, load_mem, str_mem;
23538 if (is_store_insn (last_scheduled_insn, &str_mem))
23539 /* Issuing a store, swing the load_store_pendulum to the left */
23540 load_store_pendulum--;
23541 else if (is_load_insn (last_scheduled_insn, &load_mem))
23542 /* Issuing a load, swing the load_store_pendulum to the right */
23543 load_store_pendulum++;
23544 else
23545 return cached_can_issue_more;
23547 /* If the pendulum is balanced, or there is only one instruction on
23548 the ready list, then all is well, so return. */
23549 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
23550 return cached_can_issue_more;
23552 if (load_store_pendulum == 1)
23554 /* A load has been issued in this cycle. Scan the ready list
23555 for another load to issue with it */
23556 pos = *pn_ready-1;
23558 while (pos >= 0)
23560 if (is_load_insn (ready[pos], &load_mem))
23562 /* Found a load. Move it to the head of the ready list,
23563 and adjust its priority so that it is more likely to
23564 stay there */
23565 tmp = ready[pos];
23566 for (i=pos; i<*pn_ready-1; i++)
23567 ready[i] = ready[i + 1];
23568 ready[*pn_ready-1] = tmp;
23570 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23571 INSN_PRIORITY (tmp)++;
23572 break;
23574 pos--;
23577 else if (load_store_pendulum == -2)
23579 /* Two stores have been issued in this cycle. Increase the
23580 priority of the first load in the ready list to favor it for
23581 issuing in the next cycle. */
23582 pos = *pn_ready-1;
23584 while (pos >= 0)
23586 if (is_load_insn (ready[pos], &load_mem)
23587 && !sel_sched_p ()
23588 && INSN_PRIORITY_KNOWN (ready[pos]))
23590 INSN_PRIORITY (ready[pos])++;
23592 /* Adjust the pendulum to account for the fact that a load
23593 was found and increased in priority. This is to prevent
23594 increasing the priority of multiple loads */
23595 load_store_pendulum--;
23597 break;
23599 pos--;
23602 else if (load_store_pendulum == -1)
23604 /* A store has been issued in this cycle. Scan the ready list for
23605 another store to issue with it, preferring a store to an adjacent
23606 memory location */
23607 int first_store_pos = -1;
23609 pos = *pn_ready-1;
23611 while (pos >= 0)
23613 if (is_store_insn (ready[pos], &str_mem))
23615 rtx str_mem2;
23616 /* Maintain the index of the first store found on the
23617 list */
23618 if (first_store_pos == -1)
23619 first_store_pos = pos;
23621 if (is_store_insn (last_scheduled_insn, &str_mem2)
23622 && adjacent_mem_locations (str_mem, str_mem2))
23624 /* Found an adjacent store. Move it to the head of the
23625 ready list, and adjust its priority so that it is
23626 more likely to stay there */
23627 tmp = ready[pos];
23628 for (i=pos; i<*pn_ready-1; i++)
23629 ready[i] = ready[i + 1];
23630 ready[*pn_ready-1] = tmp;
23632 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23633 INSN_PRIORITY (tmp)++;
23635 first_store_pos = -1;
23637 break;
23640 pos--;
23643 if (first_store_pos >= 0)
23645 /* An adjacent store wasn't found, but a non-adjacent store was,
23646 so move the non-adjacent store to the front of the ready
23647 list, and adjust its priority so that it is more likely to
23648 stay there. */
23649 tmp = ready[first_store_pos];
23650 for (i=first_store_pos; i<*pn_ready-1; i++)
23651 ready[i] = ready[i + 1];
23652 ready[*pn_ready-1] = tmp;
23653 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23654 INSN_PRIORITY (tmp)++;
23657 else if (load_store_pendulum == 2)
23659 /* Two loads have been issued in this cycle. Increase the priority
23660 of the first store in the ready list to favor it for issuing in
23661 the next cycle. */
23662 pos = *pn_ready-1;
23664 while (pos >= 0)
23666 if (is_store_insn (ready[pos], &str_mem)
23667 && !sel_sched_p ()
23668 && INSN_PRIORITY_KNOWN (ready[pos]))
23670 INSN_PRIORITY (ready[pos])++;
23672 /* Adjust the pendulum to account for the fact that a store
23673 was found and increased in priority. This is to prevent
23674 increasing the priority of multiple stores */
23675 load_store_pendulum++;
23677 break;
23679 pos--;
23684 return cached_can_issue_more;
23687 /* Return whether the presence of INSN causes a dispatch group termination
23688 of group WHICH_GROUP.
23690 If WHICH_GROUP == current_group, this function will return true if INSN
23691 causes the termination of the current group (i.e., the dispatch group to
23692 which INSN belongs). This means that INSN will be the last insn in the
23693 group it belongs to.
23695 If WHICH_GROUP == previous_group, this function will return true if INSN
23696 causes the termination of the previous group (i.e., the dispatch group that
23697 precedes the group to which INSN belongs). This means that INSN will be
23698 the first insn in the group it belongs to. */
23700 static bool
23701 insn_terminates_group_p (rtx insn, enum group_termination which_group)
23703 bool first, last;
23705 if (! insn)
23706 return false;
23708 first = insn_must_be_first_in_group (insn);
23709 last = insn_must_be_last_in_group (insn);
23711 if (first && last)
23712 return true;
23714 if (which_group == current_group)
23715 return last;
23716 else if (which_group == previous_group)
23717 return first;
23719 return false;
23723 static bool
23724 insn_must_be_first_in_group (rtx insn)
23726 enum attr_type type;
23728 if (!insn
23729 || GET_CODE (insn) == NOTE
23730 || DEBUG_INSN_P (insn)
23731 || GET_CODE (PATTERN (insn)) == USE
23732 || GET_CODE (PATTERN (insn)) == CLOBBER)
23733 return false;
23735 switch (rs6000_cpu)
23737 case PROCESSOR_POWER5:
23738 if (is_cracked_insn (insn))
23739 return true;
/* FALLTHRU */
23740 case PROCESSOR_POWER4:
23741 if (is_microcoded_insn (insn))
23742 return true;
23744 if (!rs6000_sched_groups)
23745 return false;
23747 type = get_attr_type (insn);
23749 switch (type)
23751 case TYPE_MFCR:
23752 case TYPE_MFCRF:
23753 case TYPE_MTCR:
23754 case TYPE_DELAYED_CR:
23755 case TYPE_CR_LOGICAL:
23756 case TYPE_MTJMPR:
23757 case TYPE_MFJMPR:
23758 case TYPE_IDIV:
23759 case TYPE_LDIV:
23760 case TYPE_LOAD_L:
23761 case TYPE_STORE_C:
23762 case TYPE_ISYNC:
23763 case TYPE_SYNC:
23764 return true;
23765 default:
23766 break;
23768 break;
23769 case PROCESSOR_POWER6:
23770 type = get_attr_type (insn);
23772 switch (type)
23774 case TYPE_INSERT_DWORD:
23775 case TYPE_EXTS:
23776 case TYPE_CNTLZ:
23777 case TYPE_SHIFT:
23778 case TYPE_VAR_SHIFT_ROTATE:
23779 case TYPE_TRAP:
23780 case TYPE_IMUL:
23781 case TYPE_IMUL2:
23782 case TYPE_IMUL3:
23783 case TYPE_LMUL:
23784 case TYPE_IDIV:
23785 case TYPE_INSERT_WORD:
23786 case TYPE_DELAYED_COMPARE:
23787 case TYPE_IMUL_COMPARE:
23788 case TYPE_LMUL_COMPARE:
23789 case TYPE_FPCOMPARE:
23790 case TYPE_MFCR:
23791 case TYPE_MTCR:
23792 case TYPE_MFJMPR:
23793 case TYPE_MTJMPR:
23794 case TYPE_ISYNC:
23795 case TYPE_SYNC:
23796 case TYPE_LOAD_L:
23797 case TYPE_STORE_C:
23798 case TYPE_LOAD_U:
23799 case TYPE_LOAD_UX:
23800 case TYPE_LOAD_EXT_UX:
23801 case TYPE_STORE_U:
23802 case TYPE_STORE_UX:
23803 case TYPE_FPLOAD_U:
23804 case TYPE_FPLOAD_UX:
23805 case TYPE_FPSTORE_U:
23806 case TYPE_FPSTORE_UX:
23807 return true;
23808 default:
23809 break;
23811 break;
23812 case PROCESSOR_POWER7:
23813 type = get_attr_type (insn);
23815 switch (type)
23817 case TYPE_CR_LOGICAL:
23818 case TYPE_MFCR:
23819 case TYPE_MFCRF:
23820 case TYPE_MTCR:
23821 case TYPE_IDIV:
23822 case TYPE_LDIV:
23823 case TYPE_COMPARE:
23824 case TYPE_DELAYED_COMPARE:
23825 case TYPE_VAR_DELAYED_COMPARE:
23826 case TYPE_ISYNC:
23827 case TYPE_LOAD_L:
23828 case TYPE_STORE_C:
23829 case TYPE_LOAD_U:
23830 case TYPE_LOAD_UX:
23831 case TYPE_LOAD_EXT:
23832 case TYPE_LOAD_EXT_U:
23833 case TYPE_LOAD_EXT_UX:
23834 case TYPE_STORE_U:
23835 case TYPE_STORE_UX:
23836 case TYPE_FPLOAD_U:
23837 case TYPE_FPLOAD_UX:
23838 case TYPE_FPSTORE_U:
23839 case TYPE_FPSTORE_UX:
23840 case TYPE_MFJMPR:
23841 case TYPE_MTJMPR:
23842 return true;
23843 default:
23844 break;
23846 break;
23847 default:
23848 break;
23851 return false;
23854 static bool
23855 insn_must_be_last_in_group (rtx insn)
23857 enum attr_type type;
23859 if (!insn
23860 || GET_CODE (insn) == NOTE
23861 || DEBUG_INSN_P (insn)
23862 || GET_CODE (PATTERN (insn)) == USE
23863 || GET_CODE (PATTERN (insn)) == CLOBBER)
23864 return false;
23866 switch (rs6000_cpu) {
23867 case PROCESSOR_POWER4:
23868 case PROCESSOR_POWER5:
23869 if (is_microcoded_insn (insn))
23870 return true;
23872 if (is_branch_slot_insn (insn))
23873 return true;
23875 break;
23876 case PROCESSOR_POWER6:
23877 type = get_attr_type (insn);
23879 switch (type)
23881 case TYPE_EXTS:
23882 case TYPE_CNTLZ:
23883 case TYPE_SHIFT:
23884 case TYPE_VAR_SHIFT_ROTATE:
23885 case TYPE_TRAP:
23886 case TYPE_IMUL:
23887 case TYPE_IMUL2:
23888 case TYPE_IMUL3:
23889 case TYPE_LMUL:
23890 case TYPE_IDIV:
23891 case TYPE_DELAYED_COMPARE:
23892 case TYPE_IMUL_COMPARE:
23893 case TYPE_LMUL_COMPARE:
23894 case TYPE_FPCOMPARE:
23895 case TYPE_MFCR:
23896 case TYPE_MTCR:
23897 case TYPE_MFJMPR:
23898 case TYPE_MTJMPR:
23899 case TYPE_ISYNC:
23900 case TYPE_SYNC:
23901 case TYPE_LOAD_L:
23902 case TYPE_STORE_C:
23903 return true;
23904 default:
23905 break;
23907 break;
23908 case PROCESSOR_POWER7:
23909 type = get_attr_type (insn);
23911 switch (type)
23913 case TYPE_ISYNC:
23914 case TYPE_SYNC:
23915 case TYPE_LOAD_L:
23916 case TYPE_STORE_C:
23917 case TYPE_LOAD_EXT_U:
23918 case TYPE_LOAD_EXT_UX:
23919 case TYPE_STORE_UX:
23920 return true;
23921 default:
23922 break;
23924 break;
23925 default:
23926 break;
23929 return false;
23932 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
23933 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
23935 static bool
23936 is_costly_group (rtx *group_insns, rtx next_insn)
23938 int i;
23939 int issue_rate = rs6000_issue_rate ();
23941 for (i = 0; i < issue_rate; i++)
23943 sd_iterator_def sd_it;
23944 dep_t dep;
23945 rtx insn = group_insns[i];
23947 if (!insn)
23948 continue;
23950 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
23952 rtx next = DEP_CON (dep);
23954 if (next == next_insn
23955 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
23956 return true;
23960 return false;
23963 /* Utility of the function redefine_groups.
23964 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
23965 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
23966 to keep it "far" (in a separate group) from GROUP_INSNS, according
23967 to one of the following schemes, depending on the value of the flag
23968 -minsert-sched-nops = X:
23969 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
23970 in order to force NEXT_INSN into a separate group.
23971 (2) X < sched_finish_regroup_exact: insert exactly X nops.
23972 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
23973 insertion (has a group just ended, how many vacant issue slots remain in the
23974 last group, and how many dispatch groups were encountered so far). */
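/* For example (hypothetical option settings): under scheme (2) with
   -minsert-sched-nops=2, exactly two nops are emitted in front of
   NEXT_INSN. Under scheme (1), if two issue slots remain vacant and
   NEXT_INSN is not a branch, a single nop suffices, because only a
   branch could occupy the remaining branch slot, so NEXT_INSN is
   already forced into a new group. */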
23976 static int
23977 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
23978 rtx next_insn, bool *group_end, int can_issue_more,
23979 int *group_count)
23981 rtx nop;
23982 bool force;
23983 int issue_rate = rs6000_issue_rate ();
23984 bool end = *group_end;
23985 int i;
23987 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
23988 return can_issue_more;
23990 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
23991 return can_issue_more;
23993 force = is_costly_group (group_insns, next_insn);
23994 if (!force)
23995 return can_issue_more;
23997 if (sched_verbose > 6)
23998 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
23999 *group_count, can_issue_more);
24001 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
24003 if (*group_end)
24004 can_issue_more = 0;
24006 /* Since only a branch can be issued in the last issue_slot, it is
24007 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
24008 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
24009 in this case the last nop will start a new group and the branch
24010 will be forced to the new group. */
24011 if (can_issue_more && !is_branch_slot_insn (next_insn))
24012 can_issue_more--;
24014 /* Power6 and Power7 have a special group-ending nop. */
24015 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
24017 nop = gen_group_ending_nop ();
24018 emit_insn_before (nop, next_insn);
24019 can_issue_more = 0;
24021 else
24022 while (can_issue_more > 0)
24024 nop = gen_nop ();
24025 emit_insn_before (nop, next_insn);
24026 can_issue_more--;
24029 *group_end = true;
24030 return 0;
24033 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
24035 int n_nops = rs6000_sched_insert_nops;
24037 /* Nops can't be issued from the branch slot, so the effective
24038 issue_rate for nops is 'issue_rate - 1'. */
24039 if (can_issue_more == 0)
24040 can_issue_more = issue_rate;
24041 can_issue_more--;
24042 if (can_issue_more == 0)
24044 can_issue_more = issue_rate - 1;
24045 (*group_count)++;
24046 end = true;
24047 for (i = 0; i < issue_rate; i++)
24049 group_insns[i] = 0;
24053 while (n_nops > 0)
24055 nop = gen_nop ();
24056 emit_insn_before (nop, next_insn);
24057 if (can_issue_more == issue_rate - 1) /* new group begins */
24058 end = false;
24059 can_issue_more--;
24060 if (can_issue_more == 0)
24062 can_issue_more = issue_rate - 1;
24063 (*group_count)++;
24064 end = true;
24065 for (i = 0; i < issue_rate; i++)
24067 group_insns[i] = 0;
24070 n_nops--;
24073 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
24074 can_issue_more++;
24076 /* Is next_insn going to start a new group? */
24077 *group_end
24078 = (end
24079 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24080 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24081 || (can_issue_more < issue_rate &&
24082 insn_terminates_group_p (next_insn, previous_group)));
24083 if (*group_end && end)
24084 (*group_count)--;
24086 if (sched_verbose > 6)
24087 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
24088 *group_count, can_issue_more);
24089 return can_issue_more;
24092 return can_issue_more;
24095 /* This function tries to synchronize the dispatch groups that the compiler "sees"
24096 with the dispatch groups that the processor dispatcher is expected to
24097 form in practice. It tries to achieve this synchronization by forcing the
24098 estimated processor grouping on the compiler (as opposed to the function
24099 'pad_groups', which tries to force the scheduler's grouping on the processor).
24101 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
24102 examines the (estimated) dispatch groups that will be formed by the processor
24103 dispatcher. It marks these group boundaries to reflect the estimated
24104 processor grouping, overriding the grouping that the scheduler had marked.
24105 Depending on the value of the flag '-minsert-sched-nops' this function can
24106 force certain insns into separate groups or force a certain distance between
24107 them by inserting nops, for example, if there exists a "costly dependence"
24108 between the insns.
24110 The function estimates the group boundaries that the processor will form as
24111 follows: It keeps track of how many vacant issue slots are available after
24112 each insn. A subsequent insn will start a new group if one of the following
24113 4 cases applies:
24114 - no more vacant issue slots remain in the current dispatch group.
24115 - only the last issue slot, which is the branch slot, is vacant, but the next
24116 insn is not a branch.
24117 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
24118 which means that a cracked insn (which occupies two issue slots) can't be
24119 issued in this group.
24120 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
24121 start a new group. */
24123 static int
24124 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24126 rtx insn, next_insn;
24127 int issue_rate;
24128 int can_issue_more;
24129 int slot, i;
24130 bool group_end;
24131 int group_count = 0;
24132 rtx *group_insns;
24134 /* Initialize. */
24135 issue_rate = rs6000_issue_rate ();
24136 group_insns = XALLOCAVEC (rtx, issue_rate);
24137 for (i = 0; i < issue_rate; i++)
24139 group_insns[i] = 0;
24141 can_issue_more = issue_rate;
24142 slot = 0;
24143 insn = get_next_active_insn (prev_head_insn, tail);
24144 group_end = false;
24146 while (insn != NULL_RTX)
24148 slot = (issue_rate - can_issue_more);
24149 group_insns[slot] = insn;
24150 can_issue_more =
24151 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24152 if (insn_terminates_group_p (insn, current_group))
24153 can_issue_more = 0;
24155 next_insn = get_next_active_insn (insn, tail);
24156 if (next_insn == NULL_RTX)
24157 return group_count + 1;
24159 /* Is next_insn going to start a new group? */
24160 group_end
24161 = (can_issue_more == 0
24162 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24163 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24164 || (can_issue_more < issue_rate &&
24165 insn_terminates_group_p (next_insn, previous_group)));
24167 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
24168 next_insn, &group_end, can_issue_more,
24169 &group_count);
24171 if (group_end)
24173 group_count++;
24174 can_issue_more = 0;
24175 for (i = 0; i < issue_rate; i++)
24177 group_insns[i] = 0;
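/* The scheduler marks the first insn of each dispatch group by giving it
   TImode; adjust that marking on next_insn so the recorded boundaries
   match the grouping estimated above.  */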
24181 if (GET_MODE (next_insn) == TImode && can_issue_more)
24182 PUT_MODE (next_insn, VOIDmode);
24183 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
24184 PUT_MODE (next_insn, TImode);
24186 insn = next_insn;
24187 if (can_issue_more == 0)
24188 can_issue_more = issue_rate;
24189 } /* while */
24191 return group_count;
24194 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
24195 dispatch group boundaries that the scheduler had marked. Pad with nops
24196 any dispatch groups which have vacant issue slots, in order to force the
24197 scheduler's grouping on the processor dispatcher. The function
24198 returns the number of dispatch groups found. */
24200 static int
24201 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24203 rtx insn, next_insn;
24204 rtx nop;
24205 int issue_rate;
24206 int can_issue_more;
24207 int group_end;
24208 int group_count = 0;
24210 /* Initialize issue_rate. */
24211 issue_rate = rs6000_issue_rate ();
24212 can_issue_more = issue_rate;
24214 insn = get_next_active_insn (prev_head_insn, tail);
24215 next_insn = get_next_active_insn (insn, tail);
24217 while (insn != NULL_RTX)
24219 can_issue_more =
24220 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24222 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24224 if (next_insn == NULL_RTX)
24225 break;
24227 if (group_end)
24229 /* If the scheduler had marked group termination at this location
24230 (between insn and next_insn), and neither insn nor next_insn will
24231 force group termination, pad the group with nops to force group
24232 termination. */
24233 if (can_issue_more
24234 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24235 && !insn_terminates_group_p (insn, current_group)
24236 && !insn_terminates_group_p (next_insn, previous_group))
24238 if (!is_branch_slot_insn (next_insn))
24239 can_issue_more--;
24241 while (can_issue_more)
24243 nop = gen_nop ();
24244 emit_insn_before (nop, next_insn);
24245 can_issue_more--;
24249 can_issue_more = issue_rate;
24250 group_count++;
24253 insn = next_insn;
24254 next_insn = get_next_active_insn (insn, tail);
24257 return group_count;
24260 /* We're beginning a new block. Initialize data structures as necessary. */
24262 static void
24263 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24264 int sched_verbose ATTRIBUTE_UNUSED,
24265 int max_ready ATTRIBUTE_UNUSED)
24267 last_scheduled_insn = NULL_RTX;
24268 load_store_pendulum = 0;
24271 /* The following function is called at the end of scheduling BB.
24272 After reload, it inserts nops to enforce insn group bundling. */
24274 static void
24275 rs6000_sched_finish (FILE *dump, int sched_verbose)
24277 int n_groups;
24279 if (sched_verbose)
24280 fprintf (dump, "=== Finishing schedule.\n");
24282 if (reload_completed && rs6000_sched_groups)
24284 /* Do not run the sched_finish hook when selective scheduling is enabled. */
24285 if (sel_sched_p ())
24286 return;
24288 if (rs6000_sched_insert_nops == sched_finish_none)
24289 return;
24291 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24292 n_groups = pad_groups (dump, sched_verbose,
24293 current_sched_info->prev_head,
24294 current_sched_info->next_tail);
24295 else
24296 n_groups = redefine_groups (dump, sched_verbose,
24297 current_sched_info->prev_head,
24298 current_sched_info->next_tail);
24300 if (sched_verbose >= 6)
24302 fprintf (dump, "ngroups = %d\n", n_groups);
24303 print_rtl (dump, current_sched_info->prev_head);
24304 fprintf (dump, "Done finish_sched\n");
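/* Saved scheduler state; the four hooks below let the selective scheduler
   save and restore the global scheduling context around regions.  */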
24309 struct _rs6000_sched_context
24311 short cached_can_issue_more;
24312 rtx last_scheduled_insn;
24313 int load_store_pendulum;
24316 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24317 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24319 /* Allocate storage for a new scheduling context. */
24320 static void *
24321 rs6000_alloc_sched_context (void)
24323 return xmalloc (sizeof (rs6000_sched_context_def));
24326 /* If CLEAN_P is true, initialize _SC with clean data; otherwise,
24327 initialize it from the global context. */
24328 static void
24329 rs6000_init_sched_context (void *_sc, bool clean_p)
24331 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24333 if (clean_p)
24335 sc->cached_can_issue_more = 0;
24336 sc->last_scheduled_insn = NULL_RTX;
24337 sc->load_store_pendulum = 0;
24339 else
24341 sc->cached_can_issue_more = cached_can_issue_more;
24342 sc->last_scheduled_insn = last_scheduled_insn;
24343 sc->load_store_pendulum = load_store_pendulum;
24347 /* Sets the global scheduling context to the one pointed to by _SC. */
24348 static void
24349 rs6000_set_sched_context (void *_sc)
24351 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24353 gcc_assert (sc != NULL);
24355 cached_can_issue_more = sc->cached_can_issue_more;
24356 last_scheduled_insn = sc->last_scheduled_insn;
24357 load_store_pendulum = sc->load_store_pendulum;
24360 /* Free _SC. */
24361 static void
24362 rs6000_free_sched_context (void *_sc)
24364 gcc_assert (_sc != NULL);
24366 free (_sc);
24370 /* Length in bytes of the trampoline for entering a nested function. */
24373 rs6000_trampoline_size (void)
24375 int ret = 0;
24377 switch (DEFAULT_ABI)
24379 default:
24380 gcc_unreachable ();
24382 case ABI_AIX:
24383 ret = (TARGET_32BIT) ? 12 : 24;
24384 break;
24386 case ABI_DARWIN:
24387 case ABI_V4:
24388 ret = (TARGET_32BIT) ? 40 : 48;
24389 break;
24392 return ret;
24395 /* Emit RTL insns to initialize the variable parts of a trampoline.
24396 FNADDR is an RTX for the address of the function's pure code.
24397 CXT is an RTX for the static chain value for the function. */
24399 static void
24400 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24402 int regsize = (TARGET_32BIT) ? 4 : 8;
24403 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24404 rtx ctx_reg = force_reg (Pmode, cxt);
24405 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24407 switch (DEFAULT_ABI)
24409 default:
24410 gcc_unreachable ();
24412 /* Under AIX, just build the 3-word function descriptor. */
24413 case ABI_AIX:
24415 rtx fnmem, fn_reg, toc_reg;
24417 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24418 error ("You cannot take the address of a nested function if you use "
24419 "the -mno-pointers-to-nested-functions option.");
24421 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24422 fn_reg = gen_reg_rtx (Pmode);
24423 toc_reg = gen_reg_rtx (Pmode);
24425 /* Macro to shorten the code expansions below. */
24426 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24428 m_tramp = replace_equiv_address (m_tramp, addr);
24430 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24431 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24432 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24433 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24434 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24436 # undef MEM_PLUS
24438 break;
24440 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24441 case ABI_DARWIN:
24442 case ABI_V4:
24443 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24444 LCT_NORMAL, VOIDmode, 4,
24445 addr, Pmode,
24446 GEN_INT (rs6000_trampoline_size ()), SImode,
24447 fnaddr, Pmode,
24448 ctx_reg, Pmode);
24449 break;
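/* Added illustration: under ABI_AIX the trampoline built above is a 3-word
   function descriptor,

     word 0: entry-point address (copied from the target's descriptor)
     word 1: TOC anchor (copied from the target's descriptor)
     word 2: static chain value

   where a word is 4 bytes for -m32 and 8 bytes for -m64, matching the
   regsize computation above.  */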
24454 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24455 identifier as an argument, so the front end shouldn't look it up. */
24457 static bool
24458 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24460 return is_attribute_p ("altivec", attr_id);
24463 /* Handle the "altivec" attribute. The attribute may have
24464 arguments as follows:
24466 __attribute__((altivec(vector__)))
24467 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24468 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24470 and may appear more than once (e.g., 'vector bool char') in a
24471 given declaration. */
24473 static tree
24474 rs6000_handle_altivec_attribute (tree *node,
24475 tree name ATTRIBUTE_UNUSED,
24476 tree args,
24477 int flags ATTRIBUTE_UNUSED,
24478 bool *no_add_attrs)
24480 tree type = *node, result = NULL_TREE;
24481 enum machine_mode mode;
24482 int unsigned_p;
24483 char altivec_type
24484 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
24485 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
24486 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
24487 : '?');
24489 while (POINTER_TYPE_P (type)
24490 || TREE_CODE (type) == FUNCTION_TYPE
24491 || TREE_CODE (type) == METHOD_TYPE
24492 || TREE_CODE (type) == ARRAY_TYPE)
24493 type = TREE_TYPE (type);
24495 mode = TYPE_MODE (type);
24497 /* Check for invalid AltiVec type qualifiers. */
24498 if (type == long_double_type_node)
24499 error ("use of %<long double%> in AltiVec types is invalid");
24500 else if (type == boolean_type_node)
24501 error ("use of boolean types in AltiVec types is invalid");
24502 else if (TREE_CODE (type) == COMPLEX_TYPE)
24503 error ("use of %<complex%> in AltiVec types is invalid");
24504 else if (DECIMAL_FLOAT_MODE_P (mode))
24505 error ("use of decimal floating point types in AltiVec types is invalid");
24506 else if (!TARGET_VSX)
24508 if (type == long_unsigned_type_node || type == long_integer_type_node)
24510 if (TARGET_64BIT)
24511 error ("use of %<long%> in AltiVec types is invalid for "
24512 "64-bit code without -mvsx");
24513 else if (rs6000_warn_altivec_long)
24514 warning (0, "use of %<long%> in AltiVec types is deprecated; "
24515 "use %<int%>");
24517 else if (type == long_long_unsigned_type_node
24518 || type == long_long_integer_type_node)
24519 error ("use of %<long long%> in AltiVec types is invalid without "
24520 "-mvsx");
24521 else if (type == double_type_node)
24522 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
24525 switch (altivec_type)
24527 case 'v':
24528 unsigned_p = TYPE_UNSIGNED (type);
24529 switch (mode)
24531 case DImode:
24532 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
24533 break;
24534 case SImode:
24535 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
24536 break;
24537 case HImode:
24538 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
24539 break;
24540 case QImode:
24541 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
24542 break;
24543 case SFmode: result = V4SF_type_node; break;
24544 case DFmode: result = V2DF_type_node; break;
24545 /* If the user says 'vector int bool', we may be handed the 'bool'
24546 attribute _before_ the 'vector' attribute, and so select the
24547 proper type in the 'b' case below. */
24548 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
24549 case V2DImode: case V2DFmode:
24550 result = type;
24551 default: break;
24553 break;
24554 case 'b':
24555 switch (mode)
24557 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
24558 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
24559 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
24560 case QImode: case V16QImode: result = bool_V16QI_type_node;
24561 default: break;
24563 break;
24564 case 'p':
24565 switch (mode)
24567 case V8HImode: result = pixel_V8HI_type_node;
24568 default: break;
24570 default: break;
24573 /* Propagate qualifiers attached to the element type
24574 onto the vector type. */
24575 if (result && result != type && TYPE_QUALS (type))
24576 result = build_qualified_type (result, TYPE_QUALS (type));
24578 *no_add_attrs = true; /* No need to hang on to the attribute. */
24580 if (result)
24581 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
24583 return NULL_TREE;
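/* Usage sketch (added example; the keyword macros shown follow the
   altivec.h conventions and are illustrative):

     #define __vector __attribute__ ((altivec (vector__)))
     #define __pixel  __attribute__ ((altivec (pixel__))) unsigned short
     #define __bool   __attribute__ ((altivec (bool__))) unsigned

     __vector int vi;             V4SImode, via the 'v' case above
     __vector __bool char vbc;    bool_V16QI_type_node, via the 'b' case
     __vector __pixel vp;         pixel_V8HI_type_node, via the 'p' case  */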
24586 /* AltiVec defines four built-in scalar types that serve as vector
24587 elements; we must teach the compiler how to mangle them. */
24589 static const char *
24590 rs6000_mangle_type (const_tree type)
24592 type = TYPE_MAIN_VARIANT (type);
24594 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24595 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24596 return NULL;
24598 if (type == bool_char_type_node) return "U6__boolc";
24599 if (type == bool_short_type_node) return "U6__bools";
24600 if (type == pixel_type_node) return "u7__pixel";
24601 if (type == bool_int_type_node) return "U6__booli";
24602 if (type == bool_long_type_node) return "U6__booll";
24604 /* Mangle IBM extended float long double as `g' (__float128) on
24605 powerpc*-linux where long-double-64 previously was the default. */
24606 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
24607 && TARGET_ELF
24608 && TARGET_LONG_DOUBLE_128
24609 && !TARGET_IEEEQUAD)
24610 return "g";
24612 /* For all other types, use normal C++ mangling. */
24613 return NULL;
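/* Added example: with the table above, a C++ declaration such as
   'void f (__bool char)' mangles its parameter as "U6__boolc", giving
   _Z1fU6__boolc under the Itanium ABI.  (Illustrative; the source-level
   spelling depends on the altivec keyword macros in use.)  */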
24616 /* Handle a "longcall" or "shortcall" attribute; arguments as in
24617 struct attribute_spec.handler. */
24619 static tree
24620 rs6000_handle_longcall_attribute (tree *node, tree name,
24621 tree args ATTRIBUTE_UNUSED,
24622 int flags ATTRIBUTE_UNUSED,
24623 bool *no_add_attrs)
24625 if (TREE_CODE (*node) != FUNCTION_TYPE
24626 && TREE_CODE (*node) != FIELD_DECL
24627 && TREE_CODE (*node) != TYPE_DECL)
24629 warning (OPT_Wattributes, "%qE attribute only applies to functions",
24630 name);
24631 *no_add_attrs = true;
24634 return NULL_TREE;
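/* Usage sketch (added example):

     void far_away (void) __attribute__ ((longcall));

   Calls to far_away are then made through a register (see
   rs6000_longcall_ref below) instead of a direct 'bl'.  */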
24637 /* Set longcall attributes on all functions declared when
24638 rs6000_default_long_calls is true. */
24639 static void
24640 rs6000_set_default_type_attributes (tree type)
24642 if (rs6000_default_long_calls
24643 && (TREE_CODE (type) == FUNCTION_TYPE
24644 || TREE_CODE (type) == METHOD_TYPE))
24645 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
24646 NULL_TREE,
24647 TYPE_ATTRIBUTES (type));
24649 #if TARGET_MACHO
24650 darwin_set_default_type_attributes (type);
24651 #endif
24654 /* Return a reference suitable for calling a function with the
24655 longcall attribute. */
24658 rs6000_longcall_ref (rtx call_ref)
24660 const char *call_name;
24661 tree node;
24663 if (GET_CODE (call_ref) != SYMBOL_REF)
24664 return call_ref;
24666 /* System V adds '.' to the internal name, so skip over any leading dots. */
24667 call_name = XSTR (call_ref, 0);
24668 if (*call_name == '.')
24670 while (*call_name == '.')
24671 call_name++;
24673 node = get_identifier (call_name);
24674 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
24677 return force_reg (Pmode, call_ref);
24680 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
24681 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
24682 #endif
24684 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24685 struct attribute_spec.handler. */
24686 static tree
24687 rs6000_handle_struct_attribute (tree *node, tree name,
24688 tree args ATTRIBUTE_UNUSED,
24689 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24691 tree *type = NULL;
24692 if (DECL_P (*node))
24694 if (TREE_CODE (*node) == TYPE_DECL)
24695 type = &TREE_TYPE (*node);
24697 else
24698 type = node;
24700 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24701 || TREE_CODE (*type) == UNION_TYPE)))
24703 warning (OPT_Wattributes, "%qE attribute ignored", name);
24704 *no_add_attrs = true;
24707 else if ((is_attribute_p ("ms_struct", name)
24708 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24709 || ((is_attribute_p ("gcc_struct", name)
24710 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24712 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
24713 name);
24714 *no_add_attrs = true;
24717 return NULL_TREE;
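/* Usage sketch (added example): the attributes apply to struct and union
   types and are mutually exclusive,

     struct S { char c; int i; } __attribute__ ((ms_struct));
     struct T { char c; int i; } __attribute__ ((gcc_struct));

   ms_struct requests the Microsoft record layout checked by
   rs6000_ms_bitfield_layout_p below.  */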
24720 static bool
24721 rs6000_ms_bitfield_layout_p (const_tree record_type)
24723 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
24724 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24725 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
24728 #ifdef USING_ELFOS_H
24730 /* A get_unnamed_section callback, used for switching to toc_section. */
24732 static void
24733 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24735 if (DEFAULT_ABI == ABI_AIX
24736 && TARGET_MINIMAL_TOC
24737 && !TARGET_RELOCATABLE)
24739 if (!toc_initialized)
24741 toc_initialized = 1;
24742 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24743 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
24744 fprintf (asm_out_file, "\t.tc ");
24745 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
24746 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24747 fprintf (asm_out_file, "\n");
24749 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24750 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24751 fprintf (asm_out_file, " = .+32768\n");
24753 else
24754 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24756 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
24757 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24758 else
24760 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24761 if (!toc_initialized)
24763 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24764 fprintf (asm_out_file, " = .+32768\n");
24765 toc_initialized = 1;
24770 /* Implement TARGET_ASM_INIT_SECTIONS. */
24772 static void
24773 rs6000_elf_asm_init_sections (void)
24775 toc_section
24776 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
24778 sdata2_section
24779 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
24780 SDATA2_SECTION_ASM_OP);
24783 /* Implement TARGET_SELECT_RTX_SECTION. */
24785 static section *
24786 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
24787 unsigned HOST_WIDE_INT align)
24789 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
24790 return toc_section;
24791 else
24792 return default_elf_select_rtx_section (mode, x, align);
24795 /* For a SYMBOL_REF, set generic flags and then perform some
24796 target-specific processing.
24798 When the AIX ABI is requested on a non-AIX system, replace the
24799 function name with the real name (with a leading .) rather than the
24800 function descriptor name. This saves a lot of overriding code to
24801 read the prefixes. */
24803 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
24804 static void
24805 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
24807 default_encode_section_info (decl, rtl, first);
24809 if (first
24810 && TREE_CODE (decl) == FUNCTION_DECL
24811 && !TARGET_AIX
24812 && DEFAULT_ABI == ABI_AIX)
24814 rtx sym_ref = XEXP (rtl, 0);
24815 size_t len = strlen (XSTR (sym_ref, 0));
24816 char *str = XALLOCAVEC (char, len + 2);
24817 str[0] = '.';
24818 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
24819 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
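/* Return true if SECTION is TEMPL itself, or TEMPL followed by a dot;
   e.g. ".sdata" matches ".sdata" and ".sdata.foo" but not ".sdata2".  */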
24823 static inline bool
24824 compare_section_name (const char *section, const char *templ)
24826 int len;
24828 len = strlen (templ);
24829 return (strncmp (section, templ, len) == 0
24830 && (section[len] == 0 || section[len] == '.'));
24833 bool
24834 rs6000_elf_in_small_data_p (const_tree decl)
24836 if (rs6000_sdata == SDATA_NONE)
24837 return false;
24839 /* We want to merge strings, so we never consider them small data. */
24840 if (TREE_CODE (decl) == STRING_CST)
24841 return false;
24843 /* Functions are never in the small data area. */
24844 if (TREE_CODE (decl) == FUNCTION_DECL)
24845 return false;
24847 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
24849 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
24850 if (compare_section_name (section, ".sdata")
24851 || compare_section_name (section, ".sdata2")
24852 || compare_section_name (section, ".gnu.linkonce.s")
24853 || compare_section_name (section, ".sbss")
24854 || compare_section_name (section, ".sbss2")
24855 || compare_section_name (section, ".gnu.linkonce.sb")
24856 || strcmp (section, ".PPC.EMB.sdata0") == 0
24857 || strcmp (section, ".PPC.EMB.sbss0") == 0)
24858 return true;
24860 else
24862 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
24864 if (size > 0
24865 && size <= g_switch_value
24866 /* If it's not public, and we're not going to reference it there,
24867 there's no need to put it in the small data section. */
24868 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
24869 return true;
24872 return false;
24875 #endif /* USING_ELFOS_H */
24877 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
24879 static bool
24880 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
24882 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
24885 /* Return a REG that occurs in ADDR with coefficient 1.
24886 ADDR can be effectively incremented by incrementing REG.
24888 r0 is special and we must not select it as an address
24889 register by this routine since our caller will try to
24890 increment the returned register via an "la" instruction. */
24893 find_addr_reg (rtx addr)
24895 while (GET_CODE (addr) == PLUS)
24897 if (GET_CODE (XEXP (addr, 0)) == REG
24898 && REGNO (XEXP (addr, 0)) != 0)
24899 addr = XEXP (addr, 0);
24900 else if (GET_CODE (XEXP (addr, 1)) == REG
24901 && REGNO (XEXP (addr, 1)) != 0)
24902 addr = XEXP (addr, 1);
24903 else if (CONSTANT_P (XEXP (addr, 0)))
24904 addr = XEXP (addr, 1);
24905 else if (CONSTANT_P (XEXP (addr, 1)))
24906 addr = XEXP (addr, 0);
24907 else
24908 gcc_unreachable ();
24910 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
24911 return addr;
24914 void
24915 rs6000_fatal_bad_address (rtx op)
24917 fatal_insn ("bad address", op);
24920 #if TARGET_MACHO
24922 typedef struct branch_island_d {
24923 tree function_name;
24924 tree label_name;
24925 int line_number;
24926 } branch_island;
24928 DEF_VEC_O(branch_island);
24929 DEF_VEC_ALLOC_O(branch_island,gc);
24931 static VEC(branch_island,gc) *branch_islands;
24933 /* Remember to generate a branch island for far calls to the given
24934 function. */
24936 static void
24937 add_compiler_branch_island (tree label_name, tree function_name,
24938 int line_number)
24940 branch_island bi = {function_name, label_name, line_number};
24941 VEC_safe_push (branch_island, gc, branch_islands, bi);
24944 /* Generate far-jump branch islands for everything recorded in
24945 branch_islands. Invoked immediately after the last instruction of
24946 the epilogue has been emitted; the branch islands must be appended
24947 to, and contiguous with, the function body. Mach-O stubs are
24948 generated in machopic_output_stub(). */
24950 static void
24951 macho_branch_islands (void)
24953 char tmp_buf[512];
24955 while (!VEC_empty (branch_island, branch_islands))
24957 branch_island *bi = &VEC_last (branch_island, branch_islands);
24958 const char *label = IDENTIFIER_POINTER (bi->label_name);
24959 const char *name = IDENTIFIER_POINTER (bi->function_name);
24960 char name_buf[512];
24961 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
24962 if (name[0] == '*' || name[0] == '&')
24963 strcpy (name_buf, name+1);
24964 else
24966 name_buf[0] = '_';
24967 strcpy (name_buf+1, name);
24969 strcpy (tmp_buf, "\n");
24970 strcat (tmp_buf, label);
24971 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
24972 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
24973 dbxout_stabd (N_SLINE, bi->line_number);
24974 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
24975 if (flag_pic)
24977 if (TARGET_LINK_STACK)
24979 char name[32];
24980 get_ppc476_thunk_name (name);
24981 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
24982 strcat (tmp_buf, name);
24983 strcat (tmp_buf, "\n");
24984 strcat (tmp_buf, label);
24985 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24987 else
24989 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
24990 strcat (tmp_buf, label);
24991 strcat (tmp_buf, "_pic\n");
24992 strcat (tmp_buf, label);
24993 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24996 strcat (tmp_buf, "\taddis r11,r11,ha16(");
24997 strcat (tmp_buf, name_buf);
24998 strcat (tmp_buf, " - ");
24999 strcat (tmp_buf, label);
25000 strcat (tmp_buf, "_pic)\n");
25002 strcat (tmp_buf, "\tmtlr r0\n");
25004 strcat (tmp_buf, "\taddi r12,r11,lo16(");
25005 strcat (tmp_buf, name_buf);
25006 strcat (tmp_buf, " - ");
25007 strcat (tmp_buf, label);
25008 strcat (tmp_buf, "_pic)\n");
25010 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
25012 else
25014 strcat (tmp_buf, ":\nlis r12,hi16(");
25015 strcat (tmp_buf, name_buf);
25016 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
25017 strcat (tmp_buf, name_buf);
25018 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
25020 output_asm_insn (tmp_buf, 0);
25021 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25022 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25023 dbxout_stabd (N_SLINE, bi->line_number);
25024 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25025 VEC_pop (branch_island, branch_islands);
25029 /* NO_PREVIOUS_DEF checks whether the function name is already in the
25030 branch island list. */
25032 static int
25033 no_previous_def (tree function_name)
25035 branch_island *bi;
25036 unsigned ix;
25038 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25039 if (function_name == bi->function_name)
25040 return 0;
25041 return 1;
25044 /* GET_PREV_LABEL gets the label name from the previous definition of
25045 the function. */
25047 static tree
25048 get_prev_label (tree function_name)
25050 branch_island *bi;
25051 unsigned ix;
25053 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25054 if (function_name == bi->function_name)
25055 return bi->label_name;
25056 return NULL_TREE;
25059 /* INSN is either a function call or a millicode call. It may have an
25060 unconditional jump in its delay slot.
25062 CALL_DEST is the routine we are calling. */
25064 char *
25065 output_call (rtx insn, rtx *operands, int dest_operand_number,
25066 int cookie_operand_number)
25068 static char buf[256];
25069 if (darwin_emit_branch_islands
25070 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
25071 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
25073 tree labelname;
25074 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
25076 if (no_previous_def (funname))
25078 rtx label_rtx = gen_label_rtx ();
25079 char *label_buf, temp_buf[256];
25080 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
25081 CODE_LABEL_NUMBER (label_rtx));
25082 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
25083 labelname = get_identifier (label_buf);
25084 add_compiler_branch_island (labelname, funname, insn_line (insn));
25086 else
25087 labelname = get_prev_label (funname);
25089 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
25090 instruction will reach 'foo', otherwise link as 'bl L42'".
25091 "L42" should be a 'branch island', that will do a far jump to
25092 'foo'. Branch islands are generated in
25093 macho_branch_islands(). */
25094 sprintf (buf, "jbsr %%z%d,%.246s",
25095 dest_operand_number, IDENTIFIER_POINTER (labelname));
25097 else
25098 sprintf (buf, "bl %%z%d", dest_operand_number);
25099 return buf;
25102 /* Generate PIC and indirect symbol stubs. */
25104 void
25105 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25107 unsigned int length;
25108 char *symbol_name, *lazy_ptr_name;
25109 char *local_label_0;
25110 static int label = 0;
25112 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25113 symb = (*targetm.strip_name_encoding) (symb);
25116 length = strlen (symb);
25117 symbol_name = XALLOCAVEC (char, length + 32);
25118 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25120 lazy_ptr_name = XALLOCAVEC (char, length + 32);
25121 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
25123 if (flag_pic == 2)
25124 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
25125 else
25126 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
25128 if (flag_pic == 2)
25130 fprintf (file, "\t.align 5\n");
25132 fprintf (file, "%s:\n", stub);
25133 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25135 label++;
25136 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
25137 sprintf (local_label_0, "\"L%011d$spb\"", label);
25139 fprintf (file, "\tmflr r0\n");
25140 if (TARGET_LINK_STACK)
25142 char name[32];
25143 get_ppc476_thunk_name (name);
25144 fprintf (file, "\tbl %s\n", name);
25145 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25147 else
25149 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
25150 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25152 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
25153 lazy_ptr_name, local_label_0);
25154 fprintf (file, "\tmtlr r0\n");
25155 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
25156 (TARGET_64BIT ? "ldu" : "lwzu"),
25157 lazy_ptr_name, local_label_0);
25158 fprintf (file, "\tmtctr r12\n");
25159 fprintf (file, "\tbctr\n");
25161 else
25163 fprintf (file, "\t.align 4\n");
25165 fprintf (file, "%s:\n", stub);
25166 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25168 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
25169 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
25170 (TARGET_64BIT ? "ldu" : "lwzu"),
25171 lazy_ptr_name);
25172 fprintf (file, "\tmtctr r12\n");
25173 fprintf (file, "\tbctr\n");
25176 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25177 fprintf (file, "%s:\n", lazy_ptr_name);
25178 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25179 fprintf (file, "%sdyld_stub_binding_helper\n",
25180 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
25183 /* Legitimize PIC addresses. If the address is already
25184 position-independent, we return ORIG. Newly generated
25185 position-independent addresses go into a reg. This is REG if nonzero;
25186 otherwise we allocate register(s) as necessary. */
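/* True iff X is a CONST_INT that fits in a signed 16-bit immediate
   field, i.e. -0x8000 <= INTVAL (X) <= 0x7fff.  */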
25188 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
25191 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
25192 rtx reg)
25194 rtx base, offset;
25196 if (reg == NULL && ! reload_in_progress && ! reload_completed)
25197 reg = gen_reg_rtx (Pmode);
25199 if (GET_CODE (orig) == CONST)
25201 rtx reg_temp;
25203 if (GET_CODE (XEXP (orig, 0)) == PLUS
25204 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
25205 return orig;
25207 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
25209 /* Use a different reg for the intermediate value, as
25210 it will be marked UNCHANGING. */
25211 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25212 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25213 Pmode, reg_temp);
25214 offset =
25215 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25216 Pmode, reg);
25218 if (GET_CODE (offset) == CONST_INT)
25220 if (SMALL_INT (offset))
25221 return plus_constant (Pmode, base, INTVAL (offset));
25222 else if (! reload_in_progress && ! reload_completed)
25223 offset = force_reg (Pmode, offset);
25224 else
25226 rtx mem = force_const_mem (Pmode, orig);
25227 return machopic_legitimize_pic_address (mem, Pmode, reg);
25230 return gen_rtx_PLUS (Pmode, base, offset);
25233 /* Fall back on generic machopic code. */
25234 return machopic_legitimize_pic_address (orig, mode, reg);
25237 /* Output a .machine directive for the Darwin assembler, and call
25238 the generic start_file routine. */
25240 static void
25241 rs6000_darwin_file_start (void)
25243 static const struct
25245 const char *arg;
25246 const char *name;
25247 int if_set;
25248 } mapping[] = {
25249 { "ppc64", "ppc64", MASK_64BIT },
25250 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25251 { "power4", "ppc970", 0 },
25252 { "G5", "ppc970", 0 },
25253 { "7450", "ppc7450", 0 },
25254 { "7400", "ppc7400", MASK_ALTIVEC },
25255 { "G4", "ppc7400", 0 },
25256 { "750", "ppc750", 0 },
25257 { "740", "ppc750", 0 },
25258 { "G3", "ppc750", 0 },
25259 { "604e", "ppc604e", 0 },
25260 { "604", "ppc604", 0 },
25261 { "603e", "ppc603", 0 },
25262 { "603", "ppc603", 0 },
25263 { "601", "ppc601", 0 },
25264 { NULL, "ppc", 0 } };
25265 const char *cpu_id = "";
25266 size_t i;
25268 rs6000_file_start ();
25269 darwin_file_start ();
25271 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25273 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25274 cpu_id = rs6000_default_cpu;
25276 if (global_options_set.x_rs6000_cpu_index)
25277 cpu_id = processor_target_table[rs6000_cpu_index].name;
25279 /* Look through the mapping array. Pick the first name that either
25280 matches the argument, has a bit set in IF_SET that is also set
25281 in the target flags, or has a NULL name. */
25283 i = 0;
25284 while (mapping[i].arg != NULL
25285 && strcmp (mapping[i].arg, cpu_id) != 0
25286 && (mapping[i].if_set & target_flags) == 0)
25287 i++;
25289 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
25292 #endif /* TARGET_MACHO */
25294 #if TARGET_ELF
25295 static int
25296 rs6000_elf_reloc_rw_mask (void)
25298 if (flag_pic)
25299 return 3;
25300 else if (DEFAULT_ABI == ABI_AIX)
25301 return 2;
25302 else
25303 return 0;
25306 /* Record an element in the table of global constructors. SYMBOL is
25307 a SYMBOL_REF of the function to be called; PRIORITY is a number
25308 between 0 and MAX_INIT_PRIORITY.
25310 This differs from default_named_section_asm_out_constructor in
25311 that we have special handling for -mrelocatable. */
25313 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25314 static void
25315 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25317 const char *section = ".ctors";
25318 char buf[16];
25320 if (priority != DEFAULT_INIT_PRIORITY)
25322 sprintf (buf, ".ctors.%.5u",
25323 /* Invert the numbering so the linker puts us in the proper
25324 order; constructors are run from right to left, and the
25325 linker sorts in increasing order. */
25326 MAX_INIT_PRIORITY - priority);
25327 section = buf;
25330 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25331 assemble_align (POINTER_SIZE);
25333 if (TARGET_RELOCATABLE)
25335 fputs ("\t.long (", asm_out_file);
25336 output_addr_const (asm_out_file, symbol);
25337 fputs (")@fixup\n", asm_out_file);
25339 else
25340 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25343 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25344 static void
25345 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25347 const char *section = ".dtors";
25348 char buf[16];
25350 if (priority != DEFAULT_INIT_PRIORITY)
25352 sprintf (buf, ".dtors.%.5u",
25353 /* Invert the numbering so the linker puts us in the proper
25354 order; constructors are run from right to left, and the
25355 linker sorts in increasing order. */
25356 MAX_INIT_PRIORITY - priority);
25357 section = buf;
25360 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25361 assemble_align (POINTER_SIZE);
25363 if (TARGET_RELOCATABLE)
25365 fputs ("\t.long (", asm_out_file);
25366 output_addr_const (asm_out_file, symbol);
25367 fputs (")@fixup\n", asm_out_file);
25369 else
25370 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25373 void
25374 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25376 if (TARGET_64BIT)
25378 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25379 ASM_OUTPUT_LABEL (file, name);
25380 fputs (DOUBLE_INT_ASM_OP, file);
25381 rs6000_output_function_entry (file, name);
25382 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25383 if (DOT_SYMBOLS)
25385 fputs ("\t.size\t", file);
25386 assemble_name (file, name);
25387 fputs (",24\n\t.type\t.", file);
25388 assemble_name (file, name);
25389 fputs (",@function\n", file);
25390 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25392 fputs ("\t.globl\t.", file);
25393 assemble_name (file, name);
25394 putc ('\n', file);
25397 else
25398 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25399 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25400 rs6000_output_function_entry (file, name);
25401 fputs (":\n", file);
25402 return;
25405 if (TARGET_RELOCATABLE
25406 && !TARGET_SECURE_PLT
25407 && (get_pool_size () != 0 || crtl->profile)
25408 && uses_TOC ())
25410 char buf[256];
25412 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25414 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25415 fprintf (file, "\t.long ");
25416 assemble_name (file, buf);
25417 putc ('-', file);
25418 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25419 assemble_name (file, buf);
25420 putc ('\n', file);
25423 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25424 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25426 if (DEFAULT_ABI == ABI_AIX)
25428 const char *desc_name, *orig_name;
25430 orig_name = (*targetm.strip_name_encoding) (name);
25431 desc_name = orig_name;
25432 while (*desc_name == '.')
25433 desc_name++;
25435 if (TREE_PUBLIC (decl))
25436 fprintf (file, "\t.globl %s\n", desc_name);
25438 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25439 fprintf (file, "%s:\n", desc_name);
25440 fprintf (file, "\t.long %s\n", orig_name);
25441 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25442 if (DEFAULT_ABI == ABI_AIX)
25443 fputs ("\t.long 0\n", file);
25444 fprintf (file, "\t.previous\n");
25446 ASM_OUTPUT_LABEL (file, name);
25449 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25450 static void
25451 rs6000_elf_file_end (void)
25453 #ifdef HAVE_AS_GNU_ATTRIBUTE
25454 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
25456 if (rs6000_passes_float)
25457 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25458 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25459 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25460 : 2));
25461 if (rs6000_passes_vector)
25462 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25463 (TARGET_ALTIVEC_ABI ? 2
25464 : TARGET_SPE_ABI ? 3
25465 : 1));
25466 if (rs6000_returns_struct)
25467 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
25468 aix_struct_return ? 2 : 1);
25470 #endif
25471 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25472 if (TARGET_32BIT)
25473 file_end_indicate_exec_stack ();
25474 #endif
25476 #endif
25478 #if TARGET_XCOFF
25479 static void
25480 rs6000_xcoff_asm_output_anchor (rtx symbol)
25482 char buffer[100];
25484 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
25485 SYMBOL_REF_BLOCK_OFFSET (symbol));
25486 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
25489 static void
25490 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
25492 fputs (GLOBAL_ASM_OP, stream);
25493 RS6000_OUTPUT_BASENAME (stream, name);
25494 putc ('\n', stream);
25497 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
25498 points to the section string variable. */
25500 static void
25501 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
25503 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
25504 *(const char *const *) directive,
25505 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25508 /* Likewise for read-write sections. */
25510 static void
25511 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
25513 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
25514 *(const char *const *) directive,
25515 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25518 /* A get_unnamed_section callback, used for switching to toc_section. */
25520 static void
25521 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25523 if (TARGET_MINIMAL_TOC)
25525 /* toc_section is always selected at least once from
25526 rs6000_xcoff_file_start, so this is guaranteed to
25527 always be defined once and only once in each file. */
25528 if (!toc_initialized)
25530 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
25531 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
25532 toc_initialized = 1;
25534 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
25535 (TARGET_32BIT ? "" : ",3"));
25537 else
25538 fputs ("\t.toc\n", asm_out_file);
25541 /* Implement TARGET_ASM_INIT_SECTIONS. */
25543 static void
25544 rs6000_xcoff_asm_init_sections (void)
25546 read_only_data_section
25547 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25548 &xcoff_read_only_section_name);
25550 private_data_section
25551 = get_unnamed_section (SECTION_WRITE,
25552 rs6000_xcoff_output_readwrite_section_asm_op,
25553 &xcoff_private_data_section_name);
25555 read_only_private_data_section
25556 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25557 &xcoff_private_data_section_name);
25559 toc_section
25560 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
25562 readonly_data_section = read_only_data_section;
25563 exception_section = data_section;
25566 static int
25567 rs6000_xcoff_reloc_rw_mask (void)
25569 return 3;
25572 static void
25573 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
25574 tree decl ATTRIBUTE_UNUSED)
25576 int smclass;
25577 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
25579 if (flags & SECTION_CODE)
25580 smclass = 0;
25581 else if (flags & SECTION_TLS)
25582 smclass = 3;
25583 else if (flags & SECTION_WRITE)
25584 smclass = 2;
25585 else
25586 smclass = 1;
25588 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
25589 (flags & SECTION_CODE) ? "." : "",
25590 name, suffix[smclass], flags & SECTION_ENTSIZE);
25593 static section *
25594 rs6000_xcoff_select_section (tree decl, int reloc,
25595 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25597 if (decl_readonly_section (decl, reloc))
25599 if (TREE_PUBLIC (decl))
25600 return read_only_data_section;
25601 else
25602 return read_only_private_data_section;
25604 else
25606 if (TREE_PUBLIC (decl))
25607 return data_section;
25608 else
25609 return private_data_section;
25613 static void
25614 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
25616 const char *name;
25618 /* Use select_section for private and uninitialized data. */
25619 if (!TREE_PUBLIC (decl)
25620 || DECL_COMMON (decl)
25621 || DECL_INITIAL (decl) == NULL_TREE
25622 || DECL_INITIAL (decl) == error_mark_node
25623 || (flag_zero_initialized_in_bss
25624 && initializer_zerop (DECL_INITIAL (decl))))
25625 return;
25627 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
25628 name = (*targetm.strip_name_encoding) (name);
25629 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
25632 /* Select the section for a constant in the constant pool.
25634 On RS/6000, all constants are in the private read-only data area.
25635 However, if this is being placed in the TOC it must be output as a
25636 toc entry. */
25638 static section *
25639 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
25640 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25642 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25643 return toc_section;
25644 else
25645 return read_only_private_data_section;
25648 /* Remove any trailing [DS] or the like from the symbol name. */
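/* For example, "foo[DS]" becomes plain "foo".  */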
25650 static const char *
25651 rs6000_xcoff_strip_name_encoding (const char *name)
25653 size_t len;
25654 if (*name == '*')
25655 name++;
25656 len = strlen (name);
25657 if (name[len - 1] == ']')
25658 return ggc_alloc_string (name, len - 4);
25659 else
25660 return name;
25663 /* Section attributes. AIX is always PIC. */
25665 static unsigned int
25666 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
25668 unsigned int align;
25669 unsigned int flags = default_section_type_flags (decl, name, reloc);
25671 /* Align to at least UNIT size. */
25672 if (flags & SECTION_CODE || !decl)
25673 align = MIN_UNITS_PER_WORD;
25674 else
25675 /* Increase the alignment of large objects unless it is already stricter. */
25676 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
25677 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
25678 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
25680 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
25683 /* Output at beginning of assembler file.
25685 Initialize the section names for the RS/6000 at this point.
25687 Specify filename, including full path, to assembler.
25689 We want to go into the TOC section so at least one .toc will be emitted.
25690 Also, in order to output proper .bs/.es pairs, we need at least one static
25691 [RW] section emitted.
25693 Finally, declare mcount when profiling to make the assembler happy. */
25695 static void
25696 rs6000_xcoff_file_start (void)
25698 rs6000_gen_section_name (&xcoff_bss_section_name,
25699 main_input_filename, ".bss_");
25700 rs6000_gen_section_name (&xcoff_private_data_section_name,
25701 main_input_filename, ".rw_");
25702 rs6000_gen_section_name (&xcoff_read_only_section_name,
25703 main_input_filename, ".ro_");
25705 fputs ("\t.file\t", asm_out_file);
25706 output_quoted_string (asm_out_file, main_input_filename);
25707 fputc ('\n', asm_out_file);
25708 if (write_symbols != NO_DEBUG)
25709 switch_to_section (private_data_section);
25710 switch_to_section (text_section);
25711 if (profile_flag)
25712 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
25713 rs6000_file_start ();
25716 /* Output at end of assembler file.
25717 On the RS/6000, referencing data should automatically pull in text. */
25719 static void
25720 rs6000_xcoff_file_end (void)
25722 switch_to_section (text_section);
25723 fputs ("_section_.text:\n", asm_out_file);
25724 switch_to_section (data_section);
25725 fputs (TARGET_32BIT
25726 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
25727 asm_out_file);
25729 #endif /* TARGET_XCOFF */
25731 /* Compute a (partial) cost for rtx X. Return true if the complete
25732 cost has been computed, and false if subexpressions should be
25733 scanned. In either case, *TOTAL contains the cost result. */
25735 static bool
25736 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
25737 int *total, bool speed)
25739 enum machine_mode mode = GET_MODE (x);
25741 switch (code)
25743 /* On the RS/6000, if it is valid in the insn, it is free. */
25744 case CONST_INT:
25745 if (((outer_code == SET
25746 || outer_code == PLUS
25747 || outer_code == MINUS)
25748 && (satisfies_constraint_I (x)
25749 || satisfies_constraint_L (x)))
25750 || (outer_code == AND
25751 && (satisfies_constraint_K (x)
25752 || (mode == SImode
25753 ? satisfies_constraint_L (x)
25754 : satisfies_constraint_J (x))
25755 || mask_operand (x, mode)
25756 || (mode == DImode
25757 && mask64_operand (x, DImode))))
25758 || ((outer_code == IOR || outer_code == XOR)
25759 && (satisfies_constraint_K (x)
25760 || (mode == SImode
25761 ? satisfies_constraint_L (x)
25762 : satisfies_constraint_J (x))))
25763 || outer_code == ASHIFT
25764 || outer_code == ASHIFTRT
25765 || outer_code == LSHIFTRT
25766 || outer_code == ROTATE
25767 || outer_code == ROTATERT
25768 || outer_code == ZERO_EXTRACT
25769 || (outer_code == MULT
25770 && satisfies_constraint_I (x))
25771 || ((outer_code == DIV || outer_code == UDIV
25772 || outer_code == MOD || outer_code == UMOD)
25773 && exact_log2 (INTVAL (x)) >= 0)
25774 || (outer_code == COMPARE
25775 && (satisfies_constraint_I (x)
25776 || satisfies_constraint_K (x)))
25777 || ((outer_code == EQ || outer_code == NE)
25778 && (satisfies_constraint_I (x)
25779 || satisfies_constraint_K (x)
25780 || (mode == SImode
25781 ? satisfies_constraint_L (x)
25782 : satisfies_constraint_J (x))))
25783 || (outer_code == GTU
25784 && satisfies_constraint_I (x))
25785 || (outer_code == LTU
25786 && satisfies_constraint_P (x)))
25788 *total = 0;
25789 return true;
25791 else if ((outer_code == PLUS
25792 && reg_or_add_cint_operand (x, VOIDmode))
25793 || (outer_code == MINUS
25794 && reg_or_sub_cint_operand (x, VOIDmode))
25795 || ((outer_code == SET
25796 || outer_code == IOR
25797 || outer_code == XOR)
25798 && (INTVAL (x)
25799 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
25801 *total = COSTS_N_INSNS (1);
25802 return true;
25804 /* FALLTHRU */
25806 case CONST_DOUBLE:
25807 if (mode == DImode && code == CONST_DOUBLE)
25809 if ((outer_code == IOR || outer_code == XOR)
25810 && CONST_DOUBLE_HIGH (x) == 0
25811 && (CONST_DOUBLE_LOW (x)
25812 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
25814 *total = 0;
25815 return true;
25817 else if ((outer_code == AND && and64_2_operand (x, DImode))
25818 || ((outer_code == SET
25819 || outer_code == IOR
25820 || outer_code == XOR)
25821 && CONST_DOUBLE_HIGH (x) == 0))
25823 *total = COSTS_N_INSNS (1);
25824 return true;
25827 /* FALLTHRU */
25829 case CONST:
25830 case HIGH:
25831 case SYMBOL_REF:
25832 case MEM:
25833 /* When optimizing for size, MEM should be slightly more expensive
25834 than generating the address, e.g., (plus (reg) (const)).
25835 L1 cache latency is about two instructions. */
25836 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
25837 return true;
25839 case LABEL_REF:
25840 *total = 0;
25841 return true;
25843 case PLUS:
25844 case MINUS:
25845 if (FLOAT_MODE_P (mode))
25846 *total = rs6000_cost->fp;
25847 else
25848 *total = COSTS_N_INSNS (1);
25849 return false;
25851 case MULT:
25852 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25853 && satisfies_constraint_I (XEXP (x, 1)))
25855 if (INTVAL (XEXP (x, 1)) >= -256
25856 && INTVAL (XEXP (x, 1)) <= 255)
25857 *total = rs6000_cost->mulsi_const9;
25858 else
25859 *total = rs6000_cost->mulsi_const;
25861 else if (mode == SFmode)
25862 *total = rs6000_cost->fp;
25863 else if (FLOAT_MODE_P (mode))
25864 *total = rs6000_cost->dmul;
25865 else if (mode == DImode)
25866 *total = rs6000_cost->muldi;
25867 else
25868 *total = rs6000_cost->mulsi;
25869 return false;
25871 case FMA:
25872 if (mode == SFmode)
25873 *total = rs6000_cost->fp;
25874 else
25875 *total = rs6000_cost->dmul;
25876 break;
25878 case DIV:
25879 case MOD:
25880 if (FLOAT_MODE_P (mode))
25882 *total = mode == DFmode ? rs6000_cost->ddiv
25883 : rs6000_cost->sdiv;
25884 return false;
25886 /* FALLTHRU */
25888 case UDIV:
25889 case UMOD:
25890 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25891 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
25893 if (code == DIV || code == MOD)
25894 /* Shift, addze */
25895 *total = COSTS_N_INSNS (2);
25896 else
25897 /* Shift */
25898 *total = COSTS_N_INSNS (1);
25900 else
25902 if (GET_MODE (XEXP (x, 1)) == DImode)
25903 *total = rs6000_cost->divdi;
25904 else
25905 *total = rs6000_cost->divsi;
25907 /* Add in shift and subtract for MOD. */
25908 if (code == MOD || code == UMOD)
25909 *total += COSTS_N_INSNS (2);
25910 return false;
25912 case CTZ:
25913 case FFS:
25914 *total = COSTS_N_INSNS (4);
25915 return false;
25917 case POPCOUNT:
25918 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
25919 return false;
25921 case PARITY:
25922 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
25923 return false;
25925 case NOT:
25926 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
25928 *total = 0;
25929 return false;
25931 /* FALLTHRU */
25933 case AND:
25934 case CLZ:
25935 case IOR:
25936 case XOR:
25937 case ZERO_EXTRACT:
25938 *total = COSTS_N_INSNS (1);
25939 return false;
25941 case ASHIFT:
25942 case ASHIFTRT:
25943 case LSHIFTRT:
25944 case ROTATE:
25945 case ROTATERT:
25946 /* Handle mul_highpart. */
25947 if (outer_code == TRUNCATE
25948 && GET_CODE (XEXP (x, 0)) == MULT)
25950 if (mode == DImode)
25951 *total = rs6000_cost->muldi;
25952 else
25953 *total = rs6000_cost->mulsi;
25954 return true;
25956 else if (outer_code == AND)
25957 *total = 0;
25958 else
25959 *total = COSTS_N_INSNS (1);
25960 return false;
25962 case SIGN_EXTEND:
25963 case ZERO_EXTEND:
25964 if (GET_CODE (XEXP (x, 0)) == MEM)
25965 *total = 0;
25966 else
25967 *total = COSTS_N_INSNS (1);
25968 return false;
25970 case COMPARE:
25971 case NEG:
25972 case ABS:
25973 if (!FLOAT_MODE_P (mode))
25975 *total = COSTS_N_INSNS (1);
25976 return false;
25978 /* FALLTHRU */
25980 case FLOAT:
25981 case UNSIGNED_FLOAT:
25982 case FIX:
25983 case UNSIGNED_FIX:
25984 case FLOAT_TRUNCATE:
25985 *total = rs6000_cost->fp;
25986 return false;
25988 case FLOAT_EXTEND:
25989 if (mode == DFmode)
25990 *total = 0;
25991 else
25992 *total = rs6000_cost->fp;
25993 return false;
25995 case UNSPEC:
25996 switch (XINT (x, 1))
25998 case UNSPEC_FRSP:
25999 *total = rs6000_cost->fp;
26000 return true;
26002 default:
26003 break;
26005 break;
26007 case CALL:
26008 case IF_THEN_ELSE:
26009 if (!speed)
26011 *total = COSTS_N_INSNS (1);
26012 return true;
26014 else if (FLOAT_MODE_P (mode)
26015 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
26017 *total = rs6000_cost->fp;
26018 return false;
26020 break;
26022 case EQ:
26023 case GTU:
26024 case LTU:
26025 /* Carry bit requires mode == Pmode.
26026 NEG or PLUS already counted so only add one. */
26027 if (mode == Pmode
26028 && (outer_code == NEG || outer_code == PLUS))
26030 *total = COSTS_N_INSNS (1);
26031 return true;
26033 if (outer_code == SET)
26035 if (XEXP (x, 1) == const0_rtx)
26037 if (TARGET_ISEL && !TARGET_MFCRF)
26038 *total = COSTS_N_INSNS (8);
26039 else
26040 *total = COSTS_N_INSNS (2);
26041 return true;
26043 else if (mode == Pmode)
26045 *total = COSTS_N_INSNS (3);
26046 return false;
26049 /* FALLTHRU */
26051 case GT:
26052 case LT:
26053 case UNORDERED:
26054 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
26056 if (TARGET_ISEL && !TARGET_MFCRF)
26057 *total = COSTS_N_INSNS (8);
26058 else
26059 *total = COSTS_N_INSNS (2);
26060 return true;
26062 /* CC COMPARE. */
26063 if (outer_code == COMPARE)
26065 *total = 0;
26066 return true;
26068 break;
26070 default:
26071 break;
26074 return false;
26077 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
26079 static bool
26080 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
26081 bool speed)
26083 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
26085 fprintf (stderr,
26086 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
26087 "opno = %d, total = %d, speed = %s, x:\n",
26088 ret ? "complete" : "scan inner",
26089 GET_RTX_NAME (code),
26090 GET_RTX_NAME (outer_code),
26091 opno,
26092 *total,
26093 speed ? "true" : "false");
26095 debug_rtx (x);
26097 return ret;
26100 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
26102 static int
26103 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
26104 addr_space_t as, bool speed)
26106 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
26108 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
26109 ret, speed ? "true" : "false");
26110 debug_rtx (x);
26112 return ret;
26116 /* A C expression returning the cost of moving data from a register of class
26117 CLASS1 to one of CLASS2. */
26119 static int
26120 rs6000_register_move_cost (enum machine_mode mode,
26121 reg_class_t from, reg_class_t to)
26123 int ret;
26125 if (TARGET_DEBUG_COST)
26126 dbg_cost_ctrl++;
26128 /* Moves from/to GENERAL_REGS. */
26129 if (reg_classes_intersect_p (to, GENERAL_REGS)
26130 || reg_classes_intersect_p (from, GENERAL_REGS))
26132 reg_class_t rclass = from;
26134 if (! reg_classes_intersect_p (to, GENERAL_REGS))
26135 rclass = to;
26137 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
26138 ret = (rs6000_memory_move_cost (mode, rclass, false)
26139 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
26141 /* It's more expensive to move CR_REGS than CR0_REGS because of the
26142 shift. */
26143 else if (rclass == CR_REGS)
26144 ret = 4;
26146 /* For those processors that have slow LR/CTR moves, make them more
26147 expensive than memory in order to bias spills to memory. */
26148 else if ((rs6000_cpu == PROCESSOR_POWER6
26149 || rs6000_cpu == PROCESSOR_POWER7)
26150 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
26151 ret = 6 * hard_regno_nregs[0][mode];
26153 else
26154 /* A move will cost one instruction per GPR moved. */
26155 ret = 2 * hard_regno_nregs[0][mode];
26158 /* If we have VSX, we can easily move between FPR and Altivec registers. */
26159 else if (VECTOR_UNIT_VSX_P (mode)
26160 && reg_classes_intersect_p (to, VSX_REGS)
26161 && reg_classes_intersect_p (from, VSX_REGS))
26162 ret = 2 * hard_regno_nregs[32][mode];
26164 /* Moving between two similar registers is just one instruction. */
26165 else if (reg_classes_intersect_p (to, from))
26166 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
26168 /* Everything else has to go through GENERAL_REGS. */
26169 else
26170 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
26171 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
26173 if (TARGET_DEBUG_COST)
26175 if (dbg_cost_ctrl == 1)
26176 fprintf (stderr,
26177 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
26178 ret, GET_MODE_NAME (mode), reg_class_names[from],
26179 reg_class_names[to]);
26180 dbg_cost_ctrl--;
26183 return ret;
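/* Worked example of the rules above: a DImode move between two GPRs on
   a 32-bit target occupies hard_regno_nregs[0][DImode] == 2 registers,
   so it costs 2 * 2 = 4, twice the cost of a single SImode move.  */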
26186 /* A C expression returning the cost of moving data of MODE from a register to
26187 or from memory. */
26189 static int
26190 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
26191 bool in ATTRIBUTE_UNUSED)
26193 int ret;
26195 if (TARGET_DEBUG_COST)
26196 dbg_cost_ctrl++;
26198 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
26199 ret = 4 * hard_regno_nregs[0][mode];
26200 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
26201 ret = 4 * hard_regno_nregs[32][mode];
26202 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
26203 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
26204 else
26205 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
26207 if (TARGET_DEBUG_COST)
26209 if (dbg_cost_ctrl == 1)
26210 fprintf (stderr,
26211 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
26212 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
26213 dbg_cost_ctrl--;
26216 return ret;
26219 /* Returns a code for a target-specific builtin that implements
26220 the reciprocal of the function, or NULL_TREE if not available. */
26222 static tree
26223 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
26224 bool sqrt ATTRIBUTE_UNUSED)
26226 if (optimize_insn_for_size_p ())
26227 return NULL_TREE;
26229 if (md_fn)
26230 switch (fn)
26232 case VSX_BUILTIN_XVSQRTDP:
26233 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
26234 return NULL_TREE;
26236 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
26238 case VSX_BUILTIN_XVSQRTSP:
26239 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
26240 return NULL_TREE;
26242 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
26244 default:
26245 return NULL_TREE;
26248 else
26249 switch (fn)
26251 case BUILT_IN_SQRT:
26252 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
26253 return NULL_TREE;
26255 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
26257 case BUILT_IN_SQRTF:
26258 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
26259 return NULL_TREE;
26261 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
26263 default:
26264 return NULL_TREE;
26268 /* Load up a constant. If the mode is a vector mode, splat the value across
26269 all of the vector elements. */
26271 static rtx
26272 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
26274 rtx reg;
26276 if (mode == SFmode || mode == DFmode)
26278 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
26279 reg = force_reg (mode, d);
26281 else if (mode == V4SFmode)
26283 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
26284 rtvec v = gen_rtvec (4, d, d, d, d);
26285 reg = gen_reg_rtx (mode);
26286 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26288 else if (mode == V2DFmode)
26290 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
26291 rtvec v = gen_rtvec (2, d, d);
26292 reg = gen_reg_rtx (mode);
26293 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26295 else
26296 gcc_unreachable ();
26298 return reg;
26301 /* Generate an FMA instruction. */
26303 static void
26304 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
26306 enum machine_mode mode = GET_MODE (target);
26307 rtx dst;
26309 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26310 gcc_assert (dst != NULL);
26312 if (dst != target)
26313 emit_move_insn (target, dst);
26316 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
26318 static void
26319 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
26321 enum machine_mode mode = GET_MODE (target);
26322 rtx dst;
26324 /* Altivec does not support fms directly;
26325 generate in terms of fma in that case. */
26326 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
26327 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
26328 else
26330 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
26331 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26333 gcc_assert (dst != NULL);
26335 if (dst != target)
26336 emit_move_insn (target, dst);
26339 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
26341 static void
26342 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
26344 enum machine_mode mode = GET_MODE (dst);
26345 rtx r;
26347 /* This is a tad more complicated, since the fnma_optab is for
26348 a different expression: fma(-m1, m2, a), which is the same
26349 thing except in the case of signed zeros.
26351 Fortunately we know that if FMA is supported, FNMSUB is
26352 also supported in the ISA. Just expand it directly. */
26354 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
26356 r = gen_rtx_NEG (mode, a);
26357 r = gen_rtx_FMA (mode, m1, m2, r);
26358 r = gen_rtx_NEG (mode, r);
26359 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
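/* Concretely: with m1 * m2 == +0.0 and a == +0.0, fma (-m1, m2, a)
   yields +0.0 but -fma (m1, m2, -a) yields -0.0, so the two forms can
   differ only in the sign of a zero result.  */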
26362 /* Newton-Raphson approximation of floating point divide with just 2 passes
26363 (either single precision floating point, or newer machines with higher
26364 accuracy estimates). Support both scalar and vector divide. Assumes no
26365 trapping math and finite arguments. */
26367 static void
26368 rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
26370 enum machine_mode mode = GET_MODE (dst);
26371 rtx x0, e0, e1, y1, u0, v0;
26372 enum insn_code code = optab_handler (smul_optab, mode);
26373 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26374 rtx one = rs6000_load_constant_and_splat (mode, dconst1);
26376 gcc_assert (code != CODE_FOR_nothing);
26378 /* x0 = 1./d estimate */
26379 x0 = gen_reg_rtx (mode);
26380 emit_insn (gen_rtx_SET (VOIDmode, x0,
26381 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26382 UNSPEC_FRES)));
26384 e0 = gen_reg_rtx (mode);
26385 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - (d * x0) */
26387 e1 = gen_reg_rtx (mode);
26388 rs6000_emit_madd (e1, e0, e0, e0); /* e1 = (e0 * e0) + e0 */
26390 y1 = gen_reg_rtx (mode);
26391 rs6000_emit_madd (y1, e1, x0, x0); /* y1 = (e1 * x0) + x0 */
26393 u0 = gen_reg_rtx (mode);
26394 emit_insn (gen_mul (u0, n, y1)); /* u0 = n * y1 */
26396 v0 = gen_reg_rtx (mode);
26397 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - (d * u0) */
26399 rs6000_emit_madd (dst, v0, y1, u0); /* dst = (v0 * y1) + u0 */
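/* In plain C terms, the sequence above computes (a sketch, not used by
   the compiler; fres() stands in for the fres instruction's 1/d
   estimate):

     double x0 = fres (d);          // estimate of 1/d
     double e0 = 1.0 - d * x0;      // relative error of the estimate
     double e1 = e0 * e0 + e0;      // fold two correction terms
     double y1 = e1 * x0 + x0;      // y1 = (1 - e0^3) / d
     double u0 = n * y1;            // quotient estimate
     double v0 = n - d * u0;        // residual
     double q  = v0 * y1 + u0;      // final relative error is e0^6

   so two fused refinement stages suffice when the initial estimate is
   already accurate (single precision, or -mrecip-precision).  */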
26402 /* Newton-Raphson approximation of floating point divide that has a low
26403 precision estimate. Assumes no trapping math and finite arguments. */
26405 static void
26406 rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
26408 enum machine_mode mode = GET_MODE (dst);
26409 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
26410 enum insn_code code = optab_handler (smul_optab, mode);
26411 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26413 gcc_assert (code != CODE_FOR_nothing);
26415 one = rs6000_load_constant_and_splat (mode, dconst1);
26417 /* x0 = 1./d estimate */
26418 x0 = gen_reg_rtx (mode);
26419 emit_insn (gen_rtx_SET (VOIDmode, x0,
26420 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26421 UNSPEC_FRES)));
26423 e0 = gen_reg_rtx (mode);
26424 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - d * x0 */
26426 y1 = gen_reg_rtx (mode);
26427 rs6000_emit_madd (y1, e0, x0, x0); /* y1 = x0 + e0 * x0 */
26429 e1 = gen_reg_rtx (mode);
26430 emit_insn (gen_mul (e1, e0, e0)); /* e1 = e0 * e0 */
26432 y2 = gen_reg_rtx (mode);
26433 rs6000_emit_madd (y2, e1, y1, y1); /* y2 = y1 + e1 * y1 */
26435 e2 = gen_reg_rtx (mode);
26436 emit_insn (gen_mul (e2, e1, e1)); /* e2 = e1 * e1 */
26438 y3 = gen_reg_rtx (mode);
26439 rs6000_emit_madd (y3, e2, y2, y2); /* y3 = y2 + e2 * y2 */
26441 u0 = gen_reg_rtx (mode);
26442 emit_insn (gen_mul (u0, n, y3)); /* u0 = n * y3 */
26444 v0 = gen_reg_rtx (mode);
26445 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - d * u0 */
26447 rs6000_emit_madd (dst, v0, y3, u0); /* dst = u0 + v0 * y3 */
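/* Here the initial estimate is less accurate, so the correction series
   is applied one term at a time: each y(k+1) = y(k) + e(k)*y(k) step
   squares the relative error (e(k+1) = e(k)^2), i.e. it doubles the
   number of correct bits, and three such steps plus the final u0/v0
   correction are needed to reach full precision.  */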
26450 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
26451 add a reg_note saying that this was a division. Support both scalar and
26452 vector divide. Assumes no trapping math and finite arguments. */
26454 void
26455 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
26457 enum machine_mode mode = GET_MODE (dst);
26459 if (RS6000_RECIP_HIGH_PRECISION_P (mode))
26460 rs6000_emit_swdiv_high_precision (dst, n, d);
26461 else
26462 rs6000_emit_swdiv_low_precision (dst, n, d);
26464 if (note_p)
26465 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
26468 /* Newton-Raphson approximation of single/double-precision floating point
26469 rsqrt. Assumes no trapping math and finite arguments. */
26471 void
26472 rs6000_emit_swrsqrt (rtx dst, rtx src)
26474 enum machine_mode mode = GET_MODE (src);
26475 rtx x0 = gen_reg_rtx (mode);
26476 rtx y = gen_reg_rtx (mode);
26477 int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
26478 REAL_VALUE_TYPE dconst3_2;
26479 int i;
26480 rtx halfthree;
26481 enum insn_code code = optab_handler (smul_optab, mode);
26482 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26484 gcc_assert (code != CODE_FOR_nothing);
26486 /* Load up the constant 1.5 either as a scalar, or as a vector. */
26487 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
26488 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
26490 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
26492 /* x0 = rsqrt estimate */
26493 emit_insn (gen_rtx_SET (VOIDmode, x0,
26494 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
26495 UNSPEC_RSQRT)));
26497 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
26498 rs6000_emit_msub (y, src, halfthree, src);
26500 for (i = 0; i < passes; i++)
26502 rtx x1 = gen_reg_rtx (mode);
26503 rtx u = gen_reg_rtx (mode);
26504 rtx v = gen_reg_rtx (mode);
26506 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
26507 emit_insn (gen_mul (u, x0, x0));
26508 rs6000_emit_nmsub (v, y, u, halfthree);
26509 emit_insn (gen_mul (x1, x0, v));
26510 x0 = x1;
26513 emit_move_insn (dst, x0);
26514 return;
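/* Each pass above is one Newton-Raphson step for f(x) = 1/(x*x) - s,
   which in scalar form is

     x1 = x0 * (1.5 - 0.5 * s * x0 * x0);

   with y holding 0.5 * s.  A step roughly doubles the number of
   correct bits in the estimate, hence 2 passes when the hardware
   estimate is accurate (-mrecip-precision) and 3 otherwise.  */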
26517 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
26518 (Power7) targets. DST is the target, and SRC is the argument operand. */
26520 void
26521 rs6000_emit_popcount (rtx dst, rtx src)
26523 enum machine_mode mode = GET_MODE (dst);
26524 rtx tmp1, tmp2;
26526 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
26527 if (TARGET_POPCNTD)
26529 if (mode == SImode)
26530 emit_insn (gen_popcntdsi2 (dst, src));
26531 else
26532 emit_insn (gen_popcntddi2 (dst, src));
26533 return;
26536 tmp1 = gen_reg_rtx (mode);
26538 if (mode == SImode)
26540 emit_insn (gen_popcntbsi2 (tmp1, src));
26541 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
26542 NULL_RTX, 0);
26543 tmp2 = force_reg (SImode, tmp2);
26544 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
26546 else
26548 emit_insn (gen_popcntbdi2 (tmp1, src));
26549 tmp2 = expand_mult (DImode, tmp1,
26550 GEN_INT ((HOST_WIDE_INT)
26551 0x01010101 << 32 | 0x01010101),
26552 NULL_RTX, 0);
26553 tmp2 = force_reg (DImode, tmp2);
26554 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
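/* In plain C terms (a sketch; popcntb() stands in for the per-byte
   population count instruction), the SImode fallback above is:

     unsigned t = popcntb (x);       // per-byte counts, each <= 8
     return (t * 0x01010101) >> 24;  // multiply sums all four bytes
                                     // into the top byte

   e.g. x = 0xF0F000FF gives t = 0x04040008 and a final count of
   4 + 4 + 0 + 8 = 16.  No byte sum can exceed 32, so the multiply
   never carries between byte lanes; the DImode version is the same
   with a 0x0101010101010101 multiplier and a shift by 56.  */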
26559 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
26560 target, and SRC is the argument operand. */
26562 void
26563 rs6000_emit_parity (rtx dst, rtx src)
26565 enum machine_mode mode = GET_MODE (dst);
26566 rtx tmp;
26568 tmp = gen_reg_rtx (mode);
26570 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
26571 if (TARGET_CMPB)
26573 if (mode == SImode)
26575 emit_insn (gen_popcntbsi2 (tmp, src));
26576 emit_insn (gen_paritysi2_cmpb (dst, tmp));
26578 else
26580 emit_insn (gen_popcntbdi2 (tmp, src));
26581 emit_insn (gen_paritydi2_cmpb (dst, tmp));
26583 return;
26586 if (mode == SImode)
26588 /* Is mult+shift >= shift+xor+shift+xor? */
26589 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
26591 rtx tmp1, tmp2, tmp3, tmp4;
26593 tmp1 = gen_reg_rtx (SImode);
26594 emit_insn (gen_popcntbsi2 (tmp1, src));
26596 tmp2 = gen_reg_rtx (SImode);
26597 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
26598 tmp3 = gen_reg_rtx (SImode);
26599 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
26601 tmp4 = gen_reg_rtx (SImode);
26602 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
26603 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
26605 else
26606 rs6000_emit_popcount (tmp, src);
26607 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
26609 else
26611 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
26612 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
26614 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
26616 tmp1 = gen_reg_rtx (DImode);
26617 emit_insn (gen_popcntbdi2 (tmp1, src));
26619 tmp2 = gen_reg_rtx (DImode);
26620 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
26621 tmp3 = gen_reg_rtx (DImode);
26622 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
26624 tmp4 = gen_reg_rtx (DImode);
26625 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
26626 tmp5 = gen_reg_rtx (DImode);
26627 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
26629 tmp6 = gen_reg_rtx (DImode);
26630 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
26631 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
26633 else
26634 rs6000_emit_popcount (tmp, src);
26635 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
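/* The shift+xor fallback in plain C terms (a sketch; popcntb() again
   stands in for the per-byte popcount instruction), SImode case:

     unsigned t = popcntb (x);   // bit 0 of each byte = that byte's parity
     t ^= t >> 16;
     t ^= t >> 8;
     return t & 1;               // xor of all the byte parities

   This path is preferred when a constant multiply costs at least as
   much as the shift/xor sequence.  */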
26639 /* Expand an Altivec constant permutation. Return true if we match
26640 an efficient implementation; false to fall back to VPERM. */
26642 bool
26643 altivec_expand_vec_perm_const (rtx operands[4])
26645 struct altivec_perm_insn {
26646 enum insn_code impl;
26647 unsigned char perm[16];
26649 static const struct altivec_perm_insn patterns[] = {
26650 { CODE_FOR_altivec_vpkuhum,
26651 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
26652 { CODE_FOR_altivec_vpkuwum,
26653 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
26654 { CODE_FOR_altivec_vmrghb,
26655 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
26656 { CODE_FOR_altivec_vmrghh,
26657 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
26658 { CODE_FOR_altivec_vmrghw,
26659 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
26660 { CODE_FOR_altivec_vmrglb,
26661 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
26662 { CODE_FOR_altivec_vmrglh,
26663 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
26664 { CODE_FOR_altivec_vmrglw,
26665 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
26668 unsigned int i, j, elt, which;
26669 unsigned char perm[16];
26670 rtx target, op0, op1, sel, x;
26671 bool one_vec;
26673 target = operands[0];
26674 op0 = operands[1];
26675 op1 = operands[2];
26676 sel = operands[3];
26678 /* Unpack the constant selector. */
26679 for (i = which = 0; i < 16; ++i)
26681 rtx e = XVECEXP (sel, 0, i);
26682 elt = INTVAL (e) & 31;
26683 which |= (elt < 16 ? 1 : 2);
26684 perm[i] = elt;
26687 /* Simplify the constant selector based on operands. */
26688 switch (which)
26690 default:
26691 gcc_unreachable ();
26693 case 3:
26694 one_vec = false;
26695 if (!rtx_equal_p (op0, op1))
26696 break;
26697 /* FALLTHRU */
26699 case 2:
26700 for (i = 0; i < 16; ++i)
26701 perm[i] &= 15;
26702 op0 = op1;
26703 one_vec = true;
26704 break;
26706 case 1:
26707 op1 = op0;
26708 one_vec = true;
26709 break;
26712 /* Look for splat patterns. */
26713 if (one_vec)
26715 elt = perm[0];
26717 for (i = 0; i < 16; ++i)
26718 if (perm[i] != elt)
26719 break;
26720 if (i == 16)
26722 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
26723 return true;
26726 if (elt % 2 == 0)
26728 for (i = 0; i < 16; i += 2)
26729 if (perm[i] != elt || perm[i + 1] != elt + 1)
26730 break;
26731 if (i == 16)
26733 x = gen_reg_rtx (V8HImode);
26734 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
26735 GEN_INT (elt / 2)));
26736 emit_move_insn (target, gen_lowpart (V16QImode, x));
26737 return true;
26741 if (elt % 4 == 0)
26743 for (i = 0; i < 16; i += 4)
26744 if (perm[i] != elt
26745 || perm[i + 1] != elt + 1
26746 || perm[i + 2] != elt + 2
26747 || perm[i + 3] != elt + 3)
26748 break;
26749 if (i == 16)
26751 x = gen_reg_rtx (V4SImode);
26752 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
26753 GEN_INT (elt / 4)));
26754 emit_move_insn (target, gen_lowpart (V16QImode, x));
26755 return true;
26760 /* Look for merge and pack patterns. */
26761 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
26763 bool swapped;
26765 elt = patterns[j].perm[0];
26766 if (perm[0] == elt)
26767 swapped = false;
26768 else if (perm[0] == elt + 16)
26769 swapped = true;
26770 else
26771 continue;
26772 for (i = 1; i < 16; ++i)
26774 elt = patterns[j].perm[i];
26775 if (swapped)
26776 elt = (elt >= 16 ? elt - 16 : elt + 16);
26777 else if (one_vec && elt >= 16)
26778 elt -= 16;
26779 if (perm[i] != elt)
26780 break;
26782 if (i == 16)
26784 enum insn_code icode = patterns[j].impl;
26785 enum machine_mode omode = insn_data[icode].operand[0].mode;
26786 enum machine_mode imode = insn_data[icode].operand[1].mode;
26788 if (swapped)
26789 x = op0, op0 = op1, op1 = x;
26790 if (imode != V16QImode)
26792 op0 = gen_lowpart (imode, op0);
26793 op1 = gen_lowpart (imode, op1);
26795 if (omode == V16QImode)
26796 x = target;
26797 else
26798 x = gen_reg_rtx (omode);
26799 emit_insn (GEN_FCN (icode) (x, op0, op1));
26800 if (omode != V16QImode)
26801 emit_move_insn (target, gen_lowpart (V16QImode, x));
26802 return true;
26806 return false;
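/* Example: the selector { 0, 16, 1, 17, ..., 7, 23 } matches the
   vmrghb entry directly, while { 16, 0, 17, 1, ..., 23, 7 } matches
   the same entry with every index moved across the 16-byte boundary,
   i.e. vmrghb with its two input operands swapped; this is why each
   pattern is tried both ways.  */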
26809 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
26810 Return true if we match an efficient implementation. */
26812 static bool
26813 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
26814 unsigned char perm0, unsigned char perm1)
26816 rtx x;
26818 /* If both selectors come from the same operand, fold to single op. */
26819 if ((perm0 & 2) == (perm1 & 2))
26821 if (perm0 & 2)
26822 op0 = op1;
26823 else
26824 op1 = op0;
26826 /* If both operands are equal, fold to simpler permutation. */
26827 if (rtx_equal_p (op0, op1))
26829 perm0 = perm0 & 1;
26830 perm1 = (perm1 & 1) + 2;
26832 /* If the first selector comes from the second operand, swap. */
26833 else if (perm0 & 2)
26835 if (perm1 & 2)
26836 return false;
26837 perm0 -= 2;
26838 perm1 += 2;
26839 x = op0, op0 = op1, op1 = x;
26841 /* If the second selector does not come from the second operand, fail. */
26842 else if ((perm1 & 2) == 0)
26843 return false;
26845 /* Success! */
26846 if (target != NULL)
26848 enum machine_mode vmode, dmode;
26849 rtvec v;
26851 vmode = GET_MODE (target);
26852 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
26853 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
26855 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
26856 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
26857 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
26858 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26860 return true;
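/* The two-bit selectors encode { operand, element }: bit 1 picks op0
   versus op1 and bit 0 picks the element within it, so after the
   VEC_CONCAT above, perm0 and perm1 index directly into the 4-element
   double-wide vector.  */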
26863 bool
26864 rs6000_expand_vec_perm_const (rtx operands[4])
26866 rtx target, op0, op1, sel;
26867 unsigned char perm0, perm1;
26869 target = operands[0];
26870 op0 = operands[1];
26871 op1 = operands[2];
26872 sel = operands[3];
26874 /* Unpack the constant selector. */
26875 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
26876 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
26878 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
26881 /* Test whether a constant permutation is supported. */
26883 static bool
26884 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
26885 const unsigned char *sel)
26887 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
26888 if (TARGET_ALTIVEC)
26889 return true;
26891 /* Check for ps_merge* or evmerge* insns. */
26892 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
26893 || (TARGET_SPE && vmode == V2SImode))
26895 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
26896 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
26897 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
26900 return false;
26903 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
26905 static void
26906 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
26907 enum machine_mode vmode, unsigned nelt, rtx perm[])
26909 enum machine_mode imode;
26910 rtx x;
26912 imode = vmode;
26913 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
26915 imode = GET_MODE_INNER (vmode);
26916 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
26917 imode = mode_for_vector (imode, nelt);
26920 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
26921 x = expand_vec_perm (vmode, op0, op1, x, target);
26922 if (x != target)
26923 emit_move_insn (target, x);
26926 /* Expand an extract even operation. */
26928 void
26929 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
26931 enum machine_mode vmode = GET_MODE (target);
26932 unsigned i, nelt = GET_MODE_NUNITS (vmode);
26933 rtx perm[16];
26935 for (i = 0; i < nelt; i++)
26936 perm[i] = GEN_INT (i * 2);
26938 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
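/* E.g. for a 4-element mode this builds the selector { 0, 2, 4, 6 },
   the even elements of the double-wide concatenation of op0 and
   op1.  */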
26941 /* Expand a vector interleave operation. */
26943 void
26944 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
26946 enum machine_mode vmode = GET_MODE (target);
26947 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
26948 rtx perm[16];
26950 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
26951 for (i = 0; i < nelt / 2; i++)
26953 perm[i * 2] = GEN_INT (i + high);
26954 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
26957 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
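/* E.g. for a 4-element mode on a big-endian target, highp yields the
   selector { 0, 4, 1, 5 } (the high halves of op0 and op1
   interleaved) and !highp yields { 2, 6, 3, 7 }.  */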
26960 /* Return an RTX representing where to find the function value of a
26961 function returning MODE. */
26962 static rtx
26963 rs6000_complex_function_value (enum machine_mode mode)
26965 unsigned int regno;
26966 rtx r1, r2;
26967 enum machine_mode inner = GET_MODE_INNER (mode);
26968 unsigned int inner_bytes = GET_MODE_SIZE (inner);
26970 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26971 regno = FP_ARG_RETURN;
26972 else
26974 regno = GP_ARG_RETURN;
26976 /* 32-bit is OK since it'll go in r3/r4. */
26977 if (TARGET_32BIT && inner_bytes >= 4)
26978 return gen_rtx_REG (mode, regno);
26981 if (inner_bytes >= 8)
26982 return gen_rtx_REG (mode, regno);
26984 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
26985 const0_rtx);
26986 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
26987 GEN_INT (inner_bytes));
26988 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
26991 /* Target hook for TARGET_FUNCTION_VALUE.
26993 On the SPE, both FPs and vectors are returned in r3.
26995 On RS/6000 an integer value is in r3 and a floating-point value is in
26996 fp1, unless -msoft-float. */
26998 static rtx
26999 rs6000_function_value (const_tree valtype,
27000 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
27001 bool outgoing ATTRIBUTE_UNUSED)
27003 enum machine_mode mode;
27004 unsigned int regno;
27006 /* Special handling for structs in darwin64. */
27007 if (TARGET_MACHO
27008 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
27010 CUMULATIVE_ARGS valcum;
27011 rtx valret;
27013 valcum.words = 0;
27014 valcum.fregno = FP_ARG_MIN_REG;
27015 valcum.vregno = ALTIVEC_ARG_MIN_REG;
27016 /* Do a trial code generation as if this were going to be passed as
27017 an argument; if any part goes in memory, we return NULL. */
27018 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
27019 if (valret)
27020 return valret;
27021 /* Otherwise fall through to standard ABI rules. */
27024 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
27026 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
27027 return gen_rtx_PARALLEL (DImode,
27028 gen_rtvec (2,
27029 gen_rtx_EXPR_LIST (VOIDmode,
27030 gen_rtx_REG (SImode, GP_ARG_RETURN),
27031 const0_rtx),
27032 gen_rtx_EXPR_LIST (VOIDmode,
27033 gen_rtx_REG (SImode,
27034 GP_ARG_RETURN + 1),
27035 GEN_INT (4))));
27037 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
27039 return gen_rtx_PARALLEL (DCmode,
27040 gen_rtvec (4,
27041 gen_rtx_EXPR_LIST (VOIDmode,
27042 gen_rtx_REG (SImode, GP_ARG_RETURN),
27043 const0_rtx),
27044 gen_rtx_EXPR_LIST (VOIDmode,
27045 gen_rtx_REG (SImode,
27046 GP_ARG_RETURN + 1),
27047 GEN_INT (4)),
27048 gen_rtx_EXPR_LIST (VOIDmode,
27049 gen_rtx_REG (SImode,
27050 GP_ARG_RETURN + 2),
27051 GEN_INT (8)),
27052 gen_rtx_EXPR_LIST (VOIDmode,
27053 gen_rtx_REG (SImode,
27054 GP_ARG_RETURN + 3),
27055 GEN_INT (12))));
27058 mode = TYPE_MODE (valtype);
27059 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
27060 || POINTER_TYPE_P (valtype))
27061 mode = TARGET_32BIT ? SImode : DImode;
27063 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27064 /* _Decimal128 must use an even/odd register pair. */
27065 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27066 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
27067 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
27068 regno = FP_ARG_RETURN;
27069 else if (TREE_CODE (valtype) == COMPLEX_TYPE
27070 && targetm.calls.split_complex_arg)
27071 return rs6000_complex_function_value (mode);
27072 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27073 return register is used in both cases, and we won't see V2DImode/V2DFmode
27074 for pure altivec, combine the two cases. */
27075 else if (TREE_CODE (valtype) == VECTOR_TYPE
27076 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
27077 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
27078 regno = ALTIVEC_ARG_RETURN;
27079 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27080 && (mode == DFmode || mode == DCmode
27081 || mode == TFmode || mode == TCmode))
27082 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27083 else
27084 regno = GP_ARG_RETURN;
27086 return gen_rtx_REG (mode, regno);
27089 /* Define how to find the value returned by a library function
27090 assuming the value has mode MODE. */
27091 rtx
27092 rs6000_libcall_value (enum machine_mode mode)
27094 unsigned int regno;
27096 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
27098 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
27099 return gen_rtx_PARALLEL (DImode,
27100 gen_rtvec (2,
27101 gen_rtx_EXPR_LIST (VOIDmode,
27102 gen_rtx_REG (SImode, GP_ARG_RETURN),
27103 const0_rtx),
27104 gen_rtx_EXPR_LIST (VOIDmode,
27105 gen_rtx_REG (SImode,
27106 GP_ARG_RETURN + 1),
27107 GEN_INT (4))));
27110 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27111 /* _Decimal128 must use an even/odd register pair. */
27112 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27113 else if (SCALAR_FLOAT_MODE_P (mode)
27114 && TARGET_HARD_FLOAT && TARGET_FPRS
27115 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
27116 regno = FP_ARG_RETURN;
27117 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27118 return register is used in both cases, and we won't see V2DImode/V2DFmode
27119 for pure altivec, combine the two cases. */
27120 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
27121 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
27122 regno = ALTIVEC_ARG_RETURN;
27123 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
27124 return rs6000_complex_function_value (mode);
27125 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27126 && (mode == DFmode || mode == DCmode
27127 || mode == TFmode || mode == TCmode))
27128 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27129 else
27130 regno = GP_ARG_RETURN;
27132 return gen_rtx_REG (mode, regno);
27136 /* Given FROM and TO register numbers, say whether this elimination is allowed.
27137 Frame pointer elimination is automatically handled.
27139 For the RS/6000, if frame pointer elimination is being done, we would like
27140 to convert ap into fp, not sp.
27142 We need r30 if -mminimal-toc was specified, and there are constant pool
27143 references. */
27145 static bool
27146 rs6000_can_eliminate (const int from, const int to)
27148 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
27149 ? ! frame_pointer_needed
27150 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
27151 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
27152 : true);
27155 /* Define the offset between two registers, FROM to be eliminated and its
27156 replacement TO, at the start of a routine. */
27157 HOST_WIDE_INT
27158 rs6000_initial_elimination_offset (int from, int to)
27160 rs6000_stack_t *info = rs6000_stack_info ();
27161 HOST_WIDE_INT offset;
27163 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27164 offset = info->push_p ? 0 : -info->total_size;
27165 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27167 offset = info->push_p ? 0 : -info->total_size;
27168 if (FRAME_GROWS_DOWNWARD)
27169 offset += info->fixed_size + info->vars_size + info->parm_size;
27171 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27172 offset = FRAME_GROWS_DOWNWARD
27173 ? info->fixed_size + info->vars_size + info->parm_size
27174 : 0;
27175 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27176 offset = info->total_size;
27177 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27178 offset = info->push_p ? info->total_size : 0;
27179 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
27180 offset = 0;
27181 else
27182 gcc_unreachable ();
27184 return offset;
27187 static rtx
27188 rs6000_dwarf_register_span (rtx reg)
27190 rtx parts[8];
27191 int i, words;
27192 unsigned regno = REGNO (reg);
27193 enum machine_mode mode = GET_MODE (reg);
27195 if (TARGET_SPE
27196 && regno < 32
27197 && (SPE_VECTOR_MODE (GET_MODE (reg))
27198 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
27199 && mode != SFmode && mode != SDmode && mode != SCmode)))
27201 else
27202 return NULL_RTX;
27204 regno = REGNO (reg);
27206 /* The duality of the SPE register size wreaks all kinds of havoc.
27207 This is a way of distinguishing r0 in 32-bits from r0 in
27208 64-bits. */
27209 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
27210 gcc_assert (words <= 4);
27211 for (i = 0; i < words; i++, regno++)
27213 if (BYTES_BIG_ENDIAN)
27215 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
27216 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
27218 else
27220 parts[2 * i] = gen_rtx_REG (SImode, regno);
27221 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
27225 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
27228 /* Fill in sizes for SPE register high parts in table used by unwinder. */
27230 static void
27231 rs6000_init_dwarf_reg_sizes_extra (tree address)
27233 if (TARGET_SPE)
27235 int i;
27236 enum machine_mode mode = TYPE_MODE (char_type_node);
27237 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
27238 rtx mem = gen_rtx_MEM (BLKmode, addr);
27239 rtx value = gen_int_mode (4, mode);
27241 for (i = 1201; i < 1232; i++)
27243 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
27244 HOST_WIDE_INT offset
27245 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
27247 emit_move_insn (adjust_address (mem, mode, offset), value);
27252 /* Map internal gcc register numbers to DWARF2 register numbers. */
27254 unsigned int
27255 rs6000_dbx_register_number (unsigned int regno)
27257 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
27258 return regno;
27259 if (regno == LR_REGNO)
27260 return 108;
27261 if (regno == CTR_REGNO)
27262 return 109;
27263 if (CR_REGNO_P (regno))
27264 return regno - CR0_REGNO + 86;
27265 if (regno == CA_REGNO)
27266 return 101; /* XER */
27267 if (ALTIVEC_REGNO_P (regno))
27268 return regno - FIRST_ALTIVEC_REGNO + 1124;
27269 if (regno == VRSAVE_REGNO)
27270 return 356;
27271 if (regno == VSCR_REGNO)
27272 return 67;
27273 if (regno == SPE_ACC_REGNO)
27274 return 99;
27275 if (regno == SPEFSCR_REGNO)
27276 return 612;
27277 /* SPE high reg number. We get these values of regno from
27278 rs6000_dwarf_register_span. */
27279 gcc_assert (regno >= 1200 && regno < 1232);
27280 return regno;
27283 /* Target hook for eh_return_filter_mode. */
27284 static enum machine_mode
27285 rs6000_eh_return_filter_mode (void)
27287 return TARGET_32BIT ? SImode : word_mode;
27290 /* Target hook for scalar_mode_supported_p. */
27291 static bool
27292 rs6000_scalar_mode_supported_p (enum machine_mode mode)
27294 if (DECIMAL_FLOAT_MODE_P (mode))
27295 return default_decimal_float_supported_p ();
27296 else
27297 return default_scalar_mode_supported_p (mode);
27300 /* Target hook for vector_mode_supported_p. */
27301 static bool
27302 rs6000_vector_mode_supported_p (enum machine_mode mode)
27305 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
27306 return true;
27308 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
27309 return true;
27311 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
27312 return true;
27314 else
27315 return false;
27318 /* Target hook for invalid_arg_for_unprototyped_fn. */
27319 static const char *
27320 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
27322 return (!rs6000_darwin64_abi
27323 && typelist == 0
27324 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
27325 && (funcdecl == NULL_TREE
27326 || (TREE_CODE (funcdecl) == FUNCTION_DECL
27327 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
27328 ? N_("AltiVec argument passed to unprototyped function")
27329 : NULL;
27332 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
27333 setup by using __stack_chk_fail_local hidden function instead of
27334 calling __stack_chk_fail directly. Otherwise it is better to call
27335 __stack_chk_fail directly. */
27337 static tree ATTRIBUTE_UNUSED
27338 rs6000_stack_protect_fail (void)
27340 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
27341 ? default_hidden_stack_protect_fail ()
27342 : default_external_stack_protect_fail ();
27345 void
27346 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
27347 int num_operands ATTRIBUTE_UNUSED)
27349 if (rs6000_warn_cell_microcode)
27351 const char *temp;
27352 int insn_code_number = recog_memoized (insn);
27353 location_t location = INSN_LOCATION (insn);
27355 /* Punt on insns we cannot recognize. */
27356 if (insn_code_number < 0)
27357 return;
27359 temp = get_insn_template (insn_code_number, insn);
27361 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
27362 warning_at (location, OPT_mwarn_cell_microcode,
27363 "emitting microcode insn %s\t[%s] #%d",
27364 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27365 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
27366 warning_at (location, OPT_mwarn_cell_microcode,
27367 "emitting conditional microcode insn %s\t[%s] #%d",
27368 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27373 /* Mask options that we want to support inside of attribute((target)) and
27374 #pragma GCC target operations. Note, we do not include things like
27375 64/32-bit, endianness, hard/soft floating point, etc. that would have
27376 different calling sequences. */
27378 struct rs6000_opt_mask {
27379 const char *name; /* option name */
27380 int mask; /* mask to set */
27381 bool invert; /* invert sense of mask */
27382 bool valid_target; /* option is a target option */
27385 static struct rs6000_opt_mask const rs6000_opt_masks[] =
27387 { "altivec", MASK_ALTIVEC, false, true },
27388 { "cmpb", MASK_CMPB, false, true },
27389 { "dlmzb", MASK_DLMZB, false, true },
27390 { "fprnd", MASK_FPRND, false, true },
27391 { "hard-dfp", MASK_DFP, false, true },
27392 { "isel", MASK_ISEL, false, true },
27393 { "mfcrf", MASK_MFCRF, false, true },
27394 { "mfpgpr", MASK_MFPGPR, false, true },
27395 { "mulhw", MASK_MULHW, false, true },
27396 { "multiple", MASK_MULTIPLE, false, true },
27397 { "update", MASK_NO_UPDATE, true , true },
27398 { "popcntb", MASK_POPCNTB, false, true },
27399 { "popcntd", MASK_POPCNTD, false, true },
27400 { "powerpc-gfxopt", MASK_PPC_GFXOPT, false, true },
27401 { "powerpc-gpopt", MASK_PPC_GPOPT, false, true },
27402 { "recip-precision", MASK_RECIP_PRECISION, false, true },
27403 { "string", MASK_STRING, false, true },
27404 { "vsx", MASK_VSX, false, true },
27405 #ifdef MASK_64BIT
27406 #if TARGET_AIX_OS
27407 { "aix64", MASK_64BIT, false, false },
27408 { "aix32", MASK_64BIT, true, false },
27409 #else
27410 { "64", MASK_64BIT, false, false },
27411 { "32", MASK_64BIT, true, false },
27412 #endif
27413 #endif
27414 #ifdef MASK_EABI
27415 { "eabi", MASK_EABI, false, false },
27416 #endif
27417 #ifdef MASK_LITTLE_ENDIAN
27418 { "little", MASK_LITTLE_ENDIAN, false, false },
27419 { "big", MASK_LITTLE_ENDIAN, true, false },
27420 #endif
27421 #ifdef MASK_RELOCATABLE
27422 { "relocatable", MASK_RELOCATABLE, false, false },
27423 #endif
27424 #ifdef MASK_STRICT_ALIGN
27425 { "strict-align", MASK_STRICT_ALIGN, false, false },
27426 #endif
27427 { "soft-float", MASK_SOFT_FLOAT, false, false },
27428 { "string", MASK_STRING, false, false },
27431 /* Builtin mask mapping for printing the flags. */
27432 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
27434 { "altivec", RS6000_BTM_ALTIVEC, false, false },
27435 { "vsx", RS6000_BTM_VSX, false, false },
27436 { "spe", RS6000_BTM_SPE, false, false },
27437 { "paired", RS6000_BTM_PAIRED, false, false },
27438 { "fre", RS6000_BTM_FRE, false, false },
27439 { "fres", RS6000_BTM_FRES, false, false },
27440 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
27441 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
27442 { "popcntd", RS6000_BTM_POPCNTD, false, false },
27443 { "cell", RS6000_BTM_CELL, false, false },
27446 /* Option variables that we want to support inside attribute((target)) and
27447 #pragma GCC target operations. */
27449 struct rs6000_opt_var {
27450 const char *name; /* option name */
27451 size_t global_offset; /* offset of the option in global_options. */
27452 size_t target_offset; /* offset of the option in target options. */
27455 static struct rs6000_opt_var const rs6000_opt_vars[] =
27457 { "friz",
27458 offsetof (struct gcc_options, x_TARGET_FRIZ),
27459 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
27460 { "avoid-indexed-addresses",
27461 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
27462 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
27463 { "paired",
27464 offsetof (struct gcc_options, x_rs6000_paired_float),
27465 offsetof (struct cl_target_option, x_rs6000_paired_float), },
27466 { "longcall",
27467 offsetof (struct gcc_options, x_rs6000_default_long_calls),
27468 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
27471 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
27472 parsing. Return true if there were no errors. */
27474 static bool
27475 rs6000_inner_target_options (tree args, bool attr_p)
27477 bool ret = true;
27479 if (args == NULL_TREE)
27482 else if (TREE_CODE (args) == STRING_CST)
27484 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27485 char *q;
27487 while ((q = strtok (p, ",")) != NULL)
27489 bool error_p = false;
27490 bool not_valid_p = false;
27491 const char *cpu_opt = NULL;
27493 p = NULL;
27494 if (strncmp (q, "cpu=", 4) == 0)
27496 int cpu_index = rs6000_cpu_name_lookup (q+4);
27497 if (cpu_index >= 0)
27498 rs6000_cpu_index = cpu_index;
27499 else
27501 error_p = true;
27502 cpu_opt = q+4;
27505 else if (strncmp (q, "tune=", 5) == 0)
27507 int tune_index = rs6000_cpu_name_lookup (q+5);
27508 if (tune_index >= 0)
27509 rs6000_tune_index = tune_index;
27510 else
27512 error_p = true;
27513 cpu_opt = q+5;
27516 else
27518 size_t i;
27519 bool invert = false;
27520 char *r = q;
27522 error_p = true;
27523 if (strncmp (r, "no-", 3) == 0)
27525 invert = true;
27526 r += 3;
27529 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27530 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
27532 int mask = rs6000_opt_masks[i].mask;
27534 if (!rs6000_opt_masks[i].valid_target)
27535 not_valid_p = true;
27536 else
27538 error_p = false;
27539 target_flags_explicit |= mask;
27541 /* VSX needs altivec, so -mvsx automagically sets
27542 altivec. */
27543 if (mask == MASK_VSX && !invert)
27544 mask |= MASK_ALTIVEC;
27546 if (rs6000_opt_masks[i].invert)
27547 invert = !invert;
27549 if (invert)
27550 target_flags &= ~mask;
27551 else
27552 target_flags |= mask;
27554 break;
27557 if (error_p && !not_valid_p)
27559 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27560 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
27562 size_t j = rs6000_opt_vars[i].global_offset;
27563 *((int *) ((char *)&global_options + j)) = !invert;
27564 error_p = false;
27565 break;
27570 if (error_p)
27572 const char *eprefix, *esuffix;
27574 ret = false;
27575 if (attr_p)
27577 eprefix = "__attribute__((__target__(";
27578 esuffix = ")))";
27580 else
27582 eprefix = "#pragma GCC target ";
27583 esuffix = "";
27586 if (cpu_opt)
27587 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
27588 q, esuffix);
27589 else if (not_valid_p)
27590 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
27591 else
27592 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
27597 else if (TREE_CODE (args) == TREE_LIST)
27601 tree value = TREE_VALUE (args);
27602 if (value)
27604 bool ret2 = rs6000_inner_target_options (value, attr_p);
27605 if (!ret2)
27606 ret = false;
27608 args = TREE_CHAIN (args);
27610 while (args != NULL_TREE);
27613 else
27614 gcc_unreachable ();
27616 return ret;
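/* Note the strtok idiom above: the first call receives the string and
   every later call receives NULL (hence the "p = NULL;" inside the
   loop), so an argument such as "no-vsx,cpu=power7" is consumed one
   comma-separated token at a time.  */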
27619 /* Print out the target options as a list for -mdebug=target. */
27621 static void
27622 rs6000_debug_target_options (tree args, const char *prefix)
27624 if (args == NULL_TREE)
27625 fprintf (stderr, "%s<NULL>", prefix);
27627 else if (TREE_CODE (args) == STRING_CST)
27629 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27630 char *q;
27632 while ((q = strtok (p, ",")) != NULL)
27634 p = NULL;
27635 fprintf (stderr, "%s\"%s\"", prefix, q);
27636 prefix = ", ";
27640 else if (TREE_CODE (args) == TREE_LIST)
27644 tree value = TREE_VALUE (args);
27645 if (value)
27647 rs6000_debug_target_options (value, prefix);
27648 prefix = ", ";
27650 args = TREE_CHAIN (args);
27652 while (args != NULL_TREE);
27655 else
27656 gcc_unreachable ();
27658 return;
27662 /* Hook to validate attribute((target("..."))). */
27664 static bool
27665 rs6000_valid_attribute_p (tree fndecl,
27666 tree ARG_UNUSED (name),
27667 tree args,
27668 int flags)
27670 struct cl_target_option cur_target;
27671 bool ret;
27672 tree old_optimize = build_optimization_node ();
27673 tree new_target, new_optimize;
27674 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27676 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
27678 if (TARGET_DEBUG_TARGET)
27680 tree tname = DECL_NAME (fndecl);
27681 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
27682 if (tname)
27683 fprintf (stderr, "function: %.*s\n",
27684 (int) IDENTIFIER_LENGTH (tname),
27685 IDENTIFIER_POINTER (tname));
27686 else
27687 fprintf (stderr, "function: unknown\n");
27689 fprintf (stderr, "args:");
27690 rs6000_debug_target_options (args, " ");
27691 fprintf (stderr, "\n");
27693 if (flags)
27694 fprintf (stderr, "flags: 0x%x\n", flags);
27696 fprintf (stderr, "--------------------\n");
27699 old_optimize = build_optimization_node ();
27700 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27702 /* If the function changed the optimization levels as well as setting target
27703 options, start with the optimizations specified. */
27704 if (func_optimize && func_optimize != old_optimize)
27705 cl_optimization_restore (&global_options,
27706 TREE_OPTIMIZATION (func_optimize));
27708 /* The target attributes may also change some optimization flags, so update
27709 the optimization options if necessary. */
27710 cl_target_option_save (&cur_target, &global_options);
27711 rs6000_cpu_index = rs6000_tune_index = -1;
27712 ret = rs6000_inner_target_options (args, true);
27714 /* Set up any additional state. */
27715 if (ret)
27717 ret = rs6000_option_override_internal (false);
27718 new_target = build_target_option_node ();
27720 else
27721 new_target = NULL;
27723 new_optimize = build_optimization_node ();
27725 if (!new_target)
27726 ret = false;
27728 else if (fndecl)
27730 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
27732 if (old_optimize != new_optimize)
27733 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
27736 cl_target_option_restore (&global_options, &cur_target);
27738 if (old_optimize != new_optimize)
27739 cl_optimization_restore (&global_options,
27740 TREE_OPTIMIZATION (old_optimize));
27742 return ret;
27746 /* Hook to validate the current #pragma GCC target and set the state, and
27747 update the macros based on what was changed. If ARGS is NULL, then
27748 POP_TARGET is used to reset the options. */
27750 bool
27751 rs6000_pragma_target_parse (tree args, tree pop_target)
27753 tree prev_tree = build_target_option_node ();
27754 tree cur_tree;
27755 struct cl_target_option *prev_opt, *cur_opt;
27756 unsigned prev_bumask, cur_bumask, diff_bumask;
27757 int prev_flags, cur_flags, diff_flags;
27759 if (TARGET_DEBUG_TARGET)
27761 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
27762 fprintf (stderr, "args:");
27763 rs6000_debug_target_options (args, " ");
27764 fprintf (stderr, "\n");
27766 if (pop_target)
27768 fprintf (stderr, "pop_target:\n");
27769 debug_tree (pop_target);
27771 else
27772 fprintf (stderr, "pop_target: <NULL>\n");
27774 fprintf (stderr, "--------------------\n");
27777 if (! args)
27779 cur_tree = ((pop_target)
27780 ? pop_target
27781 : target_option_default_node);
27782 cl_target_option_restore (&global_options,
27783 TREE_TARGET_OPTION (cur_tree));
27785 else
27787 rs6000_cpu_index = rs6000_tune_index = -1;
27788 if (!rs6000_inner_target_options (args, false)
27789 || !rs6000_option_override_internal (false)
27790 || (cur_tree = build_target_option_node ()) == NULL_TREE)
27792 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
27793 fprintf (stderr, "invalid pragma\n");
27795 return false;
27799 target_option_current_node = cur_tree;
27801 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
27802 change the macros that are defined. */
27803 if (rs6000_target_modify_macros_ptr)
27805 prev_opt = TREE_TARGET_OPTION (prev_tree);
27806 prev_bumask = prev_opt->x_rs6000_builtin_mask;
27807 prev_flags = prev_opt->x_target_flags;
27809 cur_opt = TREE_TARGET_OPTION (cur_tree);
27810 cur_flags = cur_opt->x_target_flags;
27811 cur_bumask = cur_opt->x_rs6000_builtin_mask;
27813 diff_bumask = (prev_bumask ^ cur_bumask);
27814 diff_flags = (prev_flags ^ cur_flags);
27816 if ((diff_flags != 0) || (diff_bumask != 0))
27818 /* Delete old macros. */
27819 rs6000_target_modify_macros_ptr (false,
27820 prev_flags & diff_flags,
27821 prev_bumask & diff_bumask);
27823 /* Define new macros. */
27824 rs6000_target_modify_macros_ptr (true,
27825 cur_flags & diff_flags,
27826 cur_bumask & diff_bumask);
27830 return true;
27834 /* Remember the last target of rs6000_set_current_function. */
27835 static GTY(()) tree rs6000_previous_fndecl;
27837 /* Establish appropriate back-end context for processing the function
27838 FNDECL. The argument might be NULL to indicate processing at top
27839 level, outside of any function scope. */
27840 static void
27841 rs6000_set_current_function (tree fndecl)
27843 tree old_tree = (rs6000_previous_fndecl
27844 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
27845 : NULL_TREE);
27847 tree new_tree = (fndecl
27848 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
27849 : NULL_TREE);
27851 if (TARGET_DEBUG_TARGET)
27853 bool print_final = false;
27854 fprintf (stderr, "\n==================== rs6000_set_current_function");
27856 if (fndecl)
27857 fprintf (stderr, ", fndecl %s (%p)",
27858 (DECL_NAME (fndecl)
27859 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
27860 : "<unknown>"), (void *)fndecl);
27862 if (rs6000_previous_fndecl)
27863 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
27865 fprintf (stderr, "\n");
27866 if (new_tree)
27868 fprintf (stderr, "\nnew fndecl target specific options:\n");
27869 debug_tree (new_tree);
27870 print_final = true;
27873 if (old_tree)
27875 fprintf (stderr, "\nold fndecl target specific options:\n");
27876 debug_tree (old_tree);
27877 print_final = true;
27880 if (print_final)
27881 fprintf (stderr, "--------------------\n");
27884 /* Only change the context if the function changes. This hook is called
27885 several times in the course of compiling a function, and we don't want to
27886 slow things down too much or call target_reinit when it isn't safe. */
27887 if (fndecl && fndecl != rs6000_previous_fndecl)
27889 rs6000_previous_fndecl = fndecl;
27890 if (old_tree == new_tree)
27893 else if (new_tree)
27895 cl_target_option_restore (&global_options,
27896 TREE_TARGET_OPTION (new_tree));
27897 target_reinit ();
27900 else if (old_tree)
27902 struct cl_target_option *def
27903 = TREE_TARGET_OPTION (target_option_current_node);
27905 cl_target_option_restore (&global_options, def);
27906 target_reinit ();
27912 /* Save the current options */
27914 static void
27915 rs6000_function_specific_save (struct cl_target_option *ptr)
27917 ptr->rs6000_target_flags_explicit = target_flags_explicit;
27920 /* Restore the current options */
27922 static void
27923 rs6000_function_specific_restore (struct cl_target_option *ptr)
27925 target_flags_explicit = ptr->rs6000_target_flags_explicit;
27926 (void) rs6000_option_override_internal (false);
27929 /* Print the current options */
27931 static void
27932 rs6000_function_specific_print (FILE *file, int indent,
27933 struct cl_target_option *ptr)
27935 size_t i;
27936 int flags = ptr->x_target_flags;
27937 unsigned bu_mask = ptr->x_rs6000_builtin_mask;
27939 /* Print the various mask options. */
27940 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27941 if ((flags & rs6000_opt_masks[i].mask) != 0)
27943 flags &= ~ rs6000_opt_masks[i].mask;
27944 fprintf (file, "%*s-m%s%s\n", indent, "",
27945 rs6000_opt_masks[i].invert ? "no-" : "",
27946 rs6000_opt_masks[i].name);
27949 /* Print the various options that are variables. */
27950 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27952 size_t j = rs6000_opt_vars[i].target_offset;
27953 if (((signed char *) ptr)[j])
27954 fprintf (file, "%*s-m%s\n", indent, "",
27955 rs6000_opt_vars[i].name);
27958 /* Print the various builtin flags. */
27959 fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
27960 for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
27961 if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
27963 fprintf (file, "%*s%s builtins supported\n", indent, "",
27964 rs6000_builtin_mask_names[i].name);
27969 /* Hook to determine if one function can safely inline another. */
27971 static bool
27972 rs6000_can_inline_p (tree caller, tree callee)
27974 bool ret = false;
27975 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
27976 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
27978 /* If callee has no option attributes, then it is ok to inline. */
27979 if (!callee_tree)
27980 ret = true;
27982 /* If caller has no option attributes, but callee does then it is not ok to
27983 inline. */
27984 else if (!caller_tree)
27985 ret = false;
27987 else
27989 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
27990 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
27992 /* Callee's options should be a subset of the caller's, i.e. a vsx function
27993 can inline an altivec function but a non-vsx function can't inline a
27994 vsx function. */
27995 if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
27996 == callee_opts->x_target_flags)
27997 ret = true;
28000 if (TARGET_DEBUG_TARGET)
28001 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
28002 (DECL_NAME (caller)
28003 ? IDENTIFIER_POINTER (DECL_NAME (caller))
28004 : "<unknown>"),
28005 (DECL_NAME (callee)
28006 ? IDENTIFIER_POINTER (DECL_NAME (callee))
28007 : "<unknown>"),
28008 (ret ? "can" : "cannot"));
28010 return ret;

/* Allocate a stack temp and fix up the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
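
/* Hypothetical usage sketch (the variable name is invented): a caller that
   needs a DImode stack slot usable by reg+reg-only instructions would write

       rtx slot = rs6000_allocate_stack_temp (DImode, false, true);

   and get back a MEM whose address is a single base register, a legitimate
   reg+reg sum, or an address freshly copied into a register.  */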

/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }

      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
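
/* For illustration, the PRE_INC/PRE_DEC rewrite above turns an address such
   as (pre_dec (reg 9)) for an 8-byte access into an explicit "addi 9,9,-8"
   followed by a plain (reg 9) address, which reg+reg-only instructions like
   STFIWX can consume once the final copy_addr_to_reg has run.  */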

/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
          && !legitimate_indirect_address_p (addr, strict_p))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
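
/* For illustration: the AND with -16 clears the low four address bits, so an
   address like 0x1008 is treated as 0x1000.  This mirrors what the AltiVec
   lvx/stvx instructions do in hardware (they ignore the low 4 bits of the
   effective address); the explicit AND simply makes that truncation visible
   in the RTL.  */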

/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable; most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}
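
/* Examples (illustrative): any (const_int N) is accepted, as is a VOIDmode
   CONST_DOUBLE (the representation of integer constants wider than a host
   word); a DFmode or vector constant is accepted only when easy_fp_constant
   or easy_vector_constant reports that it can be materialized cheaply; and
   anything mentioning a TLS symbol is rejected before those checks run.  */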

/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */

void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx func_addr;
  rtx toc_reg;
  rtx sc_reg;
  rtx stack_ptr;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix32bit;
          call_value_func = gen_call_value_indirect_aix32bit;
        }
      else
        {
          call_func = gen_call_indirect_aix32bit_nor11;
          call_value_func = gen_call_value_indirect_aix32bit_nor11;
        }
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix64bit;
          call_value_func = gen_call_value_indirect_aix64bit;
        }
      else
        {
          call_func = gen_call_indirect_aix64bit_nor11;
          call_value_func = gen_call_value_indirect_aix64bit_nor11;
        }
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_ptr,
                                               stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
                              gen_rtx_PLUS (Pmode,
                                            func_desc,
                                            func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      func_sc_mem = gen_rtx_MEM (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               func_desc,
                                               func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
                            stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}
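
/* A hypothetical C rendering of the descriptor this function walks (the
   struct and field names are invented; the code above works purely in terms
   of the offset macros):

       struct aix_func_desc
       {
         void *entry;         // actual code address -> func_addr
         void *toc;           // callee's TOC, loaded in the post-reload split
         void *static_chain;  // copied to r11 when nested-function pointers
                              //   are in use
       };

   The three words are 4 bytes apart in 32-bit mode and 8 bytes apart in
   64-bit mode, which is what the *_32BIT and *_64BIT offset macros
   express.  */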

/* Return whether we need to always update the saved TOC pointer when we
   update the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}

/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
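
/* For illustration, the thunk emitted above is essentially just

       __ppc476.get_thunk:
               blr

   Call sites read the current address with "bl __ppc476.get_thunk" followed
   by "mflr rN"; because every bl is then paired with a blr, the 476's link
   stack predictor stays balanced, which the classic "bcl 20,31,$+4" trick
   does not guarantee.  */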

/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"