/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
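/* Illustration: these are ordinary function-like macros, so
   min (3, 5) expands to ((3) < (5) ? (3) : (5)), i.e. 3.  As with any
   such macro, an argument may be evaluated twice, so side effects as
   in min (i++, j) should be avoided.  */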
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
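/* Illustration (rough sketch, not the exact computation): the size
   fields above combine as approximately

     total_size ~ vars_size + parm_size + save_size + fixed_size

   with rs6000_stack_info applying the ABI-required alignment when it
   lays out the real frame.  */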
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
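/* Illustration: this structure hangs off each function via the
   init_machine_status hook, so later passes can test, for example,

     if (cfun->machine->ra_need_lr)
       ...

   to see whether __builtin_return_address (0) forces a link register
   save.  */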
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
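/* Illustration: parsing -mrecip=rsqrtf finds the "rsqrtf" row above
   and ORs in its mask:

     RECIP_SF_RSQRT | RECIP_V4SF_RSQRT == 0x010 | 0x040 == 0x050

   enabling reciprocal square root estimates for SFmode and V4SFmode
   only.  */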
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;    /* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
};
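/* Illustration: COSTS_N_INSNS (N) is ((N) * 4) (see rtl.h), so the
   Cell entries above halve measured latencies with integer division,
   e.g. mulsi = COSTS_N_INSNS (9/2) + 2 = 4*4 + 2 = 18, roughly the
   cost of 4.5 adds.  */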
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
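/* Illustration: each RS6000_BUILTIN_n macro above turns one line of
   rs6000-builtin.def into an initializer.  A hypothetical entry

     RS6000_BUILTIN_2 (FOO, "__builtin_foo", MASK_ALTIVEC,
		       RS6000_BTC_CONST, CODE_FOR_foo)

   would contribute

     { "__builtin_foo", CODE_FOR_foo, MASK_ALTIVEC, RS6000_BTC_CONST },

   to rs6000_builtin_info[]; the trailing #undefs keep the macros from
   leaking into later uses of the .def file.  */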
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function *rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
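/* Illustration: with %v0 in the most significant bit,

     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)      == 0x80000000
     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001

   so a vrsave_mask of 0xc0000000 marks %v0 and %v1 as live.  */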
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
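/* Illustration: the #undef/#define pairs above feed TARGET_INITIALIZER
   (from target-def.h), so the definition

     struct gcc_target targetm = TARGET_INITIALIZER;

   elsewhere in this file makes a call such as targetm.rtx_costs (...)
   dispatch to rs6000_rtx_costs.  */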
/* Simplifications for entries below.  */

enum {
  POWERPC_7400_MASK = MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs,
   we don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user
   wants either, the user must explicitly specify them and we won't interfere
   with the user's specification.  */

enum {
  POWERPC_MASKS = (MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};

/* Masks for the instructions enabled at the various PowerPC ISA levels.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't
     add ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04,
     fsel, fre, fsqrt, etc. were no longer documented as optional.  Group
     masks by server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};

struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int) i;
    }

  return -1;
}
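/* Illustration: rs6000_cpu_name_lookup ("power7") returns the index of
   the row that rs6000-cpus.def contributed for that CPU, while an
   unrecognized -mcpu=/-mtune= string yields -1 so the caller can
   report an error.  */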
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
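/* Illustration: the rounding-up division above means an 8-byte DFmode
   value needs (8 + 4 - 1) / 4 == 2 consecutive GPRs on a 32-bit
   target, but only (8 + 8 - 1) / 8 == 1 FPR, since UNITS_PER_FP_WORD
   is 8.  */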
1580 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1581 MODE. */
1582 static int
1583 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1585 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1587 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1588 implementations. Don't allow an item to be split between a FP register
1589 and an Altivec register. */
1590 if (VECTOR_MEM_VSX_P (mode))
1592 if (FP_REGNO_P (regno))
1593 return FP_REGNO_P (last_regno);
1595 if (ALTIVEC_REGNO_P (regno))
1596 return ALTIVEC_REGNO_P (last_regno);
1599 /* The GPRs can hold any mode, but values bigger than one register
1600 cannot go past R31. */
1601 if (INT_REGNO_P (regno))
1602 return INT_REGNO_P (last_regno);
1604 /* The float registers (except for VSX vector modes) can only hold floating
1605 modes and DImode. This excludes the 32-bit decimal float mode for
1606 now. */
1607 if (FP_REGNO_P (regno))
1609 if (SCALAR_FLOAT_MODE_P (mode)
1610 && (mode != TDmode || (regno % 2) == 0)
1611 && FP_REGNO_P (last_regno))
1612 return 1;
1614 if (GET_MODE_CLASS (mode) == MODE_INT
1615 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1616 return 1;
1618 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1619 && PAIRED_VECTOR_MODE (mode))
1620 return 1;
1622 return 0;
1625 /* The CR register can only hold CC modes. */
1626 if (CR_REGNO_P (regno))
1627 return GET_MODE_CLASS (mode) == MODE_CC;
1629 if (CA_REGNO_P (regno))
1630 return mode == BImode;
1633 /* AltiVec modes go only in AltiVec registers. */
1633 if (ALTIVEC_REGNO_P (regno))
1634 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1636 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1637 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1638 return 1;
1640 /* We cannot put TImode anywhere except the general registers, and it must
1641 fit within the register set. In the future, allow TImode in the
1642 AltiVec or VSX registers. */
1644 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
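/* Example of the "cannot go past R31" rule above: on a 32-bit target,
   DImode occupies two GPRs, so rs6000_hard_regno_mode_ok (31, DImode)
   is 0 because last_regno would be 32, which is not a GPR.  */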
1647 /* Print interesting facts about registers. */
1648 static void
1649 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1651 int r, m;
1653 for (r = first_regno; r <= last_regno; ++r)
1655 const char *comma = "";
1656 int len;
1658 if (first_regno == last_regno)
1659 fprintf (stderr, "%s:\t", reg_name);
1660 else
1661 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1663 len = 8;
1664 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1665 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1667 if (len > 70)
1669 fprintf (stderr, ",\n\t");
1670 len = 8;
1671 comma = "";
1674 if (rs6000_hard_regno_nregs[m][r] > 1)
1675 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1676 rs6000_hard_regno_nregs[m][r]);
1677 else
1678 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1680 comma = ", ";
1683 if (call_used_regs[r])
1685 if (len > 70)
1687 fprintf (stderr, ",\n\t");
1688 len = 8;
1689 comma = "";
1692 len += fprintf (stderr, "%s%s", comma, "call-used");
1693 comma = ", ";
1696 if (fixed_regs[r])
1698 if (len > 70)
1700 fprintf (stderr, ",\n\t");
1701 len = 8;
1702 comma = "";
1705 len += fprintf (stderr, "%s%s", comma, "fixed");
1706 comma = ", ";
1709 if (len > 70)
1711 fprintf (stderr, ",\n\t");
1712 comma = "";
1715 fprintf (stderr, "%sregno = %d\n", comma, r);
1719 #define DEBUG_FMT_D "%-32s= %d\n"
1720 #define DEBUG_FMT_X "%-32s= 0x%x\n"
1721 #define DEBUG_FMT_S "%-32s= %s\n"
1723 /* Print various interesting information with -mdebug=reg. */
1724 static void
1725 rs6000_debug_reg_global (void)
1727 static const char *const tf[2] = { "false", "true" };
1728 const char *nl = (const char *)0;
1729 int m;
1730 char costly_num[20];
1731 char nop_num[20];
1732 const char *costly_str;
1733 const char *nop_str;
1734 const char *trace_str;
1735 const char *abi_str;
1736 const char *cmodel_str;
1738 /* Map enum rs6000_vector to string. */
1739 static const char *rs6000_debug_vector_unit[] = {
1740 "none",
1741 "altivec",
1742 "vsx",
1743 "paired",
1744 "spe",
1745 "other"
1748 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1749 LAST_VIRTUAL_REGISTER);
1750 rs6000_debug_reg_print (0, 31, "gr");
1751 rs6000_debug_reg_print (32, 63, "fp");
1752 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1753 LAST_ALTIVEC_REGNO,
1754 "vs");
1755 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1756 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1757 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1758 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1759 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1760 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1761 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1762 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1764 fprintf (stderr,
1765 "\n"
1766 "d reg_class = %s\n"
1767 "f reg_class = %s\n"
1768 "v reg_class = %s\n"
1769 "wa reg_class = %s\n"
1770 "wd reg_class = %s\n"
1771 "wf reg_class = %s\n"
1772 "ws reg_class = %s\n\n",
1773 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1774 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1775 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1776 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1777 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1778 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1779 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1781 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1782 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1784 nl = "\n";
1785 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1786 GET_MODE_NAME (m),
1787 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1788 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1791 if (nl)
1792 fputs (nl, stderr);
1794 if (rs6000_recip_control)
1796 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1798 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1799 if (rs6000_recip_bits[m])
1801 fprintf (stderr,
1802 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1803 GET_MODE_NAME (m),
1804 (RS6000_RECIP_AUTO_RE_P (m)
1805 ? "auto"
1806 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1807 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1808 ? "auto"
1809 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1812 fputs ("\n", stderr);
1815 if (rs6000_cpu_index >= 0)
1816 fprintf (stderr, DEBUG_FMT_S, "cpu",
1817 processor_target_table[rs6000_cpu_index].name);
1819 if (rs6000_tune_index >= 0)
1820 fprintf (stderr, DEBUG_FMT_S, "tune",
1821 processor_target_table[rs6000_tune_index].name);
1823 switch (rs6000_sched_costly_dep)
1825 case max_dep_latency:
1826 costly_str = "max_dep_latency";
1827 break;
1829 case no_dep_costly:
1830 costly_str = "no_dep_costly";
1831 break;
1833 case all_deps_costly:
1834 costly_str = "all_deps_costly";
1835 break;
1837 case true_store_to_load_dep_costly:
1838 costly_str = "true_store_to_load_dep_costly";
1839 break;
1841 case store_to_load_dep_costly:
1842 costly_str = "store_to_load_dep_costly";
1843 break;
1845 default:
1846 costly_str = costly_num;
1847 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1848 break;
1851 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
1853 switch (rs6000_sched_insert_nops)
1855 case sched_finish_regroup_exact:
1856 nop_str = "sched_finish_regroup_exact";
1857 break;
1859 case sched_finish_pad_groups:
1860 nop_str = "sched_finish_pad_groups";
1861 break;
1863 case sched_finish_none:
1864 nop_str = "sched_finish_none";
1865 break;
1867 default:
1868 nop_str = nop_num;
1869 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1870 break;
1873 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
1875 switch (rs6000_sdata)
1877 default:
1878 case SDATA_NONE:
1879 break;
1881 case SDATA_DATA:
1882 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
1883 break;
1885 case SDATA_SYSV:
1886 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
1887 break;
1889 case SDATA_EABI:
1890 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
1891 break;
1895 switch (rs6000_traceback)
1897 case traceback_default: trace_str = "default"; break;
1898 case traceback_none: trace_str = "none"; break;
1899 case traceback_part: trace_str = "part"; break;
1900 case traceback_full: trace_str = "full"; break;
1901 default: trace_str = "unknown"; break;
1904 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
1906 switch (rs6000_current_cmodel)
1908 case CMODEL_SMALL: cmodel_str = "small"; break;
1909 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
1910 case CMODEL_LARGE: cmodel_str = "large"; break;
1911 default: cmodel_str = "unknown"; break;
1914 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
1916 switch (rs6000_current_abi)
1918 case ABI_NONE: abi_str = "none"; break;
1919 case ABI_AIX: abi_str = "aix"; break;
1920 case ABI_V4: abi_str = "V4"; break;
1921 case ABI_DARWIN: abi_str = "darwin"; break;
1922 default: abi_str = "unknown"; break;
1925 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
1927 if (rs6000_altivec_abi)
1928 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
1930 if (rs6000_spe_abi)
1931 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
1933 if (rs6000_darwin64_abi)
1934 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
1936 if (rs6000_float_gprs)
1937 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
1939 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
1940 fprintf (stderr, DEBUG_FMT_S, "align_branch",
1941 tf[!!rs6000_align_branch_targets]);
1942 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
1943 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
1944 rs6000_long_double_type_size);
1945 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
1946 (int)rs6000_sched_restricted_insns_priority);
1947 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
1948 (int)END_BUILTINS);
1949 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
1950 (int)RS6000_BUILTIN_COUNT);
1951 fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
1954 /* Initialize the various global tables that are based on register size. */
1955 static void
1956 rs6000_init_hard_regno_mode_ok (bool global_init_p)
1958 int r, m, c;
1959 int align64;
1960 int align32;
1962 /* Precalculate REGNO_REG_CLASS. */
1963 rs6000_regno_regclass[0] = GENERAL_REGS;
1964 for (r = 1; r < 32; ++r)
1965 rs6000_regno_regclass[r] = BASE_REGS;
1967 for (r = 32; r < 64; ++r)
1968 rs6000_regno_regclass[r] = FLOAT_REGS;
1970 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1971 rs6000_regno_regclass[r] = NO_REGS;
1973 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1974 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1976 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1977 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1978 rs6000_regno_regclass[r] = CR_REGS;
1980 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1981 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1982 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1983 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1984 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1985 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1986 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1987 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1988 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1990 /* Precalculate vector information; this must be set up before
1991 rs6000_hard_regno_nregs_internal is used below. */
1992 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1994 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1995 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1996 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
1999 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
2000 rs6000_constraints[c] = NO_REGS;
2002 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
2003 controls whether the compiler believes it can use native alignment or must still use 128-bit alignment. */
2004 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2006 align64 = 64;
2007 align32 = 32;
2009 else
2011 align64 = 128;
2012 align32 = 128;
2015 /* V2DF mode, VSX only. */
2016 if (TARGET_VSX)
2018 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2019 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2020 rs6000_vector_align[V2DFmode] = align64;
2023 /* V4SF mode, either VSX or Altivec. */
2024 if (TARGET_VSX)
2026 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2027 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2028 rs6000_vector_align[V4SFmode] = align32;
2030 else if (TARGET_ALTIVEC)
2032 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2033 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2034 rs6000_vector_align[V4SFmode] = align32;
2037 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2038 and stores. */
2039 if (TARGET_ALTIVEC)
2041 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2042 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2043 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2044 rs6000_vector_align[V4SImode] = align32;
2045 rs6000_vector_align[V8HImode] = align32;
2046 rs6000_vector_align[V16QImode] = align32;
2048 if (TARGET_VSX)
2050 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2051 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2052 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2054 else
2056 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2057 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2058 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2062 /* V2DImode: only allow under VSX, which can do V2DI insert/splat/extract.
2063 AltiVec doesn't have 64-bit support. */
2064 if (TARGET_VSX)
2066 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2067 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
2068 rs6000_vector_align[V2DImode] = align64;
2071 /* DFmode, see if we want to use the VSX unit. */
2072 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2074 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2075 rs6000_vector_mem[DFmode]
2076 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2077 rs6000_vector_align[DFmode] = align64;
2080 /* TODO add SPE and paired floating point vector support. */
2082 /* Register class constraints for the constraints that depend on compile
2083 switches. */
2084 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2085 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2087 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2088 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2090 if (TARGET_VSX)
2092 /* At present, we just use VSX_REGS, but we have different constraints
2093 based on the use, in case we want to fine tune the default register
2094 class used. wa = any VSX register, wf = register class to use for
2095 V4SF, wd = register class to use for V2DF, and ws = register class to
2096 use for DF scalars. */
2097 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2098 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2099 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2100 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2101 ? VSX_REGS
2102 : FLOAT_REGS);
2105 if (TARGET_ALTIVEC)
2106 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2108 /* Set up the reload helper functions. */
2109 if (TARGET_VSX || TARGET_ALTIVEC)
2111 if (TARGET_64BIT)
2113 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2114 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2115 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2116 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2117 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2118 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2119 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2120 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2121 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2122 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2123 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2124 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2125 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2127 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2128 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2131 else
2133 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2134 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2135 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2136 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2137 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2138 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2139 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2140 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2141 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2142 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2143 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2144 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2145 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2147 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2148 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2153 /* Precalculate HARD_REGNO_NREGS. */
2154 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2155 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2156 rs6000_hard_regno_nregs[m][r]
2157 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2159 /* Precalculate HARD_REGNO_MODE_OK. */
2160 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2161 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2162 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2163 rs6000_hard_regno_mode_ok_p[m][r] = true;
2165 /* Precalculate CLASS_MAX_NREGS sizes. */
2166 for (c = 0; c < LIM_REG_CLASSES; ++c)
2168 int reg_size;
2170 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2171 reg_size = UNITS_PER_VSX_WORD;
2173 else if (c == ALTIVEC_REGS)
2174 reg_size = UNITS_PER_ALTIVEC_WORD;
2176 else if (c == FLOAT_REGS)
2177 reg_size = UNITS_PER_FP_WORD;
2179 else
2180 reg_size = UNITS_PER_WORD;
2182 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2183 rs6000_class_max_nregs[m][c]
2184 = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
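/* Worked example: with reg_size == 4 for GENERAL_REGS on a 32-bit target,
   DFmode (8 bytes) gives rs6000_class_max_nregs[DFmode][GENERAL_REGS] == 2,
   which the E500 double-float special case below then overrides to 1.  */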
2187 if (TARGET_E500_DOUBLE)
2188 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2190 /* Calculate for which modes we should automatically generate code that
2191 uses the reciprocal divide and square root instructions. In the future,
2192 possibly generate the instructions automatically even if the user did not
2193 specify -mrecip; the older machines' double-precision reciprocal square
2194 root estimate is not accurate enough. */
2195 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2196 if (TARGET_FRES)
2197 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2198 if (TARGET_FRE)
2199 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2200 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2201 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2202 if (VECTOR_UNIT_VSX_P (V2DFmode))
2203 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2205 if (TARGET_FRSQRTES)
2206 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2207 if (TARGET_FRSQRTE)
2208 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2209 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2210 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2211 if (VECTOR_UNIT_VSX_P (V2DFmode))
2212 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
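/* Illustrative state after the HAVE bits are set: on a machine with both
   TARGET_FRE and TARGET_FRSQRTE, rs6000_recip_bits[DFmode] is
   RS6000_RECIP_MASK_HAVE_RE | RS6000_RECIP_MASK_HAVE_RSQRTE; the AUTO bits
   are OR'ed in below only when -mrecip and the required fast-math-style
   flags are in effect.  */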
2214 if (rs6000_recip_control)
2216 if (!flag_finite_math_only)
2217 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2218 if (flag_trapping_math)
2219 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2220 if (!flag_reciprocal_math)
2221 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2222 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2224 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2225 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2226 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2228 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2229 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2230 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2232 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2233 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2234 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2236 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2237 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2238 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2240 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2241 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2242 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2244 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2245 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2246 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2248 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2249 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2250 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2252 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2253 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2254 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2258 if (global_init_p || TARGET_DEBUG_TARGET)
2260 if (TARGET_DEBUG_REG)
2261 rs6000_debug_reg_global ();
2263 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2264 fprintf (stderr,
2265 "SImode variable mult cost = %d\n"
2266 "SImode constant mult cost = %d\n"
2267 "SImode short constant mult cost = %d\n"
2268 "DImode multipliciation cost = %d\n"
2269 "SImode division cost = %d\n"
2270 "DImode division cost = %d\n"
2271 "Simple fp operation cost = %d\n"
2272 "DFmode multiplication cost = %d\n"
2273 "SFmode division cost = %d\n"
2274 "DFmode division cost = %d\n"
2275 "cache line size = %d\n"
2276 "l1 cache size = %d\n"
2277 "l2 cache size = %d\n"
2278 "simultaneous prefetches = %d\n"
2279 "\n",
2280 rs6000_cost->mulsi,
2281 rs6000_cost->mulsi_const,
2282 rs6000_cost->mulsi_const9,
2283 rs6000_cost->muldi,
2284 rs6000_cost->divsi,
2285 rs6000_cost->divdi,
2286 rs6000_cost->fp,
2287 rs6000_cost->dmul,
2288 rs6000_cost->sdiv,
2289 rs6000_cost->ddiv,
2290 rs6000_cost->cache_line_size,
2291 rs6000_cost->l1_cache_size,
2292 rs6000_cost->l2_cache_size,
2293 rs6000_cost->simultaneous_prefetches);
2297 #if TARGET_MACHO
2298 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2300 static void
2301 darwin_rs6000_override_options (void)
2303 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2304 off. */
2305 rs6000_altivec_abi = 1;
2306 TARGET_ALTIVEC_VRSAVE = 1;
2307 rs6000_current_abi = ABI_DARWIN;
2309 if (DEFAULT_ABI == ABI_DARWIN
2310 && TARGET_64BIT)
2311 darwin_one_byte_bool = 1;
2313 if (TARGET_64BIT && ! TARGET_POWERPC64)
2315 target_flags |= MASK_POWERPC64;
2316 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2318 if (flag_mkernel)
2320 rs6000_default_long_calls = 1;
2321 target_flags |= MASK_SOFT_FLOAT;
2324 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2325 Altivec. */
2326 if (!flag_mkernel && !flag_apple_kext
2327 && TARGET_64BIT
2328 && ! (target_flags_explicit & MASK_ALTIVEC))
2329 target_flags |= MASK_ALTIVEC;
2331 /* Unless the user (not the configurer) has explicitly overridden
2332 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2333 G4 unless targeting the kernel. */
2334 if (!flag_mkernel
2335 && !flag_apple_kext
2336 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2337 && ! (target_flags_explicit & MASK_ALTIVEC)
2338 && ! global_options_set.x_rs6000_cpu_index)
2340 target_flags |= MASK_ALTIVEC;
2343 #endif
2345 /* If not otherwise specified by a target, make 'long double' equivalent to
2346 'double'. */
2348 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2349 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2350 #endif
2352 /* Return the builtin mask of the various options used that could affect which
2353 builtins were used. In the past we used target_flags, but we've run out of
2354 bits, and some options like SPE and PAIRED are no longer in
2355 target_flags. */
2357 unsigned
2358 rs6000_builtin_mask_calculate (void)
2360 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2361 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2362 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2363 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2364 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2365 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2366 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2367 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2368 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2369 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
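/* A minimal sketch (hypothetical caller, not part of this file) of how the
   computed mask is meant to be consumed:

     unsigned mask = rs6000_builtin_mask_calculate ();
     if ((mask & RS6000_BTM_VSX) != 0)
       ... the VSX builtins are available ...  */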
2372 /* Override command line options. Mostly we process the processor type and
2373 sometimes adjust other TARGET_ options. */
2375 static bool
2376 rs6000_option_override_internal (bool global_init_p)
2378 bool ret = true;
2379 bool have_cpu = false;
2381 /* The default cpu requested at configure time, if any. */
2382 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2384 int set_masks;
2385 int cpu_index;
2386 int tune_index;
2387 struct cl_target_option *main_target_opt
2388 = ((global_init_p || target_option_default_node == NULL)
2389 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2391 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2392 library functions, so warn about it. The flag may be useful for
2393 performance studies from time to time though, so don't disable it
2394 entirely. */
2395 if (global_options_set.x_rs6000_alignment_flags
2396 && rs6000_alignment_flags == MASK_ALIGN_POWER
2397 && DEFAULT_ABI == ABI_DARWIN
2398 && TARGET_64BIT)
2399 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2400 " it is incompatible with the installed C and C++ libraries");
2402 /* Numerous experiments show that IRA-based loop pressure
2403 calculation works better for RTL loop-invariant motion on targets
2404 with enough (>= 32) registers. It is an expensive optimization,
2405 so it is enabled only when optimizing for peak performance. */
2406 if (optimize >= 3 && global_init_p)
2407 flag_ira_loop_pressure = 1;
2409 /* Set the pointer size. */
2410 if (TARGET_64BIT)
2412 rs6000_pmode = (int)DImode;
2413 rs6000_pointer_size = 64;
2415 else
2417 rs6000_pmode = (int)SImode;
2418 rs6000_pointer_size = 32;
2421 set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
2422 #ifdef OS_MISSING_POWERPC64
2423 if (OS_MISSING_POWERPC64)
2424 set_masks &= ~MASK_POWERPC64;
2425 #endif
2426 #ifdef OS_MISSING_ALTIVEC
2427 if (OS_MISSING_ALTIVEC)
2428 set_masks &= ~MASK_ALTIVEC;
2429 #endif
2431 /* Don't override flags that were set explicitly with the processor default. */
2432 set_masks &= ~target_flags_explicit;
2434 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
2435 the cpu in a target attribute or pragma, but did not specify a tuning
2436 option, use the cpu for the tuning option rather than the option specified
2437 with -mtune on the command line. Process a '--with-cpu' configuration
2438 request as an implicit --cpu. */
2439 if (rs6000_cpu_index >= 0)
2441 cpu_index = rs6000_cpu_index;
2442 have_cpu = true;
2444 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2446 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2447 have_cpu = true;
2449 else if (implicit_cpu)
2451 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
2452 have_cpu = true;
2454 else
2456 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
2457 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2458 have_cpu = false;
2461 gcc_assert (cpu_index >= 0);
2463 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
2464 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
2465 with those from the cpu, except for options that were explicitly set. If
2466 we don't have a cpu, do not override the target bits set in
2467 TARGET_DEFAULT. */
2468 if (have_cpu)
2470 target_flags &= ~set_masks;
2471 target_flags |= (processor_target_table[cpu_index].target_enable
2472 & set_masks);
2474 else
2475 target_flags |= (processor_target_table[cpu_index].target_enable
2476 & ~target_flags_explicit);
2478 if (rs6000_tune_index >= 0)
2479 tune_index = rs6000_tune_index;
2480 else if (have_cpu)
2481 rs6000_tune_index = tune_index = cpu_index;
2482 else
2484 size_t i;
2485 enum processor_type tune_proc
2486 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2488 tune_index = -1;
2489 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2490 if (processor_target_table[i].processor == tune_proc)
2492 rs6000_tune_index = tune_index = i;
2493 break;
2497 gcc_assert (tune_index >= 0);
2498 rs6000_cpu = processor_target_table[tune_index].processor;
2500 /* Pick defaults for SPE-related control flags. Do this early to make sure
2501 that the TARGET_ macros are representative ASAP. */
2503 int spe_capable_cpu =
2504 (rs6000_cpu == PROCESSOR_PPC8540
2505 || rs6000_cpu == PROCESSOR_PPC8548);
2507 if (!global_options_set.x_rs6000_spe_abi)
2508 rs6000_spe_abi = spe_capable_cpu;
2510 if (!global_options_set.x_rs6000_spe)
2511 rs6000_spe = spe_capable_cpu;
2513 if (!global_options_set.x_rs6000_float_gprs)
2514 rs6000_float_gprs =
2515 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2516 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2517 : 0);
2520 if (global_options_set.x_rs6000_spe_abi
2521 && rs6000_spe_abi
2522 && !TARGET_SPE_ABI)
2523 error ("not configured for SPE ABI");
2525 if (global_options_set.x_rs6000_spe
2526 && rs6000_spe
2527 && !TARGET_SPE)
2528 error ("not configured for SPE instruction set");
2530 if (main_target_opt != NULL
2531 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2532 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2533 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2534 error ("target attribute or pragma changes SPE ABI");
2536 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2537 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
2538 || rs6000_cpu == PROCESSOR_PPCE5500)
2540 if (TARGET_ALTIVEC)
2541 error ("AltiVec not supported in this target");
2542 if (TARGET_SPE)
2543 error ("SPE not supported in this target");
2545 if (rs6000_cpu == PROCESSOR_PPCE6500)
2547 if (TARGET_SPE)
2548 error ("SPE not supported in this target");
2551 /* Disable Cell microcode if we are optimizing for the Cell
2552 and not optimizing for size. */
2553 if (rs6000_gen_cell_microcode == -1)
2554 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2555 && !optimize_size);
2557 /* If we are optimizing big endian systems for space and it's OK to
2558 use instructions that would be microcoded on the Cell, use the
2559 load/store multiple and string instructions. */
2560 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2561 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
2563 /* Don't allow -mmultiple or -mstring on little endian systems
2564 unless the cpu is a 750, because the hardware doesn't support the
2565 instructions used in little endian mode, and they cause an alignment
2566 trap. The 750 does not cause an alignment trap (except when the
2567 target is unaligned). */
2569 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2571 if (TARGET_MULTIPLE)
2573 target_flags &= ~MASK_MULTIPLE;
2574 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
2575 warning (0, "-mmultiple is not supported on little endian systems");
2578 if (TARGET_STRING)
2580 target_flags &= ~MASK_STRING;
2581 if ((target_flags_explicit & MASK_STRING) != 0)
2582 warning (0, "-mstring is not supported on little endian systems");
2586 /* Add some warnings for VSX. */
2587 if (TARGET_VSX)
2589 const char *msg = NULL;
2590 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2591 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2593 if (target_flags_explicit & MASK_VSX)
2594 msg = N_("-mvsx requires hardware floating point");
2595 else
2596 target_flags &= ~ MASK_VSX;
2598 else if (TARGET_PAIRED_FLOAT)
2599 msg = N_("-mvsx and -mpaired are incompatible");
2600 /* The hardware will allow VSX and little endian, but until we make sure
2601 things like vector select, etc. work, don't allow VSX on little endian
2602 systems at this point. */
2603 else if (!BYTES_BIG_ENDIAN)
2604 msg = N_("-mvsx used with little endian code");
2605 else if (TARGET_AVOID_XFORM > 0)
2606 msg = N_("-mvsx needs indexed addressing");
2607 else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
2609 if (target_flags_explicit & MASK_VSX)
2610 msg = N_("-mvsx and -mno-altivec are incompatible");
2611 else
2612 msg = N_("-mno-altivec disables vsx");
2615 if (msg)
2617 warning (0, msg);
2618 target_flags &= ~ MASK_VSX;
2619 target_flags_explicit |= MASK_VSX;
2623 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2624 unless the user explicitly used the -mno-<option> to disable the code. */
2625 if (TARGET_VSX)
2626 target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
2627 else if (TARGET_POPCNTD)
2628 target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
2629 else if (TARGET_DFP)
2630 target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
2631 else if (TARGET_CMPB)
2632 target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
2633 else if (TARGET_FPRND)
2634 target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
2635 else if (TARGET_POPCNTB)
2636 target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
2637 else if (TARGET_ALTIVEC)
2638 target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);
2640 /* E500mc does "better" if we inline more aggressively. Respect the
2641 user's opinion, though. */
2642 if (rs6000_block_move_inline_limit == 0
2643 && (rs6000_cpu == PROCESSOR_PPCE500MC
2644 || rs6000_cpu == PROCESSOR_PPCE500MC64
2645 || rs6000_cpu == PROCESSOR_PPCE5500
2646 || rs6000_cpu == PROCESSOR_PPCE6500))
2647 rs6000_block_move_inline_limit = 128;
2649 /* store_one_arg depends on expand_block_move to handle at least the
2650 size of reg_parm_stack_space. */
2651 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2652 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
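/* Worked example: on a 64-bit target an explicit
   -mblock-move-inline-limit=32 is raised to 64 here, since
   expand_block_move must cover at least the size of reg_parm_stack_space.  */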
2654 if (global_init_p)
2656 /* If the appropriate debug option is enabled, replace the target hooks
2657 with debug versions that call the real version and then print
2658 debugging information. */
2659 if (TARGET_DEBUG_COST)
2661 targetm.rtx_costs = rs6000_debug_rtx_costs;
2662 targetm.address_cost = rs6000_debug_address_cost;
2663 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2666 if (TARGET_DEBUG_ADDR)
2668 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2669 targetm.legitimize_address = rs6000_debug_legitimize_address;
2670 rs6000_secondary_reload_class_ptr
2671 = rs6000_debug_secondary_reload_class;
2672 rs6000_secondary_memory_needed_ptr
2673 = rs6000_debug_secondary_memory_needed;
2674 rs6000_cannot_change_mode_class_ptr
2675 = rs6000_debug_cannot_change_mode_class;
2676 rs6000_preferred_reload_class_ptr
2677 = rs6000_debug_preferred_reload_class;
2678 rs6000_legitimize_reload_address_ptr
2679 = rs6000_debug_legitimize_reload_address;
2680 rs6000_mode_dependent_address_ptr
2681 = rs6000_debug_mode_dependent_address;
2684 if (rs6000_veclibabi_name)
2686 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2687 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2688 else
2690 error ("unknown vectorization library ABI type (%s) for "
2691 "-mveclibabi= switch", rs6000_veclibabi_name);
2692 ret = false;
2697 if (!global_options_set.x_rs6000_long_double_type_size)
2699 if (main_target_opt != NULL
2700 && (main_target_opt->x_rs6000_long_double_type_size
2701 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2702 error ("target attribute or pragma changes long double size");
2703 else
2704 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2707 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2708 if (!global_options_set.x_rs6000_ieeequad)
2709 rs6000_ieeequad = 1;
2710 #endif
2712 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2713 target attribute or pragma which automatically enables both options,
2714 unless the altivec ABI was set. This is set by default for 64-bit, but
2715 not for 32-bit. */
2716 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2717 target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);
2719 /* Enable Altivec ABI for AIX -maltivec. */
2720 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2722 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2723 error ("target attribute or pragma changes AltiVec ABI");
2724 else
2725 rs6000_altivec_abi = 1;
2728 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2729 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2730 be explicitly overridden in either case. */
2731 if (TARGET_ELF)
2733 if (!global_options_set.x_rs6000_altivec_abi
2734 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2736 if (main_target_opt != NULL &&
2737 !main_target_opt->x_rs6000_altivec_abi)
2738 error ("target attribute or pragma changes AltiVec ABI");
2739 else
2740 rs6000_altivec_abi = 1;
2744 /* Set the Darwin64 ABI as default for 64-bit Darwin.
2745 So far, the only darwin64 targets are also MACH-O. */
2746 if (TARGET_MACHO
2747 && DEFAULT_ABI == ABI_DARWIN
2748 && TARGET_64BIT)
2750 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
2751 error ("target attribute or pragma changes darwin64 ABI");
2752 else
2754 rs6000_darwin64_abi = 1;
2755 /* Default to natural alignment, for better performance. */
2756 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2760 /* Place FP constants in the constant pool instead of TOC
2761 if section anchors are enabled. */
2762 if (flag_section_anchors)
2763 TARGET_NO_FP_IN_TOC = 1;
2765 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2766 SUBTARGET_OVERRIDE_OPTIONS;
2767 #endif
2768 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2769 SUBSUBTARGET_OVERRIDE_OPTIONS;
2770 #endif
2771 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2772 SUB3TARGET_OVERRIDE_OPTIONS;
2773 #endif
2775 /* For the E500 family of cores, reset the single/double FP flags to let us
2776 check that they remain constant across attributes or pragmas. Also,
2777 clear a possible request for string instructions, which are not supported
2778 and which we might have silently enabled above for -Os.
2780 For other families, clear ISEL in case it was set implicitly. */
2783 switch (rs6000_cpu)
2785 case PROCESSOR_PPC8540:
2786 case PROCESSOR_PPC8548:
2787 case PROCESSOR_PPCE500MC:
2788 case PROCESSOR_PPCE500MC64:
2789 case PROCESSOR_PPCE5500:
2790 case PROCESSOR_PPCE6500:
2792 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
2793 rs6000_double_float = TARGET_E500_DOUBLE;
2795 target_flags &= ~MASK_STRING;
2797 break;
2799 default:
2801 if (have_cpu && !(target_flags_explicit & MASK_ISEL))
2802 target_flags &= ~MASK_ISEL;
2804 break;
2807 if (main_target_opt)
2809 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
2810 error ("target attribute or pragma changes single precision floating "
2811 "point");
2812 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
2813 error ("target attribute or pragma changes double precision floating "
2814 "point");
2817 /* Detect invalid option combinations with E500. */
2818 CHECK_E500_OPTIONS;
2820 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2821 && rs6000_cpu != PROCESSOR_POWER5
2822 && rs6000_cpu != PROCESSOR_POWER6
2823 && rs6000_cpu != PROCESSOR_POWER7
2824 && rs6000_cpu != PROCESSOR_PPCA2
2825 && rs6000_cpu != PROCESSOR_CELL
2826 && rs6000_cpu != PROCESSOR_PPC476);
2827 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2828 || rs6000_cpu == PROCESSOR_POWER5
2829 || rs6000_cpu == PROCESSOR_POWER7);
2830 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2831 || rs6000_cpu == PROCESSOR_POWER5
2832 || rs6000_cpu == PROCESSOR_POWER6
2833 || rs6000_cpu == PROCESSOR_POWER7
2834 || rs6000_cpu == PROCESSOR_PPCE500MC
2835 || rs6000_cpu == PROCESSOR_PPCE500MC64
2836 || rs6000_cpu == PROCESSOR_PPCE5500
2837 || rs6000_cpu == PROCESSOR_PPCE6500);
2839 /* Allow debug switches to override the above settings. These are set to -1
2840 in rs6000.opt to indicate the user hasn't directly set the switch. */
2841 if (TARGET_ALWAYS_HINT >= 0)
2842 rs6000_always_hint = TARGET_ALWAYS_HINT;
2844 if (TARGET_SCHED_GROUPS >= 0)
2845 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2847 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
2848 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2850 rs6000_sched_restricted_insns_priority
2851 = (rs6000_sched_groups ? 1 : 0);
2853 /* Handle -msched-costly-dep option. */
2854 rs6000_sched_costly_dep
2855 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
2857 if (rs6000_sched_costly_dep_str)
2859 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2860 rs6000_sched_costly_dep = no_dep_costly;
2861 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2862 rs6000_sched_costly_dep = all_deps_costly;
2863 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2864 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2865 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2866 rs6000_sched_costly_dep = store_to_load_dep_costly;
2867 else
2868 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2869 atoi (rs6000_sched_costly_dep_str));
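/* Examples: -msched-costly-dep=all selects all_deps_costly above, while a
   numeric argument such as -msched-costly-dep=20 falls through to atoi,
   treating dependences with latency >= 20 as costly (assuming that is how
   the scheduler interprets numeric values of rs6000_sched_costly_dep).  */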
2872 /* Handle -minsert-sched-nops option. */
2873 rs6000_sched_insert_nops
2874 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2876 if (rs6000_sched_insert_nops_str)
2878 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2879 rs6000_sched_insert_nops = sched_finish_none;
2880 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2881 rs6000_sched_insert_nops = sched_finish_pad_groups;
2882 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2883 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2884 else
2885 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2886 atoi (rs6000_sched_insert_nops_str));
2889 if (global_init_p)
2891 #ifdef TARGET_REGNAMES
2892 /* If the user desires alternate register names, copy in the
2893 alternate names now. */
2894 if (TARGET_REGNAMES)
2895 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2896 #endif
2898 /* Set aix_struct_return last, after the ABI is determined.
2899 If -maix-struct-return or -msvr4-struct-return was explicitly
2900 used, don't override with the ABI default. */
2901 if (!global_options_set.x_aix_struct_return)
2902 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2904 #if 0
2905 /* IBM XL compiler defaults to unsigned bitfields. */
2906 if (TARGET_XL_COMPAT)
2907 flag_signed_bitfields = 0;
2908 #endif
2910 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2911 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2913 if (TARGET_TOC)
2914 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2916 /* We can only guarantee the availability of DI pseudo-ops when
2917 assembling for 64-bit targets. */
2918 if (!TARGET_64BIT)
2920 targetm.asm_out.aligned_op.di = NULL;
2921 targetm.asm_out.unaligned_op.di = NULL;
2925 /* Set branch target alignment, if not optimizing for size. */
2926 if (!optimize_size)
2928 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
2929 8-byte aligned to avoid misprediction by the branch predictor. */
2930 if (rs6000_cpu == PROCESSOR_TITAN
2931 || rs6000_cpu == PROCESSOR_CELL)
2933 if (align_functions <= 0)
2934 align_functions = 8;
2935 if (align_jumps <= 0)
2936 align_jumps = 8;
2937 if (align_loops <= 0)
2938 align_loops = 8;
2940 if (rs6000_align_branch_targets)
2942 if (align_functions <= 0)
2943 align_functions = 16;
2944 if (align_jumps <= 0)
2945 align_jumps = 16;
2946 if (align_loops <= 0)
2948 can_override_loop_align = 1;
2949 align_loops = 16;
2952 if (align_jumps_max_skip <= 0)
2953 align_jumps_max_skip = 15;
2954 if (align_loops_max_skip <= 0)
2955 align_loops_max_skip = 15;
2958 /* Arrange to save and restore machine status around nested functions. */
2959 init_machine_status = rs6000_init_machine_status;
2961 /* We should always be splitting complex arguments, but we can't break
2962 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2963 if (DEFAULT_ABI != ABI_AIX)
2964 targetm.calls.split_complex_arg = NULL;
2967 /* Initialize rs6000_cost with the appropriate target costs. */
2968 if (optimize_size)
2969 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2970 else
2971 switch (rs6000_cpu)
2973 case PROCESSOR_RS64A:
2974 rs6000_cost = &rs64a_cost;
2975 break;
2977 case PROCESSOR_MPCCORE:
2978 rs6000_cost = &mpccore_cost;
2979 break;
2981 case PROCESSOR_PPC403:
2982 rs6000_cost = &ppc403_cost;
2983 break;
2985 case PROCESSOR_PPC405:
2986 rs6000_cost = &ppc405_cost;
2987 break;
2989 case PROCESSOR_PPC440:
2990 rs6000_cost = &ppc440_cost;
2991 break;
2993 case PROCESSOR_PPC476:
2994 rs6000_cost = &ppc476_cost;
2995 break;
2997 case PROCESSOR_PPC601:
2998 rs6000_cost = &ppc601_cost;
2999 break;
3001 case PROCESSOR_PPC603:
3002 rs6000_cost = &ppc603_cost;
3003 break;
3005 case PROCESSOR_PPC604:
3006 rs6000_cost = &ppc604_cost;
3007 break;
3009 case PROCESSOR_PPC604e:
3010 rs6000_cost = &ppc604e_cost;
3011 break;
3013 case PROCESSOR_PPC620:
3014 rs6000_cost = &ppc620_cost;
3015 break;
3017 case PROCESSOR_PPC630:
3018 rs6000_cost = &ppc630_cost;
3019 break;
3021 case PROCESSOR_CELL:
3022 rs6000_cost = &ppccell_cost;
3023 break;
3025 case PROCESSOR_PPC750:
3026 case PROCESSOR_PPC7400:
3027 rs6000_cost = &ppc750_cost;
3028 break;
3030 case PROCESSOR_PPC7450:
3031 rs6000_cost = &ppc7450_cost;
3032 break;
3034 case PROCESSOR_PPC8540:
3035 case PROCESSOR_PPC8548:
3036 rs6000_cost = &ppc8540_cost;
3037 break;
3039 case PROCESSOR_PPCE300C2:
3040 case PROCESSOR_PPCE300C3:
3041 rs6000_cost = &ppce300c2c3_cost;
3042 break;
3044 case PROCESSOR_PPCE500MC:
3045 rs6000_cost = &ppce500mc_cost;
3046 break;
3048 case PROCESSOR_PPCE500MC64:
3049 rs6000_cost = &ppce500mc64_cost;
3050 break;
3052 case PROCESSOR_PPCE5500:
3053 rs6000_cost = &ppce5500_cost;
3054 break;
3056 case PROCESSOR_PPCE6500:
3057 rs6000_cost = &ppce6500_cost;
3058 break;
3060 case PROCESSOR_TITAN:
3061 rs6000_cost = &titan_cost;
3062 break;
3064 case PROCESSOR_POWER4:
3065 case PROCESSOR_POWER5:
3066 rs6000_cost = &power4_cost;
3067 break;
3069 case PROCESSOR_POWER6:
3070 rs6000_cost = &power6_cost;
3071 break;
3073 case PROCESSOR_POWER7:
3074 rs6000_cost = &power7_cost;
3075 break;
3077 case PROCESSOR_PPCA2:
3078 rs6000_cost = &ppca2_cost;
3079 break;
3081 default:
3082 gcc_unreachable ();
3085 if (global_init_p)
3087 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3088 rs6000_cost->simultaneous_prefetches,
3089 global_options.x_param_values,
3090 global_options_set.x_param_values);
3091 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3092 global_options.x_param_values,
3093 global_options_set.x_param_values);
3094 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3095 rs6000_cost->cache_line_size,
3096 global_options.x_param_values,
3097 global_options_set.x_param_values);
3098 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3099 global_options.x_param_values,
3100 global_options_set.x_param_values);
3102 /* If using typedef char *va_list, signal that
3103 __builtin_va_start (&ap, 0) can be optimized to
3104 ap = __builtin_next_arg (0). */
3105 if (DEFAULT_ABI != ABI_V4)
3106 targetm.expand_builtin_va_start = NULL;
3109 /* Set up single/double float flags.
3110 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3111 then set both flags. */
3112 if (TARGET_HARD_FLOAT && TARGET_FPRS
3113 && rs6000_single_float == 0 && rs6000_double_float == 0)
3114 rs6000_single_float = rs6000_double_float = 1;
3116 /* If not explicitly specified via option, decide whether to generate indexed
3117 load/store instructions. */
3118 if (TARGET_AVOID_XFORM == -1)
3119 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3120 DERAT mispredict penalty. However, the LVE and STVE AltiVec instructions
3121 need indexed accesses and the type used is the scalar type of the element
3122 being loaded or stored. */
3123 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3124 && !TARGET_ALTIVEC);
3126 /* Set the -mrecip options. */
3127 if (rs6000_recip_name)
3129 char *p = ASTRDUP (rs6000_recip_name);
3130 char *q;
3131 unsigned int mask, i;
3132 bool invert;
3134 while ((q = strtok (p, ",")) != NULL)
3136 p = NULL;
3137 if (*q == '!')
3139 invert = true;
3140 q++;
3142 else
3143 invert = false;
3145 if (!strcmp (q, "default"))
3146 mask = ((TARGET_RECIP_PRECISION)
3147 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3148 else
3150 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3151 if (!strcmp (q, recip_options[i].string))
3153 mask = recip_options[i].mask;
3154 break;
3157 if (i == ARRAY_SIZE (recip_options))
3159 error ("unknown option for -mrecip=%s", q);
3160 invert = false;
3161 mask = 0;
3162 ret = false;
3166 if (invert)
3167 rs6000_recip_control &= ~mask;
3168 else
3169 rs6000_recip_control |= mask;
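/* Example: assuming "div" and "rsqrt" are entries in recip_options,
   -mrecip=div,!rsqrt sets the reciprocal-divide bits and clears the
   reciprocal-square-root bits, while -mrecip=default picks
   RECIP_HIGH_PRECISION or RECIP_LOW_PRECISION based on
   TARGET_RECIP_PRECISION.  */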
3173 /* Set the builtin mask of the various options used that could affect which
3174 builtins were used. In the past we used target_flags, but we've run out
3175 of bits, and some options like SPE and PAIRED are no longer in
3176 target_flags. */
3177 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3178 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3179 fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
3180 (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
3181 (rs6000_builtin_mask & RS6000_BTM_VSX) ? ", vsx" : "",
3182 (rs6000_builtin_mask & RS6000_BTM_PAIRED) ? ", paired" : "",
3183 (rs6000_builtin_mask & RS6000_BTM_SPE) ? ", spe" : "");
3185 /* Initialize all of the registers. */
3186 rs6000_init_hard_regno_mode_ok (global_init_p);
3188 /* Save the initial options in case the user uses function-specific options. */
3189 if (global_init_p)
3190 target_option_default_node = target_option_current_node
3191 = build_target_option_node ();
3193 /* If not explicitly specified via option, decide whether to generate the
3194 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3195 if (TARGET_LINK_STACK == -1)
3196 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3198 return ret;
3201 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3202 define the target cpu type. */
3204 static void
3205 rs6000_option_override (void)
3207 (void) rs6000_option_override_internal (true);
3211 /* Implement targetm.vectorize.builtin_mask_for_load. */
3212 static tree
3213 rs6000_builtin_mask_for_load (void)
3215 if (TARGET_ALTIVEC || TARGET_VSX)
3216 return altivec_builtin_mask_for_load;
3217 else
3218 return 0;
3221 /* Implement LOOP_ALIGN. */
3222 int
3223 rs6000_loop_align (rtx label)
3225 basic_block bb;
3226 int ninsns;
3228 /* Don't override loop alignment if -falign-loops was specified. */
3229 if (!can_override_loop_align)
3230 return align_loops_log;
3232 bb = BLOCK_FOR_INSN (label);
3233 ninsns = num_loop_insns(bb->loop_father);
3235 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3236 if (ninsns > 4 && ninsns <= 8
3237 && (rs6000_cpu == PROCESSOR_POWER4
3238 || rs6000_cpu == PROCESSOR_POWER5
3239 || rs6000_cpu == PROCESSOR_POWER6
3240 || rs6000_cpu == PROCESSOR_POWER7))
3241 return 5;
3242 else
3243 return align_loops_log;
3246 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3247 static int
3248 rs6000_loop_align_max_skip (rtx label)
3250 return (1 << rs6000_loop_align (label)) - 1;
3253 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3254 after applying N iterations. This routine does not determine
3255 how many iterations are required to reach the desired alignment. */
3257 static bool
3258 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3260 if (is_packed)
3261 return false;
3263 if (TARGET_32BIT)
3265 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3266 return true;
3268 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3269 return true;
3271 return false;
3273 else
3275 if (TARGET_MACHO)
3276 return false;
3278 /* Assume that all other types are naturally aligned. CHECKME! */
3279 return true;
3283 /* Return true if the vector misalignment factor is supported by the
3284 target. */
3285 static bool
3286 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3287 const_tree type,
3288 int misalignment,
3289 bool is_packed)
3291 if (TARGET_VSX)
3293 /* Return false if the movmisalign pattern is not supported for this mode. */
3294 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3295 return false;
3297 if (misalignment == -1)
3299 /* Misalignment factor is unknown at compile time but we know
3300 it's word aligned. */
3301 if (rs6000_vector_alignment_reachable (type, is_packed))
3303 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3305 if (element_size == 64 || element_size == 32)
3306 return true;
3309 return false;
3312 /* VSX supports word-aligned vectors. */
3313 if (misalignment % 4 == 0)
3314 return true;
3316 return false;
3319 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3320 static int
3321 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3322 tree vectype, int misalign)
3324 unsigned elements;
3325 tree elem_type;
3327 switch (type_of_cost)
3329 case scalar_stmt:
3330 case scalar_load:
3331 case scalar_store:
3332 case vector_stmt:
3333 case vector_load:
3334 case vector_store:
3335 case vec_to_scalar:
3336 case scalar_to_vec:
3337 case cond_branch_not_taken:
3338 return 1;
3340 case vec_perm:
3341 if (TARGET_VSX)
3342 return 3;
3343 else
3344 return 1;
3346 case vec_promote_demote:
3347 if (TARGET_VSX)
3348 return 4;
3349 else
3350 return 1;
3352 case cond_branch_taken:
3353 return 3;
3355 case unaligned_load:
3356 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3358 elements = TYPE_VECTOR_SUBPARTS (vectype);
3359 if (elements == 2)
3360 /* Double word aligned. */
3361 return 2;
3363 if (elements == 4)
3365 switch (misalign)
3367 case 8:
3368 /* Double word aligned. */
3369 return 2;
3371 case -1:
3372 /* Unknown misalignment. */
3373 case 4:
3374 case 12:
3375 /* Word aligned. */
3376 return 22;
3378 default:
3379 gcc_unreachable ();
3384 if (TARGET_ALTIVEC)
3385 /* Misaligned loads are not supported. */
3386 gcc_unreachable ();
3388 return 2;
3390 case unaligned_store:
3391 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3393 elements = TYPE_VECTOR_SUBPARTS (vectype);
3394 if (elements == 2)
3395 /* Double word aligned. */
3396 return 2;
3398 if (elements == 4)
3400 switch (misalign)
3402 case 8:
3403 /* Double word aligned. */
3404 return 2;
3406 case -1:
3407 /* Unknown misalignment. */
3408 case 4:
3409 case 12:
3410 /* Word aligned. */
3411 return 23;
3413 default:
3414 gcc_unreachable ();
3419 if (TARGET_ALTIVEC)
3420 /* Misaligned stores are not supported. */
3421 gcc_unreachable ();
3423 return 2;
3425 case vec_construct:
3426 elements = TYPE_VECTOR_SUBPARTS (vectype);
3427 elem_type = TREE_TYPE (vectype);
3428 /* 32-bit vectors loaded into registers are stored as double
3429 precision, so we need n/2 converts in addition to the usual
3430 n/2 merges to construct a vector of short floats from them. */
3431 if (SCALAR_FLOAT_TYPE_P (elem_type)
3432 && TYPE_PRECISION (elem_type) == 32)
3433 return elements + 1;
3434 else
3435 return elements / 2 + 1;
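/* Worked example: constructing a V4SF vector costs 4 + 1 == 5 (four
   32-bit floats need the extra converts described above), while a V4SI
   vector costs 4 / 2 + 1 == 3.  */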
3437 default:
3438 gcc_unreachable ();
3442 /* Implement targetm.vectorize.preferred_simd_mode. */
3444 static enum machine_mode
3445 rs6000_preferred_simd_mode (enum machine_mode mode)
3447 if (TARGET_VSX)
3448 switch (mode)
3450 case DFmode:
3451 return V2DFmode;
3452 default:;
3454 if (TARGET_ALTIVEC || TARGET_VSX)
3455 switch (mode)
3457 case SFmode:
3458 return V4SFmode;
3459 case DImode:
3460 return V2DImode;
3461 case SImode:
3462 return V4SImode;
3463 case HImode:
3464 return V8HImode;
3465 case QImode:
3466 return V16QImode;
3467 default:;
3469 if (TARGET_SPE)
3470 switch (mode)
3472 case SFmode:
3473 return V2SFmode;
3474 case SImode:
3475 return V2SImode;
3476 default:;
3478 if (TARGET_PAIRED_FLOAT
3479 && mode == SFmode)
3480 return V2SFmode;
3481 return word_mode;
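/* Example: under -maltivec (without VSX), SFmode prefers V4SFmode and
   SImode prefers V4SImode; DFmode prefers V2DFmode only when VSX is
   enabled, and anything unhandled falls back to word_mode.  */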
3484 typedef struct _rs6000_cost_data
3486 struct loop *loop_info;
3487 unsigned cost[3];
3488 } rs6000_cost_data;
3490 /* Test for likely overcommitment of vector hardware resources. If a
3491 loop iteration is relatively large, and too large a percentage of
3492 instructions in the loop are vectorized, the cost model may not
3493 adequately reflect delays from unavailable vector resources.
3494 Penalize the loop body cost for this case. */
3496 static void
3497 rs6000_density_test (rs6000_cost_data *data)
3499 const int DENSITY_PCT_THRESHOLD = 85;
3500 const int DENSITY_SIZE_THRESHOLD = 70;
3501 const int DENSITY_PENALTY = 10;
3502 struct loop *loop = data->loop_info;
3503 basic_block *bbs = get_loop_body (loop);
3504 int nbbs = loop->num_nodes;
3505 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
3506 int i, density_pct;
3508 for (i = 0; i < nbbs; i++)
3510 basic_block bb = bbs[i];
3511 gimple_stmt_iterator gsi;
3513 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3515 gimple stmt = gsi_stmt (gsi);
3516 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3518 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3519 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
3520 not_vec_cost++;
3524 free (bbs);
3525 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
3527 if (density_pct > DENSITY_PCT_THRESHOLD
3528 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
3530 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
3531 if (dump_kind_p (MSG_NOTE))
3532 dump_printf_loc (MSG_NOTE, vect_location,
3533 "density %d%%, cost %d exceeds threshold, penalizing "
3534 "loop body cost by %d%%", density_pct,
3535 vec_cost + not_vec_cost, DENSITY_PENALTY);
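/* A minimal standalone sketch of the density arithmetic above, with the
   same thresholds hard-coded; illustrative only, and the helper name is
   invented.  E.g. vec_cost = 90 and not_vec_cost = 10 gives
   density_pct = 90 and a loop size of 100, so both thresholds are
   exceeded and the body cost becomes 90 * 110 / 100 = 99.  */
static int
density_penalty_sketch (int vec_cost, int not_vec_cost)
{
  int density_pct;

  if (vec_cost + not_vec_cost == 0)	/* Empty body: nothing to do.  */
    return vec_cost;

  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    return vec_cost * (100 + 10) / 100;
  return vec_cost;
}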
3539 /* Implement targetm.vectorize.init_cost. */
3541 static void *
3542 rs6000_init_cost (struct loop *loop_info)
3544 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
3545 data->loop_info = loop_info;
3546 data->cost[vect_prologue] = 0;
3547 data->cost[vect_body] = 0;
3548 data->cost[vect_epilogue] = 0;
3549 return data;
3552 /* Implement targetm.vectorize.add_stmt_cost. */
3554 static unsigned
3555 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
3556 struct _stmt_vec_info *stmt_info, int misalign,
3557 enum vect_cost_model_location where)
3559 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3560 unsigned retval = 0;
3562 if (flag_vect_cost_model)
3564 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
3565 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
3566 misalign);
3567 /* Statements in an inner loop relative to the loop being
3568 vectorized are weighted more heavily. The value here is
3569 arbitrary and could potentially be improved with analysis. */
3570 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
3571 count *= 50; /* FIXME. */
3573 retval = (unsigned) (count * stmt_cost);
3574 cost_data->cost[where] += retval;
3577 return retval;
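/* Editorial note (not from the original file): the inner-loop weighting
   above means a single vector statement of cost 1 nested inside the loop
   being vectorized contributes 50 units to the body cost rather than 1,
   since COUNT is scaled by 50 before being multiplied by the
   per-statement cost.  */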
3580 /* Implement targetm.vectorize.finish_cost. */
3582 static void
3583 rs6000_finish_cost (void *data, unsigned *prologue_cost,
3584 unsigned *body_cost, unsigned *epilogue_cost)
3586 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
3588 if (cost_data->loop_info)
3589 rs6000_density_test (cost_data);
3591 *prologue_cost = cost_data->cost[vect_prologue];
3592 *body_cost = cost_data->cost[vect_body];
3593 *epilogue_cost = cost_data->cost[vect_epilogue];
3596 /* Implement targetm.vectorize.destroy_cost_data. */
3598 static void
3599 rs6000_destroy_cost_data (void *data)
3601 free (data);
3604 /* Handler for the Mathematical Acceleration Subsystem (MASS) interface to a
3605 library with vectorized intrinsics. */
3607 static tree
3608 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3610 char name[32];
3611 const char *suffix = NULL;
3612 tree fntype, new_fndecl, bdecl = NULL_TREE;
3613 int n_args = 1;
3614 const char *bname;
3615 enum machine_mode el_mode, in_mode;
3616 int n, in_n;
3618 /* Libmass is suitable for unsafe math only as it does not correctly support
3619 parts of IEEE with the required precision such as denormals. Only support
3620 it if we have VSX to use the simd d2 or f4 functions.
3621 XXX: Add variable length support. */
3622 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3623 return NULL_TREE;
3625 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3626 n = TYPE_VECTOR_SUBPARTS (type_out);
3627 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3628 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3629 if (el_mode != in_mode
3630 || n != in_n)
3631 return NULL_TREE;
3633 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3635 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3636 switch (fn)
3638 case BUILT_IN_ATAN2:
3639 case BUILT_IN_HYPOT:
3640 case BUILT_IN_POW:
3641 n_args = 2;
3642 /* fall through */
3644 case BUILT_IN_ACOS:
3645 case BUILT_IN_ACOSH:
3646 case BUILT_IN_ASIN:
3647 case BUILT_IN_ASINH:
3648 case BUILT_IN_ATAN:
3649 case BUILT_IN_ATANH:
3650 case BUILT_IN_CBRT:
3651 case BUILT_IN_COS:
3652 case BUILT_IN_COSH:
3653 case BUILT_IN_ERF:
3654 case BUILT_IN_ERFC:
3655 case BUILT_IN_EXP2:
3656 case BUILT_IN_EXP:
3657 case BUILT_IN_EXPM1:
3658 case BUILT_IN_LGAMMA:
3659 case BUILT_IN_LOG10:
3660 case BUILT_IN_LOG1P:
3661 case BUILT_IN_LOG2:
3662 case BUILT_IN_LOG:
3663 case BUILT_IN_SIN:
3664 case BUILT_IN_SINH:
3665 case BUILT_IN_SQRT:
3666 case BUILT_IN_TAN:
3667 case BUILT_IN_TANH:
3668 bdecl = builtin_decl_implicit (fn);
3669 suffix = "d2"; /* pow -> powd2 */
3670 if (el_mode != DFmode
3671 || n != 2)
3672 return NULL_TREE;
3673 break;
3675 case BUILT_IN_ATAN2F:
3676 case BUILT_IN_HYPOTF:
3677 case BUILT_IN_POWF:
3678 n_args = 2;
3679 /* fall through */
3681 case BUILT_IN_ACOSF:
3682 case BUILT_IN_ACOSHF:
3683 case BUILT_IN_ASINF:
3684 case BUILT_IN_ASINHF:
3685 case BUILT_IN_ATANF:
3686 case BUILT_IN_ATANHF:
3687 case BUILT_IN_CBRTF:
3688 case BUILT_IN_COSF:
3689 case BUILT_IN_COSHF:
3690 case BUILT_IN_ERFF:
3691 case BUILT_IN_ERFCF:
3692 case BUILT_IN_EXP2F:
3693 case BUILT_IN_EXPF:
3694 case BUILT_IN_EXPM1F:
3695 case BUILT_IN_LGAMMAF:
3696 case BUILT_IN_LOG10F:
3697 case BUILT_IN_LOG1PF:
3698 case BUILT_IN_LOG2F:
3699 case BUILT_IN_LOGF:
3700 case BUILT_IN_SINF:
3701 case BUILT_IN_SINHF:
3702 case BUILT_IN_SQRTF:
3703 case BUILT_IN_TANF:
3704 case BUILT_IN_TANHF:
3705 bdecl = builtin_decl_implicit (fn);
3706 suffix = "4"; /* powf -> powf4 */
3707 if (el_mode != SFmode
3708 || n != 4)
3709 return NULL_TREE;
3710 break;
3712 default:
3713 return NULL_TREE;
3716 else
3717 return NULL_TREE;
3719 gcc_assert (suffix != NULL);
3720 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
3721 strcpy (name, bname + sizeof ("__builtin_") - 1);
3722 strcat (name, suffix);
3724 if (n_args == 1)
3725 fntype = build_function_type_list (type_out, type_in, NULL);
3726 else if (n_args == 2)
3727 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
3728 else
3729 gcc_unreachable ();
3731 /* Build a function declaration for the vectorized function. */
3732 new_fndecl = build_decl (BUILTINS_LOCATION,
3733 FUNCTION_DECL, get_identifier (name), fntype);
3734 TREE_PUBLIC (new_fndecl) = 1;
3735 DECL_EXTERNAL (new_fndecl) = 1;
3736 DECL_IS_NOVOPS (new_fndecl) = 1;
3737 TREE_READONLY (new_fndecl) = 1;
3739 return new_fndecl;
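/* Standalone sketch of the name mangling above, assuming the builtin name
   carries the usual "__builtin_" prefix and that NAME has room for the
   result (the original uses a 32-byte buffer); illustrative only, and the
   helper name is invented.  "__builtin_pow" with suffix "d2" becomes
   "powd2", matching the MASS naming scheme.  */
#include <string.h>

static void
mass_name_sketch (char *name, const char *bname, const char *suffix)
{
  /* Drop the "__builtin_" prefix, then append the vector suffix.  */
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);
}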
3742 /* Returns a function decl for a vectorized version of the builtin function
3743 with builtin function code FN and the result vector type TYPE, or NULL_TREE
3744 if it is not available. */
3746 static tree
3747 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3748 tree type_in)
3750 enum machine_mode in_mode, out_mode;
3751 int in_n, out_n;
3753 if (TARGET_DEBUG_BUILTIN)
3754 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
3755 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
3756 GET_MODE_NAME (TYPE_MODE (type_out)),
3757 GET_MODE_NAME (TYPE_MODE (type_in)));
3759 if (TREE_CODE (type_out) != VECTOR_TYPE
3760 || TREE_CODE (type_in) != VECTOR_TYPE
3761 || !TARGET_VECTORIZE_BUILTINS)
3762 return NULL_TREE;
3764 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3765 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3766 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3767 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3769 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3771 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3772 switch (fn)
3774 case BUILT_IN_COPYSIGN:
3775 if (VECTOR_UNIT_VSX_P (V2DFmode)
3776 && out_mode == DFmode && out_n == 2
3777 && in_mode == DFmode && in_n == 2)
3778 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3779 break;
3780 case BUILT_IN_COPYSIGNF:
3781 if (out_mode != SFmode || out_n != 4
3782 || in_mode != SFmode || in_n != 4)
3783 break;
3784 if (VECTOR_UNIT_VSX_P (V4SFmode))
3785 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3786 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3787 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3788 break;
3789 case BUILT_IN_SQRT:
3790 if (VECTOR_UNIT_VSX_P (V2DFmode)
3791 && out_mode == DFmode && out_n == 2
3792 && in_mode == DFmode && in_n == 2)
3793 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3794 break;
3795 case BUILT_IN_SQRTF:
3796 if (VECTOR_UNIT_VSX_P (V4SFmode)
3797 && out_mode == SFmode && out_n == 4
3798 && in_mode == SFmode && in_n == 4)
3799 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3800 break;
3801 case BUILT_IN_CEIL:
3802 if (VECTOR_UNIT_VSX_P (V2DFmode)
3803 && out_mode == DFmode && out_n == 2
3804 && in_mode == DFmode && in_n == 2)
3805 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3806 break;
3807 case BUILT_IN_CEILF:
3808 if (out_mode != SFmode || out_n != 4
3809 || in_mode != SFmode || in_n != 4)
3810 break;
3811 if (VECTOR_UNIT_VSX_P (V4SFmode))
3812 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3813 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3814 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3815 break;
3816 case BUILT_IN_FLOOR:
3817 if (VECTOR_UNIT_VSX_P (V2DFmode)
3818 && out_mode == DFmode && out_n == 2
3819 && in_mode == DFmode && in_n == 2)
3820 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3821 break;
3822 case BUILT_IN_FLOORF:
3823 if (out_mode != SFmode || out_n != 4
3824 || in_mode != SFmode || in_n != 4)
3825 break;
3826 if (VECTOR_UNIT_VSX_P (V4SFmode))
3827 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3828 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3829 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3830 break;
3831 case BUILT_IN_FMA:
3832 if (VECTOR_UNIT_VSX_P (V2DFmode)
3833 && out_mode == DFmode && out_n == 2
3834 && in_mode == DFmode && in_n == 2)
3835 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
3836 break;
3837 case BUILT_IN_FMAF:
3838 if (VECTOR_UNIT_VSX_P (V4SFmode)
3839 && out_mode == SFmode && out_n == 4
3840 && in_mode == SFmode && in_n == 4)
3841 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
3842 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
3843 && out_mode == SFmode && out_n == 4
3844 && in_mode == SFmode && in_n == 4)
3845 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
3846 break;
3847 case BUILT_IN_TRUNC:
3848 if (VECTOR_UNIT_VSX_P (V2DFmode)
3849 && out_mode == DFmode && out_n == 2
3850 && in_mode == DFmode && in_n == 2)
3851 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3852 break;
3853 case BUILT_IN_TRUNCF:
3854 if (out_mode != SFmode || out_n != 4
3855 || in_mode != SFmode || in_n != 4)
3856 break;
3857 if (VECTOR_UNIT_VSX_P (V4SFmode))
3858 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3859 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3860 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3861 break;
3862 case BUILT_IN_NEARBYINT:
3863 if (VECTOR_UNIT_VSX_P (V2DFmode)
3864 && flag_unsafe_math_optimizations
3865 && out_mode == DFmode && out_n == 2
3866 && in_mode == DFmode && in_n == 2)
3867 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3868 break;
3869 case BUILT_IN_NEARBYINTF:
3870 if (VECTOR_UNIT_VSX_P (V4SFmode)
3871 && flag_unsafe_math_optimizations
3872 && out_mode == SFmode && out_n == 4
3873 && in_mode == SFmode && in_n == 4)
3874 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3875 break;
3876 case BUILT_IN_RINT:
3877 if (VECTOR_UNIT_VSX_P (V2DFmode)
3878 && !flag_trapping_math
3879 && out_mode == DFmode && out_n == 2
3880 && in_mode == DFmode && in_n == 2)
3881 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3882 break;
3883 case BUILT_IN_RINTF:
3884 if (VECTOR_UNIT_VSX_P (V4SFmode)
3885 && !flag_trapping_math
3886 && out_mode == SFmode && out_n == 4
3887 && in_mode == SFmode && in_n == 4)
3888 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
3889 break;
3890 default:
3891 break;
3895 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
3897 enum rs6000_builtins fn
3898 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
3899 switch (fn)
3901 case RS6000_BUILTIN_RSQRTF:
3902 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3903 && out_mode == SFmode && out_n == 4
3904 && in_mode == SFmode && in_n == 4)
3905 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
3906 break;
3907 case RS6000_BUILTIN_RSQRT:
3908 if (VECTOR_UNIT_VSX_P (V2DFmode)
3909 && out_mode == DFmode && out_n == 2
3910 && in_mode == DFmode && in_n == 2)
3911 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
3912 break;
3913 case RS6000_BUILTIN_RECIPF:
3914 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3915 && out_mode == SFmode && out_n == 4
3916 && in_mode == SFmode && in_n == 4)
3917 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
3918 break;
3919 case RS6000_BUILTIN_RECIP:
3920 if (VECTOR_UNIT_VSX_P (V2DFmode)
3921 && out_mode == DFmode && out_n == 2
3922 && in_mode == DFmode && in_n == 2)
3923 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
3924 break;
3925 default:
3926 break;
3930 /* Generate calls to libmass if appropriate. */
3931 if (rs6000_veclib_handler)
3932 return rs6000_veclib_handler (fndecl, type_out, type_in);
3934 return NULL_TREE;
3937 /* Default CPU string for rs6000*_file_start functions. */
3938 static const char *rs6000_default_cpu;
3940 /* Do anything needed at the start of the asm file. */
3942 static void
3943 rs6000_file_start (void)
3945 char buffer[80];
3946 const char *start = buffer;
3947 FILE *file = asm_out_file;
3949 rs6000_default_cpu = TARGET_CPU_DEFAULT;
3951 default_file_start ();
3953 if (flag_verbose_asm)
3955 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3957 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
3959 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
3960 start = "";
3963 if (global_options_set.x_rs6000_cpu_index)
3965 fprintf (file, "%s -mcpu=%s", start,
3966 processor_target_table[rs6000_cpu_index].name);
3967 start = "";
3970 if (global_options_set.x_rs6000_tune_index)
3972 fprintf (file, "%s -mtune=%s", start,
3973 processor_target_table[rs6000_tune_index].name);
3974 start = "";
3977 if (PPC405_ERRATUM77)
3979 fprintf (file, "%s PPC405CR_ERRATUM77", start);
3980 start = "";
3983 #ifdef USING_ELFOS_H
3984 switch (rs6000_sdata)
3986 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
3987 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
3988 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
3989 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
3992 if (rs6000_sdata && g_switch_value)
3994 fprintf (file, "%s -G %d", start,
3995 g_switch_value);
3996 start = "";
3998 #endif
4000 if (*start == '\0')
4001 putc ('\n', file);
4004 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
4006 switch_to_section (toc_section);
4007 switch_to_section (text_section);
4012 /* Return nonzero if this function is known to have a null epilogue. */
4014 int
4015 direct_return (void)
4017 if (reload_completed)
4019 rs6000_stack_t *info = rs6000_stack_info ();
4021 if (info->first_gp_reg_save == 32
4022 && info->first_fp_reg_save == 64
4023 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4024 && ! info->lr_save_p
4025 && ! info->cr_save_p
4026 && info->vrsave_mask == 0
4027 && ! info->push_p)
4028 return 1;
4031 return 0;
4034 /* Return the number of instructions it takes to form a constant in an
4035 integer register. */
4037 static int
4038 num_insns_constant_wide (HOST_WIDE_INT value)
4040 /* signed constant loadable with addi */
4041 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4042 return 1;
4044 /* constant loadable with addis */
4045 else if ((value & 0xffff) == 0
4046 && (value >> 31 == -1 || value >> 31 == 0))
4047 return 1;
4049 #if HOST_BITS_PER_WIDE_INT == 64
4050 else if (TARGET_POWERPC64)
4052 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4053 HOST_WIDE_INT high = value >> 31;
4055 if (high == 0 || high == -1)
4056 return 2;
4058 high >>= 1;
4060 if (low == 0)
4061 return num_insns_constant_wide (high) + 1;
4062 else if (high == 0)
4063 return num_insns_constant_wide (low) + 1;
4064 else
4065 return (num_insns_constant_wide (high)
4066 + num_insns_constant_wide (low) + 1);
4068 #endif
4070 else
4071 return 2;
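/* Worked examples for the counting above (editorial note, not from the
   original file): 0x7fff fits the signed 16-bit li/addi range, so 1 insn;
   0x12340000 has a zero low half and sign-extends from bit 31, so a
   single lis/addis suffices; 0x12345678 needs the high/low pair
   (lis + ori), so 2 insns; and on powerpc64 a full 64-bit constant such
   as 0x123456789abcdef0 is built from two 32-bit halves (2 insns each)
   plus one more insn to combine them, 5 in total.  */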
4074 int
4075 num_insns_constant (rtx op, enum machine_mode mode)
4077 HOST_WIDE_INT low, high;
4079 switch (GET_CODE (op))
4081 case CONST_INT:
4082 #if HOST_BITS_PER_WIDE_INT == 64
4083 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4084 && mask64_operand (op, mode))
4085 return 2;
4086 else
4087 #endif
4088 return num_insns_constant_wide (INTVAL (op));
4090 case CONST_DOUBLE:
4091 if (mode == SFmode || mode == SDmode)
4093 long l;
4094 REAL_VALUE_TYPE rv;
4096 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4097 if (DECIMAL_FLOAT_MODE_P (mode))
4098 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4099 else
4100 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4101 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4104 if (mode == VOIDmode || mode == DImode)
4106 high = CONST_DOUBLE_HIGH (op);
4107 low = CONST_DOUBLE_LOW (op);
4109 else
4111 long l[2];
4112 REAL_VALUE_TYPE rv;
4114 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4115 if (DECIMAL_FLOAT_MODE_P (mode))
4116 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4117 else
4118 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4119 high = l[WORDS_BIG_ENDIAN == 0];
4120 low = l[WORDS_BIG_ENDIAN != 0];
4123 if (TARGET_32BIT)
4124 return (num_insns_constant_wide (low)
4125 + num_insns_constant_wide (high));
4126 else
4128 if ((high == 0 && low >= 0)
4129 || (high == -1 && low < 0))
4130 return num_insns_constant_wide (low);
4132 else if (mask64_operand (op, mode))
4133 return 2;
4135 else if (low == 0)
4136 return num_insns_constant_wide (high) + 1;
4138 else
4139 return (num_insns_constant_wide (high)
4140 + num_insns_constant_wide (low) + 1);
4143 default:
4144 gcc_unreachable ();
4148 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4149 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4150 corresponding element of the vector, but for V4SFmode and V2SFmode,
4151 the corresponding "float" is interpreted as an SImode integer. */
4153 HOST_WIDE_INT
4154 const_vector_elt_as_int (rtx op, unsigned int elt)
4156 rtx tmp;
4158 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4159 gcc_assert (GET_MODE (op) != V2DImode
4160 && GET_MODE (op) != V2DFmode);
4162 tmp = CONST_VECTOR_ELT (op, elt);
4163 if (GET_MODE (op) == V4SFmode
4164 || GET_MODE (op) == V2SFmode)
4165 tmp = gen_lowpart (SImode, tmp);
4166 return INTVAL (tmp);
4169 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4170 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4171 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4172 all items are set to the same value and contain COPIES replicas of the
4173 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4174 operand and the others are set to the value of the operand's msb. */
4176 static bool
4177 vspltis_constant (rtx op, unsigned step, unsigned copies)
4179 enum machine_mode mode = GET_MODE (op);
4180 enum machine_mode inner = GET_MODE_INNER (mode);
4182 unsigned i;
4183 unsigned nunits;
4184 unsigned bitsize;
4185 unsigned mask;
4187 HOST_WIDE_INT val;
4188 HOST_WIDE_INT splat_val;
4189 HOST_WIDE_INT msb_val;
4191 if (mode == V2DImode || mode == V2DFmode)
4192 return false;
4194 nunits = GET_MODE_NUNITS (mode);
4195 bitsize = GET_MODE_BITSIZE (inner);
4196 mask = GET_MODE_MASK (inner);
4198 val = const_vector_elt_as_int (op, nunits - 1);
4199 splat_val = val;
4200 msb_val = val > 0 ? 0 : -1;
4202 /* Construct the value to be splatted, if possible. If not, return false. */
4203 for (i = 2; i <= copies; i *= 2)
4205 HOST_WIDE_INT small_val;
4206 bitsize /= 2;
4207 small_val = splat_val >> bitsize;
4208 mask >>= bitsize;
4209 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4210 return false;
4211 splat_val = small_val;
4214 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4215 if (EASY_VECTOR_15 (splat_val))
4218 /* Also check if we can splat, and then add the result to itself. Do so if
4219 the value is positive, or if the splat instruction is using OP's mode;
4220 for splat_val < 0, the splat and the add should use the same mode. */
4221 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4222 && (splat_val >= 0 || (step == 1 && copies == 1)))
4225 /* Also check if we are loading up the most significant bit, which can be done by
4226 loading up -1 and shifting the value left by -1. */
4227 else if (EASY_VECTOR_MSB (splat_val, inner))
4230 else
4231 return false;
4233 /* Check if VAL is present in every STEP-th element, and the
4234 other elements are filled with its most significant bit. */
4235 for (i = 0; i < nunits - 1; ++i)
4237 HOST_WIDE_INT desired_val;
4238 if (((i + 1) & (step - 1)) == 0)
4239 desired_val = val;
4240 else
4241 desired_val = msb_val;
4243 if (desired_val != const_vector_elt_as_int (op, i))
4244 return false;
4247 return true;
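/* Worked example for the folding loop above (editorial note, not from the
   original file): the V4SI constant { 0x03030303, 0x03030303, 0x03030303,
   0x03030303 } tried with step 1, copies 4 folds 0x03030303 -> 0x0303 ->
   0x03, each halving checking that the value is two copies of its lower
   half, so the constant is recognized as a vspltisb of 3.  */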
4251 /* Return true if OP is of the given MODE and can be synthesized
4252 with a vspltisb, vspltish or vspltisw. */
4254 bool
4255 easy_altivec_constant (rtx op, enum machine_mode mode)
4257 unsigned step, copies;
4259 if (mode == VOIDmode)
4260 mode = GET_MODE (op);
4261 else if (mode != GET_MODE (op))
4262 return false;
4264 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4265 constants. */
4266 if (mode == V2DFmode)
4267 return zero_constant (op, mode);
4269 if (mode == V2DImode)
4271 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4272 easy. */
4273 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4274 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4275 return false;
4277 if (zero_constant (op, mode))
4278 return true;
4280 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4281 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4282 return true;
4284 return false;
4287 /* Start with a vspltisw. */
4288 step = GET_MODE_NUNITS (mode) / 4;
4289 copies = 1;
4291 if (vspltis_constant (op, step, copies))
4292 return true;
4294 /* Then try with a vspltish. */
4295 if (step == 1)
4296 copies <<= 1;
4297 else
4298 step >>= 1;
4300 if (vspltis_constant (op, step, copies))
4301 return true;
4303 /* And finally a vspltisb. */
4304 if (step == 1)
4305 copies <<= 1;
4306 else
4307 step >>= 1;
4309 if (vspltis_constant (op, step, copies))
4310 return true;
4312 return false;
4315 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4316 result is OP. Abort if it is not possible. */
4318 rtx
4319 gen_easy_altivec_constant (rtx op)
4321 enum machine_mode mode = GET_MODE (op);
4322 int nunits = GET_MODE_NUNITS (mode);
4323 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4324 unsigned step = nunits / 4;
4325 unsigned copies = 1;
4327 /* Start with a vspltisw. */
4328 if (vspltis_constant (op, step, copies))
4329 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4331 /* Then try with a vspltish. */
4332 if (step == 1)
4333 copies <<= 1;
4334 else
4335 step >>= 1;
4337 if (vspltis_constant (op, step, copies))
4338 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4340 /* And finally a vspltisb. */
4341 if (step == 1)
4342 copies <<= 1;
4343 else
4344 step >>= 1;
4346 if (vspltis_constant (op, step, copies))
4347 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4349 gcc_unreachable ();
4352 const char *
4353 output_vec_const_move (rtx *operands)
4355 int cst, cst2;
4356 enum machine_mode mode;
4357 rtx dest, vec;
4359 dest = operands[0];
4360 vec = operands[1];
4361 mode = GET_MODE (dest);
4363 if (TARGET_VSX)
4365 if (zero_constant (vec, mode))
4366 return "xxlxor %x0,%x0,%x0";
4368 if (mode == V2DImode
4369 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4370 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4371 return "vspltisw %0,-1";
4374 if (TARGET_ALTIVEC)
4376 rtx splat_vec;
4377 if (zero_constant (vec, mode))
4378 return "vxor %0,%0,%0";
4380 splat_vec = gen_easy_altivec_constant (vec);
4381 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4382 operands[1] = XEXP (splat_vec, 0);
4383 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4384 return "#";
4386 switch (GET_MODE (splat_vec))
4388 case V4SImode:
4389 return "vspltisw %0,%1";
4391 case V8HImode:
4392 return "vspltish %0,%1";
4394 case V16QImode:
4395 return "vspltisb %0,%1";
4397 default:
4398 gcc_unreachable ();
4402 gcc_assert (TARGET_SPE);
4404 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4405 pattern of V1DI, V4HI, and V2SF.
4407 FIXME: We should probably return # and add post reload
4408 splitters for these, but this way is so easy ;-). */
4409 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4410 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4411 operands[1] = CONST_VECTOR_ELT (vec, 0);
4412 operands[2] = CONST_VECTOR_ELT (vec, 1);
4413 if (cst == cst2)
4414 return "li %0,%1\n\tevmergelo %0,%0,%0";
4415 else
4416 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4419 /* Initialize TARGET of vector PAIRED to VALS. */
4421 void
4422 paired_expand_vector_init (rtx target, rtx vals)
4424 enum machine_mode mode = GET_MODE (target);
4425 int n_elts = GET_MODE_NUNITS (mode);
4426 int n_var = 0;
4427 rtx x, new_rtx, tmp, constant_op, op1, op2;
4428 int i;
4430 for (i = 0; i < n_elts; ++i)
4432 x = XVECEXP (vals, 0, i);
4433 if (!(CONST_INT_P (x)
4434 || GET_CODE (x) == CONST_DOUBLE
4435 || GET_CODE (x) == CONST_FIXED))
4436 ++n_var;
4438 if (n_var == 0)
4440 /* Load from constant pool. */
4441 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4442 return;
4445 if (n_var == 2)
4447 /* The vector is initialized only with non-constants. */
4448 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4449 XVECEXP (vals, 0, 1));
4451 emit_move_insn (target, new_rtx);
4452 return;
4455 /* One field is non-constant and the other one is a constant. Load the
4456 constant from the constant pool and use the ps_merge instruction to
4457 construct the whole vector. */
4458 op1 = XVECEXP (vals, 0, 0);
4459 op2 = XVECEXP (vals, 0, 1);
4461 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4463 tmp = gen_reg_rtx (GET_MODE (constant_op));
4464 emit_move_insn (tmp, constant_op);
4466 if (CONSTANT_P (op1))
4467 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4468 else
4469 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4471 emit_move_insn (target, new_rtx);
4474 void
4475 paired_expand_vector_move (rtx operands[])
4477 rtx op0 = operands[0], op1 = operands[1];
4479 emit_move_insn (op0, op1);
4482 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
4483 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
4484 operands for the relation operation COND. This is a recursive
4485 function. */
4487 static void
4488 paired_emit_vector_compare (enum rtx_code rcode,
4489 rtx dest, rtx op0, rtx op1,
4490 rtx cc_op0, rtx cc_op1)
4492 rtx tmp = gen_reg_rtx (V2SFmode);
4493 rtx tmp1, max, min;
4495 gcc_assert (TARGET_PAIRED_FLOAT);
4496 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4498 switch (rcode)
4500 case LT:
4501 case LTU:
4502 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4503 return;
4504 case GE:
4505 case GEU:
4506 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4507 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4508 return;
4509 case LE:
4510 case LEU:
4511 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4512 return;
4513 case GT:
4514 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4515 return;
4516 case EQ:
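      /* Editorial note (not from the original file): EQ is synthesized
	 from two compare-selects.  MAX becomes max(a,b) and MIN becomes
	 min(a,b), so MIN - MAX is >= 0 exactly when a == b (ignoring
	 NaNs), and one final select picks OP0 on equality and OP1
	 otherwise.  */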
4517 tmp1 = gen_reg_rtx (V2SFmode);
4518 max = gen_reg_rtx (V2SFmode);
4519 min = gen_reg_rtx (V2SFmode);
4522 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4523 emit_insn (gen_selv2sf4
4524 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4525 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4526 emit_insn (gen_selv2sf4
4527 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4528 emit_insn (gen_subv2sf3 (tmp1, min, max));
4529 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4530 return;
4531 case NE:
4532 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4533 return;
4534 case UNLE:
4535 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4536 return;
4537 case UNLT:
4538 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4539 return;
4540 case UNGE:
4541 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4542 return;
4543 case UNGT:
4544 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4545 return;
4546 default:
4547 gcc_unreachable ();
4550 return;
4553 /* Emit vector conditional expression.
4554 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4555 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4557 int
4558 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4559 rtx cond, rtx cc_op0, rtx cc_op1)
4561 enum rtx_code rcode = GET_CODE (cond);
4563 if (!TARGET_PAIRED_FLOAT)
4564 return 0;
4566 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4568 return 1;
4571 /* Initialize vector TARGET to VALS. */
4573 void
4574 rs6000_expand_vector_init (rtx target, rtx vals)
4576 enum machine_mode mode = GET_MODE (target);
4577 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4578 int n_elts = GET_MODE_NUNITS (mode);
4579 int n_var = 0, one_var = -1;
4580 bool all_same = true, all_const_zero = true;
4581 rtx x, mem;
4582 int i;
4584 for (i = 0; i < n_elts; ++i)
4586 x = XVECEXP (vals, 0, i);
4587 if (!(CONST_INT_P (x)
4588 || GET_CODE (x) == CONST_DOUBLE
4589 || GET_CODE (x) == CONST_FIXED))
4590 ++n_var, one_var = i;
4591 else if (x != CONST0_RTX (inner_mode))
4592 all_const_zero = false;
4594 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4595 all_same = false;
4598 if (n_var == 0)
4600 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4601 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4602 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4604 /* Zero register. */
4605 emit_insn (gen_rtx_SET (VOIDmode, target,
4606 gen_rtx_XOR (mode, target, target)));
4607 return;
4609 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4611 /* Splat immediate. */
4612 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4613 return;
4615 else
4617 /* Load from constant pool. */
4618 emit_move_insn (target, const_vec);
4619 return;
4623 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4624 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4626 rtx op0 = XVECEXP (vals, 0, 0);
4627 rtx op1 = XVECEXP (vals, 0, 1);
4628 if (all_same)
4630 if (!MEM_P (op0) && !REG_P (op0))
4631 op0 = force_reg (inner_mode, op0);
4632 if (mode == V2DFmode)
4633 emit_insn (gen_vsx_splat_v2df (target, op0));
4634 else
4635 emit_insn (gen_vsx_splat_v2di (target, op0));
4637 else
4639 op0 = force_reg (inner_mode, op0);
4640 op1 = force_reg (inner_mode, op1);
4641 if (mode == V2DFmode)
4642 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4643 else
4644 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4646 return;
4649 /* With single precision floating point on VSX, we know that internally single
4650 precision is actually represented as a double. Either build 2 V2DF
4651 vectors and convert those to single precision, or do one
4652 conversion and splat the result to the other elements. */
4653 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4655 if (all_same)
4657 rtx freg = gen_reg_rtx (V4SFmode);
4658 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4660 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4661 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4663 else
4665 rtx dbl_even = gen_reg_rtx (V2DFmode);
4666 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4667 rtx flt_even = gen_reg_rtx (V4SFmode);
4668 rtx flt_odd = gen_reg_rtx (V4SFmode);
4669 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4670 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4671 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4672 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4674 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4675 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4676 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4677 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4678 rs6000_expand_extract_even (target, flt_even, flt_odd);
4680 return;
4683 /* Store value to stack temp. Load vector element. Splat. However, splat
4684 of 64-bit items is not supported on Altivec. */
4685 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4687 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4688 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4689 XVECEXP (vals, 0, 0));
4690 x = gen_rtx_UNSPEC (VOIDmode,
4691 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4692 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4693 gen_rtvec (2,
4694 gen_rtx_SET (VOIDmode,
4695 target, mem),
4696 x)));
4697 x = gen_rtx_VEC_SELECT (inner_mode, target,
4698 gen_rtx_PARALLEL (VOIDmode,
4699 gen_rtvec (1, const0_rtx)));
4700 emit_insn (gen_rtx_SET (VOIDmode, target,
4701 gen_rtx_VEC_DUPLICATE (mode, x)));
4702 return;
4705 /* One field is non-constant. Load constant then overwrite
4706 varying field. */
4707 if (n_var == 1)
4709 rtx copy = copy_rtx (vals);
4711 /* Load constant part of vector, substitute neighboring value for
4712 varying element. */
4713 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4714 rs6000_expand_vector_init (target, copy);
4716 /* Insert variable. */
4717 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4718 return;
4721 /* Construct the vector in memory one field at a time
4722 and load the whole vector. */
4723 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4724 for (i = 0; i < n_elts; i++)
4725 emit_move_insn (adjust_address_nv (mem, inner_mode,
4726 i * GET_MODE_SIZE (inner_mode)),
4727 XVECEXP (vals, 0, i));
4728 emit_move_insn (target, mem);
4731 /* Set field ELT of TARGET to VAL. */
4733 void
4734 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4736 enum machine_mode mode = GET_MODE (target);
4737 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4738 rtx reg = gen_reg_rtx (mode);
4739 rtx mask, mem, x;
4740 int width = GET_MODE_SIZE (inner_mode);
4741 int i;
4743 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4745 rtx (*set_func) (rtx, rtx, rtx, rtx)
4746 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4747 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4748 return;
4751 /* Load single variable value. */
4752 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4753 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4754 x = gen_rtx_UNSPEC (VOIDmode,
4755 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4756 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4757 gen_rtvec (2,
4758 gen_rtx_SET (VOIDmode,
4759 reg, mem),
4760 x)));
4762 /* Linear sequence. */
4763 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4764 for (i = 0; i < 16; ++i)
4765 XVECEXP (mask, 0, i) = GEN_INT (i);
4767 /* Set permute mask to insert element into target. */
4768 for (i = 0; i < width; ++i)
4769 XVECEXP (mask, 0, elt*width + i)
4770 = GEN_INT (i + 0x10);
4771 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4772 x = gen_rtx_UNSPEC (mode,
4773 gen_rtvec (3, target, reg,
4774 force_reg (V16QImode, x)),
4775 UNSPEC_VPERM);
4776 emit_insn (gen_rtx_SET (VOIDmode, target, x));
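/* Worked example for the permute mask above (editorial note, not from the
   original file): inserting element 1 of a V4SI target (width 4) builds
   the byte selector { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 },
   i.e. bytes 4..7 of the result come from the register holding the new
   value (selectors 0x10 + i index the second vperm input) and every
   other byte is copied from TARGET.  */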
4779 /* Extract field ELT from VEC into TARGET. */
4781 void
4782 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4784 enum machine_mode mode = GET_MODE (vec);
4785 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4786 rtx mem;
4788 if (VECTOR_MEM_VSX_P (mode))
4790 switch (mode)
4792 default:
4793 break;
4794 case V2DFmode:
4795 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
4796 return;
4797 case V2DImode:
4798 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
4799 return;
4800 case V4SFmode:
4801 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
4802 return;
4806 /* Allocate mode-sized buffer. */
4807 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4809 emit_move_insn (mem, vec);
4811 /* Add offset to field within buffer matching vector element. */
4812 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
4814 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
4817 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
4818 implement ANDing by the mask IN. */
4819 void
4820 build_mask64_2_operands (rtx in, rtx *out)
4822 #if HOST_BITS_PER_WIDE_INT >= 64
4823 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4824 int shift;
4826 gcc_assert (GET_CODE (in) == CONST_INT);
4828 c = INTVAL (in);
4829 if (c & 1)
4831 /* Assume c initially something like 0x00fff000000fffff. The idea
4832 is to rotate the word so that the middle ^^^^^^ group of zeros
4833 is at the MS end and can be cleared with an rldicl mask. We then
4834 rotate back and clear off the MS ^^ group of zeros with a
4835 second rldicl. */
4836 c = ~c; /* c == 0xff000ffffff00000 */
4837 lsb = c & -c; /* lsb == 0x0000000000100000 */
4838 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4839 c = ~c; /* c == 0x00fff000000fffff */
4840 c &= -lsb; /* c == 0x00fff00000000000 */
4841 lsb = c & -c; /* lsb == 0x0000100000000000 */
4842 c = ~c; /* c == 0xff000fffffffffff */
4843 c &= -lsb; /* c == 0xff00000000000000 */
4844 shift = 0;
4845 while ((lsb >>= 1) != 0)
4846 shift++; /* shift == 44 on exit from loop */
4847 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4848 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4849 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4851 else
4853 /* Assume c initially something like 0xff000f0000000000. The idea
4854 is to rotate the word so that the ^^^ middle group of zeros
4855 is at the LS end and can be cleared with an rldicr mask. We then
4856 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4857 a second rldicr. */
4858 lsb = c & -c; /* lsb == 0x0000010000000000 */
4859 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4860 c = ~c; /* c == 0x00fff0ffffffffff */
4861 c &= -lsb; /* c == 0x00fff00000000000 */
4862 lsb = c & -c; /* lsb == 0x0000100000000000 */
4863 c = ~c; /* c == 0xff000fffffffffff */
4864 c &= -lsb; /* c == 0xff00000000000000 */
4865 shift = 0;
4866 while ((lsb >>= 1) != 0)
4867 shift++; /* shift == 44 on exit from loop */
4868 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4869 m1 >>= shift; /* m1 == 0x0000000000000fff */
4870 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4873 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4874 masks will be all 1's. We are guaranteed more than one transition. */
4875 out[0] = GEN_INT (64 - shift);
4876 out[1] = GEN_INT (m1);
4877 out[2] = GEN_INT (shift);
4878 out[3] = GEN_INT (m2);
4879 #else
4880 (void)in;
4881 (void)out;
4882 gcc_unreachable ();
4883 #endif
4886 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4888 bool
4889 invalid_e500_subreg (rtx op, enum machine_mode mode)
4891 if (TARGET_E500_DOUBLE)
4893 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4894 subreg:TI and reg:TF. Decimal float modes are like integer
4895 modes (only low part of each register used) for this
4896 purpose. */
4897 if (GET_CODE (op) == SUBREG
4898 && (mode == SImode || mode == DImode || mode == TImode
4899 || mode == DDmode || mode == TDmode)
4900 && REG_P (SUBREG_REG (op))
4901 && (GET_MODE (SUBREG_REG (op)) == DFmode
4902 || GET_MODE (SUBREG_REG (op)) == TFmode))
4903 return true;
4905 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4906 reg:TI. */
4907 if (GET_CODE (op) == SUBREG
4908 && (mode == DFmode || mode == TFmode)
4909 && REG_P (SUBREG_REG (op))
4910 && (GET_MODE (SUBREG_REG (op)) == DImode
4911 || GET_MODE (SUBREG_REG (op)) == TImode
4912 || GET_MODE (SUBREG_REG (op)) == DDmode
4913 || GET_MODE (SUBREG_REG (op)) == TDmode))
4914 return true;
4917 if (TARGET_SPE
4918 && GET_CODE (op) == SUBREG
4919 && mode == SImode
4920 && REG_P (SUBREG_REG (op))
4921 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4922 return true;
4924 return false;
4927 /* AIX increases natural record alignment to doubleword if the first
4928 field is an FP double while the FP fields remain word aligned. */
4930 unsigned int
4931 rs6000_special_round_type_align (tree type, unsigned int computed,
4932 unsigned int specified)
4934 unsigned int align = MAX (computed, specified);
4935 tree field = TYPE_FIELDS (type);
4937 /* Skip all non-field decls. */
4938 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4939 field = DECL_CHAIN (field);
4941 if (field != NULL && field != type)
4943 type = TREE_TYPE (field);
4944 while (TREE_CODE (type) == ARRAY_TYPE)
4945 type = TREE_TYPE (type);
4947 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4948 align = MAX (align, 64);
4951 return align;
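/* Example of the AIX rule above (editorial note, not from the original
   file): struct { double d; int i; } is raised to doubleword (64-bit)
   alignment because its first field has DFmode, whereas
   struct { int i; double d; } keeps its computed alignment and the
   double inside it stays word aligned.  */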
4954 /* Darwin increases record alignment to the natural alignment of
4955 the first field. */
4957 unsigned int
4958 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
4959 unsigned int specified)
4961 unsigned int align = MAX (computed, specified);
4963 if (TYPE_PACKED (type))
4964 return align;
4966 /* Find the first field, looking down into aggregates. */
4967 do {
4968 tree field = TYPE_FIELDS (type);
4969 /* Skip all non-field decls. */
4970 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4971 field = DECL_CHAIN (field);
4972 if (! field)
4973 break;
4974 /* A packed field does not contribute any extra alignment. */
4975 if (DECL_PACKED (field))
4976 return align;
4977 type = TREE_TYPE (field);
4978 while (TREE_CODE (type) == ARRAY_TYPE)
4979 type = TREE_TYPE (type);
4980 } while (AGGREGATE_TYPE_P (type));
4982 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
4983 align = MAX (align, TYPE_ALIGN (type));
4985 return align;
4988 /* Return 1 for an operand in small memory on V.4/eabi. */
4990 int
4991 small_data_operand (rtx op ATTRIBUTE_UNUSED,
4992 enum machine_mode mode ATTRIBUTE_UNUSED)
4994 #if TARGET_ELF
4995 rtx sym_ref;
4997 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
4998 return 0;
5000 if (DEFAULT_ABI != ABI_V4)
5001 return 0;
5003 /* Vector and float memory instructions have a limited offset on the
5004 SPE, so using a vector or float variable directly as an operand is
5005 not useful. */
5006 if (TARGET_SPE
5007 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5008 return 0;
5010 if (GET_CODE (op) == SYMBOL_REF)
5011 sym_ref = op;
5013 else if (GET_CODE (op) != CONST
5014 || GET_CODE (XEXP (op, 0)) != PLUS
5015 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5016 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5017 return 0;
5019 else
5021 rtx sum = XEXP (op, 0);
5022 HOST_WIDE_INT summand;
5024 /* We have to be careful here, because it is the referenced address
5025 that must be 32k from _SDA_BASE_, not just the symbol. */
5026 summand = INTVAL (XEXP (sum, 1));
5027 if (summand < 0 || summand > g_switch_value)
5028 return 0;
5030 sym_ref = XEXP (sum, 0);
5033 return SYMBOL_REF_SMALL_P (sym_ref);
5034 #else
5035 return 0;
5036 #endif
5039 /* Return true if either operand is a general purpose register. */
5041 bool
5042 gpr_or_gpr_p (rtx op0, rtx op1)
5044 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5045 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
5048 /* Given an address, return a constant offset term if one exists. */
5050 static rtx
5051 address_offset (rtx op)
5053 if (GET_CODE (op) == PRE_INC
5054 || GET_CODE (op) == PRE_DEC)
5055 op = XEXP (op, 0);
5056 else if (GET_CODE (op) == PRE_MODIFY
5057 || GET_CODE (op) == LO_SUM)
5058 op = XEXP (op, 1);
5060 if (GET_CODE (op) == CONST)
5061 op = XEXP (op, 0);
5063 if (GET_CODE (op) == PLUS)
5064 op = XEXP (op, 1);
5066 if (CONST_INT_P (op))
5067 return op;
5069 return NULL_RTX;
5072 /* Return true if the MEM operand is a memory operand suitable for use
5073 with a (full width, possibly multiple) gpr load/store. On
5074 powerpc64 this means the offset must be divisible by 4.
5075 Implements 'Y' constraint.
5077 Accept direct, indexed, offset, lo_sum and tocref. Since this is
5078 a constraint function we know the operand has satisfied a suitable
5079 memory predicate. Also accept some odd rtl generated by reload
5080 (see rs6000_legitimize_reload_address for various forms). It is
5081 important that reload rtl be accepted by appropriate constraints
5082 but not by the operand predicate.
5084 Offsetting a lo_sum should not be allowed, except where we know by
5085 alignment that a 32k boundary is not crossed, but see the ???
5086 comment in rs6000_legitimize_reload_address. Note that by
5087 "offsetting" here we mean a further offset to access parts of the
5088 MEM. It's fine to have a lo_sum where the inner address is offset
5089 from a sym, since the same sym+offset will appear in the high part
5090 of the address calculation. */
5092 bool
5093 mem_operand_gpr (rtx op, enum machine_mode mode)
5095 unsigned HOST_WIDE_INT offset;
5096 int extra;
5097 rtx addr = XEXP (op, 0);
5099 op = address_offset (addr);
5100 if (op == NULL_RTX)
5101 return true;
5103 offset = INTVAL (op);
5104 if (TARGET_POWERPC64 && (offset & 3) != 0)
5105 return false;
5107 if (GET_CODE (addr) == LO_SUM)
5108 /* We know by alignment that ABI_AIX medium/large model toc refs
5109 will not cross a 32k boundary, since all entries in the
5110 constant pool are naturally aligned and we check alignment for
5111 other medium model toc-relative addresses. For ABI_V4 and
5112 ABI_DARWIN lo_sum addresses, we just check that 64-bit
5113 offsets are 4-byte aligned. */
5114 return true;
5116 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
5117 gcc_assert (extra >= 0);
5118 return offset + 0x8000 < 0x10000u - extra;
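/* Standalone sketch of the final range check, assuming a 64-bit target
   (UNITS_PER_WORD == 8) and the powerpc64 'ds'-form restriction; the
   helper name is invented for illustration.  For a 16-byte mode, extra
   is 8, so the offset of the last word accessed must still fit the
   signed 16-bit displacement: offsets from -32768 up to 0x7ff4 (32756,
   once the low two bits must be clear) are accepted.  */
static int
mem_offset_ok_sketch (long offset, int mode_size)
{
  int extra = mode_size - 8;		/* Bytes beyond the first word.  */
  if (extra < 0)
    extra = 0;
  if ((offset & 3) != 0)		/* 'ds' form needs 4-byte multiples.  */
    return 0;
  return (unsigned long) (offset + 0x8000) < 0x10000u - extra;
}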
5121 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
5123 static bool
5124 reg_offset_addressing_ok_p (enum machine_mode mode)
5126 switch (mode)
5128 case V16QImode:
5129 case V8HImode:
5130 case V4SFmode:
5131 case V4SImode:
5132 case V2DFmode:
5133 case V2DImode:
5134 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
5135 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
5136 return false;
5137 break;
5139 case V4HImode:
5140 case V2SImode:
5141 case V1DImode:
5142 case V2SFmode:
5143 /* Paired vector modes. Only reg+reg addressing is valid. */
5144 if (TARGET_PAIRED_FLOAT)
5145 return false;
5146 break;
5148 default:
5149 break;
5152 return true;
5155 static bool
5156 virtual_stack_registers_memory_p (rtx op)
5158 int regnum;
5160 if (GET_CODE (op) == REG)
5161 regnum = REGNO (op);
5163 else if (GET_CODE (op) == PLUS
5164 && GET_CODE (XEXP (op, 0)) == REG
5165 && GET_CODE (XEXP (op, 1)) == CONST_INT)
5166 regnum = REGNO (XEXP (op, 0));
5168 else
5169 return false;
5171 return (regnum >= FIRST_VIRTUAL_REGISTER
5172 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
5175 /* Return true if memory accesses to OP are known to never straddle
5176 a 32k boundary. */
5178 static bool
5179 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
5180 enum machine_mode mode)
5182 tree decl, type;
5183 unsigned HOST_WIDE_INT dsize, dalign;
5185 if (GET_CODE (op) != SYMBOL_REF)
5186 return false;
5188 decl = SYMBOL_REF_DECL (op);
5189 if (!decl)
5191 if (GET_MODE_SIZE (mode) == 0)
5192 return false;
5194 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5195 replacing memory addresses with an anchor plus offset. We
5196 could find the decl by rummaging around in the block->objects
5197 VEC for the given offset but that seems like too much work. */
5198 dalign = 1;
5199 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5200 && SYMBOL_REF_ANCHOR_P (op)
5201 && SYMBOL_REF_BLOCK (op) != NULL)
5203 struct object_block *block = SYMBOL_REF_BLOCK (op);
5204 HOST_WIDE_INT lsb, mask;
5206 /* Given the alignment of the block.. */
5207 dalign = block->alignment;
5208 mask = dalign / BITS_PER_UNIT - 1;
5210 /* ..and the combined offset of the anchor and any offset
5211 to this block object.. */
5212 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5213 lsb = offset & -offset;
5215 /* ..find how many bits of the alignment we know for the
5216 object. */
5217 mask &= lsb - 1;
5218 dalign = mask + 1;
5220 return dalign >= GET_MODE_SIZE (mode);
5223 if (DECL_P (decl))
5225 if (TREE_CODE (decl) == FUNCTION_DECL)
5226 return true;
5228 if (!DECL_SIZE_UNIT (decl))
5229 return false;
5231 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5232 return false;
5234 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5235 if (dsize > 32768)
5236 return false;
5238 dalign = DECL_ALIGN_UNIT (decl);
5239 return dalign >= dsize;
5242 type = TREE_TYPE (decl);
5244 if (TREE_CODE (decl) == STRING_CST)
5245 dsize = TREE_STRING_LENGTH (decl);
5246 else if (TYPE_SIZE_UNIT (type)
5247 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5248 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5249 else
5250 return false;
5251 if (dsize > 32768)
5252 return false;
5254 dalign = TYPE_ALIGN (type);
5255 if (CONSTANT_CLASS_P (decl))
5256 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5257 else
5258 dalign = DATA_ALIGNMENT (decl, dalign);
5259 dalign /= BITS_PER_UNIT;
5260 return dalign >= dsize;
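/* Standalone sketch of the anchor-block computation above; the helper
   name is invented for illustration.  Given a block aligned to
   BLOCK_ALIGN_BITS and a byte offset into it, the alignment known for
   the access is limited by the lowest set bit of the offset: a 128-bit
   (16-byte) block with offset 40 yields lsb 8, so only 8-byte alignment
   is guaranteed.  */
static unsigned long
known_align_sketch (unsigned int block_align_bits, long offset)
{
  unsigned long mask = block_align_bits / 8 - 1;  /* BITS_PER_UNIT == 8.  */
  unsigned long lsb = offset & -offset;
  mask &= lsb - 1;
  return mask + 1;				  /* Alignment in bytes.  */
}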
5263 static bool
5264 constant_pool_expr_p (rtx op)
5266 rtx base, offset;
5268 split_const (op, &base, &offset);
5269 return (GET_CODE (base) == SYMBOL_REF
5270 && CONSTANT_POOL_ADDRESS_P (base)
5271 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5274 static const_rtx tocrel_base, tocrel_offset;
5276 /* Return true if OP is a toc pointer relative address (the output
5277 of create_TOC_reference). If STRICT, do not match high part or
5278 non-split -mcmodel=large/medium toc pointer relative addresses. */
5280 bool
5281 toc_relative_expr_p (const_rtx op, bool strict)
5283 if (!TARGET_TOC)
5284 return false;
5286 if (TARGET_CMODEL != CMODEL_SMALL)
5288 /* Only match the low part. */
5289 if (GET_CODE (op) == LO_SUM
5290 && REG_P (XEXP (op, 0))
5291 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5292 op = XEXP (op, 1);
5293 else if (strict)
5294 return false;
5297 tocrel_base = op;
5298 tocrel_offset = const0_rtx;
5299 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5301 tocrel_base = XEXP (op, 0);
5302 tocrel_offset = XEXP (op, 1);
5305 return (GET_CODE (tocrel_base) == UNSPEC
5306 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5309 /* Return true if X is a constant pool address, and also for cmodel=medium
5310 if X is a toc-relative address known to be offsettable within MODE. */
5312 bool
5313 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5314 bool strict)
5316 return (toc_relative_expr_p (x, strict)
5317 && (TARGET_CMODEL != CMODEL_MEDIUM
5318 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5319 || mode == QImode
5320 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5321 INTVAL (tocrel_offset), mode)));
5324 static bool
5325 legitimate_small_data_p (enum machine_mode mode, rtx x)
5327 return (DEFAULT_ABI == ABI_V4
5328 && !flag_pic && !TARGET_TOC
5329 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5330 && small_data_operand (x, mode));
5333 /* SPE offset addressing is limited to 5-bits worth of double words. */
5334 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
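/* Editorial note (not from the original file): the mask form accepts
   exactly the doubleword-aligned offsets 0, 8, 16, ..., 248, i.e. a
   5-bit field scaled by 8, matching the evldd/evstdd encoding.  */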
5336 bool
5337 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
5338 bool strict, bool worst_case)
5340 unsigned HOST_WIDE_INT offset;
5341 unsigned int extra;
5343 if (GET_CODE (x) != PLUS)
5344 return false;
5345 if (!REG_P (XEXP (x, 0)))
5346 return false;
5347 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5348 return false;
5349 if (!reg_offset_addressing_ok_p (mode))
5350 return virtual_stack_registers_memory_p (x);
5351 if (legitimate_constant_pool_address_p (x, mode, strict))
5352 return true;
5353 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5354 return false;
5356 offset = INTVAL (XEXP (x, 1));
5357 extra = 0;
5358 switch (mode)
5360 case V4HImode:
5361 case V2SImode:
5362 case V1DImode:
5363 case V2SFmode:
5364 /* SPE vector modes. */
5365 return SPE_CONST_OFFSET_OK (offset);
5367 case DFmode:
5368 case DDmode:
5369 case DImode:
5370 /* On e500v2, we may have:
5372 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
5374 Which gets addressed with evldd instructions. */
5375 if (TARGET_E500_DOUBLE)
5376 return SPE_CONST_OFFSET_OK (offset);
5378 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5379 addressing. */
5380 if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
5381 return false;
5383 if (!worst_case)
5384 break;
5385 if (!TARGET_POWERPC64)
5386 extra = 4;
5387 else if (offset & 3)
5388 return false;
5389 break;
5391 case TFmode:
5392 case TDmode:
5393 case TImode:
5394 if (TARGET_E500_DOUBLE)
5395 return (SPE_CONST_OFFSET_OK (offset)
5396 && SPE_CONST_OFFSET_OK (offset + 8));
5398 extra = 8;
5399 if (!worst_case)
5400 break;
5401 if (!TARGET_POWERPC64)
5402 extra = 12;
5403 else if (offset & 3)
5404 return false;
5405 break;
5407 default:
5408 break;
5411 offset += 0x8000;
5412 return offset < 0x10000 - extra;
5415 bool
5416 legitimate_indexed_address_p (rtx x, int strict)
5418 rtx op0, op1;
5420 if (GET_CODE (x) != PLUS)
5421 return false;
5423 op0 = XEXP (x, 0);
5424 op1 = XEXP (x, 1);
5426 /* Recognize the rtl generated by reload which we know will later be
5427 replaced with proper base and index regs. */
5428 if (!strict
5429 && reload_in_progress
5430 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5431 && REG_P (op1))
5432 return true;
5434 return (REG_P (op0) && REG_P (op1)
5435 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5436 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5437 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5438 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5441 bool
5442 avoiding_indexed_address_p (enum machine_mode mode)
5444 /* Avoid indexed addressing for modes that have non-indexed
5445 load/store instruction forms. */
5446 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5449 inline bool
5450 legitimate_indirect_address_p (rtx x, int strict)
5452 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5455 bool
5456 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5458 if (!TARGET_MACHO || !flag_pic
5459 || mode != SImode || GET_CODE (x) != MEM)
5460 return false;
5461 x = XEXP (x, 0);
5463 if (GET_CODE (x) != LO_SUM)
5464 return false;
5465 if (GET_CODE (XEXP (x, 0)) != REG)
5466 return false;
5467 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5468 return false;
5469 x = XEXP (x, 1);
5471 return CONSTANT_P (x);
5474 static bool
5475 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5477 if (GET_CODE (x) != LO_SUM)
5478 return false;
5479 if (GET_CODE (XEXP (x, 0)) != REG)
5480 return false;
5481 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5482 return false;
5483 /* Restrict addressing for DI because of our SUBREG hackery. */
5484 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
5485 return false;
5486 x = XEXP (x, 1);
5488 if (TARGET_ELF || TARGET_MACHO)
5490 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5491 return false;
5492 if (TARGET_TOC)
5493 return false;
5494 if (GET_MODE_NUNITS (mode) != 1)
5495 return false;
5496 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5497 && !(/* ??? Assume floating point reg based on mode? */
5498 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5499 && (mode == DFmode || mode == DDmode)))
5500 return false;
5502 return CONSTANT_P (x);
5505 return false;
5509 /* Try machine-dependent ways of modifying an illegitimate address
5510 to be legitimate. If we find one, return the new, valid address.
5511 This is used from only one place: `memory_address' in explow.c.
5513 OLDX is the address as it was before break_out_memory_refs was
5514 called. In some cases it is useful to look at this to decide what
5515 needs to be done.
5517 It is always safe for this function to do nothing. It exists to
5518 recognize opportunities to optimize the output.
5520 On RS/6000, first check for the sum of a register with a constant
5521 integer that is out of range. If so, generate code to add the
5522 constant with the low-order 16 bits masked to the register and force
5523 this result into another register (this can be done with `cau').
5524 Then generate an address of REG+(CONST&0xffff), allowing for the
5525 possibility of bit 16 being a one.
5527 Then check for the sum of a register and something not constant, try to
5528 load the other things into a register and return the sum. */
5530 static rtx
5531 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5532 enum machine_mode mode)
5534 unsigned int extra;
5536 if (!reg_offset_addressing_ok_p (mode))
5538 if (virtual_stack_registers_memory_p (x))
5539 return x;
5541 /* In theory we should not be seeing addresses of the form reg+0,
5542 but just in case it is generated, optimize it away. */
5543 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5544 return force_reg (Pmode, XEXP (x, 0));
5546 /* Make sure both operands are registers. */
5547 else if (GET_CODE (x) == PLUS)
5548 return gen_rtx_PLUS (Pmode,
5549 force_reg (Pmode, XEXP (x, 0)),
5550 force_reg (Pmode, XEXP (x, 1)));
5551 else
5552 return force_reg (Pmode, x);
5554 if (GET_CODE (x) == SYMBOL_REF)
5556 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5557 if (model != 0)
5558 return rs6000_legitimize_tls_address (x, model);
5561 extra = 0;
5562 switch (mode)
5564 case TFmode:
5565 case TDmode:
5566 case TImode:
5567 /* As in legitimate_offset_address_p we do not assume
5568 worst-case. The mode here is just a hint as to the registers
5569 used. A TImode is usually in gprs, but may actually be in
5570 fprs. Leave worst-case scenario for reload to handle via
5571 insn constraints. */
5572 extra = 8;
5573 break;
5574 default:
5575 break;
5578 if (GET_CODE (x) == PLUS
5579 && GET_CODE (XEXP (x, 0)) == REG
5580 && GET_CODE (XEXP (x, 1)) == CONST_INT
5581 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5582 >= 0x10000 - extra)
5583 && !(SPE_VECTOR_MODE (mode)
5584 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
5586 HOST_WIDE_INT high_int, low_int;
5587 rtx sum;
5588 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5589 if (low_int >= 0x8000 - extra)
5590 low_int = 0;
5591 high_int = INTVAL (XEXP (x, 1)) - low_int;
5592 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5593 GEN_INT (high_int)), 0);
5594 return plus_constant (Pmode, sum, low_int);
5596 else if (GET_CODE (x) == PLUS
5597 && GET_CODE (XEXP (x, 0)) == REG
5598 && GET_CODE (XEXP (x, 1)) != CONST_INT
5599 && GET_MODE_NUNITS (mode) == 1
5600 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5601 || (/* ??? Assume floating point reg based on mode? */
5602 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5603 && (mode == DFmode || mode == DDmode)))
5604 && !avoiding_indexed_address_p (mode))
5606 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5607 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5609 else if (SPE_VECTOR_MODE (mode)
5610 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
5612 if (mode == DImode)
5613 return x;
5614 /* We accept [reg + reg] and [reg + OFFSET]. */
5616 if (GET_CODE (x) == PLUS)
5618 rtx op1 = XEXP (x, 0);
5619 rtx op2 = XEXP (x, 1);
5620 rtx y;
5622 op1 = force_reg (Pmode, op1);
5624 if (GET_CODE (op2) != REG
5625 && (GET_CODE (op2) != CONST_INT
5626 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5627 || (GET_MODE_SIZE (mode) > 8
5628 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5629 op2 = force_reg (Pmode, op2);
5631 /* We can't always do [reg + reg] for these, because [reg +
5632 reg + offset] is not a legitimate addressing mode. */
5633 y = gen_rtx_PLUS (Pmode, op1, op2);
5635 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5636 return force_reg (Pmode, y);
5637 else
5638 return y;
5641 return force_reg (Pmode, x);
5643 else if ((TARGET_ELF
5644 #if TARGET_MACHO
5645 || !MACHO_DYNAMIC_NO_PIC_P
5646 #endif
5648 && TARGET_32BIT
5649 && TARGET_NO_TOC
5650 && ! flag_pic
5651 && GET_CODE (x) != CONST_INT
5652 && GET_CODE (x) != CONST_DOUBLE
5653 && CONSTANT_P (x)
5654 && GET_MODE_NUNITS (mode) == 1
5655 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5656 || (/* ??? Assume floating point reg based on mode? */
5657 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5658 && (mode == DFmode || mode == DDmode))))
5660 rtx reg = gen_reg_rtx (Pmode);
5661 if (TARGET_ELF)
5662 emit_insn (gen_elf_high (reg, x));
5663 else
5664 emit_insn (gen_macho_high (reg, x));
5665 return gen_rtx_LO_SUM (Pmode, reg, x);
5667 else if (TARGET_TOC
5668 && GET_CODE (x) == SYMBOL_REF
5669 && constant_pool_expr_p (x)
5670 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5671 return create_TOC_reference (x, NULL_RTX);
5672 else
5673 return x;
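/* Editorial sketch (illustration only, not part of GCC): the 16-bit
   high/low split that rs6000_legitimize_address performs above for
   out-of-range reg+const addresses, written standalone.  LOW is the
   low 16 bits of VAL interpreted as signed, HIGH is the remainder;
   HIGH + LOW == VAL and LOW always fits the signed 16-bit displacement
   field of a d-form memory access.  For VAL = 0x1234abcd this yields
   LOW = -0x5433 and HIGH = 0x12350000.  The EXTRA adjustment made
   above for multi-register modes is omitted here.  */
static long long
split_offset_sketch (long long val, long long *high)
{
  long long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  *high = val - low;		/* always a multiple of 0x10000 */
  return low;
}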
5676 /* Debug version of rs6000_legitimize_address. */
5677 static rtx
5678 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5680 rtx ret;
5681 rtx insns;
5683 start_sequence ();
5684 ret = rs6000_legitimize_address (x, oldx, mode);
5685 insns = get_insns ();
5686 end_sequence ();
5688 if (ret != x)
5690 fprintf (stderr,
5691 "\nrs6000_legitimize_address: mode %s, old code %s, "
5692 "new code %s, modified\n",
5693 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5694 GET_RTX_NAME (GET_CODE (ret)));
5696 fprintf (stderr, "Original address:\n");
5697 debug_rtx (x);
5699 fprintf (stderr, "oldx:\n");
5700 debug_rtx (oldx);
5702 fprintf (stderr, "New address:\n");
5703 debug_rtx (ret);
5705 if (insns)
5707 fprintf (stderr, "Insns added:\n");
5708 debug_rtx_list (insns, 20);
5711 else
5713 fprintf (stderr,
5714 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5715 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5717 debug_rtx (x);
5720 if (insns)
5721 emit_insn (insns);
5723 return ret;
5726 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5727 We need to emit DTP-relative relocations. */
5729 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5730 static void
5731 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5733 switch (size)
5735 case 4:
5736 fputs ("\t.long\t", file);
5737 break;
5738 case 8:
5739 fputs (DOUBLE_INT_ASM_OP, file);
5740 break;
5741 default:
5742 gcc_unreachable ();
5744 output_addr_const (file, x);
5745 fputs ("@dtprel+0x8000", file);
5748 /* In the name of slightly smaller debug output, and to cater to
5749 general assembler lossage, recognize various UNSPEC sequences
5750 and turn them back into a direct symbol reference. */
5752 static rtx
5753 rs6000_delegitimize_address (rtx orig_x)
5755 rtx x, y, offset;
5757 orig_x = delegitimize_mem_from_attrs (orig_x);
5758 x = orig_x;
5759 if (MEM_P (x))
5760 x = XEXP (x, 0);
5762 y = x;
5763 if (TARGET_CMODEL != CMODEL_SMALL
5764 && GET_CODE (y) == LO_SUM)
5765 y = XEXP (y, 1);
5767 offset = NULL_RTX;
5768 if (GET_CODE (y) == PLUS
5769 && GET_MODE (y) == Pmode
5770 && CONST_INT_P (XEXP (y, 1)))
5772 offset = XEXP (y, 1);
5773 y = XEXP (y, 0);
5776 if (GET_CODE (y) == UNSPEC
5777 && XINT (y, 1) == UNSPEC_TOCREL)
5779 #ifdef ENABLE_CHECKING
5780 if (REG_P (XVECEXP (y, 0, 1))
5781 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
5783 /* All good. */
5785 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
5787 /* Weirdness alert. df_note_compute can replace r2 with a
5788 debug_expr when this unspec is in a debug_insn.
5789 Seen in gcc.dg/pr51957-1.c */
5791 else
5793 debug_rtx (orig_x);
5794 abort ();
5796 #endif
5797 y = XVECEXP (y, 0, 0);
5798 if (offset != NULL_RTX)
5799 y = gen_rtx_PLUS (Pmode, y, offset);
5800 if (!MEM_P (orig_x))
5801 return y;
5802 else
5803 return replace_equiv_address_nv (orig_x, y);
5806 if (TARGET_MACHO
5807 && GET_CODE (orig_x) == LO_SUM
5808 && GET_CODE (XEXP (orig_x, 1)) == CONST)
5810 y = XEXP (XEXP (orig_x, 1), 0);
5811 if (GET_CODE (y) == UNSPEC
5812 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5813 return XVECEXP (y, 0, 0);
5816 return orig_x;
5819 /* Return true if X shouldn't be emitted into the debug info.
5820 The linker doesn't like .toc section references from
5821 .debug_* sections, so reject .toc section symbols. */
5823 static bool
5824 rs6000_const_not_ok_for_debug_p (rtx x)
5826 if (GET_CODE (x) == SYMBOL_REF
5827 && CONSTANT_POOL_ADDRESS_P (x))
5829 rtx c = get_pool_constant (x);
5830 enum machine_mode cmode = get_pool_mode (x);
5831 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
5832 return true;
5835 return false;
5838 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5840 static GTY(()) rtx rs6000_tls_symbol;
5841 static rtx
5842 rs6000_tls_get_addr (void)
5844 if (!rs6000_tls_symbol)
5845 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5847 return rs6000_tls_symbol;
5850 /* Construct the SYMBOL_REF for TLS GOT references. */
5852 static GTY(()) rtx rs6000_got_symbol;
5853 static rtx
5854 rs6000_got_sym (void)
5856 if (!rs6000_got_symbol)
5858 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5859 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5860 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5863 return rs6000_got_symbol;
5866 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5867 this (thread-local) address. */
5869 static rtx
5870 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
5872 rtx dest, insn;
5874 dest = gen_reg_rtx (Pmode);
5875 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
5877 rtx tlsreg;
5879 if (TARGET_64BIT)
5881 tlsreg = gen_rtx_REG (Pmode, 13);
5882 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
5884 else
5886 tlsreg = gen_rtx_REG (Pmode, 2);
5887 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
5889 emit_insn (insn);
5891 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
5893 rtx tlsreg, tmp;
5895 tmp = gen_reg_rtx (Pmode);
5896 if (TARGET_64BIT)
5898 tlsreg = gen_rtx_REG (Pmode, 13);
5899 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
5901 else
5903 tlsreg = gen_rtx_REG (Pmode, 2);
5904 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
5906 emit_insn (insn);
5907 if (TARGET_64BIT)
5908 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
5909 else
5910 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
5911 emit_insn (insn);
5913 else
5915 rtx r3, got, tga, tmp1, tmp2, call_insn;
5917 /* We currently use relocations like @got@tlsgd for tls, which
5918 means the linker will handle allocation of tls entries, placing
5919 them in the .got section. So use a pointer to the .got section,
5920 not one to secondary TOC sections used by 64-bit -mminimal-toc,
5921 or to secondary GOT sections used by 32-bit -fPIC. */
5922 if (TARGET_64BIT)
5923 got = gen_rtx_REG (Pmode, 2);
5924 else
5926 if (flag_pic == 1)
5927 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
5928 else
5930 rtx gsym = rs6000_got_sym ();
5931 got = gen_reg_rtx (Pmode);
5932 if (flag_pic == 0)
5933 rs6000_emit_move (got, gsym, Pmode);
5934 else
5936 rtx mem, lab, last;
5938 tmp1 = gen_reg_rtx (Pmode);
5939 tmp2 = gen_reg_rtx (Pmode);
5940 mem = gen_const_mem (Pmode, tmp1);
5941 lab = gen_label_rtx ();
5942 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
5943 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
5944 if (TARGET_LINK_STACK)
5945 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
5946 emit_move_insn (tmp2, mem);
5947 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
5948 set_unique_reg_note (last, REG_EQUAL, gsym);
5953 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
5955 tga = rs6000_tls_get_addr ();
5956 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
5957 1, const0_rtx, Pmode);
5959 r3 = gen_rtx_REG (Pmode, 3);
5960 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5961 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
5962 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5963 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
5964 else if (DEFAULT_ABI == ABI_V4)
5965 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
5966 else
5967 gcc_unreachable ();
5968 call_insn = last_call_insn ();
5969 PATTERN (call_insn) = insn;
5970 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5971 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5972 pic_offset_table_rtx);
5974 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
5976 tga = rs6000_tls_get_addr ();
5977 tmp1 = gen_reg_rtx (Pmode);
5978 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
5979 1, const0_rtx, Pmode);
5981 r3 = gen_rtx_REG (Pmode, 3);
5982 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5983 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
5984 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5985 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
5986 else if (DEFAULT_ABI == ABI_V4)
5987 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
5988 else
5989 gcc_unreachable ();
5990 call_insn = last_call_insn ();
5991 PATTERN (call_insn) = insn;
5992 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5993 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5994 pic_offset_table_rtx);
5996 if (rs6000_tls_size == 16)
5998 if (TARGET_64BIT)
5999 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
6000 else
6001 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
6003 else if (rs6000_tls_size == 32)
6005 tmp2 = gen_reg_rtx (Pmode);
6006 if (TARGET_64BIT)
6007 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
6008 else
6009 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
6010 emit_insn (insn);
6011 if (TARGET_64BIT)
6012 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
6013 else
6014 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
6016 else
6018 tmp2 = gen_reg_rtx (Pmode);
6019 if (TARGET_64BIT)
6020 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
6021 else
6022 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
6023 emit_insn (insn);
6024 insn = gen_rtx_SET (Pmode, dest,
6025 gen_rtx_PLUS (Pmode, tmp2, tmp1));
6027 emit_insn (insn);
6029 else
6031 /* IE, or 64-bit offset LE. */
6032 tmp2 = gen_reg_rtx (Pmode);
6033 if (TARGET_64BIT)
6034 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
6035 else
6036 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
6037 emit_insn (insn);
6038 if (TARGET_64BIT)
6039 insn = gen_tls_tls_64 (dest, tmp2, addr);
6040 else
6041 insn = gen_tls_tls_32 (dest, tmp2, addr);
6042 emit_insn (insn);
6046 return dest;
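/* Editorial sketch (illustration only, not part of GCC): the ha/lo
   offset semantics assumed by the tprel_ha/tprel_lo insn pairs used
   above for -mtls-size=32.  The "ha" (high-adjusted) half
   pre-compensates for the sign of the low half, so that
   (HA << 16) + LO == OFF for any 32-bit offset OFF.  */
static long long
tprel_ha_sketch (long long off)
{
  return (off + 0x8000) >> 16;			/* high-adjusted upper half */
}

static long long
tprel_lo_sketch (long long off)
{
  return ((off & 0xffff) ^ 0x8000) - 0x8000;	/* sign-extended low half */
}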
6049 /* Return 1 if X contains a thread-local symbol. */
6051 static bool
6052 rs6000_tls_referenced_p (rtx x)
6054 if (! TARGET_HAVE_TLS)
6055 return false;
6057 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
6060 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
6062 static bool
6063 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
6065 if (GET_CODE (x) == HIGH
6066 && GET_CODE (XEXP (x, 0)) == UNSPEC)
6067 return true;
6069 return rs6000_tls_referenced_p (x);
6072 /* Return 1 if *X is a thread-local symbol. This is the same as
6073 rs6000_tls_symbol_ref except for the type of the unused argument. */
6075 static int
6076 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6078 return RS6000_SYMBOL_REF_TLS_P (*x);
6081 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
6082 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
6083 can be addressed relative to the toc pointer. */
6085 static bool
6086 use_toc_relative_ref (rtx sym)
6088 return ((constant_pool_expr_p (sym)
6089 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
6090 get_pool_mode (sym)))
6091 || (TARGET_CMODEL == CMODEL_MEDIUM
6092 && !CONSTANT_POOL_ADDRESS_P (sym)
6093 && SYMBOL_REF_LOCAL_P (sym)));
6096 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6097 replace the input X, or the original X if no replacement is called for.
6098 The output parameter *WIN is 1 if the calling macro should goto WIN,
6099 0 if it should not.
6101 For RS/6000, we wish to handle large displacements off a base
6102 register by splitting the addend across an addis and the mem insn.
6103 This cuts the number of extra insns needed from 3 to 1.
6105 On Darwin, we use this to generate code for floating point constants.
6106 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6107 The Darwin code is inside #if TARGET_MACHO because only then are the
6108 machopic_* functions defined. */
6109 static rtx
6110 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
6111 int opnum, int type,
6112 int ind_levels ATTRIBUTE_UNUSED, int *win)
6114 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6116 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6117 DFmode/DImode MEM. */
6118 if (reg_offset_p
6119 && opnum == 1
6120 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
6121 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
6122 reg_offset_p = false;
6124 /* We must recognize output that we have already generated ourselves. */
6125 if (GET_CODE (x) == PLUS
6126 && GET_CODE (XEXP (x, 0)) == PLUS
6127 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6128 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6129 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6131 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6132 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6133 opnum, (enum reload_type) type);
6134 *win = 1;
6135 return x;
6138 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6139 if (GET_CODE (x) == LO_SUM
6140 && GET_CODE (XEXP (x, 0)) == HIGH)
6142 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6143 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6144 opnum, (enum reload_type) type);
6145 *win = 1;
6146 return x;
6149 #if TARGET_MACHO
6150 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
6151 && GET_CODE (x) == LO_SUM
6152 && GET_CODE (XEXP (x, 0)) == PLUS
6153 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
6154 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
6155 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6156 && machopic_operand_p (XEXP (x, 1)))
6158 /* Result of previous invocation of this function on Darwin
6159 floating point constant. */
6160 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6161 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6162 opnum, (enum reload_type) type);
6163 *win = 1;
6164 return x;
6166 #endif
6168 if (TARGET_CMODEL != CMODEL_SMALL
6169 && reg_offset_p
6170 && small_toc_ref (x, VOIDmode))
6172 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6173 x = gen_rtx_LO_SUM (Pmode, hi, x);
6174 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6175 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6176 opnum, (enum reload_type) type);
6177 *win = 1;
6178 return x;
6181 /* Force ld/std non-word aligned offset into base register by wrapping
6182 in offset 0. */
6183 if (GET_CODE (x) == PLUS
6184 && GET_CODE (XEXP (x, 0)) == REG
6185 && REGNO (XEXP (x, 0)) < 32
6186 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6187 && GET_CODE (XEXP (x, 1)) == CONST_INT
6188 && reg_offset_p
6189 && (INTVAL (XEXP (x, 1)) & 3) != 0
6190 && VECTOR_MEM_NONE_P (mode)
6191 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
6192 && TARGET_POWERPC64)
6194 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
6195 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6196 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6197 opnum, (enum reload_type) type);
6198 *win = 1;
6199 return x;
6202 if (GET_CODE (x) == PLUS
6203 && GET_CODE (XEXP (x, 0)) == REG
6204 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6205 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6206 && GET_CODE (XEXP (x, 1)) == CONST_INT
6207 && reg_offset_p
6208 && !SPE_VECTOR_MODE (mode)
6209 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6210 || mode == DDmode || mode == TDmode
6211 || mode == DImode))
6212 && VECTOR_MEM_NONE_P (mode))
6214 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6215 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6216 HOST_WIDE_INT high
6217 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6219 /* Check for 32-bit overflow. */
6220 if (high + low != val)
6222 *win = 0;
6223 return x;
6226 /* Reload the high part into a base reg; leave the low part
6227 in the mem directly. */
6229 x = gen_rtx_PLUS (GET_MODE (x),
6230 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6231 GEN_INT (high)),
6232 GEN_INT (low));
6234 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6235 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6236 opnum, (enum reload_type) type);
6237 *win = 1;
6238 return x;
6241 if (GET_CODE (x) == SYMBOL_REF
6242 && reg_offset_p
6243 && VECTOR_MEM_NONE_P (mode)
6244 && !SPE_VECTOR_MODE (mode)
6245 #if TARGET_MACHO
6246 && DEFAULT_ABI == ABI_DARWIN
6247 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6248 && machopic_symbol_defined_p (x)
6249 #else
6250 && DEFAULT_ABI == ABI_V4
6251 && !flag_pic
6252 #endif
6253 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6254 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6255 without fprs.
6256 ??? Assume floating point reg based on mode? This assumption is
6257 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
6258 where reload ends up doing a DFmode load of a constant from
6259 mem using two gprs. Unfortunately, at this point reload
6260 hasn't yet selected regs so poking around in reload data
6261 won't help and even if we could figure out the regs reliably,
6262 we'd still want to allow this transformation when the mem is
6263 naturally aligned. Since we say the address is good here, we
6264 can't disable offsets from LO_SUMs in mem_operand_gpr.
6265 FIXME: Allow offset from lo_sum for other modes too, when
6266 mem is sufficiently aligned. */
6267 && mode != TFmode
6268 && mode != TDmode
6269 && (mode != DImode || TARGET_POWERPC64)
6270 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6271 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6273 #if TARGET_MACHO
6274 if (flag_pic)
6276 rtx offset = machopic_gen_offset (x);
6277 x = gen_rtx_LO_SUM (GET_MODE (x),
6278 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6279 gen_rtx_HIGH (Pmode, offset)), offset);
6281 else
6282 #endif
6283 x = gen_rtx_LO_SUM (GET_MODE (x),
6284 gen_rtx_HIGH (Pmode, x), x);
6286 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6287 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6288 opnum, (enum reload_type) type);
6289 *win = 1;
6290 return x;
6293 /* Reload an offset address wrapped by an AND that represents the
6294 masking of the lower bits. Strip the outer AND and let reload
6295 convert the offset address into an indirect address. For VSX,
6296 force reload to create the address with an AND in a separate
6297 register, because we can't guarantee an altivec register will
6298 be used. */
6299 if (VECTOR_MEM_ALTIVEC_P (mode)
6300 && GET_CODE (x) == AND
6301 && GET_CODE (XEXP (x, 0)) == PLUS
6302 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6303 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6304 && GET_CODE (XEXP (x, 1)) == CONST_INT
6305 && INTVAL (XEXP (x, 1)) == -16)
6307 x = XEXP (x, 0);
6308 *win = 1;
6309 return x;
6312 if (TARGET_TOC
6313 && reg_offset_p
6314 && GET_CODE (x) == SYMBOL_REF
6315 && use_toc_relative_ref (x))
6317 x = create_TOC_reference (x, NULL_RTX);
6318 if (TARGET_CMODEL != CMODEL_SMALL)
6319 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6320 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6321 opnum, (enum reload_type) type);
6322 *win = 1;
6323 return x;
6325 *win = 0;
6326 return x;
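/* Editorial sketch (illustration only, not part of GCC): the high/low
   reload split used above, including its 32-bit overflow guard.  Both
   halves are sign-extended, so for VAL = 0x7fffffff the split yields
   LOW = -1 and HIGH = -0x80000000; HIGH + LOW != VAL and the
   transformation is correctly rejected.  */
static int
reload_split_ok_sketch (long long val, long long *high, long long *low)
{
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  *high = (((val - *low) & 0xffffffffLL) ^ 0x80000000LL) - 0x80000000LL;
  return *high + *low == val;	/* false when the high part would wrap */
}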
6329 /* Debug version of rs6000_legitimize_reload_address. */
6330 static rtx
6331 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6332 int opnum, int type,
6333 int ind_levels, int *win)
6335 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6336 ind_levels, win);
6337 fprintf (stderr,
6338 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6339 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6340 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6341 debug_rtx (x);
6343 if (x == ret)
6344 fprintf (stderr, "Same address returned\n");
6345 else if (!ret)
6346 fprintf (stderr, "NULL returned\n");
6347 else
6349 fprintf (stderr, "New address:\n");
6350 debug_rtx (ret);
6353 return ret;
6356 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6357 that is a valid memory address for an instruction.
6358 The MODE argument is the machine mode for the MEM expression
6359 that wants to use this address.
6361 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
6362 refers to a constant pool entry of an address (or the sum of it
6363 plus a constant), a short (16-bit signed) constant plus a register,
6364 the sum of two registers, or a register indirect, possibly with an
6365 auto-increment. For DFmode, DDmode and DImode with a constant plus
6366 register, we must ensure that both words are addressable or PowerPC64
6367 with offset word aligned.
6369 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6370 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6371 because adjacent memory cells are accessed by adding word-sized offsets
6372 during assembly output. */
6373 static bool
6374 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6376 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6378 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6379 if (VECTOR_MEM_ALTIVEC_P (mode)
6380 && GET_CODE (x) == AND
6381 && GET_CODE (XEXP (x, 1)) == CONST_INT
6382 && INTVAL (XEXP (x, 1)) == -16)
6383 x = XEXP (x, 0);
6385 if (RS6000_SYMBOL_REF_TLS_P (x))
6386 return 0;
6387 if (legitimate_indirect_address_p (x, reg_ok_strict))
6388 return 1;
6389 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6390 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6391 && !SPE_VECTOR_MODE (mode)
6392 && mode != TFmode
6393 && mode != TDmode
6394 /* Restrict addressing for DI because of our SUBREG hackery. */
6395 && !(TARGET_E500_DOUBLE
6396 && (mode == DFmode || mode == DDmode || mode == DImode))
6397 && TARGET_UPDATE
6398 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6399 return 1;
6400 if (virtual_stack_registers_memory_p (x))
6401 return 1;
6402 if (reg_offset_p && legitimate_small_data_p (mode, x))
6403 return 1;
6404 if (reg_offset_p
6405 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6406 return 1;
6407 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
6408 if (! reg_ok_strict
6409 && reg_offset_p
6410 && GET_CODE (x) == PLUS
6411 && GET_CODE (XEXP (x, 0)) == REG
6412 && (XEXP (x, 0) == virtual_stack_vars_rtx
6413 || XEXP (x, 0) == arg_pointer_rtx)
6414 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6415 return 1;
6416 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
6417 return 1;
6418 if (mode != TImode
6419 && mode != TFmode
6420 && mode != TDmode
6421 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6422 || TARGET_POWERPC64
6423 || (mode != DFmode && mode != DDmode)
6424 || (TARGET_E500_DOUBLE && mode != DDmode))
6425 && (TARGET_POWERPC64 || mode != DImode)
6426 && !avoiding_indexed_address_p (mode)
6427 && legitimate_indexed_address_p (x, reg_ok_strict))
6428 return 1;
6429 if (GET_CODE (x) == PRE_MODIFY
6430 && mode != TImode
6431 && mode != TFmode
6432 && mode != TDmode
6433 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6434 || TARGET_POWERPC64
6435 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6436 && (TARGET_POWERPC64 || mode != DImode)
6437 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6438 && !SPE_VECTOR_MODE (mode)
6439 /* Restrict addressing for DI because of our SUBREG hackery. */
6440 && !(TARGET_E500_DOUBLE
6441 && (mode == DFmode || mode == DDmode || mode == DImode))
6442 && TARGET_UPDATE
6443 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6444 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
6445 reg_ok_strict, false)
6446 || (!avoiding_indexed_address_p (mode)
6447 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6448 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6449 return 1;
6450 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6451 return 1;
6452 return 0;
6455 /* Debug version of rs6000_legitimate_address_p. */
6456 static bool
6457 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6458 bool reg_ok_strict)
6460 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6461 fprintf (stderr,
6462 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6463 "strict = %d, code = %s\n",
6464 ret ? "true" : "false",
6465 GET_MODE_NAME (mode),
6466 reg_ok_strict,
6467 GET_RTX_NAME (GET_CODE (x)));
6468 debug_rtx (x);
6470 return ret;
6473 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6475 static bool
6476 rs6000_mode_dependent_address_p (const_rtx addr,
6477 addr_space_t as ATTRIBUTE_UNUSED)
6479 return rs6000_mode_dependent_address_ptr (addr);
6482 /* Go to LABEL if ADDR (a legitimate address expression)
6483 has an effect that depends on the machine mode it is used for.
6485 On the RS/6000 this is true of all integral offsets (since AltiVec
6486 and VSX modes don't allow them) and of pre-increment and pre-decrement.
6488 ??? Except that due to conceptual problems in offsettable_address_p
6489 we can't really report the problems of integral offsets. So leave
6490 this assuming that the adjustable offset must be valid for the
6491 sub-words of a TFmode operand, which is what we had before. */
6493 static bool
6494 rs6000_mode_dependent_address (const_rtx addr)
6496 switch (GET_CODE (addr))
6498 case PLUS:
6499 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6500 is considered a legitimate address before reload, so there
6501 are no offset restrictions in that case. Note that this
6502 condition is safe in strict mode because any address involving
6503 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6504 been rejected as illegitimate. */
6505 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6506 && XEXP (addr, 0) != arg_pointer_rtx
6507 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6509 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
6510 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
6512 break;
6514 case LO_SUM:
6515 /* Anything in the constant pool is sufficiently aligned that
6516 all bytes have the same high part address. */
6517 return !legitimate_constant_pool_address_p (addr, QImode, false);
6519 /* Auto-increment cases are now treated generically in recog.c. */
6520 case PRE_MODIFY:
6521 return TARGET_UPDATE;
6523 /* AND is only allowed in Altivec loads. */
6524 case AND:
6525 return true;
6527 default:
6528 break;
6531 return false;
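/* Editorial sketch (illustration only, not part of GCC): the PLUS-case
   range test above.  An offset is considered mode dependent when adding
   the largest sub-word offset the compiler may generate (8 bytes in
   64-bit mode, 12 in 32-bit mode, reaching the last word of a 16-byte
   TFmode value) could push the displacement out of the signed 16-bit
   range.  */
static int
disp_mode_dependent_sketch (long long val, int powerpc64)
{
  unsigned long long biased = (unsigned long long) val + 0x8000;
  return biased >= 0x10000 - (powerpc64 ? 8 : 12);
}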
6534 /* Debug version of rs6000_mode_dependent_address. */
6535 static bool
6536 rs6000_debug_mode_dependent_address (const_rtx addr)
6538 bool ret = rs6000_mode_dependent_address (addr);
6540 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6541 ret ? "true" : "false");
6542 debug_rtx (addr);
6544 return ret;
6547 /* Implement FIND_BASE_TERM. */
6550 rs6000_find_base_term (rtx op)
6552 rtx base;
6554 base = op;
6555 if (GET_CODE (base) == CONST)
6556 base = XEXP (base, 0);
6557 if (GET_CODE (base) == PLUS)
6558 base = XEXP (base, 0);
6559 if (GET_CODE (base) == UNSPEC)
6560 switch (XINT (base, 1))
6562 case UNSPEC_TOCREL:
6563 case UNSPEC_MACHOPIC_OFFSET:
6564 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6565 for aliasing purposes. */
6566 return XVECEXP (base, 0, 0);
6569 return op;
6572 /* More elaborate version of recog's offsettable_memref_p predicate
6573 that works around the ??? note of rs6000_mode_dependent_address.
6574 In particular it accepts
6576 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6578 in 32-bit mode, which the recog predicate rejects. */
6580 static bool
6581 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
6583 bool worst_case;
6585 if (!MEM_P (op))
6586 return false;
6588 /* First mimic offsettable_memref_p. */
6589 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
6590 return true;
6592 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6593 the latter predicate knows nothing about the mode of the memory
6594 reference and, therefore, assumes that it is the largest supported
6595 mode (TFmode). As a consequence, legitimate offsettable memory
6596 references are rejected. rs6000_legitimate_offset_address_p contains
6597 the correct logic for the PLUS case of rs6000_mode_dependent_address,
6598 at least with a little bit of help here given that we know the
6599 actual registers used. */
6600 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
6601 || GET_MODE_SIZE (reg_mode) == 4);
6602 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
6603 true, worst_case);
6606 /* Change register usage conditional on target flags. */
6607 static void
6608 rs6000_conditional_register_usage (void)
6610 int i;
6612 if (TARGET_DEBUG_TARGET)
6613 fprintf (stderr, "rs6000_conditional_register_usage called\n");
6615 /* Set MQ register fixed (already call_used) so that it will not be
6616 allocated. */
6617 fixed_regs[64] = 1;
6619 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6620 if (TARGET_64BIT)
6621 fixed_regs[13] = call_used_regs[13]
6622 = call_really_used_regs[13] = 1;
6624 /* Conditionally disable FPRs. */
6625 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6626 for (i = 32; i < 64; i++)
6627 fixed_regs[i] = call_used_regs[i]
6628 = call_really_used_regs[i] = 1;
6630 /* The TOC register is not killed across calls in a way that is
6631 visible to the compiler. */
6632 if (DEFAULT_ABI == ABI_AIX)
6633 call_really_used_regs[2] = 0;
6635 if (DEFAULT_ABI == ABI_V4
6636 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6637 && flag_pic == 2)
6638 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6640 if (DEFAULT_ABI == ABI_V4
6641 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6642 && flag_pic == 1)
6643 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6644 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6645 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6647 if (DEFAULT_ABI == ABI_DARWIN
6648 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6649 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6650 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6651 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6653 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6654 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6655 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6657 if (TARGET_SPE)
6659 global_regs[SPEFSCR_REGNO] = 1;
6660 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6661 registers in prologues and epilogues. We no longer use r14
6662 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6663 pool for link-compatibility with older versions of GCC. Once
6664 "old" code has died out, we can return r14 to the allocation
6665 pool. */
6666 fixed_regs[14]
6667 = call_used_regs[14]
6668 = call_really_used_regs[14] = 1;
6671 if (!TARGET_ALTIVEC && !TARGET_VSX)
6673 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6674 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6675 call_really_used_regs[VRSAVE_REGNO] = 1;
6678 if (TARGET_ALTIVEC || TARGET_VSX)
6679 global_regs[VSCR_REGNO] = 1;
6681 if (TARGET_ALTIVEC_ABI)
6683 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6684 call_used_regs[i] = call_really_used_regs[i] = 1;
6686 /* AIX reserves VR20:31 in non-extended ABI mode. */
6687 if (TARGET_XCOFF)
6688 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6689 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6693 /* Try to output insns to set DEST equal to the constant SOURCE if it
6694 can be done in less than N insns. Do all computations in MODE.
6695 Returns the place where the output has been placed if it can be
6696 done and the insns have been emitted. If it would take more than N
6697 insns, zero is returned and no insns are emitted. */
6700 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6701 rtx source, int n ATTRIBUTE_UNUSED)
6703 rtx result, insn, set;
6704 HOST_WIDE_INT c0, c1;
6706 switch (mode)
6708 case QImode:
6709 case HImode:
6710 if (dest == NULL)
6711 dest = gen_reg_rtx (mode);
6712 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6713 return dest;
6715 case SImode:
6716 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
6718 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6719 GEN_INT (INTVAL (source)
6720 & (~ (HOST_WIDE_INT) 0xffff))));
6721 emit_insn (gen_rtx_SET (VOIDmode, dest,
6722 gen_rtx_IOR (SImode, copy_rtx (result),
6723 GEN_INT (INTVAL (source) & 0xffff))));
6724 result = dest;
6725 break;
6727 case DImode:
6728 switch (GET_CODE (source))
6730 case CONST_INT:
6731 c0 = INTVAL (source);
6732 c1 = -(c0 < 0);
6733 break;
6735 case CONST_DOUBLE:
6736 #if HOST_BITS_PER_WIDE_INT >= 64
6737 c0 = CONST_DOUBLE_LOW (source);
6738 c1 = -(c0 < 0);
6739 #else
6740 c0 = CONST_DOUBLE_LOW (source);
6741 c1 = CONST_DOUBLE_HIGH (source);
6742 #endif
6743 break;
6745 default:
6746 gcc_unreachable ();
6749 result = rs6000_emit_set_long_const (dest, c0, c1);
6750 break;
6752 default:
6753 gcc_unreachable ();
6756 insn = get_last_insn ();
6757 set = single_set (insn);
6758 if (! CONSTANT_P (SET_SRC (set)))
6759 set_unique_reg_note (insn, REG_EQUAL, source);
6761 return result;
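/* Editorial sketch (illustration only, not part of GCC): the SImode
   case above always synthesizes the constant in two insns, first
   setting the register to SOURCE with its low 16 bits cleared (a lis
   on PowerPC) and then IORing in the low 16 bits (an ori).  For
   SOURCE = 0x12345678 the two immediates are 0x12340000 and 0x5678.  */
static void
set_si_const_sketch (int source, int *high_part, int *low_part)
{
  *high_part = source & ~0xffff;	/* materialized by the first SET */
  *low_part = source & 0xffff;		/* IORed in by the second insn */
}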
6764 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
6765 fall back to a straightforward decomposition. We do this to avoid
6766 exponential run times encountered when looking for longer sequences
6767 with rs6000_emit_set_const. */
6768 static rtx
6769 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6771 if (!TARGET_POWERPC64)
6773 rtx operand1, operand2;
6775 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6776 DImode);
6777 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6778 DImode);
6779 emit_move_insn (operand1, GEN_INT (c1));
6780 emit_move_insn (operand2, GEN_INT (c2));
6782 else
6784 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6786 ud1 = c1 & 0xffff;
6787 ud2 = (c1 & 0xffff0000) >> 16;
6788 #if HOST_BITS_PER_WIDE_INT >= 64
6789 c2 = c1 >> 32;
6790 #endif
6791 ud3 = c2 & 0xffff;
6792 ud4 = (c2 & 0xffff0000) >> 16;
6794 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6795 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6797 if (ud1 & 0x8000)
6798 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6799 else
6800 emit_move_insn (dest, GEN_INT (ud1));
6803 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6804 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6806 if (ud2 & 0x8000)
6807 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6808 - 0x80000000));
6809 else
6810 emit_move_insn (dest, GEN_INT (ud2 << 16));
6811 if (ud1 != 0)
6812 emit_move_insn (copy_rtx (dest),
6813 gen_rtx_IOR (DImode, copy_rtx (dest),
6814 GEN_INT (ud1)));
6816 else if (ud3 == 0 && ud4 == 0)
6818 gcc_assert (ud2 & 0x8000);
6819 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6820 - 0x80000000));
6821 if (ud1 != 0)
6822 emit_move_insn (copy_rtx (dest),
6823 gen_rtx_IOR (DImode, copy_rtx (dest),
6824 GEN_INT (ud1)));
6825 emit_move_insn (copy_rtx (dest),
6826 gen_rtx_ZERO_EXTEND (DImode,
6827 gen_lowpart (SImode,
6828 copy_rtx (dest))));
6830 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6831 || (ud4 == 0 && ! (ud3 & 0x8000)))
6833 if (ud3 & 0x8000)
6834 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6835 - 0x80000000));
6836 else
6837 emit_move_insn (dest, GEN_INT (ud3 << 16));
6839 if (ud2 != 0)
6840 emit_move_insn (copy_rtx (dest),
6841 gen_rtx_IOR (DImode, copy_rtx (dest),
6842 GEN_INT (ud2)));
6843 emit_move_insn (copy_rtx (dest),
6844 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6845 GEN_INT (16)));
6846 if (ud1 != 0)
6847 emit_move_insn (copy_rtx (dest),
6848 gen_rtx_IOR (DImode, copy_rtx (dest),
6849 GEN_INT (ud1)));
6851 else
6853 if (ud4 & 0x8000)
6854 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6855 - 0x80000000));
6856 else
6857 emit_move_insn (dest, GEN_INT (ud4 << 16));
6859 if (ud3 != 0)
6860 emit_move_insn (copy_rtx (dest),
6861 gen_rtx_IOR (DImode, copy_rtx (dest),
6862 GEN_INT (ud3)));
6864 emit_move_insn (copy_rtx (dest),
6865 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6866 GEN_INT (32)));
6867 if (ud2 != 0)
6868 emit_move_insn (copy_rtx (dest),
6869 gen_rtx_IOR (DImode, copy_rtx (dest),
6870 GEN_INT (ud2 << 16)));
6871 if (ud1 != 0)
6872 emit_move_insn (copy_rtx (dest),
6873 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
6876 return dest;
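/* Editorial sketch (illustration only, not part of GCC): the 64-bit
   constant above is carved into four 16-bit chunks, ud1 (lowest)
   through ud4 (highest), and the branch taken depends on which chunks
   are zero or all-ones and on their sign bits.  For
   C = 0x0000123456780000 the chunks are ud4 = 0, ud3 = 0x1234,
   ud2 = 0x5678, ud1 = 0, selecting the lis/ori/shift branch guarded
   by (ud4 == 0 && !(ud3 & 0x8000)).  */
static void
split_ud_sketch (unsigned long long c, unsigned int ud[4])
{
  int i;
  for (i = 0; i < 4; i++)
    ud[i] = (c >> (16 * i)) & 0xffff;	/* ud[0] corresponds to ud1 */
}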
6879 /* Helper for the following. Get rid of [r+r] memory refs
6880 in cases where it won't work (TImode, TFmode, TDmode). */
6882 static void
6883 rs6000_eliminate_indexed_memrefs (rtx operands[2])
6885 if (reload_in_progress)
6886 return;
6888 if (GET_CODE (operands[0]) == MEM
6889 && GET_CODE (XEXP (operands[0], 0)) != REG
6890 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
6891 GET_MODE (operands[0]), false))
6892 operands[0]
6893 = replace_equiv_address (operands[0],
6894 copy_addr_to_reg (XEXP (operands[0], 0)));
6896 if (GET_CODE (operands[1]) == MEM
6897 && GET_CODE (XEXP (operands[1], 0)) != REG
6898 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
6899 GET_MODE (operands[1]), false))
6900 operands[1]
6901 = replace_equiv_address (operands[1],
6902 copy_addr_to_reg (XEXP (operands[1], 0)));
6905 /* Emit a move from SOURCE to DEST in mode MODE. */
6906 void
6907 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
6909 rtx operands[2];
6910 operands[0] = dest;
6911 operands[1] = source;
6913 if (TARGET_DEBUG_ADDR)
6915 fprintf (stderr,
6916 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
6917 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
6918 GET_MODE_NAME (mode),
6919 reload_in_progress,
6920 reload_completed,
6921 can_create_pseudo_p ());
6922 debug_rtx (dest);
6923 fprintf (stderr, "source:\n");
6924 debug_rtx (source);
6927 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
6928 if (GET_CODE (operands[1]) == CONST_DOUBLE
6929 && ! FLOAT_MODE_P (mode)
6930 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6932 /* FIXME. This should never happen. */
6933 /* Since it seems that it does, do the safe thing and convert
6934 to a CONST_INT. */
6935 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
6937 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
6938 || FLOAT_MODE_P (mode)
6939 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
6940 || CONST_DOUBLE_LOW (operands[1]) < 0)
6941 && (CONST_DOUBLE_HIGH (operands[1]) != -1
6942 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
6944 /* Check if GCC is setting up a block move that will end up using FP
6945 registers as temporaries. We must make sure this is acceptable. */
6946 if (GET_CODE (operands[0]) == MEM
6947 && GET_CODE (operands[1]) == MEM
6948 && mode == DImode
6949 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
6950 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
6951 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
6952 ? 32 : MEM_ALIGN (operands[0])))
6953 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
6954 ? 32
6955 : MEM_ALIGN (operands[1]))))
6956 && ! MEM_VOLATILE_P (operands [0])
6957 && ! MEM_VOLATILE_P (operands [1]))
6959 emit_move_insn (adjust_address (operands[0], SImode, 0),
6960 adjust_address (operands[1], SImode, 0));
6961 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
6962 adjust_address (copy_rtx (operands[1]), SImode, 4));
6963 return;
6966 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
6967 && !gpc_reg_operand (operands[1], mode))
6968 operands[1] = force_reg (mode, operands[1]);
6970 /* Recognize the case where operand[1] is a reference to thread-local
6971 data and load its address to a register. */
6972 if (rs6000_tls_referenced_p (operands[1]))
6974 enum tls_model model;
6975 rtx tmp = operands[1];
6976 rtx addend = NULL;
6978 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
6980 addend = XEXP (XEXP (tmp, 0), 1);
6981 tmp = XEXP (XEXP (tmp, 0), 0);
6984 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
6985 model = SYMBOL_REF_TLS_MODEL (tmp);
6986 gcc_assert (model != 0);
6988 tmp = rs6000_legitimize_tls_address (tmp, model);
6989 if (addend)
6991 tmp = gen_rtx_PLUS (mode, tmp, addend);
6992 tmp = force_operand (tmp, operands[0]);
6994 operands[1] = tmp;
6997 /* Handle the case where reload calls us with an invalid address. */
6998 if (reload_in_progress && mode == Pmode
6999 && (! general_operand (operands[1], mode)
7000 || ! nonimmediate_operand (operands[0], mode)))
7001 goto emit_set;
7003 /* 128-bit constant floating-point values on Darwin should really be
7004 loaded as two parts. */
7005 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
7006 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
7008 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
7009 simplify_gen_subreg (DFmode, operands[1], mode, 0),
7010 DFmode);
7011 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
7012 GET_MODE_SIZE (DFmode)),
7013 simplify_gen_subreg (DFmode, operands[1], mode,
7014 GET_MODE_SIZE (DFmode)),
7015 DFmode);
7016 return;
7019 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
7020 cfun->machine->sdmode_stack_slot =
7021 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
7023 if (reload_in_progress
7024 && mode == SDmode
7025 && MEM_P (operands[0])
7026 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
7027 && REG_P (operands[1]))
7029 if (FP_REGNO_P (REGNO (operands[1])))
7031 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
7032 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7033 emit_insn (gen_movsd_store (mem, operands[1]));
7035 else if (INT_REGNO_P (REGNO (operands[1])))
7037 rtx mem = adjust_address_nv (operands[0], mode, 4);
7038 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7039 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
7041 else
7042 gcc_unreachable();
7043 return;
7045 if (reload_in_progress
7046 && mode == SDmode
7047 && REG_P (operands[0])
7048 && MEM_P (operands[1])
7049 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
7051 if (FP_REGNO_P (REGNO (operands[0])))
7053 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
7054 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7055 emit_insn (gen_movsd_load (operands[0], mem));
7057 else if (INT_REGNO_P (REGNO (operands[0])))
7059 rtx mem = adjust_address_nv (operands[1], mode, 4);
7060 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
7061 emit_insn (gen_movsd_hardfloat (operands[0], mem));
7063 else
7064 gcc_unreachable();
7065 return;
7068 /* FIXME: In the long term, this switch statement should go away
7069 and be replaced by a sequence of tests based on things like
7070 mode == Pmode. */
7071 switch (mode)
7073 case HImode:
7074 case QImode:
7075 if (CONSTANT_P (operands[1])
7076 && GET_CODE (operands[1]) != CONST_INT)
7077 operands[1] = force_const_mem (mode, operands[1]);
7078 break;
7080 case TFmode:
7081 case TDmode:
7082 rs6000_eliminate_indexed_memrefs (operands);
7083 /* fall through */
7085 case DFmode:
7086 case DDmode:
7087 case SFmode:
7088 case SDmode:
7089 if (CONSTANT_P (operands[1])
7090 && ! easy_fp_constant (operands[1], mode))
7091 operands[1] = force_const_mem (mode, operands[1]);
7092 break;
7094 case V16QImode:
7095 case V8HImode:
7096 case V4SFmode:
7097 case V4SImode:
7098 case V4HImode:
7099 case V2SFmode:
7100 case V2SImode:
7101 case V1DImode:
7102 case V2DFmode:
7103 case V2DImode:
7104 if (CONSTANT_P (operands[1])
7105 && !easy_vector_constant (operands[1], mode))
7106 operands[1] = force_const_mem (mode, operands[1]);
7107 break;
7109 case SImode:
7110 case DImode:
7111 /* Use the default pattern for the address of ELF small data. */
7112 if (TARGET_ELF
7113 && mode == Pmode
7114 && DEFAULT_ABI == ABI_V4
7115 && (GET_CODE (operands[1]) == SYMBOL_REF
7116 || GET_CODE (operands[1]) == CONST)
7117 && small_data_operand (operands[1], mode))
7119 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7120 return;
7123 if (DEFAULT_ABI == ABI_V4
7124 && mode == Pmode && mode == SImode
7125 && flag_pic == 1 && got_operand (operands[1], mode))
7127 emit_insn (gen_movsi_got (operands[0], operands[1]));
7128 return;
7131 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
7132 && TARGET_NO_TOC
7133 && ! flag_pic
7134 && mode == Pmode
7135 && CONSTANT_P (operands[1])
7136 && GET_CODE (operands[1]) != HIGH
7137 && GET_CODE (operands[1]) != CONST_INT)
7139 rtx target = (!can_create_pseudo_p ()
7140 ? operands[0]
7141 : gen_reg_rtx (mode));
7143 /* If this is a function address on -mcall-aixdesc,
7144 convert it to the address of the descriptor. */
7145 if (DEFAULT_ABI == ABI_AIX
7146 && GET_CODE (operands[1]) == SYMBOL_REF
7147 && XSTR (operands[1], 0)[0] == '.')
7149 const char *name = XSTR (operands[1], 0);
7150 rtx new_ref;
7151 while (*name == '.')
7152 name++;
7153 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7154 CONSTANT_POOL_ADDRESS_P (new_ref)
7155 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7156 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7157 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7158 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7159 operands[1] = new_ref;
7162 if (DEFAULT_ABI == ABI_DARWIN)
7164 #if TARGET_MACHO
7165 if (MACHO_DYNAMIC_NO_PIC_P)
7167 /* Take care of any required data indirection. */
7168 operands[1] = rs6000_machopic_legitimize_pic_address (
7169 operands[1], mode, operands[0]);
7170 if (operands[0] != operands[1])
7171 emit_insn (gen_rtx_SET (VOIDmode,
7172 operands[0], operands[1]));
7173 return;
7175 #endif
7176 emit_insn (gen_macho_high (target, operands[1]));
7177 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7178 return;
7181 emit_insn (gen_elf_high (target, operands[1]));
7182 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7183 return;
7186 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7187 and we have put it in the TOC, we just need to make a TOC-relative
7188 reference to it. */
7189 if (TARGET_TOC
7190 && GET_CODE (operands[1]) == SYMBOL_REF
7191 && use_toc_relative_ref (operands[1]))
7192 operands[1] = create_TOC_reference (operands[1], operands[0]);
7193 else if (mode == Pmode
7194 && CONSTANT_P (operands[1])
7195 && GET_CODE (operands[1]) != HIGH
7196 && ((GET_CODE (operands[1]) != CONST_INT
7197 && ! easy_fp_constant (operands[1], mode))
7198 || (GET_CODE (operands[1]) == CONST_INT
7199 && (num_insns_constant (operands[1], mode)
7200 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7201 || (GET_CODE (operands[0]) == REG
7202 && FP_REGNO_P (REGNO (operands[0]))))
7203 && !toc_relative_expr_p (operands[1], false)
7204 && (TARGET_CMODEL == CMODEL_SMALL
7205 || can_create_pseudo_p ()
7206 || (REG_P (operands[0])
7207 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7210 #if TARGET_MACHO
7211 /* Darwin uses a special PIC legitimizer. */
7212 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7214 operands[1] =
7215 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7216 operands[0]);
7217 if (operands[0] != operands[1])
7218 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7219 return;
7221 #endif
7223 /* If we are to limit the number of things we put in the TOC and
7224 this is a symbol plus a constant we can add in one insn,
7225 just put the symbol in the TOC and add the constant. Don't do
7226 this if reload is in progress. */
7227 if (GET_CODE (operands[1]) == CONST
7228 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7229 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7230 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7231 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7232 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7233 && ! side_effects_p (operands[0]))
7235 rtx sym =
7236 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7237 rtx other = XEXP (XEXP (operands[1], 0), 1);
7239 sym = force_reg (mode, sym);
7240 emit_insn (gen_add3_insn (operands[0], sym, other));
7241 return;
7244 operands[1] = force_const_mem (mode, operands[1]);
7246 if (TARGET_TOC
7247 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7248 && constant_pool_expr_p (XEXP (operands[1], 0))
7249 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7250 get_pool_constant (XEXP (operands[1], 0)),
7251 get_pool_mode (XEXP (operands[1], 0))))
7253 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7254 operands[0]);
7255 operands[1] = gen_const_mem (mode, tocref);
7256 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7259 break;
7261 case TImode:
7262 rs6000_eliminate_indexed_memrefs (operands);
7263 break;
7265 default:
7266 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7269 /* Above, we may have called force_const_mem which may have returned
7270 an invalid address. If we can, fix this up; otherwise, reload will
7271 have to deal with it. */
7272 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7273 operands[1] = validize_mem (operands[1]);
7275 emit_set:
7276 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7279 /* Return true if a structure, union or array containing FIELD should be
7280 accessed using `BLKmode'.
7282 For the SPE, simd types are V2SI, and gcc can be tempted to put the
7283 entire thing in a DI and use subregs to access the internals.
7284 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
7285 back-end. Because a single GPR can hold a V2SI, but not a DI, the
7286 best thing to do is set structs to BLKmode and avoid Severe Tire
7287 Damage.
7289 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
7290 fit in one GPR, whereas DI still needs two. */
7292 static bool
7293 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
7295 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7296 || (TARGET_E500_DOUBLE && mode == DFmode));
7299 /* Nonzero if we can use a floating-point register to pass this arg. */
7300 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7301 (SCALAR_FLOAT_MODE_P (MODE) \
7302 && (CUM)->fregno <= FP_ARG_MAX_REG \
7303 && TARGET_HARD_FLOAT && TARGET_FPRS)
7305 /* Nonzero if we can use an AltiVec register to pass this arg. */
7306 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7307 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7308 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7309 && TARGET_ALTIVEC_ABI \
7310 && (NAMED))
7312 /* Return a nonzero value to say to return the function value in
7313 memory, just as large structures are always returned. TYPE will be
7314 the data type of the value, and FNTYPE will be the type of the
7315 function doing the returning, or NULL for libcalls.
7317 The AIX ABI for the RS/6000 specifies that all structures are
7318 returned in memory. The Darwin ABI does the same.
7320 For the Darwin 64 Bit ABI, a function result can be returned in
7321 registers or in memory, depending on the size of the return data
7322 type. If it is returned in registers, the value occupies the same
7323 registers as it would if it were the first and only function
7324 argument. Otherwise, the function places its result in memory at
7325 the location pointed to by GPR3.
7327 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7328 but a draft put them in memory, and GCC used to implement the draft
7329 instead of the final standard. Therefore, aix_struct_return
7330 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7331 compatibility can change DRAFT_V4_STRUCT_RET to override the
7332 default, and -m switches get the final word. See
7333 rs6000_option_override_internal for more details.
7335 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
7336 long double support is enabled. These values are returned in memory.
7338 int_size_in_bytes returns -1 for variable size objects, which go in
7339 memory always. The cast to unsigned makes -1 > 8. */
7341 static bool
7342 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7344 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7345 if (TARGET_MACHO
7346 && rs6000_darwin64_abi
7347 && TREE_CODE (type) == RECORD_TYPE
7348 && int_size_in_bytes (type) > 0)
7350 CUMULATIVE_ARGS valcum;
7351 rtx valret;
7353 valcum.words = 0;
7354 valcum.fregno = FP_ARG_MIN_REG;
7355 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7356 /* Do a trial code generation as if this were going to be passed
7357 as an argument; if any part goes in memory, we return NULL. */
7358 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7359 if (valret)
7360 return false;
7361 /* Otherwise fall through to more conventional ABI rules. */
7364 if (AGGREGATE_TYPE_P (type)
7365 && (aix_struct_return
7366 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7367 return true;
7369 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7370 modes only exist for GCC vector types if -maltivec. */
7371 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7372 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7373 return false;
7375 /* Return synthetic vectors in memory. */
7376 if (TREE_CODE (type) == VECTOR_TYPE
7377 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7379 static bool warned_for_return_big_vectors = false;
7380 if (!warned_for_return_big_vectors)
7382 warning (0, "GCC vector returned by reference: "
7383 "non-standard ABI extension with no compatibility guarantee");
7384 warned_for_return_big_vectors = true;
7386 return true;
7389 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7390 return true;
7392 return false;
7395 #ifdef HAVE_AS_GNU_ATTRIBUTE
7396 /* Return TRUE if a call to function FNDECL may be one that
7397 potentially affects the function calling ABI of the object file. */
7399 static bool
7400 call_ABI_of_interest (tree fndecl)
7402 if (cgraph_state == CGRAPH_STATE_EXPANSION)
7404 struct cgraph_node *c_node;
7406 /* Libcalls are always interesting. */
7407 if (fndecl == NULL_TREE)
7408 return true;
7410 /* Any call to an external function is interesting. */
7411 if (DECL_EXTERNAL (fndecl))
7412 return true;
7414 /* Interesting functions that we are emitting in this object file. */
7415 c_node = cgraph_get_node (fndecl);
7416 c_node = cgraph_function_or_thunk_node (c_node, NULL);
7417 return !cgraph_only_called_directly_p (c_node);
7419 return false;
7421 #endif
7423 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7424 for a call to a function whose data type is FNTYPE.
7425 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
7427 For incoming args we set the number of arguments in the prototype large
7428 so we never return a PARALLEL. */
7430 void
7431 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7432 rtx libname ATTRIBUTE_UNUSED, int incoming,
7433 int libcall, int n_named_args,
7434 tree fndecl ATTRIBUTE_UNUSED,
7435 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7437 static CUMULATIVE_ARGS zero_cumulative;
7439 *cum = zero_cumulative;
7440 cum->words = 0;
7441 cum->fregno = FP_ARG_MIN_REG;
7442 cum->vregno = ALTIVEC_ARG_MIN_REG;
7443 cum->prototype = (fntype && prototype_p (fntype));
7444 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7445 ? CALL_LIBCALL : CALL_NORMAL);
7446 cum->sysv_gregno = GP_ARG_MIN_REG;
7447 cum->stdarg = stdarg_p (fntype);
7449 cum->nargs_prototype = 0;
7450 if (incoming || cum->prototype)
7451 cum->nargs_prototype = n_named_args;
7453 /* Check for a longcall attribute. */
7454 if ((!fntype && rs6000_default_long_calls)
7455 || (fntype
7456 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7457 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7458 cum->call_cookie |= CALL_LONG;
7460 if (TARGET_DEBUG_ARG)
7462 fprintf (stderr, "\ninit_cumulative_args:");
7463 if (fntype)
7465 tree ret_type = TREE_TYPE (fntype);
7466 fprintf (stderr, " ret code = %s,",
7467 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7470 if (cum->call_cookie & CALL_LONG)
7471 fprintf (stderr, " longcall,");
7473 fprintf (stderr, " proto = %d, nargs = %d\n",
7474 cum->prototype, cum->nargs_prototype);
7477 #ifdef HAVE_AS_GNU_ATTRIBUTE
7478 if (DEFAULT_ABI == ABI_V4)
7480 cum->escapes = call_ABI_of_interest (fndecl);
7481 if (cum->escapes)
7483 tree return_type;
7485 if (fntype)
7487 return_type = TREE_TYPE (fntype);
7488 return_mode = TYPE_MODE (return_type);
7490 else
7491 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7493 if (return_type != NULL)
7495 if (TREE_CODE (return_type) == RECORD_TYPE
7496 && TYPE_TRANSPARENT_AGGR (return_type))
7498 return_type = TREE_TYPE (first_field (return_type));
7499 return_mode = TYPE_MODE (return_type);
7501 if (AGGREGATE_TYPE_P (return_type)
7502 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7503 <= 8))
7504 rs6000_returns_struct = true;
7506 if (SCALAR_FLOAT_MODE_P (return_mode))
7507 rs6000_passes_float = true;
7508 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7509 || SPE_VECTOR_MODE (return_mode))
7510 rs6000_passes_vector = true;
7513 #endif
7515 if (fntype
7516 && !TARGET_ALTIVEC
7517 && TARGET_ALTIVEC_ABI
7518 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7520 error ("cannot return value in vector register because"
7521 " altivec instructions are disabled, use -maltivec"
7522 " to enable them");
7526 /* Return true if TYPE must be passed on the stack and not in registers. */
7528 static bool
7529 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
7531 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
7532 return must_pass_in_stack_var_size (mode, type);
7533 else
7534 return must_pass_in_stack_var_size_or_pad (mode, type);
7537 /* If defined, a C expression which determines whether, and in which
7538 direction, to pad out an argument with extra space. The value
7539 should be of type `enum direction': either `upward' to pad above
7540 the argument, `downward' to pad below, or `none' to inhibit
7541 padding.
7543 For the AIX ABI structs are always stored left shifted in their
7544 argument slot. */
7546 enum direction
7547 function_arg_padding (enum machine_mode mode, const_tree type)
7549 #ifndef AGGREGATE_PADDING_FIXED
7550 #define AGGREGATE_PADDING_FIXED 0
7551 #endif
7552 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
7553 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
7554 #endif
7556 if (!AGGREGATE_PADDING_FIXED)
7558 /* GCC used to pass structures of the same size as integer types as
7559 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
7560 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
7561 passed padded downward, except that -mstrict-align further
7562 muddied the water in that multi-component structures of 2 and 4
7563 bytes in size were passed padded upward.
7565 The following arranges for best compatibility with previous
7566 versions of gcc, but removes the -mstrict-align dependency. */
7567 if (BYTES_BIG_ENDIAN)
7569 HOST_WIDE_INT size = 0;
7571 if (mode == BLKmode)
7573 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
7574 size = int_size_in_bytes (type);
7576 else
7577 size = GET_MODE_SIZE (mode);
7579 if (size == 1 || size == 2 || size == 4)
7580 return downward;
7582 return upward;
7585 if (AGGREGATES_PAD_UPWARD_ALWAYS)
7587 if (type != 0 && AGGREGATE_TYPE_P (type))
7588 return upward;
7591 /* Fall back to the default. */
7592 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
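/* Worked example of the rules above, on a big-endian target with
   !AGGREGATE_PADDING_FIXED: a struct of size 1, 2 or 4 is padded
   downward, exactly as an integer of that size used to be, while a
   3-byte or 8-byte struct is padded upward.  */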
7595 /* If defined, a C expression that gives the alignment boundary, in bits,
7596 of an argument with the specified mode and type. If it is not defined,
7597 PARM_BOUNDARY is used for all arguments.
7599 V.4 wants long longs and doubles to be double word aligned. Just
7600 testing the mode size is a boneheaded way to do this as it means
7601 that other types such as complex int are also double word aligned.
7602 However, we're stuck with this because changing the ABI might break
7603 existing library interfaces.
7605 Doubleword align SPE vectors.
7606 Quadword align Altivec/VSX vectors.
7607 Quadword align large synthetic vector types. */
7609 static unsigned int
7610 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7612 if (DEFAULT_ABI == ABI_V4
7613 && (GET_MODE_SIZE (mode) == 8
7614 || (TARGET_HARD_FLOAT
7615 && TARGET_FPRS
7616 && (mode == TFmode || mode == TDmode))))
7617 return 64;
7618 else if (SPE_VECTOR_MODE (mode)
7619 || (type && TREE_CODE (type) == VECTOR_TYPE
7620 && int_size_in_bytes (type) >= 8
7621 && int_size_in_bytes (type) < 16))
7622 return 64;
7623 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7624 || (type && TREE_CODE (type) == VECTOR_TYPE
7625 && int_size_in_bytes (type) >= 16))
7626 return 128;
7627 else if (TARGET_MACHO
7628 && rs6000_darwin64_abi
7629 && mode == BLKmode
7630 && type && TYPE_ALIGN (type) > 64)
7631 return 128;
7632 else
7633 return PARM_BOUNDARY;
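/* A few concrete cases of the above: DFmode under the V.4 ABI and a
   V2SImode SPE vector both get 64; a V4SFmode AltiVec/VSX vector or a
   16-byte synthetic vector type gets 128; everything else falls back
   to PARM_BOUNDARY.  */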
7636 /* For a function parm of MODE and TYPE, return the starting word in
7637 the parameter area. NWORDS of the parameter area are already used. */
7639 static unsigned int
7640 rs6000_parm_start (enum machine_mode mode, const_tree type,
7641 unsigned int nwords)
7643 unsigned int align;
7644 unsigned int parm_offset;
7646 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
7647 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
7648 return nwords + (-(parm_offset + nwords) & align);
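/* For example, under the 32-bit AIX ABI (parm_offset == 6,
   PARM_BOUNDARY == 32), a 128-bit-aligned vector arriving with one
   word already used gives align == 128/32 - 1 == 3 and returns
   1 + (-(6 + 1) & 3) == 2, the "2 mod 4" slot described in the
   vector-alignment comments below.  */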
7651 /* Compute the size (in words) of a function argument. */
7653 static unsigned long
7654 rs6000_arg_size (enum machine_mode mode, const_tree type)
7656 unsigned long size;
7658 if (mode != BLKmode)
7659 size = GET_MODE_SIZE (mode);
7660 else
7661 size = int_size_in_bytes (type);
7663 if (TARGET_32BIT)
7664 return (size + 3) >> 2;
7665 else
7666 return (size + 7) >> 3;
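/* For example, a 10-byte BLKmode struct occupies (10 + 3) >> 2 == 3
   words under TARGET_32BIT and (10 + 7) >> 3 == 2 doublewords
   otherwise.  */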
7669 /* Use this to flush pending int fields. */
7671 static void
7672 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7673 HOST_WIDE_INT bitpos, int final)
7675 unsigned int startbit, endbit;
7676 int intregs, intoffset;
7677 enum machine_mode mode;
7679 /* Handle the situations where a float is taking up the first half
7680 of the GPR, and the other half is empty (typically due to
7681 alignment restrictions). We can detect this by an 8-byte-aligned
7682 int field, or by seeing that this is the final flush for this
7683 argument. Count the word and continue on. */
7684 if (cum->floats_in_gpr == 1
7685 && (cum->intoffset % 64 == 0
7686 || (cum->intoffset == -1 && final)))
7688 cum->words++;
7689 cum->floats_in_gpr = 0;
7692 if (cum->intoffset == -1)
7693 return;
7695 intoffset = cum->intoffset;
7696 cum->intoffset = -1;
7697 cum->floats_in_gpr = 0;
7699 if (intoffset % BITS_PER_WORD != 0)
7701 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7702 MODE_INT, 0);
7703 if (mode == BLKmode)
7705 /* We couldn't find an appropriate mode, which happens,
7706 e.g., in packed structs when there are 3 bytes to load.
7707 Move intoffset back to the beginning of the word in this
7708 case. */
7709 intoffset = intoffset & -BITS_PER_WORD;
7713 startbit = intoffset & -BITS_PER_WORD;
7714 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7715 intregs = (endbit - startbit) / BITS_PER_WORD;
7716 cum->words += intregs;
7717 /* words should be unsigned. */
7718 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
7720 int pad = (endbit/BITS_PER_WORD) - cum->words;
7721 cum->words += pad;
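/* For example, flushing int fields that span bits 0..95 of a record
   (with 64-bit words, as on darwin64) gives startbit == 0 and
   endbit == (96 + 63) & -64 == 128, so intregs == 2 and two GPR
   words are counted.  */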
7725 /* The darwin64 ABI calls for us to recurse down through structs,
7726 looking for elements passed in registers. Unfortunately, we have
7727 to track int register count here also because of misalignments
7728 in powerpc alignment mode. */
7730 static void
7731 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7732 const_tree type,
7733 HOST_WIDE_INT startbitpos)
7735 tree f;
7737 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
7738 if (TREE_CODE (f) == FIELD_DECL)
7740 HOST_WIDE_INT bitpos = startbitpos;
7741 tree ftype = TREE_TYPE (f);
7742 enum machine_mode mode;
7743 if (ftype == error_mark_node)
7744 continue;
7745 mode = TYPE_MODE (ftype);
7747 if (DECL_SIZE (f) != 0
7748 && host_integerp (bit_position (f), 1))
7749 bitpos += int_bit_position (f);
7751 /* ??? FIXME: else assume zero offset. */
7753 if (TREE_CODE (ftype) == RECORD_TYPE)
7754 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7755 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7757 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
7758 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7759 cum->fregno += n_fpregs;
7760 /* Single-precision floats present a special problem for
7761 us, because they are smaller than an 8-byte GPR, and so
7762 the structure-packing rules combined with the standard
7763 varargs behavior mean that we want to pack float/float
7764 and float/int combinations into a single register's
7765 space. This is complicated by the arg advance flushing,
7766 which works on arbitrarily large groups of int-type
7767 fields. */
7768 if (mode == SFmode)
7770 if (cum->floats_in_gpr == 1)
7772 /* Two floats in a word; count the word and reset
7773 the float count. */
7774 cum->words++;
7775 cum->floats_in_gpr = 0;
7777 else if (bitpos % 64 == 0)
7779 /* A float at the beginning of an 8-byte word;
7780 count it and put off adjusting cum->words until
7781 we see if an arg advance flush is going to do it
7782 for us. */
7783 cum->floats_in_gpr++;
7785 else
7787 /* The float is at the end of a word, preceded
7788 by integer fields, so the arg advance flush
7789 just above has already set cum->words and
7790 everything is taken care of. */
7793 else
7794 cum->words += n_fpregs;
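/* So, for example, advancing over struct { float x, y; } hits the
   SFmode case twice: the float at bit 0 only bumps floats_in_gpr to
   1, and the second float counts the now-full word and resets the
   counter, so the pair costs a single GPR word (the FPR count
   advances separately).  */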
7796 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7798 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7799 cum->vregno++;
7800 cum->words += 2;
7802 else if (cum->intoffset == -1)
7803 cum->intoffset = bitpos;
7807 /* Check for an item that needs to be considered specially under the Darwin
7808 64-bit ABI. These are record types whose mode is BLK or whose size is
7809 exactly 8 bytes. */
7810 static int
7811 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
7813 return rs6000_darwin64_abi
7814 && ((mode == BLKmode
7815 && TREE_CODE (type) == RECORD_TYPE
7816 && int_size_in_bytes (type) > 0)
7817 || (type && TREE_CODE (type) == RECORD_TYPE
7818 && int_size_in_bytes (type) == 8)) ? 1 : 0;
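/* For example, struct { char c[24]; } is BLKmode and so qualifies,
   while struct { int a, b; } qualifies through the second clause
   because it is exactly 8 bytes, whatever mode the middle end chose
   for it.  */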
7821 /* Update the data in CUM to advance over an argument
7822 of mode MODE and data type TYPE.
7823 (TYPE is null for libcalls where that information may not be available.)
7825 Note that for args passed by reference, function_arg will be called
7826 with MODE and TYPE set to that of the pointer to the arg, not the arg
7827 itself. */
7829 static void
7830 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7831 const_tree type, bool named, int depth)
7833 /* Only tick off an argument if we're not recursing. */
7834 if (depth == 0)
7835 cum->nargs_prototype--;
7837 #ifdef HAVE_AS_GNU_ATTRIBUTE
7838 if (DEFAULT_ABI == ABI_V4
7839 && cum->escapes)
7841 if (SCALAR_FLOAT_MODE_P (mode))
7842 rs6000_passes_float = true;
7843 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
7844 rs6000_passes_vector = true;
7845 else if (SPE_VECTOR_MODE (mode)
7846 && !cum->stdarg
7847 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7848 rs6000_passes_vector = true;
7850 #endif
7852 if (TARGET_ALTIVEC_ABI
7853 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7854 || (type && TREE_CODE (type) == VECTOR_TYPE
7855 && int_size_in_bytes (type) == 16)))
7857 bool stack = false;
7859 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7861 cum->vregno++;
7862 if (!TARGET_ALTIVEC)
7863 error ("cannot pass argument in vector register because"
7864 " altivec instructions are disabled, use -maltivec"
7865 " to enable them");
7867 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7868 even if it is going to be passed in a vector register.
7869 Darwin does the same for variable-argument functions. */
7870 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
7871 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
7872 stack = true;
7874 else
7875 stack = true;
7877 if (stack)
7879 int align;
7881 /* Vector parameters must be 16-byte aligned. This places
7882 them at 2 mod 4 in terms of words in 32-bit mode, since
7883 the parameter save area starts at offset 24 from the
7884 stack. In 64-bit mode, they just have to start on an
7885 even word, since the parameter save area is 16-byte
7886 aligned. Space for GPRs is reserved even if the argument
7887 will be passed in memory. */
7888 if (TARGET_32BIT)
7889 align = (2 - cum->words) & 3;
7890 else
7891 align = cum->words & 1;
7892 cum->words += align + rs6000_arg_size (mode, type);
7894 if (TARGET_DEBUG_ARG)
7896 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
7897 cum->words, align);
7898 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
7899 cum->nargs_prototype, cum->prototype,
7900 GET_MODE_NAME (mode));
7904 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
7905 && !cum->stdarg
7906 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7907 cum->sysv_gregno++;
7909 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
7911 int size = int_size_in_bytes (type);
7912 /* Variable sized types have size == -1 and are
7913 treated as if consisting entirely of ints.
7914 Pad to 16 byte boundary if needed. */
7915 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7916 && (cum->words % 2) != 0)
7917 cum->words++;
7918 /* For varargs, we can just go up by the size of the struct. */
7919 if (!named)
7920 cum->words += (size + 7) / 8;
7921 else
7923 /* It is tempting to say int register count just goes up by
7924 sizeof(type)/8, but this is wrong in a case such as
7925 { int; double; int; } [powerpc alignment]. We have to
7926 grovel through the fields for these too. */
7927 cum->intoffset = 0;
7928 cum->floats_in_gpr = 0;
7929 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
7930 rs6000_darwin64_record_arg_advance_flush (cum,
7931 size * BITS_PER_UNIT, 1);
7933 if (TARGET_DEBUG_ARG)
7935 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
7936 cum->words, TYPE_ALIGN (type), size);
7937 fprintf (stderr,
7938 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
7939 cum->nargs_prototype, cum->prototype,
7940 GET_MODE_NAME (mode));
7943 else if (DEFAULT_ABI == ABI_V4)
7945 if (TARGET_HARD_FLOAT && TARGET_FPRS
7946 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7947 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7948 || (mode == TFmode && !TARGET_IEEEQUAD)
7949 || mode == SDmode || mode == DDmode || mode == TDmode))
7951 /* _Decimal128 must use an even/odd register pair. This assumes
7952 that the register number is odd when fregno is odd. */
7953 if (mode == TDmode && (cum->fregno % 2) == 1)
7954 cum->fregno++;
7956 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7957 <= FP_ARG_V4_MAX_REG)
7958 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7959 else
7961 cum->fregno = FP_ARG_V4_MAX_REG + 1;
7962 if (mode == DFmode || mode == TFmode
7963 || mode == DDmode || mode == TDmode)
7964 cum->words += cum->words & 1;
7965 cum->words += rs6000_arg_size (mode, type);
7968 else
7970 int n_words = rs6000_arg_size (mode, type);
7971 int gregno = cum->sysv_gregno;
7973 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
7974 (r7,r8) or (r9,r10). As is any other 2-word item such
7975 as complex int, due to a historical mistake. */
7976 if (n_words == 2)
7977 gregno += (1 - gregno) & 1;
7979 /* Multi-reg args are not split between registers and stack. */
7980 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7982 /* Long long and SPE vectors are aligned on the stack.
7983 So are other 2 word items such as complex int due to
7984 a historical mistake. */
7985 if (n_words == 2)
7986 cum->words += cum->words & 1;
7987 cum->words += n_words;
7990 /* Note: we continue to accumulate gregno even after we have started
7991 spilling to the stack, so that expand_builtin_saveregs can tell
7992 that spilling has begun. */
7993 cum->sysv_gregno = gregno + n_words;
7996 if (TARGET_DEBUG_ARG)
7998 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7999 cum->words, cum->fregno);
8000 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
8001 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
8002 fprintf (stderr, "mode = %4s, named = %d\n",
8003 GET_MODE_NAME (mode), named);
8006 else
8008 int n_words = rs6000_arg_size (mode, type);
8009 int start_words = cum->words;
8010 int align_words = rs6000_parm_start (mode, type, start_words);
8012 cum->words = align_words + n_words;
8014 if (SCALAR_FLOAT_MODE_P (mode)
8015 && TARGET_HARD_FLOAT && TARGET_FPRS)
8017 /* _Decimal128 must be passed in an even/odd float register pair.
8018 This assumes that the register number is odd when fregno is
8019 odd. */
8020 if (mode == TDmode && (cum->fregno % 2) == 1)
8021 cum->fregno++;
8022 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
8025 if (TARGET_DEBUG_ARG)
8027 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
8028 cum->words, cum->fregno);
8029 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
8030 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
8031 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
8032 named, align_words - start_words, depth);
8037 static void
8038 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
8039 const_tree type, bool named)
8041 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
8045 static rtx
8046 spe_build_register_parallel (enum machine_mode mode, int gregno)
8048 rtx r1, r3, r5, r7;
8050 switch (mode)
8052 case DFmode:
8053 r1 = gen_rtx_REG (DImode, gregno);
8054 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8055 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
8057 case DCmode:
8058 case TFmode:
8059 r1 = gen_rtx_REG (DImode, gregno);
8060 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8061 r3 = gen_rtx_REG (DImode, gregno + 2);
8062 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8063 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
8065 case TCmode:
8066 r1 = gen_rtx_REG (DImode, gregno);
8067 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
8068 r3 = gen_rtx_REG (DImode, gregno + 2);
8069 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
8070 r5 = gen_rtx_REG (DImode, gregno + 4);
8071 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
8072 r7 = gen_rtx_REG (DImode, gregno + 6);
8073 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
8074 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
8076 default:
8077 gcc_unreachable ();
8081 /* Determine where to put a SIMD argument on the SPE. */
8082 static rtx
8083 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8084 const_tree type)
8086 int gregno = cum->sysv_gregno;
8088 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
8089 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
8090 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
8091 || mode == DCmode || mode == TCmode))
8093 int n_words = rs6000_arg_size (mode, type);
8095 /* Doubles go in an odd/even register pair (r5/r6, etc). */
8096 if (mode == DFmode)
8097 gregno += (1 - gregno) & 1;
8099 /* Multi-reg args are not split between registers and stack. */
8100 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8101 return NULL_RTX;
8103 return spe_build_register_parallel (mode, gregno);
8105 if (cum->stdarg)
8107 int n_words = rs6000_arg_size (mode, type);
8109 /* SPE vectors are put in odd registers. */
8110 if (n_words == 2 && (gregno & 1) == 0)
8111 gregno += 1;
8113 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
8115 rtx r1, r2;
8116 enum machine_mode m = SImode;
8118 r1 = gen_rtx_REG (m, gregno);
8119 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
8120 r2 = gen_rtx_REG (m, gregno + 1);
8121 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
8122 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
8124 else
8125 return NULL_RTX;
8127 else
8129 if (gregno <= GP_ARG_MAX_REG)
8130 return gen_rtx_REG (mode, gregno);
8131 else
8132 return NULL_RTX;
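/* For example, a V2SImode vararg arriving with sysv_gregno == 6 is
   bumped to the odd register r7 and comes back as a PARALLEL of
   (r7 at byte 0, r8 at byte 4); once it can no longer fit below r10
   the result is NULL_RTX and the vector goes to memory.  */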
8136 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
8137 structure between cum->intoffset and bitpos to integer registers. */
8139 static void
8140 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
8141 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
8143 enum machine_mode mode;
8144 unsigned int regno;
8145 unsigned int startbit, endbit;
8146 int this_regno, intregs, intoffset;
8147 rtx reg;
8149 if (cum->intoffset == -1)
8150 return;
8152 intoffset = cum->intoffset;
8153 cum->intoffset = -1;
8155 /* If this is the trailing part of a word, try to only load that
8156 much into the register. Otherwise load the whole register. Note
8157 that in the latter case we may pick up unwanted bits. It's not a
8158 problem at the moment, but we may wish to revisit this. */
8160 if (intoffset % BITS_PER_WORD != 0)
8162 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8163 MODE_INT, 0);
8164 if (mode == BLKmode)
8166 /* We couldn't find an appropriate mode, which happens,
8167 e.g., in packed structs when there are 3 bytes to load.
8168 Move intoffset back to the beginning of the word in this
8169 case. */
8170 intoffset = intoffset & -BITS_PER_WORD;
8171 mode = word_mode;
8174 else
8175 mode = word_mode;
8177 startbit = intoffset & -BITS_PER_WORD;
8178 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8179 intregs = (endbit - startbit) / BITS_PER_WORD;
8180 this_regno = cum->words + intoffset / BITS_PER_WORD;
8182 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8183 cum->use_stack = 1;
8185 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8186 if (intregs <= 0)
8187 return;
8189 intoffset /= BITS_PER_UNIT;
8190 do
8191 {
8192 regno = GP_ARG_MIN_REG + this_regno;
8193 reg = gen_rtx_REG (mode, regno);
8194 rvec[(*k)++] =
8195 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8197 this_regno += 1;
8198 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8199 mode = word_mode;
8200 intregs -= 1;
8202 while (intregs > 0);
8205 /* Recursive workhorse for the following. */
8207 static void
8208 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8209 HOST_WIDE_INT startbitpos, rtx rvec[],
8210 int *k)
8212 tree f;
8214 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8215 if (TREE_CODE (f) == FIELD_DECL)
8217 HOST_WIDE_INT bitpos = startbitpos;
8218 tree ftype = TREE_TYPE (f);
8219 enum machine_mode mode;
8220 if (ftype == error_mark_node)
8221 continue;
8222 mode = TYPE_MODE (ftype);
8224 if (DECL_SIZE (f) != 0
8225 && host_integerp (bit_position (f), 1))
8226 bitpos += int_bit_position (f);
8228 /* ??? FIXME: else assume zero offset. */
8230 if (TREE_CODE (ftype) == RECORD_TYPE)
8231 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8232 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8234 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8235 #if 0
8236 switch (mode)
8238 case SCmode: mode = SFmode; break;
8239 case DCmode: mode = DFmode; break;
8240 case TCmode: mode = TFmode; break;
8241 default: break;
8243 #endif
8244 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8245 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8247 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8248 && (mode == TFmode || mode == TDmode));
8249 /* Long double or _Decimal128 split over regs and memory. */
8250 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8251 cum->use_stack = 1;
8253 rvec[(*k)++]
8254 = gen_rtx_EXPR_LIST (VOIDmode,
8255 gen_rtx_REG (mode, cum->fregno++),
8256 GEN_INT (bitpos / BITS_PER_UNIT));
8257 if (mode == TFmode || mode == TDmode)
8258 cum->fregno++;
8260 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8262 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8263 rvec[(*k)++]
8264 = gen_rtx_EXPR_LIST (VOIDmode,
8265 gen_rtx_REG (mode, cum->vregno++),
8266 GEN_INT (bitpos / BITS_PER_UNIT));
8268 else if (cum->intoffset == -1)
8269 cum->intoffset = bitpos;
8273 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8274 the register(s) to be used for each field and subfield of a struct
8275 being passed by value, along with the offset of where the
8276 register's value may be found in the block. FP fields go in FP
8277 register, vector fields go in vector registers, and everything
8278 else goes in int registers, packed as in memory.
8280 This code is also used for function return values. RETVAL indicates
8281 whether this is the case.
8283 Much of this is taken from the SPARC V9 port, which has a similar
8284 calling convention. */
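/* A sketch of the result: for struct { double d; int i; } this
   builds a PARALLEL whose first register entry places d in an FPR at
   byte offset 0 and whose second loads the word containing i into a
   GPR, all offsets measured from the start of the value.  */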
8286 static rtx
8287 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8288 bool named, bool retval)
8290 rtx rvec[FIRST_PSEUDO_REGISTER];
8291 int k = 1, kbase = 1;
8292 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8293 /* This is a copy; modifications are not visible to our caller. */
8294 CUMULATIVE_ARGS copy_cum = *orig_cum;
8295 CUMULATIVE_ARGS *cum = &copy_cum;
8297 /* Pad to 16 byte boundary if needed. */
8298 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8299 && (cum->words % 2) != 0)
8300 cum->words++;
8302 cum->intoffset = 0;
8303 cum->use_stack = 0;
8304 cum->named = named;
8306 /* Put entries into rvec[] for individual FP and vector fields, and
8307 for the chunks of memory that go in int regs. Note we start at
8308 element 1; 0 is reserved for an indication of using memory, and
8309 may or may not be filled in below. */
8310 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8311 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8313 /* If any part of the struct went on the stack put all of it there.
8314 This hack is because the generic code for
8315 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8316 parts of the struct are not at the beginning. */
8317 if (cum->use_stack)
8319 if (retval)
8320 return NULL_RTX; /* doesn't go in registers at all */
8321 kbase = 0;
8322 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8324 if (k > 1 || cum->use_stack)
8325 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8326 else
8327 return NULL_RTX;
8330 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8332 static rtx
8333 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8334 int align_words)
8336 int n_units;
8337 int i, k;
8338 rtx rvec[GP_ARG_NUM_REG + 1];
8340 if (align_words >= GP_ARG_NUM_REG)
8341 return NULL_RTX;
8343 n_units = rs6000_arg_size (mode, type);
8345 /* Optimize the simple case where the arg fits in one gpr, except in
8346 the case of BLKmode due to assign_parms assuming that registers are
8347 BITS_PER_WORD wide. */
8348 if (n_units == 0
8349 || (n_units == 1 && mode != BLKmode))
8350 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8352 k = 0;
8353 if (align_words + n_units > GP_ARG_NUM_REG)
8354 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8355 using a magic NULL_RTX component.
8356 This is not strictly correct. Only some of the arg belongs in
8357 memory, not all of it. However, the normal scheme using
8358 function_arg_partial_nregs can result in unusual subregs, eg.
8359 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8360 store the whole arg to memory is often more efficient than code
8361 to store pieces, and we know that space is available in the right
8362 place for the whole arg. */
8363 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8365 i = 0;
8366 do
8367 {
8368 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
8369 rtx off = GEN_INT (i++ * 4);
8370 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8372 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8374 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
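/* For example, a DFmode arg with align_words == 7 produces the magic
   NULL_RTX memory element plus (r10 as SImode at offset 0): the
   first word of the double travels in the last GPR and the second
   word only in memory.  */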
8377 /* Determine where to put an argument to a function.
8378 Value is zero to push the argument on the stack,
8379 or a hard register in which to store the argument.
8381 MODE is the argument's machine mode.
8382 TYPE is the data type of the argument (as a tree).
8383 This is null for libcalls where that information may
8384 not be available.
8385 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8386 the preceding args and about the function being called. It is
8387 not modified in this routine.
8388 NAMED is nonzero if this argument is a named parameter
8389 (otherwise it is an extra parameter matching an ellipsis).
8391 On RS/6000 the first eight words of non-FP are normally in registers
8392 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8393 Under V.4, the first 8 FP args are in registers.
8395 If this is floating-point and no prototype is specified, we use
8396 both an FP and integer register (or possibly FP reg and stack). Library
8397 functions (when CALL_LIBCALL is set) always have the proper types for args,
8398 so we can pass the FP value just in one register. emit_library_function
8399 doesn't support PARALLEL anyway.
8401 Note that for args passed by reference, function_arg will be called
8402 with MODE and TYPE set to that of the pointer to the arg, not the arg
8403 itself. */
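/* For example, for an unprototyped call foo (1.5) under 64-bit AIX,
   the code below describes the double both in an FPR and in its GPR
   slot, so the callee can find it either way.  */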
8405 static rtx
8406 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8407 const_tree type, bool named)
8409 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8410 enum rs6000_abi abi = DEFAULT_ABI;
8412 /* Return a marker to indicate whether the bit in CR1 that V.4 uses to
8413 say fp args were passed in registers needs to be set or cleared.
8414 Assume that we don't need the marker for software floating point,
8415 or compiler generated library calls. */
8416 if (mode == VOIDmode)
8418 if (abi == ABI_V4
8419 && (cum->call_cookie & CALL_LIBCALL) == 0
8420 && (cum->stdarg
8421 || (cum->nargs_prototype < 0
8422 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8424 /* For the SPE, we need to crxor CR6 always. */
8425 if (TARGET_SPE_ABI)
8426 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8427 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8428 return GEN_INT (cum->call_cookie
8429 | ((cum->fregno == FP_ARG_MIN_REG)
8430 ? CALL_V4_SET_FP_ARGS
8431 : CALL_V4_CLEAR_FP_ARGS));
8434 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8437 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8439 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8440 if (rslt != NULL_RTX)
8441 return rslt;
8442 /* Else fall through to usual handling. */
8445 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8446 if (TARGET_64BIT && ! cum->prototype)
8448 /* Vector parameters get passed in a vector register
8449 and also in GPRs or memory, in the absence of a prototype. */
8450 int align_words;
8451 rtx slot;
8452 align_words = (cum->words + 1) & ~1;
8454 if (align_words >= GP_ARG_NUM_REG)
8456 slot = NULL_RTX;
8458 else
8460 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8462 return gen_rtx_PARALLEL (mode,
8463 gen_rtvec (2,
8464 gen_rtx_EXPR_LIST (VOIDmode,
8465 slot, const0_rtx),
8466 gen_rtx_EXPR_LIST (VOIDmode,
8467 gen_rtx_REG (mode, cum->vregno),
8468 const0_rtx)));
8470 else
8471 return gen_rtx_REG (mode, cum->vregno);
8472 else if (TARGET_ALTIVEC_ABI
8473 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8474 || (type && TREE_CODE (type) == VECTOR_TYPE
8475 && int_size_in_bytes (type) == 16)))
8477 if (named || abi == ABI_V4)
8478 return NULL_RTX;
8479 else
8481 /* Vector parameters to varargs functions under AIX or Darwin
8482 get passed in memory and possibly also in GPRs. */
8483 int align, align_words, n_words;
8484 enum machine_mode part_mode;
8486 /* Vector parameters must be 16-byte aligned. This places them at
8487 2 mod 4 in terms of words in 32-bit mode, since the parameter
8488 save area starts at offset 24 from the stack. In 64-bit mode,
8489 they just have to start on an even word, since the parameter
8490 save area is 16-byte aligned. */
8491 if (TARGET_32BIT)
8492 align = (2 - cum->words) & 3;
8493 else
8494 align = cum->words & 1;
8495 align_words = cum->words + align;
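/* E.g. cum->words == 3 in 32-bit mode gives align == (2 - 3) & 3 == 3
   and align_words == 6, which is 2 mod 4 and therefore 16-byte
   aligned within the save area.  */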
8497 /* Out of registers? Memory, then. */
8498 if (align_words >= GP_ARG_NUM_REG)
8499 return NULL_RTX;
8501 if (TARGET_32BIT && TARGET_POWERPC64)
8502 return rs6000_mixed_function_arg (mode, type, align_words);
8504 /* The vector value goes in GPRs. Only the part of the
8505 value in GPRs is reported here. */
8506 part_mode = mode;
8507 n_words = rs6000_arg_size (mode, type);
8508 if (align_words + n_words > GP_ARG_NUM_REG)
8509 /* Fortunately, there are only two possibilities, the value
8510 is either wholly in GPRs or half in GPRs and half not. */
8511 part_mode = DImode;
8513 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8516 else if (TARGET_SPE_ABI && TARGET_SPE
8517 && (SPE_VECTOR_MODE (mode)
8518 || (TARGET_E500_DOUBLE && (mode == DFmode
8519 || mode == DCmode
8520 || mode == TFmode
8521 || mode == TCmode))))
8522 return rs6000_spe_function_arg (cum, mode, type);
8524 else if (abi == ABI_V4)
8526 if (TARGET_HARD_FLOAT && TARGET_FPRS
8527 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8528 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8529 || (mode == TFmode && !TARGET_IEEEQUAD)
8530 || mode == SDmode || mode == DDmode || mode == TDmode))
8532 /* _Decimal128 must use an even/odd register pair. This assumes
8533 that the register number is odd when fregno is odd. */
8534 if (mode == TDmode && (cum->fregno % 2) == 1)
8535 cum->fregno++;
8537 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8538 <= FP_ARG_V4_MAX_REG)
8539 return gen_rtx_REG (mode, cum->fregno);
8540 else
8541 return NULL_RTX;
8543 else
8545 int n_words = rs6000_arg_size (mode, type);
8546 int gregno = cum->sysv_gregno;
8548 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8549 (r7,r8) or (r9,r10). As is any other 2-word item such
8550 as complex int, due to a historical mistake. */
8551 if (n_words == 2)
8552 gregno += (1 - gregno) & 1;
8554 /* Multi-reg args are not split between registers and stack. */
8555 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8556 return NULL_RTX;
8558 if (TARGET_32BIT && TARGET_POWERPC64)
8559 return rs6000_mixed_function_arg (mode, type,
8560 gregno - GP_ARG_MIN_REG);
8561 return gen_rtx_REG (mode, gregno);
8564 else
8566 int align_words = rs6000_parm_start (mode, type, cum->words);
8568 /* _Decimal128 must be passed in an even/odd float register pair.
8569 This assumes that the register number is odd when fregno is odd. */
8570 if (mode == TDmode && (cum->fregno % 2) == 1)
8571 cum->fregno++;
8573 if (USE_FP_FOR_ARG_P (cum, mode, type))
8575 rtx rvec[GP_ARG_NUM_REG + 1];
8576 rtx r;
8577 int k;
8578 bool needs_psave;
8579 enum machine_mode fmode = mode;
8580 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8582 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8584 /* Currently, we only ever need one reg here because complex
8585 doubles are split. */
8586 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8587 && (fmode == TFmode || fmode == TDmode));
8589 /* Long double or _Decimal128 split over regs and memory. */
8590 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8593 /* Do we also need to pass this arg in the parameter save
8594 area? */
8595 needs_psave = (type
8596 && (cum->nargs_prototype <= 0
8597 || (DEFAULT_ABI == ABI_AIX
8598 && TARGET_XL_COMPAT
8599 && align_words >= GP_ARG_NUM_REG)));
8601 if (!needs_psave && mode == fmode)
8602 return gen_rtx_REG (fmode, cum->fregno);
8604 k = 0;
8605 if (needs_psave)
8607 /* Describe the part that goes in gprs or the stack.
8608 This piece must come first, before the fprs. */
8609 if (align_words < GP_ARG_NUM_REG)
8611 unsigned long n_words = rs6000_arg_size (mode, type);
8613 if (align_words + n_words > GP_ARG_NUM_REG
8614 || (TARGET_32BIT && TARGET_POWERPC64))
8616 /* If this is partially on the stack, then we only
8617 include the portion actually in registers here. */
8618 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8619 rtx off;
8620 int i = 0;
8621 if (align_words + n_words > GP_ARG_NUM_REG)
8622 /* Not all of the arg fits in gprs. Say that it
8623 goes in memory too, using a magic NULL_RTX
8624 component. Also see comment in
8625 rs6000_mixed_function_arg for why the normal
8626 function_arg_partial_nregs scheme doesn't work
8627 in this case. */
8628 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
8629 const0_rtx);
8630 do
8631 {
8632 r = gen_rtx_REG (rmode,
8633 GP_ARG_MIN_REG + align_words);
8634 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
8635 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8637 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
8639 else
8641 /* The whole arg fits in gprs. */
8642 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8643 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8646 else
8647 /* It's entirely in memory. */
8648 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8651 /* Describe where this piece goes in the fprs. */
8652 r = gen_rtx_REG (fmode, cum->fregno);
8653 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8655 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8657 else if (align_words < GP_ARG_NUM_REG)
8659 if (TARGET_32BIT && TARGET_POWERPC64)
8660 return rs6000_mixed_function_arg (mode, type, align_words);
8662 if (mode == BLKmode)
8663 mode = Pmode;
8665 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8667 else
8668 return NULL_RTX;
8672 /* For an arg passed partly in registers and partly in memory, this is
8673 the number of bytes passed in registers. For args passed entirely in
8674 registers or entirely in memory, zero. When an arg is described by a
8675 PARALLEL, perhaps using more than one register type, this function
8676 returns the number of bytes used by the first element of the PARALLEL. */
8678 static int
8679 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
8680 tree type, bool named)
8682 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8683 int ret = 0;
8684 int align_words;
8686 if (DEFAULT_ABI == ABI_V4)
8687 return 0;
8689 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
8690 && cum->nargs_prototype >= 0)
8691 return 0;
8693 /* In this complicated case we just disable the partial_nregs code. */
8694 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8695 return 0;
8697 align_words = rs6000_parm_start (mode, type, cum->words);
8699 if (USE_FP_FOR_ARG_P (cum, mode, type))
8701 /* If we are passing this arg in the fixed parameter save area
8702 (gprs or memory) as well as fprs, then this function should
8703 return the number of partial bytes passed in the parameter
8704 save area rather than partial bytes passed in fprs. */
8705 if (type
8706 && (cum->nargs_prototype <= 0
8707 || (DEFAULT_ABI == ABI_AIX
8708 && TARGET_XL_COMPAT
8709 && align_words >= GP_ARG_NUM_REG)))
8710 return 0;
8711 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
8712 > FP_ARG_MAX_REG + 1)
8713 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
8714 else if (cum->nargs_prototype >= 0)
8715 return 0;
8718 if (align_words < GP_ARG_NUM_REG
8719 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
8720 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
8722 if (ret != 0 && TARGET_DEBUG_ARG)
8723 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
8725 return ret;
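/* For example, a 24-byte struct starting at align_words == 6 in
   64-bit mode has only r9 and r10 left, so ret == (8 - 6) * 8 == 16
   bytes travel in GPRs and the remaining 8 go on the stack.  */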
8728 /* A C expression that indicates when an argument must be passed by
8729 reference. If nonzero for an argument, a copy of that argument is
8730 made in memory and a pointer to the argument is passed instead of
8731 the argument itself. The pointer is passed in whatever way is
8732 appropriate for passing a pointer to that type.
8734 Under V.4, aggregates and long double are passed by reference.
8736 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
8737 reference unless the AltiVec vector extension ABI is in force.
8739 As an extension to all ABIs, variable sized types are passed by
8740 reference. */
8742 static bool
8743 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
8744 enum machine_mode mode, const_tree type,
8745 bool named ATTRIBUTE_UNUSED)
8747 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
8749 if (TARGET_DEBUG_ARG)
8750 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
8751 return 1;
8754 if (!type)
8755 return 0;
8757 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
8759 if (TARGET_DEBUG_ARG)
8760 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
8761 return 1;
8764 if (int_size_in_bytes (type) < 0)
8766 if (TARGET_DEBUG_ARG)
8767 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8768 return 1;
8771 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8772 modes only exist for GCC vector types if -maltivec. */
8773 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8775 if (TARGET_DEBUG_ARG)
8776 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8777 return 1;
8780 /* Pass synthetic vectors in memory. */
8781 if (TREE_CODE (type) == VECTOR_TYPE
8782 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8784 static bool warned_for_pass_big_vectors = false;
8785 if (TARGET_DEBUG_ARG)
8786 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8787 if (!warned_for_pass_big_vectors)
8789 warning (0, "GCC vector passed by reference: "
8790 "non-standard ABI extension with no compatibility guarantee");
8791 warned_for_pass_big_vectors = true;
8793 return 1;
8796 return 0;
8799 static void
8800 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8802 int i;
8803 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8805 if (nregs == 0)
8806 return;
8808 for (i = 0; i < nregs; i++)
8810 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
8811 if (reload_completed)
8813 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8814 tem = NULL_RTX;
8815 else
8816 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8817 i * GET_MODE_SIZE (reg_mode));
8819 else
8820 tem = replace_equiv_address (tem, XEXP (tem, 0));
8822 gcc_assert (tem);
8824 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
8828 /* Perform any actions needed for a function that is receiving a
8829 variable number of arguments.
8831 CUM is as above.
8833 MODE and TYPE are the mode and type of the current parameter.
8835 PRETEND_SIZE is a variable that should be set to the amount of stack
8836 that must be pushed by the prolog to pretend that our caller pushed it.
8839 Normally, this macro will push all remaining incoming registers on the
8840 stack and set PRETEND_SIZE to the length of the registers pushed. */
8842 static void
8843 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
8844 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8845 int no_rtl)
8847 CUMULATIVE_ARGS next_cum;
8848 int reg_size = TARGET_32BIT ? 4 : 8;
8849 rtx save_area = NULL_RTX, mem;
8850 int first_reg_offset;
8851 alias_set_type set;
8853 /* Skip the last named argument. */
8854 next_cum = *get_cumulative_args (cum);
8855 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
8857 if (DEFAULT_ABI == ABI_V4)
8859 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8861 if (! no_rtl)
8863 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8864 HOST_WIDE_INT offset = 0;
8866 /* Try to optimize the size of the varargs save area.
8867 The ABI requires that ap.reg_save_area is doubleword
8868 aligned, but we don't need to allocate space for all
8869 the bytes, only those to which we will actually save
8870 anything. */
8871 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
8872 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
8873 if (TARGET_HARD_FLOAT && TARGET_FPRS
8874 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8875 && cfun->va_list_fpr_size)
8877 if (gpr_reg_num)
8878 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
8879 * UNITS_PER_FP_WORD;
8880 if (cfun->va_list_fpr_size
8881 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8882 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
8883 else
8884 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8885 * UNITS_PER_FP_WORD;
8887 if (gpr_reg_num)
8889 offset = -((first_reg_offset * reg_size) & ~7);
8890 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
8892 gpr_reg_num = cfun->va_list_gpr_size;
8893 if (reg_size == 4 && (first_reg_offset & 1))
8894 gpr_reg_num++;
8896 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
8898 else if (fpr_size)
8899 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
8900 * UNITS_PER_FP_WORD
8901 - (int) (GP_ARG_NUM_REG * reg_size);
8903 if (gpr_size + fpr_size)
8905 rtx reg_save_area
8906 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
8907 gcc_assert (GET_CODE (reg_save_area) == MEM);
8908 reg_save_area = XEXP (reg_save_area, 0);
8909 if (GET_CODE (reg_save_area) == PLUS)
8911 gcc_assert (XEXP (reg_save_area, 0)
8912 == virtual_stack_vars_rtx);
8913 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
8914 offset += INTVAL (XEXP (reg_save_area, 1));
8916 else
8917 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
8920 cfun->machine->varargs_save_offset = offset;
8921 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
8924 else
8926 first_reg_offset = next_cum.words;
8927 save_area = virtual_incoming_args_rtx;
8929 if (targetm.calls.must_pass_in_stack (mode, type))
8930 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
8933 set = get_varargs_alias_set ();
8934 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
8935 && cfun->va_list_gpr_size)
8937 int nregs = GP_ARG_NUM_REG - first_reg_offset;
8939 if (va_list_gpr_counter_field)
8941 /* V4 va_list_gpr_size counts number of registers needed. */
8942 if (nregs > cfun->va_list_gpr_size)
8943 nregs = cfun->va_list_gpr_size;
8945 else
8947 /* char * va_list instead counts number of bytes needed. */
8948 if (nregs > cfun->va_list_gpr_size / reg_size)
8949 nregs = cfun->va_list_gpr_size / reg_size;
8952 mem = gen_rtx_MEM (BLKmode,
8953 plus_constant (Pmode, save_area,
8954 first_reg_offset * reg_size));
8955 MEM_NOTRAP_P (mem) = 1;
8956 set_mem_alias_set (mem, set);
8957 set_mem_align (mem, BITS_PER_WORD);
8959 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
8960 nregs);
8963 /* Save FP registers if needed. */
8964 if (DEFAULT_ABI == ABI_V4
8965 && TARGET_HARD_FLOAT && TARGET_FPRS
8966 && ! no_rtl
8967 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8968 && cfun->va_list_fpr_size)
8970 int fregno = next_cum.fregno, nregs;
8971 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
8972 rtx lab = gen_label_rtx ();
8973 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
8974 * UNITS_PER_FP_WORD);
8976 emit_jump_insn
8977 (gen_rtx_SET (VOIDmode,
8978 pc_rtx,
8979 gen_rtx_IF_THEN_ELSE (VOIDmode,
8980 gen_rtx_NE (VOIDmode, cr1,
8981 const0_rtx),
8982 gen_rtx_LABEL_REF (VOIDmode, lab),
8983 pc_rtx)));
8985 for (nregs = 0;
8986 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
8987 fregno++, off += UNITS_PER_FP_WORD, nregs++)
8989 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8990 ? DFmode : SFmode,
8991 plus_constant (Pmode, save_area, off));
8992 MEM_NOTRAP_P (mem) = 1;
8993 set_mem_alias_set (mem, set);
8994 set_mem_align (mem, GET_MODE_ALIGNMENT (
8995 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8996 ? DFmode : SFmode));
8997 emit_move_insn (mem, gen_rtx_REG (
8998 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8999 ? DFmode : SFmode, fregno));
9002 emit_label (lab);
9006 /* Create the va_list data type. */
9008 static tree
9009 rs6000_build_builtin_va_list (void)
9011 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
9013 /* For AIX, prefer 'char *' because that's what the system
9014 header files like. */
9015 if (DEFAULT_ABI != ABI_V4)
9016 return build_pointer_type (char_type_node);
9018 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
9019 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
9020 get_identifier ("__va_list_tag"), record);
9022 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
9023 unsigned_char_type_node);
9024 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
9025 unsigned_char_type_node);
9026 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
9027 every user file. */
9028 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9029 get_identifier ("reserved"), short_unsigned_type_node);
9030 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9031 get_identifier ("overflow_arg_area"),
9032 ptr_type_node);
9033 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
9034 get_identifier ("reg_save_area"),
9035 ptr_type_node);
9037 va_list_gpr_counter_field = f_gpr;
9038 va_list_fpr_counter_field = f_fpr;
9040 DECL_FIELD_CONTEXT (f_gpr) = record;
9041 DECL_FIELD_CONTEXT (f_fpr) = record;
9042 DECL_FIELD_CONTEXT (f_res) = record;
9043 DECL_FIELD_CONTEXT (f_ovf) = record;
9044 DECL_FIELD_CONTEXT (f_sav) = record;
9046 TYPE_STUB_DECL (record) = type_decl;
9047 TYPE_NAME (record) = type_decl;
9048 TYPE_FIELDS (record) = f_gpr;
9049 DECL_CHAIN (f_gpr) = f_fpr;
9050 DECL_CHAIN (f_fpr) = f_res;
9051 DECL_CHAIN (f_res) = f_ovf;
9052 DECL_CHAIN (f_ovf) = f_sav;
9054 layout_type (record);
9056 /* The correct type is an array type of one element. */
9057 return build_array_type (record, build_index_type (size_zero_node));
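/* For reference, the V.4 record built above amounts to this C-level
   layout (an illustrative sketch only):

     typedef struct __va_list_tag
     {
       unsigned char gpr;            // gp registers used, 0..8
       unsigned char fpr;            // fp registers used, 0..8
       unsigned short reserved;      // the named padding
       void *overflow_arg_area;      // args that went on the stack
       void *reg_save_area;          // where the prologue saved regs
     } __va_list_tag;

   and va_list is an array of one such element.  */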
9060 /* Implement va_start. */
9062 static void
9063 rs6000_va_start (tree valist, rtx nextarg)
9065 HOST_WIDE_INT words, n_gpr, n_fpr;
9066 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9067 tree gpr, fpr, ovf, sav, t;
9069 /* Only SVR4 needs something special. */
9070 if (DEFAULT_ABI != ABI_V4)
9072 std_expand_builtin_va_start (valist, nextarg);
9073 return;
9076 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9077 f_fpr = DECL_CHAIN (f_gpr);
9078 f_res = DECL_CHAIN (f_fpr);
9079 f_ovf = DECL_CHAIN (f_res);
9080 f_sav = DECL_CHAIN (f_ovf);
9082 valist = build_simple_mem_ref (valist);
9083 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9084 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9085 f_fpr, NULL_TREE);
9086 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9087 f_ovf, NULL_TREE);
9088 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9089 f_sav, NULL_TREE);
9091 /* Count number of gp and fp argument registers used. */
9092 words = crtl->args.info.words;
9093 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
9094 GP_ARG_NUM_REG);
9095 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
9096 FP_ARG_NUM_REG);
9098 if (TARGET_DEBUG_ARG)
9099 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
9100 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
9101 words, n_gpr, n_fpr);
9103 if (cfun->va_list_gpr_size)
9105 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9106 build_int_cst (NULL_TREE, n_gpr));
9107 TREE_SIDE_EFFECTS (t) = 1;
9108 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9111 if (cfun->va_list_fpr_size)
9113 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9114 build_int_cst (NULL_TREE, n_fpr));
9115 TREE_SIDE_EFFECTS (t) = 1;
9116 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9118 #ifdef HAVE_AS_GNU_ATTRIBUTE
9119 if (call_ABI_of_interest (cfun->decl))
9120 rs6000_passes_float = true;
9121 #endif
9124 /* Find the overflow area. */
9125 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9126 if (words != 0)
9127 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
9128 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9129 TREE_SIDE_EFFECTS (t) = 1;
9130 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9132 /* If there were no va_arg invocations, don't set up the register
9133 save area. */
9134 if (!cfun->va_list_gpr_size
9135 && !cfun->va_list_fpr_size
9136 && n_gpr < GP_ARG_NUM_REG
9137 && n_fpr < FP_ARG_V4_MAX_REG)
9138 return;
9140 /* Find the register save area. */
9141 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
9142 if (cfun->machine->varargs_save_offset)
9143 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
9144 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9145 TREE_SIDE_EFFECTS (t) = 1;
9146 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9149 /* Implement va_arg. */
9151 static tree
9152 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9153 gimple_seq *post_p)
9155 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
9156 tree gpr, fpr, ovf, sav, reg, t, u;
9157 int size, rsize, n_reg, sav_ofs, sav_scale;
9158 tree lab_false, lab_over, addr;
9159 int align;
9160 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9161 int regalign = 0;
9162 gimple stmt;
9164 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9166 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9167 return build_va_arg_indirect_ref (t);
9170 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9171 earlier version of gcc, with the property that it always applied alignment
9172 adjustments to the va-args (even for zero-sized types). The cheapest way
9173 to deal with this is to replicate the effect of the part of
9174 std_gimplify_va_arg_expr that carries out the align adjust, for the
9175 relevant case.
9176 We don't need to check for pass-by-reference because of the test above.
9177 We can return a simplified answer, since we know there's no offset to add. */
9179 if (TARGET_MACHO
9180 && rs6000_darwin64_abi
9181 && integer_zerop (TYPE_SIZE (type)))
9183 unsigned HOST_WIDE_INT align, boundary;
9184 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9185 align = PARM_BOUNDARY / BITS_PER_UNIT;
9186 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9187 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9188 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9189 boundary /= BITS_PER_UNIT;
9190 if (boundary > align)
9192 tree t;
9193 /* This updates arg ptr by the amount that would be necessary
9194 to align the zero-sized (but not zero-alignment) item. */
9195 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9196 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9197 gimplify_and_add (t, pre_p);
9199 t = fold_convert (sizetype, valist_tmp);
9200 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9201 fold_convert (TREE_TYPE (valist),
9202 fold_build2 (BIT_AND_EXPR, sizetype, t,
9203 size_int (-boundary))));
9204 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9205 gimplify_and_add (t, pre_p);
9207 /* Since it is zero-sized there's no increment for the item itself. */
9208 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9209 return build_va_arg_indirect_ref (valist_tmp);
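/* Concretely, for a hypothetical zero-sized type whose alignment exceeds
   PARM_BOUNDARY, the statements above reduce to pointer arithmetic of the
   form

     valist = (char *) (((uintptr_t) valist + boundary - 1) & -boundary);

   i.e. round the arg pointer up to the type's boundary and then consume
   no bytes, matching what the earlier Darwin ppc64 compilers emitted.  */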
9212 if (DEFAULT_ABI != ABI_V4)
9214 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9216 tree elem_type = TREE_TYPE (type);
9217 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9218 int elem_size = GET_MODE_SIZE (elem_mode);
9220 if (elem_size < UNITS_PER_WORD)
9222 tree real_part, imag_part;
9223 gimple_seq post = NULL;
9225 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9226 &post);
9227 /* Copy the value into a temporary, lest the formal temporary
9228 be reused out from under us. */
9229 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9230 gimple_seq_add_seq (pre_p, post);
9232 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9233 post_p);
9235 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9239 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9242 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9243 f_fpr = DECL_CHAIN (f_gpr);
9244 f_res = DECL_CHAIN (f_fpr);
9245 f_ovf = DECL_CHAIN (f_res);
9246 f_sav = DECL_CHAIN (f_ovf);
9248 valist = build_va_arg_indirect_ref (valist);
9249 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9250 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9251 f_fpr, NULL_TREE);
9252 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9253 f_ovf, NULL_TREE);
9254 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9255 f_sav, NULL_TREE);
9257 size = int_size_in_bytes (type);
9258 rsize = (size + 3) / 4;
9259 align = 1;
9261 if (TARGET_HARD_FLOAT && TARGET_FPRS
9262 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9263 || (TARGET_DOUBLE_FLOAT
9264 && (TYPE_MODE (type) == DFmode
9265 || TYPE_MODE (type) == TFmode
9266 || TYPE_MODE (type) == SDmode
9267 || TYPE_MODE (type) == DDmode
9268 || TYPE_MODE (type) == TDmode))))
9270 /* FP args go in FP registers, if present. */
9271 reg = fpr;
9272 n_reg = (size + 7) / 8;
9273 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9274 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9275 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9276 align = 8;
9278 else
9280 /* Otherwise into GP registers. */
9281 reg = gpr;
9282 n_reg = rsize;
9283 sav_ofs = 0;
9284 sav_scale = 4;
9285 if (n_reg == 2)
9286 align = 8;
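/* Worked instance of the selection above, assuming a hard-float V4 target
   and a long double argument (TFmode, size 16): reg = fpr,
   n_reg = (16 + 7) / 8 = 2, sav_ofs = 8 * 4 (skipping the eight 4-byte
   GPR slots at the start of the save area), sav_scale = 8, and align = 8
   for the overflow-area fallback.  */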
9289 /* Pull the value out of the saved registers.... */
9291 lab_over = NULL;
9292 addr = create_tmp_var (ptr_type_node, "addr");
9294 /* AltiVec vectors never go in registers when -mabi=altivec. */
9295 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9296 align = 16;
9297 else
9299 lab_false = create_artificial_label (input_location);
9300 lab_over = create_artificial_label (input_location);
9302 /* Long long and SPE vectors are aligned in the registers.
9303 As is any other 2-gpr item, such as complex int, due to a
9304 historical mistake. */
9305 u = reg;
9306 if (n_reg == 2 && reg == gpr)
9308 regalign = 1;
9309 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9310 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9311 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9312 unshare_expr (reg), u);
9314 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9315 reg number is 0 for f1, so we want to make it odd. */
9316 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9318 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9319 build_int_cst (TREE_TYPE (reg), 1));
9320 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
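/* That is, the BIT_IOR with 1 rounds the stored index up to an odd value
   (0 -> 1, 2 -> 3, ...), so a _Decimal128 value always starts on the
   even/odd FPR pair the ABI reserves for it.  */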
9323 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9324 t = build2 (GE_EXPR, boolean_type_node, u, t);
9325 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9326 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9327 gimplify_and_add (t, pre_p);
9329 t = sav;
9330 if (sav_ofs)
9331 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9333 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9334 build_int_cst (TREE_TYPE (reg), n_reg));
9335 u = fold_convert (sizetype, u);
9336 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9337 t = fold_build_pointer_plus (t, u);
9339 /* _Decimal32 varargs are located in the second word of the 64-bit
9340 FP register for 32-bit binaries. */
9341 if (!TARGET_POWERPC64
9342 && TARGET_HARD_FLOAT && TARGET_FPRS
9343 && TYPE_MODE (type) == SDmode)
9344 t = fold_build_pointer_plus_hwi (t, size);
9346 gimplify_assign (addr, t, pre_p);
9348 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9350 stmt = gimple_build_label (lab_false);
9351 gimple_seq_add_stmt (pre_p, stmt);
9353 if ((n_reg == 2 && !regalign) || n_reg > 2)
9355 /* Ensure that we don't find any more args in regs.
9356 Alignment has been taken care of for the special cases. */
9357 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9361 /* ... otherwise out of the overflow area. */
9363 /* Care for on-stack alignment if needed. */
9364 t = ovf;
9365 if (align != 1)
9367 t = fold_build_pointer_plus_hwi (t, align - 1);
9368 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9369 build_int_cst (TREE_TYPE (t), -align));
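/* The two statements above are the standard round-up-to-a-power-of-two
   idiom, t = (t + align - 1) & -align: with align == 8, an overflow
   pointer ending in 0x14 becomes (0x14 + 7) & ~7 == 0x18, while an
   already aligned pointer is left unchanged.  */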
9371 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9373 gimplify_assign (unshare_expr (addr), t, pre_p);
9375 t = fold_build_pointer_plus_hwi (t, size);
9376 gimplify_assign (unshare_expr (ovf), t, pre_p);
9378 if (lab_over)
9380 stmt = gimple_build_label (lab_over);
9381 gimple_seq_add_stmt (pre_p, stmt);
9384 if (STRICT_ALIGNMENT
9385 && (TYPE_ALIGN (type)
9386 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9388 /* The value (of type complex double, for example) may not be
9389 aligned in memory in the saved registers, so copy via a
9390 temporary. (This is the same code as used for SPARC.) */
9391 tree tmp = create_tmp_var (type, "va_arg_tmp");
9392 tree dest_addr = build_fold_addr_expr (tmp);
9394 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9395 3, dest_addr, addr, size_int (rsize * 4));
9397 gimplify_and_add (copy, pre_p);
9398 addr = dest_addr;
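/* A sketch of the gimple built above, for a type whose natural alignment
   exceeds what the register save area guarantees:

     type va_arg_tmp;                          // naturally aligned
     __builtin_memcpy (&va_arg_tmp, addr, rsize * 4);
     addr = &va_arg_tmp;

   the copy launders the under-aligned source through a temporary whose
   alignment the middle end can trust.  */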
9401 addr = fold_convert (ptrtype, addr);
9402 return build_va_arg_indirect_ref (addr);
9405 /* Builtins. */
9407 static void
9408 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9410 tree t;
9411 unsigned classify = rs6000_builtin_info[(int)code].attr;
9412 const char *attr_string = "";
9414 gcc_assert (name != NULL);
9415 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
9417 if (rs6000_builtin_decls[(int)code])
9418 fatal_error ("internal error: builtin function %s already processed", name);
9420 rs6000_builtin_decls[(int)code] = t =
9421 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9423 /* Set any special attributes. */
9424 if ((classify & RS6000_BTC_CONST) != 0)
9426 /* const function, function only depends on the inputs. */
9427 TREE_READONLY (t) = 1;
9428 TREE_NOTHROW (t) = 1;
9429 attr_string = ", pure";
9431 else if ((classify & RS6000_BTC_PURE) != 0)
9433 /* pure function, function can read global memory, but does not set any
9434 external state. */
9435 DECL_PURE_P (t) = 1;
9436 TREE_NOTHROW (t) = 1;
9437 attr_string = ", const";
9439 else if ((classify & RS6000_BTC_FP) != 0)
9441 /* Function is a math function. If -frounding-math is in effect, treat
9442 the function as not reading global memory but as having arbitrary side
9443 effects. If it is off, assume the function is a const function. This
9444 mimics the ATTR_MATHFN_FPROUNDING attribute in builtin-attrs.def that
9445 is used for the math functions. */
9446 TREE_NOTHROW (t) = 1;
9447 if (flag_rounding_math)
9449 DECL_PURE_P (t) = 1;
9450 DECL_IS_NOVOPS (t) = 1;
9451 attr_string = ", fp, pure";
9453 else
9455 TREE_READONLY (t) = 1;
9456 attr_string = ", fp, const";
9459 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9460 gcc_unreachable ();
9462 if (TARGET_DEBUG_BUILTIN)
9463 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9464 (int)code, name, attr_string);
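/* For illustration, a const vector builtin is registered along the lines
   of

     def_builtin ("__builtin_altivec_vaddubm",
                  v16qi_ftype_v16qi_v16qi, ALTIVEC_BUILTIN_VADDUBM);

   and the RS6000_BTC_CONST bit in its rs6000_builtin_info entry then
   marks the decl TREE_READONLY, so identical calls can be CSEd.  (The
   exact type and enum spellings live in rs6000-builtin.def and the
   builtin-init code; this is a sketch.)  */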
9467 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9469 #undef RS6000_BUILTIN_1
9470 #undef RS6000_BUILTIN_2
9471 #undef RS6000_BUILTIN_3
9472 #undef RS6000_BUILTIN_A
9473 #undef RS6000_BUILTIN_D
9474 #undef RS6000_BUILTIN_E
9475 #undef RS6000_BUILTIN_P
9476 #undef RS6000_BUILTIN_Q
9477 #undef RS6000_BUILTIN_S
9478 #undef RS6000_BUILTIN_X
9480 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9481 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9482 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9483 { MASK, ICODE, NAME, ENUM },
9485 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9486 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9487 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9488 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9489 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9490 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9491 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9493 static const struct builtin_description bdesc_3arg[] =
9495 #include "rs6000-builtin.def"
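/* The block above is the classic "X macro" technique: rs6000-builtin.def
   invokes one RS6000_BUILTIN_<kind> macro per builtin it lists, and each
   table in this file re-defines exactly one of those macros to emit an
   initializer while the others expand to nothing, so every #include
   re-scans the same list through a different filter.  A minimal sketch,
   with a hypothetical defs file my-builtins.def containing the one line
   MYDEF_2 (MY_ADD, "__builtin_my_add", MY_MASK, MY_ATTR, CODE_FOR_addsi3):

     #define MYDEF_2(ENUM, NAME, MASK, ATTR, ICODE) \
       { MASK, ICODE, NAME, ENUM },
     static const struct builtin_description my_2arg[] =
     {
     #include "my-builtins.def"
     };
  */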
9498 /* DST operations: void foo (void *, const int, const char). */
9500 #undef RS6000_BUILTIN_1
9501 #undef RS6000_BUILTIN_2
9502 #undef RS6000_BUILTIN_3
9503 #undef RS6000_BUILTIN_A
9504 #undef RS6000_BUILTIN_D
9505 #undef RS6000_BUILTIN_E
9506 #undef RS6000_BUILTIN_P
9507 #undef RS6000_BUILTIN_Q
9508 #undef RS6000_BUILTIN_S
9509 #undef RS6000_BUILTIN_X
9511 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9512 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9513 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9514 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9515 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9516 { MASK, ICODE, NAME, ENUM },
9518 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9519 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9520 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9521 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9522 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9524 static const struct builtin_description bdesc_dst[] =
9526 #include "rs6000-builtin.def"
9529 /* Simple binary operations: VECc = foo (VECa, VECb). */
9531 #undef RS6000_BUILTIN_1
9532 #undef RS6000_BUILTIN_2
9533 #undef RS6000_BUILTIN_3
9534 #undef RS6000_BUILTIN_A
9535 #undef RS6000_BUILTIN_D
9536 #undef RS6000_BUILTIN_E
9537 #undef RS6000_BUILTIN_P
9538 #undef RS6000_BUILTIN_Q
9539 #undef RS6000_BUILTIN_S
9540 #undef RS6000_BUILTIN_X
9542 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9543 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9544 { MASK, ICODE, NAME, ENUM },
9546 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9547 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9548 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9549 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9550 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9551 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9552 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9553 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9555 static const struct builtin_description bdesc_2arg[] =
9557 #include "rs6000-builtin.def"
9560 #undef RS6000_BUILTIN_1
9561 #undef RS6000_BUILTIN_2
9562 #undef RS6000_BUILTIN_3
9563 #undef RS6000_BUILTIN_A
9564 #undef RS6000_BUILTIN_D
9565 #undef RS6000_BUILTIN_E
9566 #undef RS6000_BUILTIN_P
9567 #undef RS6000_BUILTIN_Q
9568 #undef RS6000_BUILTIN_S
9569 #undef RS6000_BUILTIN_X
9571 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9572 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9573 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9574 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9575 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9576 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9577 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9578 { MASK, ICODE, NAME, ENUM },
9580 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9581 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9582 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9584 /* AltiVec predicates. */
9586 static const struct builtin_description bdesc_altivec_preds[] =
9588 #include "rs6000-builtin.def"
9591 /* SPE predicates. */
9592 #undef RS6000_BUILTIN_1
9593 #undef RS6000_BUILTIN_2
9594 #undef RS6000_BUILTIN_3
9595 #undef RS6000_BUILTIN_A
9596 #undef RS6000_BUILTIN_D
9597 #undef RS6000_BUILTIN_E
9598 #undef RS6000_BUILTIN_P
9599 #undef RS6000_BUILTIN_Q
9600 #undef RS6000_BUILTIN_S
9601 #undef RS6000_BUILTIN_X
9603 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9604 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9605 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9606 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9607 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9608 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9609 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9610 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9611 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9612 { MASK, ICODE, NAME, ENUM },
9614 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9616 static const struct builtin_description bdesc_spe_predicates[] =
9618 #include "rs6000-builtin.def"
9621 /* SPE evsel predicates. */
9622 #undef RS6000_BUILTIN_1
9623 #undef RS6000_BUILTIN_2
9624 #undef RS6000_BUILTIN_3
9625 #undef RS6000_BUILTIN_A
9626 #undef RS6000_BUILTIN_D
9627 #undef RS6000_BUILTIN_E
9628 #undef RS6000_BUILTIN_P
9629 #undef RS6000_BUILTIN_Q
9630 #undef RS6000_BUILTIN_S
9631 #undef RS6000_BUILTIN_X
9633 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9634 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9635 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9636 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9637 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9638 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
9639 { MASK, ICODE, NAME, ENUM },
9641 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9642 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9643 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9644 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9646 static const struct builtin_description bdesc_spe_evsel[] =
9648 #include "rs6000-builtin.def"
9651 /* PAIRED predicates. */
9652 #undef RS6000_BUILTIN_1
9653 #undef RS6000_BUILTIN_2
9654 #undef RS6000_BUILTIN_3
9655 #undef RS6000_BUILTIN_A
9656 #undef RS6000_BUILTIN_D
9657 #undef RS6000_BUILTIN_E
9658 #undef RS6000_BUILTIN_P
9659 #undef RS6000_BUILTIN_Q
9660 #undef RS6000_BUILTIN_S
9661 #undef RS6000_BUILTIN_X
9663 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9664 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9665 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9666 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9667 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9668 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9669 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9670 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
9671 { MASK, ICODE, NAME, ENUM },
9673 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9674 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9676 static const struct builtin_description bdesc_paired_preds[] =
9678 #include "rs6000-builtin.def"
9681 /* ABS* operations. */
9683 #undef RS6000_BUILTIN_1
9684 #undef RS6000_BUILTIN_2
9685 #undef RS6000_BUILTIN_3
9686 #undef RS6000_BUILTIN_A
9687 #undef RS6000_BUILTIN_D
9688 #undef RS6000_BUILTIN_E
9689 #undef RS6000_BUILTIN_P
9690 #undef RS6000_BUILTIN_Q
9691 #undef RS6000_BUILTIN_S
9692 #undef RS6000_BUILTIN_X
9694 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9695 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9696 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9697 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
9698 { MASK, ICODE, NAME, ENUM },
9700 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9701 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9702 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9703 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9704 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9705 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9707 static const struct builtin_description bdesc_abs[] =
9709 #include "rs6000-builtin.def"
9712 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
9713 foo (VECa). */
9715 #undef RS6000_BUILTIN_1
9716 #undef RS6000_BUILTIN_2
9717 #undef RS6000_BUILTIN_3
9718 #undef RS6000_BUILTIN_A
9719 #undef RS6000_BUILTIN_E
9720 #undef RS6000_BUILTIN_D
9721 #undef RS6000_BUILTIN_P
9722 #undef RS6000_BUILTIN_Q
9723 #undef RS6000_BUILTIN_S
9724 #undef RS6000_BUILTIN_X
9726 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
9727 { MASK, ICODE, NAME, ENUM },
9729 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9730 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9731 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9732 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9733 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9734 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9735 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9736 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9737 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9739 static const struct builtin_description bdesc_1arg[] =
9741 #include "rs6000-builtin.def"
9744 #undef RS6000_BUILTIN_1
9745 #undef RS6000_BUILTIN_2
9746 #undef RS6000_BUILTIN_3
9747 #undef RS6000_BUILTIN_A
9748 #undef RS6000_BUILTIN_D
9749 #undef RS6000_BUILTIN_E
9750 #undef RS6000_BUILTIN_P
9751 #undef RS6000_BUILTIN_Q
9752 #undef RS6000_BUILTIN_S
9753 #undef RS6000_BUILTIN_X
9755 /* Return true if a builtin function is overloaded. */
9756 bool
9757 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
9759 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
9762 /* Expand an expression EXP that calls a builtin without arguments. */
9763 static rtx
9764 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
9766 rtx pat;
9767 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9769 if (icode == CODE_FOR_nothing)
9770 /* Builtin not supported on this processor. */
9771 return 0;
9773 if (target == 0
9774 || GET_MODE (target) != tmode
9775 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9776 target = gen_reg_rtx (tmode);
9778 pat = GEN_FCN (icode) (target);
9779 if (! pat)
9780 return 0;
9781 emit_insn (pat);
9783 return target;
9787 static rtx
9788 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9790 rtx pat;
9791 tree arg0 = CALL_EXPR_ARG (exp, 0);
9792 rtx op0 = expand_normal (arg0);
9793 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9794 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9796 if (icode == CODE_FOR_nothing)
9797 /* Builtin not supported on this processor. */
9798 return 0;
9800 /* If we got invalid arguments bail out before generating bad rtl. */
9801 if (arg0 == error_mark_node)
9802 return const0_rtx;
9804 if (icode == CODE_FOR_altivec_vspltisb
9805 || icode == CODE_FOR_altivec_vspltish
9806 || icode == CODE_FOR_altivec_vspltisw
9807 || icode == CODE_FOR_spe_evsplatfi
9808 || icode == CODE_FOR_spe_evsplati)
9810 /* Only allow 5-bit *signed* literals. */
9811 if (GET_CODE (op0) != CONST_INT
9812 || INTVAL (op0) > 15
9813 || INTVAL (op0) < -16)
9815 error ("argument 1 must be a 5-bit signed literal");
9816 return const0_rtx;
9820 if (target == 0
9821 || GET_MODE (target) != tmode
9822 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9823 target = gen_reg_rtx (tmode);
9825 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9826 op0 = copy_to_mode_reg (mode0, op0);
9828 pat = GEN_FCN (icode) (target, op0);
9829 if (! pat)
9830 return 0;
9831 emit_insn (pat);
9833 return target;
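/* Usage sketch: vec_splat_s8 (the altivec.h spelling of vspltisb above)
   only accepts what the instruction's 5-bit SIMM field can encode:

     vector signed char ok = vec_splat_s8 (-16);   // in [-16, 15]
     vector signed char no = vec_splat_s8 (31);    // "argument 1 must be
                                                   //  a 5-bit signed literal"
  */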
9836 static rtx
9837 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9839 rtx pat, scratch1, scratch2;
9840 tree arg0 = CALL_EXPR_ARG (exp, 0);
9841 rtx op0 = expand_normal (arg0);
9842 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9843 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9845 /* If we have invalid arguments, bail out before generating bad rtl. */
9846 if (arg0 == error_mark_node)
9847 return const0_rtx;
9849 if (target == 0
9850 || GET_MODE (target) != tmode
9851 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9852 target = gen_reg_rtx (tmode);
9854 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9855 op0 = copy_to_mode_reg (mode0, op0);
9857 scratch1 = gen_reg_rtx (mode0);
9858 scratch2 = gen_reg_rtx (mode0);
9860 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9861 if (! pat)
9862 return 0;
9863 emit_insn (pat);
9865 return target;
9868 static rtx
9869 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
9871 rtx pat;
9872 tree arg0 = CALL_EXPR_ARG (exp, 0);
9873 tree arg1 = CALL_EXPR_ARG (exp, 1);
9874 rtx op0 = expand_normal (arg0);
9875 rtx op1 = expand_normal (arg1);
9876 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9877 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9878 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9880 if (icode == CODE_FOR_nothing)
9881 /* Builtin not supported on this processor. */
9882 return 0;
9884 /* If we got invalid arguments bail out before generating bad rtl. */
9885 if (arg0 == error_mark_node || arg1 == error_mark_node)
9886 return const0_rtx;
9888 if (icode == CODE_FOR_altivec_vcfux
9889 || icode == CODE_FOR_altivec_vcfsx
9890 || icode == CODE_FOR_altivec_vctsxs
9891 || icode == CODE_FOR_altivec_vctuxs
9892 || icode == CODE_FOR_altivec_vspltb
9893 || icode == CODE_FOR_altivec_vsplth
9894 || icode == CODE_FOR_altivec_vspltw
9895 || icode == CODE_FOR_spe_evaddiw
9896 || icode == CODE_FOR_spe_evldd
9897 || icode == CODE_FOR_spe_evldh
9898 || icode == CODE_FOR_spe_evldw
9899 || icode == CODE_FOR_spe_evlhhesplat
9900 || icode == CODE_FOR_spe_evlhhossplat
9901 || icode == CODE_FOR_spe_evlhhousplat
9902 || icode == CODE_FOR_spe_evlwhe
9903 || icode == CODE_FOR_spe_evlwhos
9904 || icode == CODE_FOR_spe_evlwhou
9905 || icode == CODE_FOR_spe_evlwhsplat
9906 || icode == CODE_FOR_spe_evlwwsplat
9907 || icode == CODE_FOR_spe_evrlwi
9908 || icode == CODE_FOR_spe_evslwi
9909 || icode == CODE_FOR_spe_evsrwis
9910 || icode == CODE_FOR_spe_evsubifw
9911 || icode == CODE_FOR_spe_evsrwiu)
9913 /* Only allow 5-bit unsigned literals. */
9914 STRIP_NOPS (arg1);
9915 if (TREE_CODE (arg1) != INTEGER_CST
9916 || TREE_INT_CST_LOW (arg1) & ~0x1f)
9918 error ("argument 2 must be a 5-bit unsigned literal");
9919 return const0_rtx;
9923 if (target == 0
9924 || GET_MODE (target) != tmode
9925 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9926 target = gen_reg_rtx (tmode);
9928 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9929 op0 = copy_to_mode_reg (mode0, op0);
9930 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9931 op1 = copy_to_mode_reg (mode1, op1);
9933 pat = GEN_FCN (icode) (target, op0, op1);
9934 if (! pat)
9935 return 0;
9936 emit_insn (pat);
9938 return target;
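/* The "& ~0x1f" test above is the usual fits-in-N-bits check: any set
   bit outside the low five rejects the operand.  E.g. 31 & ~0x1f == 0
   (accepted) while 32 & ~0x1f == 0x20 (rejected), keeping the splat
   selectors and SPE immediates within their 5-bit fields.  */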
9941 static rtx
9942 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9944 rtx pat, scratch;
9945 tree cr6_form = CALL_EXPR_ARG (exp, 0);
9946 tree arg0 = CALL_EXPR_ARG (exp, 1);
9947 tree arg1 = CALL_EXPR_ARG (exp, 2);
9948 rtx op0 = expand_normal (arg0);
9949 rtx op1 = expand_normal (arg1);
9950 enum machine_mode tmode = SImode;
9951 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9952 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9953 int cr6_form_int;
9955 if (TREE_CODE (cr6_form) != INTEGER_CST)
9957 error ("argument 1 of __builtin_altivec_predicate must be a constant");
9958 return const0_rtx;
9960 else
9961 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
9963 gcc_assert (mode0 == mode1);
9965 /* If we have invalid arguments, bail out before generating bad rtl. */
9966 if (arg0 == error_mark_node || arg1 == error_mark_node)
9967 return const0_rtx;
9969 if (target == 0
9970 || GET_MODE (target) != tmode
9971 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9972 target = gen_reg_rtx (tmode);
9974 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9975 op0 = copy_to_mode_reg (mode0, op0);
9976 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9977 op1 = copy_to_mode_reg (mode1, op1);
9979 scratch = gen_reg_rtx (mode0);
9981 pat = GEN_FCN (icode) (scratch, op0, op1);
9982 if (! pat)
9983 return 0;
9984 emit_insn (pat);
9986 /* The vec_any* and vec_all* predicates use the same opcodes for two
9987 different operations, but the bits in CR6 will be different
9988 depending on what information we want. So we have to play tricks
9989 with CR6 to get the right bits out.
9991 If you think this is disgusting, look at the specs for the
9992 AltiVec predicates. */
9994 switch (cr6_form_int)
9996 case 0:
9997 emit_insn (gen_cr6_test_for_zero (target));
9998 break;
9999 case 1:
10000 emit_insn (gen_cr6_test_for_zero_reverse (target));
10001 break;
10002 case 2:
10003 emit_insn (gen_cr6_test_for_lt (target));
10004 break;
10005 case 3:
10006 emit_insn (gen_cr6_test_for_lt_reverse (target));
10007 break;
10008 default:
10009 error ("argument 1 of __builtin_altivec_predicate is out of range");
10010 break;
10013 return target;
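/* Usage sketch of the cr6_form dispatch above, assuming the __CR6_*
   values altivec.h has traditionally used (EQ = 0, EQ_REV = 1, LT = 2,
   LT_REV = 3):

     int all_eq = __builtin_altivec_vcmpequw_p (__CR6_LT, a, b);
     int any_ne = __builtin_altivec_vcmpequw_p (__CR6_LT_REV, a, b);

   the same compare opcode serves both predicates; only the CR6 bit test
   emitted here differs.  */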
10016 static rtx
10017 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
10019 rtx pat, addr;
10020 tree arg0 = CALL_EXPR_ARG (exp, 0);
10021 tree arg1 = CALL_EXPR_ARG (exp, 1);
10022 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10023 enum machine_mode mode0 = Pmode;
10024 enum machine_mode mode1 = Pmode;
10025 rtx op0 = expand_normal (arg0);
10026 rtx op1 = expand_normal (arg1);
10028 if (icode == CODE_FOR_nothing)
10029 /* Builtin not supported on this processor. */
10030 return 0;
10032 /* If we got invalid arguments bail out before generating bad rtl. */
10033 if (arg0 == error_mark_node || arg1 == error_mark_node)
10034 return const0_rtx;
10036 if (target == 0
10037 || GET_MODE (target) != tmode
10038 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10039 target = gen_reg_rtx (tmode);
10041 op1 = copy_to_mode_reg (mode1, op1);
10043 if (op0 == const0_rtx)
10045 addr = gen_rtx_MEM (tmode, op1);
10047 else
10049 op0 = copy_to_mode_reg (mode0, op0);
10050 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
10053 pat = GEN_FCN (icode) (target, addr);
10055 if (! pat)
10056 return 0;
10057 emit_insn (pat);
10059 return target;
10062 static rtx
10063 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
10065 rtx pat, addr;
10066 tree arg0 = CALL_EXPR_ARG (exp, 0);
10067 tree arg1 = CALL_EXPR_ARG (exp, 1);
10068 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10069 enum machine_mode mode0 = Pmode;
10070 enum machine_mode mode1 = Pmode;
10071 rtx op0 = expand_normal (arg0);
10072 rtx op1 = expand_normal (arg1);
10074 if (icode == CODE_FOR_nothing)
10075 /* Builtin not supported on this processor. */
10076 return 0;
10078 /* If we got invalid arguments bail out before generating bad rtl. */
10079 if (arg0 == error_mark_node || arg1 == error_mark_node)
10080 return const0_rtx;
10082 if (target == 0
10083 || GET_MODE (target) != tmode
10084 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10085 target = gen_reg_rtx (tmode);
10087 op1 = copy_to_mode_reg (mode1, op1);
10089 if (op0 == const0_rtx)
10091 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
10093 else
10095 op0 = copy_to_mode_reg (mode0, op0);
10096 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
10099 pat = GEN_FCN (icode) (target, addr);
10101 if (! pat)
10102 return 0;
10103 emit_insn (pat);
10105 return target;
10108 static rtx
10109 spe_expand_stv_builtin (enum insn_code icode, tree exp)
10111 tree arg0 = CALL_EXPR_ARG (exp, 0);
10112 tree arg1 = CALL_EXPR_ARG (exp, 1);
10113 tree arg2 = CALL_EXPR_ARG (exp, 2);
10114 rtx op0 = expand_normal (arg0);
10115 rtx op1 = expand_normal (arg1);
10116 rtx op2 = expand_normal (arg2);
10117 rtx pat;
10118 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
10119 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
10120 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
10122 /* Invalid arguments. Bail before doing anything stoopid! */
10123 if (arg0 == error_mark_node
10124 || arg1 == error_mark_node
10125 || arg2 == error_mark_node)
10126 return const0_rtx;
10128 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
10129 op0 = copy_to_mode_reg (mode2, op0);
10130 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
10131 op1 = copy_to_mode_reg (mode0, op1);
10132 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
10133 op2 = copy_to_mode_reg (mode1, op2);
10135 pat = GEN_FCN (icode) (op1, op2, op0);
10136 if (pat)
10137 emit_insn (pat);
10138 return NULL_RTX;
10141 static rtx
10142 paired_expand_stv_builtin (enum insn_code icode, tree exp)
10144 tree arg0 = CALL_EXPR_ARG (exp, 0);
10145 tree arg1 = CALL_EXPR_ARG (exp, 1);
10146 tree arg2 = CALL_EXPR_ARG (exp, 2);
10147 rtx op0 = expand_normal (arg0);
10148 rtx op1 = expand_normal (arg1);
10149 rtx op2 = expand_normal (arg2);
10150 rtx pat, addr;
10151 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10152 enum machine_mode mode1 = Pmode;
10153 enum machine_mode mode2 = Pmode;
10155 /* Invalid arguments. Bail before doing anything stoopid! */
10156 if (arg0 == error_mark_node
10157 || arg1 == error_mark_node
10158 || arg2 == error_mark_node)
10159 return const0_rtx;
10161 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
10162 op0 = copy_to_mode_reg (tmode, op0);
10164 op2 = copy_to_mode_reg (mode2, op2);
10166 if (op1 == const0_rtx)
10168 addr = gen_rtx_MEM (tmode, op2);
10170 else
10172 op1 = copy_to_mode_reg (mode1, op1);
10173 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10176 pat = GEN_FCN (icode) (addr, op0);
10177 if (pat)
10178 emit_insn (pat);
10179 return NULL_RTX;
10182 static rtx
10183 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10185 tree arg0 = CALL_EXPR_ARG (exp, 0);
10186 tree arg1 = CALL_EXPR_ARG (exp, 1);
10187 tree arg2 = CALL_EXPR_ARG (exp, 2);
10188 rtx op0 = expand_normal (arg0);
10189 rtx op1 = expand_normal (arg1);
10190 rtx op2 = expand_normal (arg2);
10191 rtx pat, addr;
10192 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10193 enum machine_mode smode = insn_data[icode].operand[1].mode;
10194 enum machine_mode mode1 = Pmode;
10195 enum machine_mode mode2 = Pmode;
10197 /* Invalid arguments. Bail before doing anything stoopid! */
10198 if (arg0 == error_mark_node
10199 || arg1 == error_mark_node
10200 || arg2 == error_mark_node)
10201 return const0_rtx;
10203 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10204 op0 = copy_to_mode_reg (smode, op0);
10206 op2 = copy_to_mode_reg (mode2, op2);
10208 if (op1 == const0_rtx)
10210 addr = gen_rtx_MEM (tmode, op2);
10212 else
10214 op1 = copy_to_mode_reg (mode1, op1);
10215 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10218 pat = GEN_FCN (icode) (addr, op0);
10219 if (pat)
10220 emit_insn (pat);
10221 return NULL_RTX;
10224 static rtx
10225 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10227 rtx pat;
10228 tree arg0 = CALL_EXPR_ARG (exp, 0);
10229 tree arg1 = CALL_EXPR_ARG (exp, 1);
10230 tree arg2 = CALL_EXPR_ARG (exp, 2);
10231 rtx op0 = expand_normal (arg0);
10232 rtx op1 = expand_normal (arg1);
10233 rtx op2 = expand_normal (arg2);
10234 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10235 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10236 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10237 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10239 if (icode == CODE_FOR_nothing)
10240 /* Builtin not supported on this processor. */
10241 return 0;
10243 /* If we got invalid arguments bail out before generating bad rtl. */
10244 if (arg0 == error_mark_node
10245 || arg1 == error_mark_node
10246 || arg2 == error_mark_node)
10247 return const0_rtx;
10249 /* Check and prepare argument depending on the instruction code.
10251 Note that a switch statement instead of the sequence of tests
10252 would be invalid, as many of the CODE_FOR_* values could be
10253 CODE_FOR_nothing, and that would yield multiple case labels
10254 with identical values. (At runtime we would never reach here
10255 for such an icode anyway.) */
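/* In other words, with two unsupported patterns both defined as
   CODE_FOR_nothing,

     switch (icode)
       {
       case CODE_FOR_altivec_vsldoi_v4sf:   // == CODE_FOR_nothing
       case CODE_FOR_vsx_set_v2df:          // == CODE_FOR_nothing

   would be rejected as duplicate case values, whereas the if chain is
   merely dead code on such a target.  */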
10256 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10257 || icode == CODE_FOR_altivec_vsldoi_v4si
10258 || icode == CODE_FOR_altivec_vsldoi_v8hi
10259 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10261 /* Only allow 4-bit unsigned literals. */
10262 STRIP_NOPS (arg2);
10263 if (TREE_CODE (arg2) != INTEGER_CST
10264 || TREE_INT_CST_LOW (arg2) & ~0xf)
10266 error ("argument 3 must be a 4-bit unsigned literal");
10267 return const0_rtx;
10270 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10271 || icode == CODE_FOR_vsx_xxpermdi_v2di
10272 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10273 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10274 || icode == CODE_FOR_vsx_xxsldwi_v4si
10275 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10276 || icode == CODE_FOR_vsx_xxsldwi_v2di
10277 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10279 /* Only allow 2-bit unsigned literals. */
10280 STRIP_NOPS (arg2);
10281 if (TREE_CODE (arg2) != INTEGER_CST
10282 || TREE_INT_CST_LOW (arg2) & ~0x3)
10284 error ("argument 3 must be a 2-bit unsigned literal");
10285 return const0_rtx;
10288 else if (icode == CODE_FOR_vsx_set_v2df
10289 || icode == CODE_FOR_vsx_set_v2di)
10291 /* Only allow 1-bit unsigned literals. */
10292 STRIP_NOPS (arg2);
10293 if (TREE_CODE (arg2) != INTEGER_CST
10294 || TREE_INT_CST_LOW (arg2) & ~0x1)
10296 error ("argument 3 must be a 1-bit unsigned literal");
10297 return const0_rtx;
10301 if (target == 0
10302 || GET_MODE (target) != tmode
10303 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10304 target = gen_reg_rtx (tmode);
10306 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10307 op0 = copy_to_mode_reg (mode0, op0);
10308 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10309 op1 = copy_to_mode_reg (mode1, op1);
10310 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10311 op2 = copy_to_mode_reg (mode2, op2);
10313 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10314 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10315 else
10316 pat = GEN_FCN (icode) (target, op0, op1, op2);
10317 if (! pat)
10318 return 0;
10319 emit_insn (pat);
10321 return target;
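/* E.g. vec_sld, which expands through the altivec_vsldoi_* patterns
   above, must receive a literal shift that fits the 4-bit field:

     vector float r = vec_sld (a, b, 3);    // ok, 0..15
     vector float s = vec_sld (a, b, 16);   // "argument 3 must be a
                                            //  4-bit unsigned literal"
  */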
10324 /* Expand the lvx builtins. */
10325 static rtx
10326 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10328 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10329 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10330 tree arg0;
10331 enum machine_mode tmode, mode0;
10332 rtx pat, op0;
10333 enum insn_code icode;
10335 switch (fcode)
10337 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10338 icode = CODE_FOR_vector_altivec_load_v16qi;
10339 break;
10340 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10341 icode = CODE_FOR_vector_altivec_load_v8hi;
10342 break;
10343 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10344 icode = CODE_FOR_vector_altivec_load_v4si;
10345 break;
10346 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10347 icode = CODE_FOR_vector_altivec_load_v4sf;
10348 break;
10349 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10350 icode = CODE_FOR_vector_altivec_load_v2df;
10351 break;
10352 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10353 icode = CODE_FOR_vector_altivec_load_v2di;
10354 break;
10355 default:
10356 *expandedp = false;
10357 return NULL_RTX;
10360 *expandedp = true;
10362 arg0 = CALL_EXPR_ARG (exp, 0);
10363 op0 = expand_normal (arg0);
10364 tmode = insn_data[icode].operand[0].mode;
10365 mode0 = insn_data[icode].operand[1].mode;
10367 if (target == 0
10368 || GET_MODE (target) != tmode
10369 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10370 target = gen_reg_rtx (tmode);
10372 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10373 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10375 pat = GEN_FCN (icode) (target, op0);
10376 if (! pat)
10377 return 0;
10378 emit_insn (pat);
10379 return target;
10382 /* Expand the stvx builtins. */
10383 static rtx
10384 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10385 bool *expandedp)
10387 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10388 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10389 tree arg0, arg1;
10390 enum machine_mode mode0, mode1;
10391 rtx pat, op0, op1;
10392 enum insn_code icode;
10394 switch (fcode)
10396 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10397 icode = CODE_FOR_vector_altivec_store_v16qi;
10398 break;
10399 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10400 icode = CODE_FOR_vector_altivec_store_v8hi;
10401 break;
10402 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10403 icode = CODE_FOR_vector_altivec_store_v4si;
10404 break;
10405 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10406 icode = CODE_FOR_vector_altivec_store_v4sf;
10407 break;
10408 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10409 icode = CODE_FOR_vector_altivec_store_v2df;
10410 break;
10411 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10412 icode = CODE_FOR_vector_altivec_store_v2di;
10413 break;
10414 default:
10415 *expandedp = false;
10416 return NULL_RTX;
10419 arg0 = CALL_EXPR_ARG (exp, 0);
10420 arg1 = CALL_EXPR_ARG (exp, 1);
10421 op0 = expand_normal (arg0);
10422 op1 = expand_normal (arg1);
10423 mode0 = insn_data[icode].operand[0].mode;
10424 mode1 = insn_data[icode].operand[1].mode;
10426 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10427 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10428 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10429 op1 = copy_to_mode_reg (mode1, op1);
10431 pat = GEN_FCN (icode) (op0, op1);
10432 if (pat)
10433 emit_insn (pat);
10435 *expandedp = true;
10436 return NULL_RTX;
10439 /* Expand the dst builtins. */
10440 static rtx
10441 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10442 bool *expandedp)
10444 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10445 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10446 tree arg0, arg1, arg2;
10447 enum machine_mode mode0, mode1;
10448 rtx pat, op0, op1, op2;
10449 const struct builtin_description *d;
10450 size_t i;
10452 *expandedp = false;
10454 /* Handle DST variants. */
10455 d = bdesc_dst;
10456 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10457 if (d->code == fcode)
10459 arg0 = CALL_EXPR_ARG (exp, 0);
10460 arg1 = CALL_EXPR_ARG (exp, 1);
10461 arg2 = CALL_EXPR_ARG (exp, 2);
10462 op0 = expand_normal (arg0);
10463 op1 = expand_normal (arg1);
10464 op2 = expand_normal (arg2);
10465 mode0 = insn_data[d->icode].operand[0].mode;
10466 mode1 = insn_data[d->icode].operand[1].mode;
10468 /* Invalid arguments, bail out before generating bad rtl. */
10469 if (arg0 == error_mark_node
10470 || arg1 == error_mark_node
10471 || arg2 == error_mark_node)
10472 return const0_rtx;
10474 *expandedp = true;
10475 STRIP_NOPS (arg2);
10476 if (TREE_CODE (arg2) != INTEGER_CST
10477 || TREE_INT_CST_LOW (arg2) & ~0x3)
10479 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10480 return const0_rtx;
10483 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10484 op0 = copy_to_mode_reg (Pmode, op0);
10485 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10486 op1 = copy_to_mode_reg (mode1, op1);
10488 pat = GEN_FCN (d->icode) (op0, op1, op2);
10489 if (pat != 0)
10490 emit_insn (pat);
10492 return NULL_RTX;
10495 return NULL_RTX;
10498 /* Expand vec_init builtin. */
10499 static rtx
10500 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10502 enum machine_mode tmode = TYPE_MODE (type);
10503 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10504 int i, n_elt = GET_MODE_NUNITS (tmode);
10505 rtvec v = rtvec_alloc (n_elt);
10507 gcc_assert (VECTOR_MODE_P (tmode));
10508 gcc_assert (n_elt == call_expr_nargs (exp));
10510 for (i = 0; i < n_elt; ++i)
10512 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10513 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10516 if (!target || !register_operand (target, tmode))
10517 target = gen_reg_rtx (tmode);
10519 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10520 return target;
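/* Reached for constructor-style initializations; e.g. (sketch)

     vector int v = (vector int){ a, b, c, d };

   arrives here as a 4-argument VEC_INIT builtin call, and each scalar
   becomes one element of the PARALLEL handed to
   rs6000_expand_vector_init.  */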
10523 /* Return the integer constant in ARG. Constrain it to be in the range
10524 of the subparts of VEC_TYPE; issue an error if not. */
10526 static int
10527 get_element_number (tree vec_type, tree arg)
10529 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10531 if (!host_integerp (arg, 1)
10532 || (elt = tree_low_cst (arg, 1), elt > max))
10534 error ("selector must be an integer constant in the range 0..%wi", max);
10535 return 0;
10538 return elt;
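/* So for a V4SI vector TYPE_VECTOR_SUBPARTS is 4 and only selectors 0..3
   pass; e.g. (sketch) vec_extract (v, 5) is diagnosed here at compile
   time instead of silently indexing out of range.  */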
10541 /* Expand vec_set builtin. */
10542 static rtx
10543 altivec_expand_vec_set_builtin (tree exp)
10545 enum machine_mode tmode, mode1;
10546 tree arg0, arg1, arg2;
10547 int elt;
10548 rtx op0, op1;
10550 arg0 = CALL_EXPR_ARG (exp, 0);
10551 arg1 = CALL_EXPR_ARG (exp, 1);
10552 arg2 = CALL_EXPR_ARG (exp, 2);
10554 tmode = TYPE_MODE (TREE_TYPE (arg0));
10555 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10556 gcc_assert (VECTOR_MODE_P (tmode));
10558 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10559 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10560 elt = get_element_number (TREE_TYPE (arg0), arg2);
10562 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10563 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10565 op0 = force_reg (tmode, op0);
10566 op1 = force_reg (mode1, op1);
10568 rs6000_expand_vector_set (op0, op1, elt);
10570 return op0;
10573 /* Expand vec_ext builtin. */
10574 static rtx
10575 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10577 enum machine_mode tmode, mode0;
10578 tree arg0, arg1;
10579 int elt;
10580 rtx op0;
10582 arg0 = CALL_EXPR_ARG (exp, 0);
10583 arg1 = CALL_EXPR_ARG (exp, 1);
10585 op0 = expand_normal (arg0);
10586 elt = get_element_number (TREE_TYPE (arg0), arg1);
10588 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10589 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10590 gcc_assert (VECTOR_MODE_P (mode0));
10592 op0 = force_reg (mode0, op0);
10594 if (optimize || !target || !register_operand (target, tmode))
10595 target = gen_reg_rtx (tmode);
10597 rs6000_expand_vector_extract (target, op0, elt);
10599 return target;
10602 /* Expand the builtin in EXP and store the result in TARGET. Store
10603 true in *EXPANDEDP if we found a builtin to expand. */
10604 static rtx
10605 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10607 const struct builtin_description *d;
10608 size_t i;
10609 enum insn_code icode;
10610 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10611 tree arg0;
10612 rtx op0, pat;
10613 enum machine_mode tmode, mode0;
10614 enum rs6000_builtins fcode
10615 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10617 if (rs6000_overloaded_builtin_p (fcode))
10619 *expandedp = true;
10620 error ("unresolved overload for Altivec builtin %qF", fndecl);
10622 /* Given it is invalid, just generate a normal call. */
10623 return expand_call (exp, target, false);
10626 target = altivec_expand_ld_builtin (exp, target, expandedp);
10627 if (*expandedp)
10628 return target;
10630 target = altivec_expand_st_builtin (exp, target, expandedp);
10631 if (*expandedp)
10632 return target;
10634 target = altivec_expand_dst_builtin (exp, target, expandedp);
10635 if (*expandedp)
10636 return target;
10638 *expandedp = true;
10640 switch (fcode)
10642 case ALTIVEC_BUILTIN_STVX:
10643 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
10644 case ALTIVEC_BUILTIN_STVEBX:
10645 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10646 case ALTIVEC_BUILTIN_STVEHX:
10647 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10648 case ALTIVEC_BUILTIN_STVEWX:
10649 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10650 case ALTIVEC_BUILTIN_STVXL:
10651 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10653 case ALTIVEC_BUILTIN_STVLX:
10654 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10655 case ALTIVEC_BUILTIN_STVLXL:
10656 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10657 case ALTIVEC_BUILTIN_STVRX:
10658 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10659 case ALTIVEC_BUILTIN_STVRXL:
10660 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10662 case VSX_BUILTIN_STXVD2X_V2DF:
10663 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
10664 case VSX_BUILTIN_STXVD2X_V2DI:
10665 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
10666 case VSX_BUILTIN_STXVW4X_V4SF:
10667 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
10668 case VSX_BUILTIN_STXVW4X_V4SI:
10669 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
10670 case VSX_BUILTIN_STXVW4X_V8HI:
10671 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
10672 case VSX_BUILTIN_STXVW4X_V16QI:
10673 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
10675 case ALTIVEC_BUILTIN_MFVSCR:
10676 icode = CODE_FOR_altivec_mfvscr;
10677 tmode = insn_data[icode].operand[0].mode;
10679 if (target == 0
10680 || GET_MODE (target) != tmode
10681 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10682 target = gen_reg_rtx (tmode);
10684 pat = GEN_FCN (icode) (target);
10685 if (! pat)
10686 return 0;
10687 emit_insn (pat);
10688 return target;
10690 case ALTIVEC_BUILTIN_MTVSCR:
10691 icode = CODE_FOR_altivec_mtvscr;
10692 arg0 = CALL_EXPR_ARG (exp, 0);
10693 op0 = expand_normal (arg0);
10694 mode0 = insn_data[icode].operand[0].mode;
10696 /* If we got invalid arguments bail out before generating bad rtl. */
10697 if (arg0 == error_mark_node)
10698 return const0_rtx;
10700 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10701 op0 = copy_to_mode_reg (mode0, op0);
10703 pat = GEN_FCN (icode) (op0);
10704 if (pat)
10705 emit_insn (pat);
10706 return NULL_RTX;
10708 case ALTIVEC_BUILTIN_DSSALL:
10709 emit_insn (gen_altivec_dssall ());
10710 return NULL_RTX;
10712 case ALTIVEC_BUILTIN_DSS:
10713 icode = CODE_FOR_altivec_dss;
10714 arg0 = CALL_EXPR_ARG (exp, 0);
10715 STRIP_NOPS (arg0);
10716 op0 = expand_normal (arg0);
10717 mode0 = insn_data[icode].operand[0].mode;
10719 /* If we got invalid arguments bail out before generating bad rtl. */
10720 if (arg0 == error_mark_node)
10721 return const0_rtx;
10723 if (TREE_CODE (arg0) != INTEGER_CST
10724 || TREE_INT_CST_LOW (arg0) & ~0x3)
10726 error ("argument to dss must be a 2-bit unsigned literal");
10727 return const0_rtx;
10730 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10731 op0 = copy_to_mode_reg (mode0, op0);
10733 emit_insn (gen_altivec_dss (op0));
10734 return NULL_RTX;
10736 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10737 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10738 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10739 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10740 case VSX_BUILTIN_VEC_INIT_V2DF:
10741 case VSX_BUILTIN_VEC_INIT_V2DI:
10742 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10744 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10745 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10746 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10747 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10748 case VSX_BUILTIN_VEC_SET_V2DF:
10749 case VSX_BUILTIN_VEC_SET_V2DI:
10750 return altivec_expand_vec_set_builtin (exp);
10752 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10753 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10754 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10755 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10756 case VSX_BUILTIN_VEC_EXT_V2DF:
10757 case VSX_BUILTIN_VEC_EXT_V2DI:
10758 return altivec_expand_vec_ext_builtin (exp, target);
10760 default:
10761 break;
10765 /* Expand abs* operations. */
10766 d = bdesc_abs;
10767 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10768 if (d->code == fcode)
10769 return altivec_expand_abs_builtin (d->icode, exp, target);
10771 /* Expand the AltiVec predicates. */
10772 d = bdesc_altivec_preds;
10773 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
10774 if (d->code == fcode)
10775 return altivec_expand_predicate_builtin (d->icode, exp, target);
10777 /* LV* are funky. We initialized them differently. */
10778 switch (fcode)
10780 case ALTIVEC_BUILTIN_LVSL:
10781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10782 exp, target, false);
10783 case ALTIVEC_BUILTIN_LVSR:
10784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10785 exp, target, false);
10786 case ALTIVEC_BUILTIN_LVEBX:
10787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10788 exp, target, false);
10789 case ALTIVEC_BUILTIN_LVEHX:
10790 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10791 exp, target, false);
10792 case ALTIVEC_BUILTIN_LVEWX:
10793 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10794 exp, target, false);
10795 case ALTIVEC_BUILTIN_LVXL:
10796 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10797 exp, target, false);
10798 case ALTIVEC_BUILTIN_LVX:
10799 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
10800 exp, target, false);
10801 case ALTIVEC_BUILTIN_LVLX:
10802 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10803 exp, target, true);
10804 case ALTIVEC_BUILTIN_LVLXL:
10805 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10806 exp, target, true);
10807 case ALTIVEC_BUILTIN_LVRX:
10808 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10809 exp, target, true);
10810 case ALTIVEC_BUILTIN_LVRXL:
10811 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10812 exp, target, true);
10813 case VSX_BUILTIN_LXVD2X_V2DF:
10814 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
10815 exp, target, false);
10816 case VSX_BUILTIN_LXVD2X_V2DI:
10817 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
10818 exp, target, false);
10819 case VSX_BUILTIN_LXVW4X_V4SF:
10820 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
10821 exp, target, false);
10822 case VSX_BUILTIN_LXVW4X_V4SI:
10823 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
10824 exp, target, false);
10825 case VSX_BUILTIN_LXVW4X_V8HI:
10826 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
10827 exp, target, false);
10828 case VSX_BUILTIN_LXVW4X_V16QI:
10829 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
10830 exp, target, false);
10831 break;
10832 default:
10833 break;
10837 *expandedp = false;
10838 return NULL_RTX;
10841 /* Expand the builtin in EXP and store the result in TARGET. Store
10842 true in *EXPANDEDP if we found a builtin to expand. */
10843 static rtx
10844 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10846 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10847 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10848 const struct builtin_description *d;
10849 size_t i;
10851 *expandedp = true;
10853 switch (fcode)
10855 case PAIRED_BUILTIN_STX:
10856 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10857 case PAIRED_BUILTIN_LX:
10858 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10859 default:
10860 break;
10864 /* Expand the paired predicates. */
10865 d = bdesc_paired_preds;
10866 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10867 if (d->code == fcode)
10868 return paired_expand_predicate_builtin (d->icode, exp, target);
10870 *expandedp = false;
10871 return NULL_RTX;
10874 /* Binops that need to be initialized manually, but can be expanded
10875 automagically by rs6000_expand_binop_builtin. */
10876 static const struct builtin_description bdesc_2arg_spe[] =
10878 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
10879 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
10880 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
10881 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
10882 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
10883 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
10884 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
10885 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
10886 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
10887 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
10888 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
10889 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
10890 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
10891 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
10892 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
10893 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
10894 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
10895 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
10896 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
10897 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
10898 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
10899 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
10902 /* Expand the builtin in EXP and store the result in TARGET. Store
10903 true in *EXPANDEDP if we found a builtin to expand.
10905 This expands the SPE builtins that are not simple unary and binary
10906 operations. */
10907 static rtx
10908 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
10910 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10911 tree arg1, arg0;
10912 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10913 enum insn_code icode;
10914 enum machine_mode tmode, mode0;
10915 rtx pat, op0;
10916 const struct builtin_description *d;
10917 size_t i;
10919 *expandedp = true;
10921 /* Syntax check for a 5-bit unsigned immediate. */
10922 switch (fcode)
10924 case SPE_BUILTIN_EVSTDD:
10925 case SPE_BUILTIN_EVSTDH:
10926 case SPE_BUILTIN_EVSTDW:
10927 case SPE_BUILTIN_EVSTWHE:
10928 case SPE_BUILTIN_EVSTWHO:
10929 case SPE_BUILTIN_EVSTWWE:
10930 case SPE_BUILTIN_EVSTWWO:
10931 arg1 = CALL_EXPR_ARG (exp, 2);
10932 if (TREE_CODE (arg1) != INTEGER_CST
10933 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10935 error ("argument 2 must be a 5-bit unsigned literal");
10936 return const0_rtx;
10938 break;
10939 default:
10940 break;
10943 /* The evsplat*i instructions are not quite generic. */
10944 switch (fcode)
10946 case SPE_BUILTIN_EVSPLATFI:
10947 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
10948 exp, target);
10949 case SPE_BUILTIN_EVSPLATI:
10950 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
10951 exp, target);
10952 default:
10953 break;
10956 d = bdesc_2arg_spe;
10957 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
10958 if (d->code == fcode)
10959 return rs6000_expand_binop_builtin (d->icode, exp, target);
10961 d = bdesc_spe_predicates;
10962 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
10963 if (d->code == fcode)
10964 return spe_expand_predicate_builtin (d->icode, exp, target);
10966 d = bdesc_spe_evsel;
10967 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
10968 if (d->code == fcode)
10969 return spe_expand_evsel_builtin (d->icode, exp, target);
10971 switch (fcode)
10973 case SPE_BUILTIN_EVSTDDX:
10974 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
10975 case SPE_BUILTIN_EVSTDHX:
10976 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
10977 case SPE_BUILTIN_EVSTDWX:
10978 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
10979 case SPE_BUILTIN_EVSTWHEX:
10980 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
10981 case SPE_BUILTIN_EVSTWHOX:
10982 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
10983 case SPE_BUILTIN_EVSTWWEX:
10984 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
10985 case SPE_BUILTIN_EVSTWWOX:
10986 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
10987 case SPE_BUILTIN_EVSTDD:
10988 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
10989 case SPE_BUILTIN_EVSTDH:
10990 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
10991 case SPE_BUILTIN_EVSTDW:
10992 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
10993 case SPE_BUILTIN_EVSTWHE:
10994 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
10995 case SPE_BUILTIN_EVSTWHO:
10996 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
10997 case SPE_BUILTIN_EVSTWWE:
10998 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
10999 case SPE_BUILTIN_EVSTWWO:
11000 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
11001 case SPE_BUILTIN_MFSPEFSCR:
11002 icode = CODE_FOR_spe_mfspefscr;
11003 tmode = insn_data[icode].operand[0].mode;
11005 if (target == 0
11006 || GET_MODE (target) != tmode
11007 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11008 target = gen_reg_rtx (tmode);
11010 pat = GEN_FCN (icode) (target);
11011 if (! pat)
11012 return 0;
11013 emit_insn (pat);
11014 return target;
11015 case SPE_BUILTIN_MTSPEFSCR:
11016 icode = CODE_FOR_spe_mtspefscr;
11017 arg0 = CALL_EXPR_ARG (exp, 0);
11018 op0 = expand_normal (arg0);
11019 mode0 = insn_data[icode].operand[0].mode;
11021 if (arg0 == error_mark_node)
11022 return const0_rtx;
11024 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11025 op0 = copy_to_mode_reg (mode0, op0);
11027 pat = GEN_FCN (icode) (op0);
11028 if (pat)
11029 emit_insn (pat);
11030 return NULL_RTX;
11031 default:
11032 break;
11035 *expandedp = false;
11036 return NULL_RTX;
11039 static rtx
11040 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11042 rtx pat, scratch, tmp;
11043 tree form = CALL_EXPR_ARG (exp, 0);
11044 tree arg0 = CALL_EXPR_ARG (exp, 1);
11045 tree arg1 = CALL_EXPR_ARG (exp, 2);
11046 rtx op0 = expand_normal (arg0);
11047 rtx op1 = expand_normal (arg1);
11048 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11049 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11050 int form_int;
11051 enum rtx_code code;
11053 if (TREE_CODE (form) != INTEGER_CST)
11055 error ("argument 1 of __builtin_paired_predicate must be a constant");
11056 return const0_rtx;
11058 else
11059 form_int = TREE_INT_CST_LOW (form);
11061 gcc_assert (mode0 == mode1);
11063 if (arg0 == error_mark_node || arg1 == error_mark_node)
11064 return const0_rtx;
11066 if (target == 0
11067 || GET_MODE (target) != SImode
11068 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
11069 target = gen_reg_rtx (SImode);
11070 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
11071 op0 = copy_to_mode_reg (mode0, op0);
11072 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
11073 op1 = copy_to_mode_reg (mode1, op1);
11075 scratch = gen_reg_rtx (CCFPmode);
11077 pat = GEN_FCN (icode) (scratch, op0, op1);
11078 if (!pat)
11079 return const0_rtx;
11081 emit_insn (pat);
11083 switch (form_int)
11085 /* LT bit. */
11086 case 0:
11087 code = LT;
11088 break;
11089 /* GT bit. */
11090 case 1:
11091 code = GT;
11092 break;
11093 /* EQ bit. */
11094 case 2:
11095 code = EQ;
11096 break;
11097 /* UN bit. */
11098 case 3:
11099 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11100 return target;
11101 default:
11102 error ("argument 1 of __builtin_paired_predicate is out of range");
11103 return const0_rtx;
11106 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11107 emit_move_insn (target, tmp);
11108 return target;
11111 static rtx
11112 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11114 rtx pat, scratch, tmp;
11115 tree form = CALL_EXPR_ARG (exp, 0);
11116 tree arg0 = CALL_EXPR_ARG (exp, 1);
11117 tree arg1 = CALL_EXPR_ARG (exp, 2);
11118 rtx op0 = expand_normal (arg0);
11119 rtx op1 = expand_normal (arg1);
11120 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11121 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11122 int form_int;
11123 enum rtx_code code;
11125 if (TREE_CODE (form) != INTEGER_CST)
11127 error ("argument 1 of __builtin_spe_predicate must be a constant");
11128 return const0_rtx;
11130 else
11131 form_int = TREE_INT_CST_LOW (form);
11133 gcc_assert (mode0 == mode1);
11135 if (arg0 == error_mark_node || arg1 == error_mark_node)
11136 return const0_rtx;
11138 if (target == 0
11139 || GET_MODE (target) != SImode
11140 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
11141 target = gen_reg_rtx (SImode);
11143 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11144 op0 = copy_to_mode_reg (mode0, op0);
11145 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11146 op1 = copy_to_mode_reg (mode1, op1);
11148 scratch = gen_reg_rtx (CCmode);
11150 pat = GEN_FCN (icode) (scratch, op0, op1);
11151 if (! pat)
11152 return const0_rtx;
11153 emit_insn (pat);
11155 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
11156 _lower_. We use one compare, but look in different bits of the
11157 CR for each variant.
11159 There are 2 elements in each SPE simd type (upper/lower). The CR
11160 bits are set as follows:
11162 BIT 0 | BIT 1 | BIT 2 | BIT 3
11163 U | L | (U | L) | (U & L)
11165 So, for an "all" relationship, BIT 3 would be set.
11166 For an "any" relationship, BIT 2 would be set. Etc.
11168 Following traditional nomenclature, these bits map to:
11170 BIT 0 | BIT 1 | BIT 2 | BIT 3
11171 LT | GT | EQ | OV
11173 Later, we will generate rtl to look in the OV/EQ/LT/GT bits.
11176 switch (form_int)
11178 /* All variant. OV bit. */
11179 case 0:
11180 /* We need to get to the OV bit, which is the ORDERED bit. We
11181 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11182 that's ugly and will make validate_condition_mode die.
11183 So let's just use another pattern. */
11184 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11185 return target;
11186 /* Any variant. EQ bit. */
11187 case 1:
11188 code = EQ;
11189 break;
11190 /* Upper variant. LT bit. */
11191 case 2:
11192 code = LT;
11193 break;
11194 /* Lower variant. GT bit. */
11195 case 3:
11196 code = GT;
11197 break;
11198 default:
11199 error ("argument 1 of __builtin_spe_predicate is out of range");
11200 return const0_rtx;
11203 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11204 emit_move_insn (target, tmp);
11206 return target;
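/* A sketch of the mapping implemented above, using one of the SPE
   predicates registered in bdesc_spe_predicates (the concrete builtin
   name is illustrative):

     __builtin_spe_evcmpgts (0, a, b)   all:   both elements of A > B
     __builtin_spe_evcmpgts (1, a, b)   any:   at least one element
     __builtin_spe_evcmpgts (2, a, b)   upper: upper element only
     __builtin_spe_evcmpgts (3, a, b)   lower: lower element only

   Each form emits the same compare; only the CR bit read back differs.  */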
11209 /* The evsel builtins look like this:
11211 e = __builtin_spe_evsel_OP (a, b, c, d);
11213 and work like this:
11215 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11216 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
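/* For example, with V2SI element values and a greater-than variant from
   bdesc_spe_evsel (the builtin name here is illustrative):

     a = {5, 1}, b = {3, 4}, c = {10, 20}, d = {30, 40}
     e = __builtin_spe_evsel_gts (a, b, c, d);
       e[upper] = (5 > 3) ? 10 : 30 = 10
       e[lower] = (1 > 4) ? 20 : 40 = 40  */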
11219 static rtx
11220 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11222 rtx pat, scratch;
11223 tree arg0 = CALL_EXPR_ARG (exp, 0);
11224 tree arg1 = CALL_EXPR_ARG (exp, 1);
11225 tree arg2 = CALL_EXPR_ARG (exp, 2);
11226 tree arg3 = CALL_EXPR_ARG (exp, 3);
11227 rtx op0 = expand_normal (arg0);
11228 rtx op1 = expand_normal (arg1);
11229 rtx op2 = expand_normal (arg2);
11230 rtx op3 = expand_normal (arg3);
11231 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11232 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11234 gcc_assert (mode0 == mode1);
11236 if (arg0 == error_mark_node || arg1 == error_mark_node
11237 || arg2 == error_mark_node || arg3 == error_mark_node)
11238 return const0_rtx;
11240 if (target == 0
11241 || GET_MODE (target) != mode0
11242 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11243 target = gen_reg_rtx (mode0);
11245 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11246 op0 = copy_to_mode_reg (mode0, op0);
11247 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11248 op1 = copy_to_mode_reg (mode0, op1);
11249 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11250 op2 = copy_to_mode_reg (mode0, op2);
11251 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11252 op3 = copy_to_mode_reg (mode0, op3);
11254 /* Generate the compare. */
11255 scratch = gen_reg_rtx (CCmode);
11256 pat = GEN_FCN (icode) (scratch, op0, op1);
11257 if (! pat)
11258 return const0_rtx;
11259 emit_insn (pat);
11261 if (mode0 == V2SImode)
11262 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11263 else
11264 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11266 return target;
11269 /* Report an error for a builtin function that is called without the
11270 appropriate target options being set. */
11272 static void
11273 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11275 size_t uns_fncode = (size_t)fncode;
11276 const char *name = rs6000_builtin_info[uns_fncode].name;
11277 unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;
11279 gcc_assert (name != NULL);
11280 if ((fnmask & RS6000_BTM_CELL) != 0)
11281 error ("Builtin function %s is only valid for the cell processor", name);
11282 else if ((fnmask & RS6000_BTM_VSX) != 0)
11283 error ("Builtin function %s requires the -mvsx option", name);
11284 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11285 error ("Builtin function %s requires the -maltivec option", name);
11286 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11287 error ("Builtin function %s requires the -mpaired option", name);
11288 else if ((fnmask & RS6000_BTM_SPE) != 0)
11289 error ("Builtin function %s requires the -mspe option", name);
11290 else
11291 error ("Builtin function %s is not supported with the current options",
11292 name);
11295 /* Expand an expression EXP that calls a built-in function,
11296 with result going to TARGET if that's convenient
11297 (and in mode MODE if that's convenient).
11298 SUBTARGET may be used as the target for computing one of EXP's operands.
11299 IGNORE is nonzero if the value is to be ignored. */
11301 static rtx
11302 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11303 enum machine_mode mode ATTRIBUTE_UNUSED,
11304 int ignore ATTRIBUTE_UNUSED)
11306 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11307 enum rs6000_builtins fcode
11308 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11309 size_t uns_fcode = (size_t)fcode;
11310 const struct builtin_description *d;
11311 size_t i;
11312 rtx ret;
11313 bool success;
11314 unsigned mask = rs6000_builtin_info[uns_fcode].mask;
11315 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
11317 if (TARGET_DEBUG_BUILTIN)
11319 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11320 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11321 const char *name2 = ((icode != CODE_FOR_nothing)
11322 ? get_insn_name ((int)icode)
11323 : "nothing");
11324 const char *name3;
11326 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11328 default: name3 = "unknown"; break;
11329 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11330 case RS6000_BTC_UNARY: name3 = "unary"; break;
11331 case RS6000_BTC_BINARY: name3 = "binary"; break;
11332 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11333 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11334 case RS6000_BTC_ABS: name3 = "abs"; break;
11335 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11336 case RS6000_BTC_DST: name3 = "dst"; break;
11340 fprintf (stderr,
11341 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11342 (name1) ? name1 : "---", fcode,
11343 (name2) ? name2 : "---", (int)icode,
11344 name3,
11345 func_valid_p ? "" : ", not valid");
11348 if (!func_valid_p)
11350 rs6000_invalid_builtin (fcode);
11352 /* Given it is invalid, just generate a normal call. */
11353 return expand_call (exp, target, ignore);
11356 switch (fcode)
11358 case RS6000_BUILTIN_RECIP:
11359 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11361 case RS6000_BUILTIN_RECIPF:
11362 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11364 case RS6000_BUILTIN_RSQRTF:
11365 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11367 case RS6000_BUILTIN_RSQRT:
11368 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11370 case POWER7_BUILTIN_BPERMD:
11371 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11372 ? CODE_FOR_bpermd_di
11373 : CODE_FOR_bpermd_si), exp, target);
11375 case RS6000_BUILTIN_GET_TB:
11376 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
11377 target);
11379 case RS6000_BUILTIN_MFTB:
11380 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
11381 ? CODE_FOR_rs6000_mftb_di
11382 : CODE_FOR_rs6000_mftb_si),
11383 target);
11385 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11386 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11388 int icode = (int) CODE_FOR_altivec_lvsr;
11389 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11390 enum machine_mode mode = insn_data[icode].operand[1].mode;
11391 tree arg;
11392 rtx op, addr, pat;
11394 gcc_assert (TARGET_ALTIVEC);
11396 arg = CALL_EXPR_ARG (exp, 0);
11397 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11398 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11399 addr = memory_address (mode, op);
11400 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11401 op = addr;
11402 else
11404 /* For the load case we need to negate the address. */
11405 op = gen_reg_rtx (GET_MODE (addr));
11406 emit_insn (gen_rtx_SET (VOIDmode, op,
11407 gen_rtx_NEG (GET_MODE (addr), addr)));
11409 op = gen_rtx_MEM (mode, op);
11411 if (target == 0
11412 || GET_MODE (target) != tmode
11413 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11414 target = gen_reg_rtx (tmode);
11416 /*pat = gen_altivec_lvsr (target, op);*/
11417 pat = GEN_FCN (icode) (target, op);
11418 if (!pat)
11419 return 0;
11420 emit_insn (pat);
11422 return target;
11425 case ALTIVEC_BUILTIN_VCFUX:
11426 case ALTIVEC_BUILTIN_VCFSX:
11427 case ALTIVEC_BUILTIN_VCTUXS:
11428 case ALTIVEC_BUILTIN_VCTSXS:
11429 /* FIXME: There's got to be a nicer way to handle this case than
11430 constructing a new CALL_EXPR. */
11431 if (call_expr_nargs (exp) == 1)
11433 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11434 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11436 break;
11438 default:
11439 break;
11442 if (TARGET_ALTIVEC)
11444 ret = altivec_expand_builtin (exp, target, &success);
11446 if (success)
11447 return ret;
11449 if (TARGET_SPE)
11451 ret = spe_expand_builtin (exp, target, &success);
11453 if (success)
11454 return ret;
11456 if (TARGET_PAIRED_FLOAT)
11458 ret = paired_expand_builtin (exp, target, &success);
11460 if (success)
11461 return ret;
11464 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11466 /* Handle simple unary operations. */
11467 d = bdesc_1arg;
11468 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11469 if (d->code == fcode)
11470 return rs6000_expand_unop_builtin (d->icode, exp, target);
11472 /* Handle simple binary operations. */
11473 d = bdesc_2arg;
11474 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11475 if (d->code == fcode)
11476 return rs6000_expand_binop_builtin (d->icode, exp, target);
11478 /* Handle simple ternary operations. */
11479 d = bdesc_3arg;
11480 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11481 if (d->code == fcode)
11482 return rs6000_expand_ternop_builtin (d->icode, exp, target);
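  /* Every builtin routed here should have matched one of the expanders
     or operand tables above; falling through means rs6000_builtin_info
     and the expanders are out of sync.  */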
11484 gcc_unreachable ();
11487 static void
11488 rs6000_init_builtins (void)
11490 tree tdecl;
11491 tree ftype;
11492 enum machine_mode mode;
11494 if (TARGET_DEBUG_BUILTIN)
11495 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11496 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11497 (TARGET_SPE) ? ", spe" : "",
11498 (TARGET_ALTIVEC) ? ", altivec" : "",
11499 (TARGET_VSX) ? ", vsx" : "");
11501 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11502 V2SF_type_node = build_vector_type (float_type_node, 2);
11503 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11504 V2DF_type_node = build_vector_type (double_type_node, 2);
11505 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11506 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11507 V4SF_type_node = build_vector_type (float_type_node, 4);
11508 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11509 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11511 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11512 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11513 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11514 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11516 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11517 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11518 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11519 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11521 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11522 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11523 'vector unsigned short'. */
11525 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11526 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11527 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11528 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11529 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11531 long_integer_type_internal_node = long_integer_type_node;
11532 long_unsigned_type_internal_node = long_unsigned_type_node;
11533 long_long_integer_type_internal_node = long_long_integer_type_node;
11534 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11535 intQI_type_internal_node = intQI_type_node;
11536 uintQI_type_internal_node = unsigned_intQI_type_node;
11537 intHI_type_internal_node = intHI_type_node;
11538 uintHI_type_internal_node = unsigned_intHI_type_node;
11539 intSI_type_internal_node = intSI_type_node;
11540 uintSI_type_internal_node = unsigned_intSI_type_node;
11541 intDI_type_internal_node = intDI_type_node;
11542 uintDI_type_internal_node = unsigned_intDI_type_node;
11543 float_type_internal_node = float_type_node;
11544 double_type_internal_node = double_type_node;
11545 void_type_internal_node = void_type_node;
11547 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11548 tree type node. */
11549 builtin_mode_to_type[QImode][0] = integer_type_node;
11550 builtin_mode_to_type[HImode][0] = integer_type_node;
11551 builtin_mode_to_type[SImode][0] = intSI_type_node;
11552 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11553 builtin_mode_to_type[DImode][0] = intDI_type_node;
11554 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11555 builtin_mode_to_type[SFmode][0] = float_type_node;
11556 builtin_mode_to_type[DFmode][0] = double_type_node;
11557 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11558 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11559 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11560 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11561 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11562 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11563 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11564 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11565 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11566 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11567 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11568 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11569 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
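  /* The second index selects signedness where it matters; for example,
     builtin_mode_to_type[V4SImode][0] is the "vector signed int" node
     while builtin_mode_to_type[V4SImode][1] is "vector unsigned int".
     Modes without an unsigned variant (e.g. SFmode) leave index 1 null,
     which builtin_function_type below handles by falling back to
     index 0.  */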
11571 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11572 TYPE_NAME (bool_char_type_node) = tdecl;
11574 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11575 TYPE_NAME (bool_short_type_node) = tdecl;
11577 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11578 TYPE_NAME (bool_int_type_node) = tdecl;
11580 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11581 TYPE_NAME (pixel_type_node) = tdecl;
11583 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11584 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11585 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11586 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11587 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11589 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11590 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11592 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11593 TYPE_NAME (V16QI_type_node) = tdecl;
11595 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11596 TYPE_NAME (bool_V16QI_type_node) = tdecl;
11598 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
11599 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11601 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
11602 TYPE_NAME (V8HI_type_node) = tdecl;
11604 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
11605 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11607 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
11608 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11610 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
11611 TYPE_NAME (V4SI_type_node) = tdecl;
11613 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
11614 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11616 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
11617 TYPE_NAME (V4SF_type_node) = tdecl;
11619 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
11620 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11622 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
11623 TYPE_NAME (V2DF_type_node) = tdecl;
11625 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
11626 TYPE_NAME (V2DI_type_node) = tdecl;
11628 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
11629 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11631 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
11632 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11634 /* Paired and SPE builtins are only available if the compiler was built
11635 with the appropriate options, so only create those builtins when the
11636 corresponding option is enabled.  Create AltiVec and VSX builtins on
11637 machines with at least the general purpose extensions (970 and newer)
11638 to allow the use of the target attribute.  */
11639 if (TARGET_PAIRED_FLOAT)
11640 paired_init_builtins ();
11641 if (TARGET_SPE)
11642 spe_init_builtins ();
11643 if (TARGET_EXTRA_BUILTINS)
11644 altivec_init_builtins ();
11645 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
11646 rs6000_common_init_builtins ();
11648 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11649 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
11650 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
11652 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11653 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
11654 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
11656 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
11657 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
11658 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
11660 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11661 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
11662 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
11664 mode = (TARGET_64BIT) ? DImode : SImode;
11665 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11666 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
11667 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
11669 ftype = build_function_type_list (unsigned_intDI_type_node,
11670 NULL_TREE);
11671 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
11673 if (TARGET_64BIT)
11674 ftype = build_function_type_list (unsigned_intDI_type_node,
11675 NULL_TREE);
11676 else
11677 ftype = build_function_type_list (unsigned_intSI_type_node,
11678 NULL_TREE);
11679 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
11681 #if TARGET_XCOFF
11682 /* AIX libm provides clog as __clog. */
11683 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
11684 set_user_assembler_name (tdecl, "__clog");
11685 #endif
11687 #ifdef SUBTARGET_INIT_BUILTINS
11688 SUBTARGET_INIT_BUILTINS;
11689 #endif
11692 /* Returns the rs6000 builtin decl for CODE. */
11694 static tree
11695 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11697 unsigned fnmask;
11699 if (code >= RS6000_BUILTIN_COUNT)
11700 return error_mark_node;
11702 fnmask = rs6000_builtin_info[code].mask;
11703 if ((fnmask & rs6000_builtin_mask) != fnmask)
11705 rs6000_invalid_builtin ((enum rs6000_builtins)code);
11706 return error_mark_node;
11709 return rs6000_builtin_decls[code];
11712 static void
11713 spe_init_builtins (void)
11715 tree puint_type_node = build_pointer_type (unsigned_type_node);
11716 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11717 const struct builtin_description *d;
11718 size_t i;
11720 tree v2si_ftype_4_v2si
11721 = build_function_type_list (opaque_V2SI_type_node,
11722 opaque_V2SI_type_node,
11723 opaque_V2SI_type_node,
11724 opaque_V2SI_type_node,
11725 opaque_V2SI_type_node,
11726 NULL_TREE);
11728 tree v2sf_ftype_4_v2sf
11729 = build_function_type_list (opaque_V2SF_type_node,
11730 opaque_V2SF_type_node,
11731 opaque_V2SF_type_node,
11732 opaque_V2SF_type_node,
11733 opaque_V2SF_type_node,
11734 NULL_TREE);
11736 tree int_ftype_int_v2si_v2si
11737 = build_function_type_list (integer_type_node,
11738 integer_type_node,
11739 opaque_V2SI_type_node,
11740 opaque_V2SI_type_node,
11741 NULL_TREE);
11743 tree int_ftype_int_v2sf_v2sf
11744 = build_function_type_list (integer_type_node,
11745 integer_type_node,
11746 opaque_V2SF_type_node,
11747 opaque_V2SF_type_node,
11748 NULL_TREE);
11750 tree void_ftype_v2si_puint_int
11751 = build_function_type_list (void_type_node,
11752 opaque_V2SI_type_node,
11753 puint_type_node,
11754 integer_type_node,
11755 NULL_TREE);
11757 tree void_ftype_v2si_puint_char
11758 = build_function_type_list (void_type_node,
11759 opaque_V2SI_type_node,
11760 puint_type_node,
11761 char_type_node,
11762 NULL_TREE);
11764 tree void_ftype_v2si_pv2si_int
11765 = build_function_type_list (void_type_node,
11766 opaque_V2SI_type_node,
11767 opaque_p_V2SI_type_node,
11768 integer_type_node,
11769 NULL_TREE);
11771 tree void_ftype_v2si_pv2si_char
11772 = build_function_type_list (void_type_node,
11773 opaque_V2SI_type_node,
11774 opaque_p_V2SI_type_node,
11775 char_type_node,
11776 NULL_TREE);
11778 tree void_ftype_int
11779 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11781 tree int_ftype_void
11782 = build_function_type_list (integer_type_node, NULL_TREE);
11784 tree v2si_ftype_pv2si_int
11785 = build_function_type_list (opaque_V2SI_type_node,
11786 opaque_p_V2SI_type_node,
11787 integer_type_node,
11788 NULL_TREE);
11790 tree v2si_ftype_puint_int
11791 = build_function_type_list (opaque_V2SI_type_node,
11792 puint_type_node,
11793 integer_type_node,
11794 NULL_TREE);
11796 tree v2si_ftype_pushort_int
11797 = build_function_type_list (opaque_V2SI_type_node,
11798 pushort_type_node,
11799 integer_type_node,
11800 NULL_TREE);
11802 tree v2si_ftype_signed_char
11803 = build_function_type_list (opaque_V2SI_type_node,
11804 signed_char_type_node,
11805 NULL_TREE);
11807 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
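  /* "__ev64_opaque__" is the user-visible SPE vector type; user code
     typically declares values as, e.g.,

       __ev64_opaque__ a, b;

     and passes them to the __builtin_spe_* functions defined below.  */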
11809 /* Initialize irregular SPE builtins. */
11811 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11812 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11813 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11814 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11815 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11816 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11817 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11818 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11819 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11820 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11821 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11822 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11823 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11824 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11825 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11826 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11827 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11828 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11830 /* Loads. */
11831 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11832 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11833 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11834 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11835 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11836 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11837 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11838 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11839 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11840 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11841 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11842 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11843 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11844 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11845 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11846 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11847 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11848 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11849 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11850 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11851 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11852 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11854 /* Predicates. */
11855 d = bdesc_spe_predicates;
11856 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11858 tree type;
11860 switch (insn_data[d->icode].operand[1].mode)
11862 case V2SImode:
11863 type = int_ftype_int_v2si_v2si;
11864 break;
11865 case V2SFmode:
11866 type = int_ftype_int_v2sf_v2sf;
11867 break;
11868 default:
11869 gcc_unreachable ();
11872 def_builtin (d->name, type, d->code);
11875 /* Evsel predicates. */
11876 d = bdesc_spe_evsel;
11877 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
11879 tree type;
11881 switch (insn_data[d->icode].operand[1].mode)
11883 case V2SImode:
11884 type = v2si_ftype_4_v2si;
11885 break;
11886 case V2SFmode:
11887 type = v2sf_ftype_4_v2sf;
11888 break;
11889 default:
11890 gcc_unreachable ();
11893 def_builtin (d->name, type, d->code);
11897 static void
11898 paired_init_builtins (void)
11900 const struct builtin_description *d;
11901 size_t i;
11903 tree int_ftype_int_v2sf_v2sf
11904 = build_function_type_list (integer_type_node,
11905 integer_type_node,
11906 V2SF_type_node,
11907 V2SF_type_node,
11908 NULL_TREE);
11909 tree pcfloat_type_node =
11910 build_pointer_type (build_qualified_type
11911 (float_type_node, TYPE_QUAL_CONST));
11913 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
11914 long_integer_type_node,
11915 pcfloat_type_node,
11916 NULL_TREE);
11917 tree void_ftype_v2sf_long_pcfloat =
11918 build_function_type_list (void_type_node,
11919 V2SF_type_node,
11920 long_integer_type_node,
11921 pcfloat_type_node,
11922 NULL_TREE);
11925 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
11926 PAIRED_BUILTIN_LX);
11929 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
11930 PAIRED_BUILTIN_STX);
11932 /* Predicates. */
11933 d = bdesc_paired_preds;
11934 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
11936 tree type;
11938 if (TARGET_DEBUG_BUILTIN)
11939 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
11940 (int)i, get_insn_name (d->icode), (int)d->icode,
11941 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
11943 switch (insn_data[d->icode].operand[1].mode)
11945 case V2SFmode:
11946 type = int_ftype_int_v2sf_v2sf;
11947 break;
11948 default:
11949 gcc_unreachable ();
11952 def_builtin (d->name, type, d->code);
11956 static void
11957 altivec_init_builtins (void)
11959 const struct builtin_description *d;
11960 size_t i;
11961 tree ftype;
11962 tree decl;
11964 tree pvoid_type_node = build_pointer_type (void_type_node);
11966 tree pcvoid_type_node
11967 = build_pointer_type (build_qualified_type (void_type_node,
11968 TYPE_QUAL_CONST));
11970 tree int_ftype_opaque
11971 = build_function_type_list (integer_type_node,
11972 opaque_V4SI_type_node, NULL_TREE);
11973 tree opaque_ftype_opaque
11974 = build_function_type_list (integer_type_node, NULL_TREE);
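  /* The prototype above is only a placeholder: vec_splats and vec_promote
     are overloaded builtins, so their calls are presumably resolved by the
     frontend (see rs6000_overloaded_builtin_p) before this type matters.  */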
11975 tree opaque_ftype_opaque_int
11976 = build_function_type_list (opaque_V4SI_type_node,
11977 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
11978 tree opaque_ftype_opaque_opaque_int
11979 = build_function_type_list (opaque_V4SI_type_node,
11980 opaque_V4SI_type_node, opaque_V4SI_type_node,
11981 integer_type_node, NULL_TREE);
11982 tree int_ftype_int_opaque_opaque
11983 = build_function_type_list (integer_type_node,
11984 integer_type_node, opaque_V4SI_type_node,
11985 opaque_V4SI_type_node, NULL_TREE);
11986 tree int_ftype_int_v4si_v4si
11987 = build_function_type_list (integer_type_node,
11988 integer_type_node, V4SI_type_node,
11989 V4SI_type_node, NULL_TREE);
11990 tree void_ftype_v4si
11991 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
11992 tree v8hi_ftype_void
11993 = build_function_type_list (V8HI_type_node, NULL_TREE);
11994 tree void_ftype_void
11995 = build_function_type_list (void_type_node, NULL_TREE);
11996 tree void_ftype_int
11997 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11999 tree opaque_ftype_long_pcvoid
12000 = build_function_type_list (opaque_V4SI_type_node,
12001 long_integer_type_node, pcvoid_type_node,
12002 NULL_TREE);
12003 tree v16qi_ftype_long_pcvoid
12004 = build_function_type_list (V16QI_type_node,
12005 long_integer_type_node, pcvoid_type_node,
12006 NULL_TREE);
12007 tree v8hi_ftype_long_pcvoid
12008 = build_function_type_list (V8HI_type_node,
12009 long_integer_type_node, pcvoid_type_node,
12010 NULL_TREE);
12011 tree v4si_ftype_long_pcvoid
12012 = build_function_type_list (V4SI_type_node,
12013 long_integer_type_node, pcvoid_type_node,
12014 NULL_TREE);
12015 tree v4sf_ftype_long_pcvoid
12016 = build_function_type_list (V4SF_type_node,
12017 long_integer_type_node, pcvoid_type_node,
12018 NULL_TREE);
12019 tree v2df_ftype_long_pcvoid
12020 = build_function_type_list (V2DF_type_node,
12021 long_integer_type_node, pcvoid_type_node,
12022 NULL_TREE);
12023 tree v2di_ftype_long_pcvoid
12024 = build_function_type_list (V2DI_type_node,
12025 long_integer_type_node, pcvoid_type_node,
12026 NULL_TREE);
12028 tree void_ftype_opaque_long_pvoid
12029 = build_function_type_list (void_type_node,
12030 opaque_V4SI_type_node, long_integer_type_node,
12031 pvoid_type_node, NULL_TREE);
12032 tree void_ftype_v4si_long_pvoid
12033 = build_function_type_list (void_type_node,
12034 V4SI_type_node, long_integer_type_node,
12035 pvoid_type_node, NULL_TREE);
12036 tree void_ftype_v16qi_long_pvoid
12037 = build_function_type_list (void_type_node,
12038 V16QI_type_node, long_integer_type_node,
12039 pvoid_type_node, NULL_TREE);
12040 tree void_ftype_v8hi_long_pvoid
12041 = build_function_type_list (void_type_node,
12042 V8HI_type_node, long_integer_type_node,
12043 pvoid_type_node, NULL_TREE);
12044 tree void_ftype_v4sf_long_pvoid
12045 = build_function_type_list (void_type_node,
12046 V4SF_type_node, long_integer_type_node,
12047 pvoid_type_node, NULL_TREE);
12048 tree void_ftype_v2df_long_pvoid
12049 = build_function_type_list (void_type_node,
12050 V2DF_type_node, long_integer_type_node,
12051 pvoid_type_node, NULL_TREE);
12052 tree void_ftype_v2di_long_pvoid
12053 = build_function_type_list (void_type_node,
12054 V2DI_type_node, long_integer_type_node,
12055 pvoid_type_node, NULL_TREE);
12056 tree int_ftype_int_v8hi_v8hi
12057 = build_function_type_list (integer_type_node,
12058 integer_type_node, V8HI_type_node,
12059 V8HI_type_node, NULL_TREE);
12060 tree int_ftype_int_v16qi_v16qi
12061 = build_function_type_list (integer_type_node,
12062 integer_type_node, V16QI_type_node,
12063 V16QI_type_node, NULL_TREE);
12064 tree int_ftype_int_v4sf_v4sf
12065 = build_function_type_list (integer_type_node,
12066 integer_type_node, V4SF_type_node,
12067 V4SF_type_node, NULL_TREE);
12068 tree int_ftype_int_v2df_v2df
12069 = build_function_type_list (integer_type_node,
12070 integer_type_node, V2DF_type_node,
12071 V2DF_type_node, NULL_TREE);
12072 tree v4si_ftype_v4si
12073 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
12074 tree v8hi_ftype_v8hi
12075 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
12076 tree v16qi_ftype_v16qi
12077 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
12078 tree v4sf_ftype_v4sf
12079 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12080 tree v2df_ftype_v2df
12081 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12082 tree void_ftype_pcvoid_int_int
12083 = build_function_type_list (void_type_node,
12084 pcvoid_type_node, integer_type_node,
12085 integer_type_node, NULL_TREE);
12087 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
12088 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
12089 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
12090 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
12091 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
12092 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
12093 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
12094 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
12095 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
12096 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
12097 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
12098 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
12099 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
12100 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
12101 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
12102 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
12103 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
12104 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
12105 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
12106 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
12107 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
12108 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
12109 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
12110 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
12111 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
12112 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
12113 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
12114 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
12115 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
12116 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
12118 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
12119 VSX_BUILTIN_LXVD2X_V2DF);
12120 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
12121 VSX_BUILTIN_LXVD2X_V2DI);
12122 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
12123 VSX_BUILTIN_LXVW4X_V4SF);
12124 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
12125 VSX_BUILTIN_LXVW4X_V4SI);
12126 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
12127 VSX_BUILTIN_LXVW4X_V8HI);
12128 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
12129 VSX_BUILTIN_LXVW4X_V16QI);
12130 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
12131 VSX_BUILTIN_STXVD2X_V2DF);
12132 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
12133 VSX_BUILTIN_STXVD2X_V2DI);
12134 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
12135 VSX_BUILTIN_STXVW4X_V4SF);
12136 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
12137 VSX_BUILTIN_STXVW4X_V4SI);
12138 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
12139 VSX_BUILTIN_STXVW4X_V8HI);
12140 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
12141 VSX_BUILTIN_STXVW4X_V16QI);
12142 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
12143 VSX_BUILTIN_VEC_LD);
12144 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
12145 VSX_BUILTIN_VEC_ST);
12147 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
12148 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
12149 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
12151 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
12152 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
12153 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
12154 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
12155 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
12156 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
12157 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
12158 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
12159 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
12160 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
12161 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
12162 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
12164 /* Cell builtins. */
12165 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
12166 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
12167 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
12168 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
12170 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
12171 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
12172 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
12173 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
12175 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
12176 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
12177 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
12178 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
12180 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
12181 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
12182 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
12183 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
12185 /* Add the DST variants. */
12186 d = bdesc_dst;
12187 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12188 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
12190 /* Initialize the predicates. */
12191 d = bdesc_altivec_preds;
12192 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
12194 enum machine_mode mode1;
12195 tree type;
12197 if (rs6000_overloaded_builtin_p (d->code))
12198 mode1 = VOIDmode;
12199 else
12200 mode1 = insn_data[d->icode].operand[1].mode;
12202 switch (mode1)
12204 case VOIDmode:
12205 type = int_ftype_int_opaque_opaque;
12206 break;
12207 case V4SImode:
12208 type = int_ftype_int_v4si_v4si;
12209 break;
12210 case V8HImode:
12211 type = int_ftype_int_v8hi_v8hi;
12212 break;
12213 case V16QImode:
12214 type = int_ftype_int_v16qi_v16qi;
12215 break;
12216 case V4SFmode:
12217 type = int_ftype_int_v4sf_v4sf;
12218 break;
12219 case V2DFmode:
12220 type = int_ftype_int_v2df_v2df;
12221 break;
12222 default:
12223 gcc_unreachable ();
12226 def_builtin (d->name, type, d->code);
12229 /* Initialize the abs* operators. */
12230 d = bdesc_abs;
12231 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12233 enum machine_mode mode0;
12234 tree type;
12236 mode0 = insn_data[d->icode].operand[0].mode;
12238 switch (mode0)
12240 case V4SImode:
12241 type = v4si_ftype_v4si;
12242 break;
12243 case V8HImode:
12244 type = v8hi_ftype_v8hi;
12245 break;
12246 case V16QImode:
12247 type = v16qi_ftype_v16qi;
12248 break;
12249 case V4SFmode:
12250 type = v4sf_ftype_v4sf;
12251 break;
12252 case V2DFmode:
12253 type = v2df_ftype_v2df;
12254 break;
12255 default:
12256 gcc_unreachable ();
12259 def_builtin (d->name, type, d->code);
12262 /* Initialize target builtin that implements
12263 targetm.vectorize.builtin_mask_for_load. */
12265 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12266 v16qi_ftype_long_pcvoid,
12267 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12268 BUILT_IN_MD, NULL, NULL_TREE);
12269 TREE_READONLY (decl) = 1;
12270 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12271 altivec_builtin_mask_for_load = decl;
12273 /* Access to the vec_init patterns. */
12274 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12275 integer_type_node, integer_type_node,
12276 integer_type_node, NULL_TREE);
12277 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12279 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12280 short_integer_type_node,
12281 short_integer_type_node,
12282 short_integer_type_node,
12283 short_integer_type_node,
12284 short_integer_type_node,
12285 short_integer_type_node,
12286 short_integer_type_node, NULL_TREE);
12287 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12289 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12290 char_type_node, char_type_node,
12291 char_type_node, char_type_node,
12292 char_type_node, char_type_node,
12293 char_type_node, char_type_node,
12294 char_type_node, char_type_node,
12295 char_type_node, char_type_node,
12296 char_type_node, char_type_node,
12297 char_type_node, NULL_TREE);
12298 def_builtin ("__builtin_vec_init_v16qi", ftype,
12299 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12301 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12302 float_type_node, float_type_node,
12303 float_type_node, NULL_TREE);
12304 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12306 /* VSX builtins. */
12307 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12308 double_type_node, NULL_TREE);
12309 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12311 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12312 intDI_type_node, NULL_TREE);
12313 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12315 /* Access to the vec_set patterns. */
12316 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12317 intSI_type_node,
12318 integer_type_node, NULL_TREE);
12319 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12321 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12322 intHI_type_node,
12323 integer_type_node, NULL_TREE);
12324 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12326 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12327 intQI_type_node,
12328 integer_type_node, NULL_TREE);
12329 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12331 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12332 float_type_node,
12333 integer_type_node, NULL_TREE);
12334 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12336 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12337 double_type_node,
12338 integer_type_node, NULL_TREE);
12339 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12341 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12342 intDI_type_node,
12343 integer_type_node, NULL_TREE);
12344 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12346 /* Access to the vec_extract patterns. */
12347 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12348 integer_type_node, NULL_TREE);
12349 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12351 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12352 integer_type_node, NULL_TREE);
12353 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12355 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12356 integer_type_node, NULL_TREE);
12357 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12359 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12360 integer_type_node, NULL_TREE);
12361 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12363 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12364 integer_type_node, NULL_TREE);
12365 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12367 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12368 integer_type_node, NULL_TREE);
12369 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12372 /* Hash function for builtin functions with up to 3 arguments and a return
12373 type. */
12374 static unsigned
12375 builtin_hash_function (const void *hash_entry)
12377 unsigned ret = 0;
12378 int i;
12379 const struct builtin_hash_struct *bh =
12380 (const struct builtin_hash_struct *) hash_entry;
12382 for (i = 0; i < 4; i++)
12384 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12385 ret = (ret * 2) + bh->uns_p[i];
12388 return ret;
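/* As a worked example: the entry built for __builtin_recipdiv (see
   rs6000_init_builtins above) has modes { DFmode, DFmode, DFmode,
   VOIDmode } and all-zero uns_p flags, so the loop folds each mode in
   base MAX_MACHINE_MODE and each signedness flag in base 2.  Two
   prototypes share a hash slot only when all eight fields agree, which
   builtin_hash_eq then checks exactly.  */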
12391 /* Compare builtin hash entries H1 and H2 for equivalence. */
12392 static int
12393 builtin_hash_eq (const void *h1, const void *h2)
12395 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12396 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12398 return ((p1->mode[0] == p2->mode[0])
12399 && (p1->mode[1] == p2->mode[1])
12400 && (p1->mode[2] == p2->mode[2])
12401 && (p1->mode[3] == p2->mode[3])
12402 && (p1->uns_p[0] == p2->uns_p[0])
12403 && (p1->uns_p[1] == p2->uns_p[1])
12404 && (p1->uns_p[2] == p2->uns_p[2])
12405 && (p1->uns_p[3] == p2->uns_p[3]));
12408 /* Map modes to tree types for builtin functions with an explicit return
12409 type and up to 3 arguments.  Functions with fewer than 3 arguments pass
12410 VOIDmode for the unused argument slots. */
12411 static tree
12412 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12413 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12414 enum rs6000_builtins builtin, const char *name)
12416 struct builtin_hash_struct h;
12417 struct builtin_hash_struct *h2;
12418 void **found;
12419 int num_args = 3;
12420 int i;
12421 tree ret_type = NULL_TREE;
12422 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12424 /* Create builtin_hash_table. */
12425 if (builtin_hash_table == NULL)
12426 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12427 builtin_hash_eq, NULL);
12429 h.type = NULL_TREE;
12430 h.mode[0] = mode_ret;
12431 h.mode[1] = mode_arg0;
12432 h.mode[2] = mode_arg1;
12433 h.mode[3] = mode_arg2;
12434 h.uns_p[0] = 0;
12435 h.uns_p[1] = 0;
12436 h.uns_p[2] = 0;
12437 h.uns_p[3] = 0;
12439 /* If the builtin produces unsigned results or takes unsigned arguments,
12440 and it is returned as a decl for the vectorizer (such as the widening
12441 multiplies or permutes), make sure the arguments and return value are
12442 typed correctly. */
12443 switch (builtin)
12445 /* unsigned 2 argument functions. */
12446 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12447 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12448 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12449 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12450 h.uns_p[0] = 1;
12451 h.uns_p[1] = 1;
12452 h.uns_p[2] = 1;
12453 break;
12455 /* unsigned 3 argument functions. */
12456 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12457 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12458 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12459 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12460 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12461 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12462 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12463 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12464 case VSX_BUILTIN_VPERM_16QI_UNS:
12465 case VSX_BUILTIN_VPERM_8HI_UNS:
12466 case VSX_BUILTIN_VPERM_4SI_UNS:
12467 case VSX_BUILTIN_VPERM_2DI_UNS:
12468 case VSX_BUILTIN_XXSEL_16QI_UNS:
12469 case VSX_BUILTIN_XXSEL_8HI_UNS:
12470 case VSX_BUILTIN_XXSEL_4SI_UNS:
12471 case VSX_BUILTIN_XXSEL_2DI_UNS:
12472 h.uns_p[0] = 1;
12473 h.uns_p[1] = 1;
12474 h.uns_p[2] = 1;
12475 h.uns_p[3] = 1;
12476 break;
12478 /* signed permute functions with unsigned char mask. */
12479 case ALTIVEC_BUILTIN_VPERM_16QI:
12480 case ALTIVEC_BUILTIN_VPERM_8HI:
12481 case ALTIVEC_BUILTIN_VPERM_4SI:
12482 case ALTIVEC_BUILTIN_VPERM_4SF:
12483 case ALTIVEC_BUILTIN_VPERM_2DI:
12484 case ALTIVEC_BUILTIN_VPERM_2DF:
12485 case VSX_BUILTIN_VPERM_16QI:
12486 case VSX_BUILTIN_VPERM_8HI:
12487 case VSX_BUILTIN_VPERM_4SI:
12488 case VSX_BUILTIN_VPERM_4SF:
12489 case VSX_BUILTIN_VPERM_2DI:
12490 case VSX_BUILTIN_VPERM_2DF:
12491 h.uns_p[3] = 1;
12492 break;
12494 /* unsigned args, signed return. */
12495 case VSX_BUILTIN_XVCVUXDDP_UNS:
12496 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12497 h.uns_p[1] = 1;
12498 break;
12500 /* signed args, unsigned return. */
12501 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12502 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12503 h.uns_p[0] = 1;
12504 break;
12506 default:
12507 break;
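/* For example, ALTIVEC_BUILTIN_VMULEUB_UNS lands in the first group
   above, so the decl handed to the vectorizer gets the unsigned
   signature vector unsigned short (vector unsigned char,
   vector unsigned char) instead of the signed types its insn modes
   (V8HI, V16QI) would otherwise imply.  */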
12510 /* Figure out how many args are present. */
12511 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12512 num_args--;
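/* E.g. a unary builtin passes VOIDmode for mode_arg1 and mode_arg2,
   so the loop above trims num_args from 3 down to 1.  */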
12514 if (num_args == 0)
12515 fatal_error ("internal error: builtin function %s had no type", name);
12517 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12518 if (!ret_type && h.uns_p[0])
12519 ret_type = builtin_mode_to_type[h.mode[0]][0];
12521 if (!ret_type)
12522 fatal_error ("internal error: builtin function %s had an unexpected "
12523 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12525 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12526 arg_type[i] = NULL_TREE;
12528 for (i = 0; i < num_args; i++)
12530 int m = (int) h.mode[i+1];
12531 int uns_p = h.uns_p[i+1];
12533 arg_type[i] = builtin_mode_to_type[m][uns_p];
12534 if (!arg_type[i] && uns_p)
12535 arg_type[i] = builtin_mode_to_type[m][0];
12537 if (!arg_type[i])
12538 fatal_error ("internal error: builtin function %s, argument %d "
12539 "had unexpected argument type %s", name, i,
12540 GET_MODE_NAME (m));
12543 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12544 if (*found == NULL)
12546 h2 = ggc_alloc_builtin_hash_struct ();
12547 *h2 = h;
12548 *found = (void *)h2;
12550 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12551 arg_type[2], NULL_TREE);
12554 return ((struct builtin_hash_struct *)(*found))->type;
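/* The table above effectively hash-conses the signatures: every builtin
   with, say, a V4SI x V4SI -> V4SI signature and identical signedness
   flags shares the single FUNCTION_TYPE node built on first lookup.  */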
12557 static void
12558 rs6000_common_init_builtins (void)
12560 const struct builtin_description *d;
12561 size_t i;
12563 tree opaque_ftype_opaque = NULL_TREE;
12564 tree opaque_ftype_opaque_opaque = NULL_TREE;
12565 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12566 tree v2si_ftype_qi = NULL_TREE;
12567 tree v2si_ftype_v2si_qi = NULL_TREE;
12568 tree v2si_ftype_int_qi = NULL_TREE;
12569 unsigned builtin_mask = rs6000_builtin_mask;
12571 if (!TARGET_PAIRED_FLOAT)
12573 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12574 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12577 /* Paired and SPE builtins are only available if you build a compiler with
12578 the appropriate options, so only create those builtins when the
12579 corresponding compiler option is enabled. Create Altivec and VSX builtins
12580 on machines with at least the general purpose extensions (970 and newer)
12581 to allow the use of the target attribute. */
12583 if (TARGET_EXTRA_BUILTINS)
12584 builtin_mask |= RS6000_BTM_COMMON;
12586 /* Add the ternary operators. */
12587 d = bdesc_3arg;
12588 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12590 tree type;
12591 unsigned mask = d->mask;
12593 if ((mask & builtin_mask) != mask)
12595 if (TARGET_DEBUG_BUILTIN)
12596 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
12597 continue;
12600 if (rs6000_overloaded_builtin_p (d->code))
12602 if (! (type = opaque_ftype_opaque_opaque_opaque))
12603 type = opaque_ftype_opaque_opaque_opaque
12604 = build_function_type_list (opaque_V4SI_type_node,
12605 opaque_V4SI_type_node,
12606 opaque_V4SI_type_node,
12607 opaque_V4SI_type_node,
12608 NULL_TREE);
12610 else
12612 enum insn_code icode = d->icode;
12613 if (d->name == 0 || icode == CODE_FOR_nothing)
12614 continue;
12616 type = builtin_function_type (insn_data[icode].operand[0].mode,
12617 insn_data[icode].operand[1].mode,
12618 insn_data[icode].operand[2].mode,
12619 insn_data[icode].operand[3].mode,
12620 d->code, d->name);
12623 def_builtin (d->name, type, d->code);
12626 /* Add the binary operators. */
12627 d = bdesc_2arg;
12628 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12630 enum machine_mode mode0, mode1, mode2;
12631 tree type;
12632 unsigned mask = d->mask;
12634 if ((mask & builtin_mask) != mask)
12636 if (TARGET_DEBUG_BUILTIN)
12637 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
12638 continue;
12641 if (rs6000_overloaded_builtin_p (d->code))
12643 if (! (type = opaque_ftype_opaque_opaque))
12644 type = opaque_ftype_opaque_opaque
12645 = build_function_type_list (opaque_V4SI_type_node,
12646 opaque_V4SI_type_node,
12647 opaque_V4SI_type_node,
12648 NULL_TREE);
12650 else
12652 enum insn_code icode = d->icode;
12653 if (d->name == 0 || icode == CODE_FOR_nothing)
12654 continue;
12656 mode0 = insn_data[icode].operand[0].mode;
12657 mode1 = insn_data[icode].operand[1].mode;
12658 mode2 = insn_data[icode].operand[2].mode;
12660 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12662 if (! (type = v2si_ftype_v2si_qi))
12663 type = v2si_ftype_v2si_qi
12664 = build_function_type_list (opaque_V2SI_type_node,
12665 opaque_V2SI_type_node,
12666 char_type_node,
12667 NULL_TREE);
12670 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12671 && mode2 == QImode)
12673 if (! (type = v2si_ftype_int_qi))
12674 type = v2si_ftype_int_qi
12675 = build_function_type_list (opaque_V2SI_type_node,
12676 integer_type_node,
12677 char_type_node,
12678 NULL_TREE);
12681 else
12682 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12683 d->code, d->name);
12686 def_builtin (d->name, type, d->code);
12689 /* Add the simple unary operators. */
12690 d = bdesc_1arg;
12691 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12693 enum machine_mode mode0, mode1;
12694 tree type;
12695 unsigned mask = d->mask;
12697 if ((mask & builtin_mask) != mask)
12699 if (TARGET_DEBUG_BUILTIN)
12700 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
12701 continue;
12704 if (rs6000_overloaded_builtin_p (d->code))
12706 if (! (type = opaque_ftype_opaque))
12707 type = opaque_ftype_opaque
12708 = build_function_type_list (opaque_V4SI_type_node,
12709 opaque_V4SI_type_node,
12710 NULL_TREE);
12712 else
12714 enum insn_code icode = d->icode;
12715 if (d->name == 0 || icode == CODE_FOR_nothing)
12716 continue;
12718 mode0 = insn_data[icode].operand[0].mode;
12719 mode1 = insn_data[icode].operand[1].mode;
12721 if (mode0 == V2SImode && mode1 == QImode)
12723 if (! (type = v2si_ftype_qi))
12724 type = v2si_ftype_qi
12725 = build_function_type_list (opaque_V2SI_type_node,
12726 char_type_node,
12727 NULL_TREE);
12730 else
12731 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12732 d->code, d->name);
12735 def_builtin (d->name, type, d->code);
12739 static void
12740 rs6000_init_libfuncs (void)
12742 if (!TARGET_IEEEQUAD)
12743 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12744 if (!TARGET_XL_COMPAT)
12746 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12747 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12748 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12749 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12751 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12753 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12754 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12755 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12756 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12757 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12758 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12759 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12761 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12762 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12763 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12764 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12765 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12766 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12767 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12768 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12771 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12772 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12774 else
12776 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12777 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12778 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12779 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12781 else
12783 /* 32-bit SVR4 quad floating point routines. */
12785 set_optab_libfunc (add_optab, TFmode, "_q_add");
12786 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12787 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12788 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12789 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12790 if (TARGET_PPC_GPOPT)
12791 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12793 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12794 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12795 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12796 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12797 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12798 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12800 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12801 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12802 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12803 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12804 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12805 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12806 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12807 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
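/* Net effect, taking the add optab as the example: a TFmode addition
   becomes a call to __gcc_qadd in the IBM extended-double case above,
   _xlqadd under -mxl-compat, and _q_add in the 32-bit SVR4 (IEEE quad)
   case; the remaining optabs map analogously.  */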
12812 /* Expand a block clear operation, and return 1 if successful. Return 0
12813 if we should let the compiler generate normal code.
12815 operands[0] is the destination
12816 operands[1] is the length
12817 operands[3] is the alignment */
12820 expand_block_clear (rtx operands[])
12822 rtx orig_dest = operands[0];
12823 rtx bytes_rtx = operands[1];
12824 rtx align_rtx = operands[3];
12825 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12826 HOST_WIDE_INT align;
12827 HOST_WIDE_INT bytes;
12828 int offset;
12829 int clear_bytes;
12830 int clear_step;
12832 /* If this is not a fixed size clear, just call memset */
12833 if (! constp)
12834 return 0;
12836 /* This must be a fixed size alignment */
12837 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12838 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12840 /* Anything to clear? */
12841 bytes = INTVAL (bytes_rtx);
12842 if (bytes <= 0)
12843 return 1;
12845 /* Use the builtin memset after a point, to avoid huge code bloat.
12846 When optimize_size, avoid any significant code bloat; calling
12847 memset is about 4 instructions, so allow for one instruction to
12848 load zero and three to do clearing. */
12849 if (TARGET_ALTIVEC && align >= 128)
12850 clear_step = 16;
12851 else if (TARGET_POWERPC64 && align >= 32)
12852 clear_step = 8;
12853 else if (TARGET_SPE && align >= 64)
12854 clear_step = 8;
12855 else
12856 clear_step = 4;
12858 if (optimize_size && bytes > 3 * clear_step)
12859 return 0;
12860 if (! optimize_size && bytes > 8 * clear_step)
12861 return 0;
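/* Concretely: with clear_step == 4 the checks above inline clears of at
   most 12 bytes at -Os and 32 bytes otherwise; with AltiVec and 128-bit
   alignment (clear_step == 16) those limits become 48 and 128 bytes.  */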
12863 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12865 enum machine_mode mode = BLKmode;
12866 rtx dest;
12868 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
12870 clear_bytes = 16;
12871 mode = V4SImode;
12873 else if (bytes >= 8 && TARGET_SPE && align >= 64)
12875 clear_bytes = 8;
12876 mode = V2SImode;
12878 else if (bytes >= 8 && TARGET_POWERPC64
12879 /* 64-bit loads and stores require word-aligned
12880 displacements. */
12881 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12883 clear_bytes = 8;
12884 mode = DImode;
12886 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12887 { /* move 4 bytes */
12888 clear_bytes = 4;
12889 mode = SImode;
12891 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12892 { /* move 2 bytes */
12893 clear_bytes = 2;
12894 mode = HImode;
12896 else /* move 1 byte at a time */
12898 clear_bytes = 1;
12899 mode = QImode;
12902 dest = adjust_address (orig_dest, mode, offset);
12904 emit_move_insn (dest, CONST0_RTX (mode));
12907 return 1;
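/* E.g. clearing 14 bytes at a 32-bit-aligned address on a 32-bit target
   without AltiVec emits three SImode stores of zero and one HImode store,
   rather than a call to memset.  */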
12911 /* Expand a block move operation, and return 1 if successful. Return 0
12912 if we should let the compiler generate normal code.
12914 operands[0] is the destination
12915 operands[1] is the source
12916 operands[2] is the length
12917 operands[3] is the alignment */
12919 #define MAX_MOVE_REG 4
12922 expand_block_move (rtx operands[])
12924 rtx orig_dest = operands[0];
12925 rtx orig_src = operands[1];
12926 rtx bytes_rtx = operands[2];
12927 rtx align_rtx = operands[3];
12928 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
12929 int align;
12930 int bytes;
12931 int offset;
12932 int move_bytes;
12933 rtx stores[MAX_MOVE_REG];
12934 int num_reg = 0;
12936 /* If this is not a fixed size move, just call memcpy */
12937 if (! constp)
12938 return 0;
12940 /* This must be a fixed size alignment */
12941 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12942 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12944 /* Anything to move? */
12945 bytes = INTVAL (bytes_rtx);
12946 if (bytes <= 0)
12947 return 1;
12949 if (bytes > rs6000_block_move_inline_limit)
12950 return 0;
12952 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
12954 union {
12955 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
12956 rtx (*mov) (rtx, rtx);
12957 } gen_func;
12958 enum machine_mode mode = BLKmode;
12959 rtx src, dest;
12961 /* Altivec first, since it will be faster than a string move
12962 when it applies, and is usually not significantly larger. */
12963 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
12965 move_bytes = 16;
12966 mode = V4SImode;
12967 gen_func.mov = gen_movv4si;
12969 else if (TARGET_SPE && bytes >= 8 && align >= 64)
12971 move_bytes = 8;
12972 mode = V2SImode;
12973 gen_func.mov = gen_movv2si;
12975 else if (TARGET_STRING
12976 && bytes > 24 /* move up to 32 bytes at a time */
12977 && ! fixed_regs[5]
12978 && ! fixed_regs[6]
12979 && ! fixed_regs[7]
12980 && ! fixed_regs[8]
12981 && ! fixed_regs[9]
12982 && ! fixed_regs[10]
12983 && ! fixed_regs[11]
12984 && ! fixed_regs[12])
12986 move_bytes = (bytes > 32) ? 32 : bytes;
12987 gen_func.movmemsi = gen_movmemsi_8reg;
12989 else if (TARGET_STRING
12990 && bytes > 16 /* move up to 24 bytes at a time */
12991 && ! fixed_regs[5]
12992 && ! fixed_regs[6]
12993 && ! fixed_regs[7]
12994 && ! fixed_regs[8]
12995 && ! fixed_regs[9]
12996 && ! fixed_regs[10])
12998 move_bytes = (bytes > 24) ? 24 : bytes;
12999 gen_func.movmemsi = gen_movmemsi_6reg;
13001 else if (TARGET_STRING
13002 && bytes > 8 /* move up to 16 bytes at a time */
13003 && ! fixed_regs[5]
13004 && ! fixed_regs[6]
13005 && ! fixed_regs[7]
13006 && ! fixed_regs[8])
13008 move_bytes = (bytes > 16) ? 16 : bytes;
13009 gen_func.movmemsi = gen_movmemsi_4reg;
13011 else if (bytes >= 8 && TARGET_POWERPC64
13012 /* 64-bit loads and stores require word-aligned
13013 displacements. */
13014 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
13016 move_bytes = 8;
13017 mode = DImode;
13018 gen_func.mov = gen_movdi;
13020 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
13021 { /* move up to 8 bytes at a time */
13022 move_bytes = (bytes > 8) ? 8 : bytes;
13023 gen_func.movmemsi = gen_movmemsi_2reg;
13025 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
13026 { /* move 4 bytes */
13027 move_bytes = 4;
13028 mode = SImode;
13029 gen_func.mov = gen_movsi;
13031 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
13032 { /* move 2 bytes */
13033 move_bytes = 2;
13034 mode = HImode;
13035 gen_func.mov = gen_movhi;
13037 else if (TARGET_STRING && bytes > 1)
13038 { /* move up to 4 bytes at a time */
13039 move_bytes = (bytes > 4) ? 4 : bytes;
13040 gen_func.movmemsi = gen_movmemsi_1reg;
13042 else /* move 1 byte at a time */
13044 move_bytes = 1;
13045 mode = QImode;
13046 gen_func.mov = gen_movqi;
13049 src = adjust_address (orig_src, mode, offset);
13050 dest = adjust_address (orig_dest, mode, offset);
13052 if (mode != BLKmode)
13054 rtx tmp_reg = gen_reg_rtx (mode);
13056 emit_insn ((*gen_func.mov) (tmp_reg, src));
13057 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
13060 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
13062 int i;
13063 for (i = 0; i < num_reg; i++)
13064 emit_insn (stores[i]);
13065 num_reg = 0;
13068 if (mode == BLKmode)
13070 /* Move the address into scratch registers. The movmemsi
13071 patterns require zero offset. */
13072 if (!REG_P (XEXP (src, 0)))
13074 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
13075 src = replace_equiv_address (src, src_reg);
13077 set_mem_size (src, move_bytes);
13079 if (!REG_P (XEXP (dest, 0)))
13081 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
13082 dest = replace_equiv_address (dest, dest_reg);
13084 set_mem_size (dest, move_bytes);
13086 emit_insn ((*gen_func.movmemsi) (dest, src,
13087 GEN_INT (move_bytes & 31),
13088 align_rtx));
13092 return 1;
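/* Note the batching in the loop above: register moves are queued in
   stores[] and flushed once MAX_MOVE_REG loads have been emitted, so up
   to four loads can be scheduled ahead of the stores that consume their
   values.  */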
13096 /* Return a string to perform a load_multiple operation.
13097 operands[0] is the vector.
13098 operands[1] is the source address.
13099 operands[2] is the first destination register. */
13101 const char *
13102 rs6000_output_load_multiple (rtx operands[3])
13104 /* We have to handle the case where the pseudo used to contain the address
13105 is assigned to one of the output registers. */
13106 int i, j;
13107 int words = XVECLEN (operands[0], 0);
13108 rtx xop[10];
13110 if (XVECLEN (operands[0], 0) == 1)
13111 return "lwz %2,0(%1)";
13113 for (i = 0; i < words; i++)
13114 if (refers_to_regno_p (REGNO (operands[2]) + i,
13115 REGNO (operands[2]) + i + 1, operands[1], 0))
13117 if (i == words-1)
13119 xop[0] = GEN_INT (4 * (words-1));
13120 xop[1] = operands[1];
13121 xop[2] = operands[2];
13122 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
13123 return "";
13125 else if (i == 0)
13127 xop[0] = GEN_INT (4 * (words-1));
13128 xop[1] = operands[1];
13129 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
13130 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
13131 return "";
13133 else
13135 for (j = 0; j < words; j++)
13136 if (j != i)
13138 xop[0] = GEN_INT (j * 4);
13139 xop[1] = operands[1];
13140 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
13141 output_asm_insn ("lwz %2,%0(%1)", xop);
13143 xop[0] = GEN_INT (i * 4);
13144 xop[1] = operands[1];
13145 output_asm_insn ("lwz %1,%0(%1)", xop);
13146 return "";
13150 return "lswi %2,%1,%N0";
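/* Worked example (illustrative register numbers): loading 3 words with
   the address in r7 and targets r5..r7 hits the i == words-1 case above
   and emits "lswi 5,7,8" followed by "lwz 7,8(7)", clobbering the
   address register only on the final load.  */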
13154 /* A validation routine: say whether CODE, a condition code, and MODE
13155 match. The other alternatives either don't make sense or should
13156 never be generated. */
13158 void
13159 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
13161 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
13162 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
13163 && GET_MODE_CLASS (mode) == MODE_CC);
13165 /* These don't make sense. */
13166 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
13167 || mode != CCUNSmode);
13169 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
13170 || mode == CCUNSmode);
13172 gcc_assert (mode == CCFPmode
13173 || (code != ORDERED && code != UNORDERED
13174 && code != UNEQ && code != LTGT
13175 && code != UNGT && code != UNLT
13176 && code != UNGE && code != UNLE));
13178 /* These should never be generated except for
13179 flag_finite_math_only. */
13180 gcc_assert (mode != CCFPmode
13181 || flag_finite_math_only
13182 || (code != LE && code != GE
13183 && code != UNEQ && code != LTGT
13184 && code != UNGT && code != UNLT));
13186 /* These are invalid; the information is not there. */
13187 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
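/* E.g. (GTU, CCUNSmode) and (ORDERED, CCFPmode) are valid pairs, while
   (GT, CCUNSmode) would trip the first of the checks above: signed
   comparisons never carry the unsigned CC mode.  */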
13191 /* Return 1 if ANDOP is a mask with no bits set outside the mask
13192 required to convert the result of a rotate insn into a shift
13193 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
13196 includes_lshift_p (rtx shiftop, rtx andop)
13198 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13200 shift_mask <<= INTVAL (shiftop);
13202 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
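/* E.g. with SHIFTOP == 4 the test above requires ANDOP to be a subset of
   0xfffffff0: only then does rotate-left-4-then-mask behave exactly like
   a shift left by 4, whose low four result bits are always zero.  */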
13205 /* Similar, but for right shift. */
13208 includes_rshift_p (rtx shiftop, rtx andop)
13210 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13212 shift_mask >>= INTVAL (shiftop);
13214 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13217 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13218 to perform a left shift. It must have exactly SHIFTOP least
13219 significant 0's, then one or more 1's, then zero or more 0's. */
13222 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13224 if (GET_CODE (andop) == CONST_INT)
13226 HOST_WIDE_INT c, lsb, shift_mask;
13228 c = INTVAL (andop);
13229 if (c == 0 || c == ~0)
13230 return 0;
13232 shift_mask = ~0;
13233 shift_mask <<= INTVAL (shiftop);
13235 /* Find the least significant one bit. */
13236 lsb = c & -c;
13238 /* It must coincide with the LSB of the shift mask. */
13239 if (-lsb != shift_mask)
13240 return 0;
13242 /* Invert to look for the next transition (if any). */
13243 c = ~c;
13245 /* Remove the low group of ones (originally low group of zeros). */
13246 c &= -lsb;
13248 /* Again find the lsb, and check we have all 1's above. */
13249 lsb = c & -c;
13250 return c == -lsb;
13252 else if (GET_CODE (andop) == CONST_DOUBLE
13253 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13255 HOST_WIDE_INT low, high, lsb;
13256 HOST_WIDE_INT shift_mask_low, shift_mask_high;
13258 low = CONST_DOUBLE_LOW (andop);
13259 if (HOST_BITS_PER_WIDE_INT < 64)
13260 high = CONST_DOUBLE_HIGH (andop);
13262 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
13263 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
13264 return 0;
13266 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13268 shift_mask_high = ~0;
13269 if (INTVAL (shiftop) > 32)
13270 shift_mask_high <<= INTVAL (shiftop) - 32;
13272 lsb = high & -high;
13274 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
13275 return 0;
13277 high = ~high;
13278 high &= -lsb;
13280 lsb = high & -high;
13281 return high == -lsb;
13284 shift_mask_low = ~0;
13285 shift_mask_low <<= INTVAL (shiftop);
13287 lsb = low & -low;
13289 if (-lsb != shift_mask_low)
13290 return 0;
13292 if (HOST_BITS_PER_WIDE_INT < 64)
13293 high = ~high;
13294 low = ~low;
13295 low &= -lsb;
13297 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13299 lsb = high & -high;
13300 return high == -lsb;
13303 lsb = low & -low;
13304 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
13306 else
13307 return 0;
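/* Worked CONST_INT example for the path above: c == 0xff0 with
   SHIFTOP == 4 gives lsb == 0x10, and -lsb matches the shift mask
   ~(HOST_WIDE_INT) 0xf; inverting and clearing the low run of ones
   leaves c == -0x1000, whose lsb 0x1000 satisfies c == -lsb, so a mask
   of four low zeros and eight ones is accepted for rldic.  */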
13310 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13311 to perform a left shift. It must have SHIFTOP or more least
13312 significant 0's, with the remainder of the word 1's. */
13315 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13317 if (GET_CODE (andop) == CONST_INT)
13319 HOST_WIDE_INT c, lsb, shift_mask;
13321 shift_mask = ~0;
13322 shift_mask <<= INTVAL (shiftop);
13323 c = INTVAL (andop);
13325 /* Find the least significant one bit. */
13326 lsb = c & -c;
13328 /* It must be covered by the shift mask.
13329 This test also rejects c == 0. */
13330 if ((lsb & shift_mask) == 0)
13331 return 0;
13333 /* Check we have all 1's above the transition, and reject all 1's. */
13334 return c == -lsb && lsb != 1;
13336 else if (GET_CODE (andop) == CONST_DOUBLE
13337 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13339 HOST_WIDE_INT low, lsb, shift_mask_low;
13341 low = CONST_DOUBLE_LOW (andop);
13343 if (HOST_BITS_PER_WIDE_INT < 64)
13345 HOST_WIDE_INT high, shift_mask_high;
13347 high = CONST_DOUBLE_HIGH (andop);
13349 if (low == 0)
13351 shift_mask_high = ~0;
13352 if (INTVAL (shiftop) > 32)
13353 shift_mask_high <<= INTVAL (shiftop) - 32;
13355 lsb = high & -high;
13357 if ((lsb & shift_mask_high) == 0)
13358 return 0;
13360 return high == -lsb;
13362 if (high != ~0)
13363 return 0;
13366 shift_mask_low = ~0;
13367 shift_mask_low <<= INTVAL (shiftop);
13369 lsb = low & -low;
13371 if ((lsb & shift_mask_low) == 0)
13372 return 0;
13374 return low == -lsb && lsb != 1;
13376 else
13377 return 0;
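/* Worked CONST_INT example: c == -0x1000 (twelve low zeros, solid ones
   above) with SHIFTOP == 8 has lsb == 0x1000, which lies inside the
   shift mask ~0xff, and c == -lsb with lsb != 1, so the mask is
   accepted for rldicr.  */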
13380 /* Return 1 if the operands form valid arguments to an rlwimi
13381 instruction for an insert with right shift in 64-bit mode. The mask may
13382 not start on the first bit or stop on the last bit because the wrap-around
13383 effects of the instruction do not correspond to the semantics of the RTL insn. */
13386 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13388 if (INTVAL (startop) > 32
13389 && INTVAL (startop) < 64
13390 && INTVAL (sizeop) > 1
13391 && INTVAL (sizeop) + INTVAL (startop) < 64
13392 && INTVAL (shiftop) > 0
13393 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13394 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13395 return 1;
13397 return 0;
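/* For instance SIZEOP == 8, STARTOP == 40, SHIFTOP == 16 passes every
   test above (40 + 8 < 64 and 8 + 16 < 32), so an 8-bit field inserted
   at bit 40 from a value shifted right by 16 can use rlwimi.  */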
13400 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
13401 for lfq and stfq insns iff the registers are hard registers. */
13404 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13406 /* We might have been passed a SUBREG. */
13407 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13408 return 0;
13410 /* We might have been passed non-floating-point registers. */
13411 if (!FP_REGNO_P (REGNO (reg1))
13412 || !FP_REGNO_P (REGNO (reg2)))
13413 return 0;
13415 return (REGNO (reg1) == REGNO (reg2) - 1);
13418 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13419 addr1 and addr2 must be in consecutive memory locations
13420 (addr2 == addr1 + 8). */
13423 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13425 rtx addr1, addr2;
13426 unsigned int reg1, reg2;
13427 int offset1, offset2;
13429 /* The mems cannot be volatile. */
13430 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13431 return 0;
13433 addr1 = XEXP (mem1, 0);
13434 addr2 = XEXP (mem2, 0);
13436 /* Extract an offset (if used) from the first addr. */
13437 if (GET_CODE (addr1) == PLUS)
13439 /* If not a REG, return zero. */
13440 if (GET_CODE (XEXP (addr1, 0)) != REG)
13441 return 0;
13442 else
13444 reg1 = REGNO (XEXP (addr1, 0));
13445 /* The offset must be constant! */
13446 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13447 return 0;
13448 offset1 = INTVAL (XEXP (addr1, 1));
13451 else if (GET_CODE (addr1) != REG)
13452 return 0;
13453 else
13455 reg1 = REGNO (addr1);
13456 /* This was a simple (mem (reg)) expression. Offset is 0. */
13457 offset1 = 0;
13460 /* And now for the second addr. */
13461 if (GET_CODE (addr2) == PLUS)
13463 /* If not a REG, return zero. */
13464 if (GET_CODE (XEXP (addr2, 0)) != REG)
13465 return 0;
13466 else
13468 reg2 = REGNO (XEXP (addr2, 0));
13469 /* The offset must be constant. */
13470 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13471 return 0;
13472 offset2 = INTVAL (XEXP (addr2, 1));
13475 else if (GET_CODE (addr2) != REG)
13476 return 0;
13477 else
13479 reg2 = REGNO (addr2);
13480 /* This was a simple (mem (reg)) expression. Offset is 0. */
13481 offset2 = 0;
13484 /* Both of these must have the same base register. */
13485 if (reg1 != reg2)
13486 return 0;
13488 /* The offset for the second addr must be 8 more than the first addr. */
13489 if (offset2 != offset1 + 8)
13490 return 0;
13492 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13493 instructions. */
13494 return 1;
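/* E.g. the pair "lfd 6,16(3)" / "lfd 7,24(3)" qualifies (illustrative
   registers): both addresses use base register r3 and the offsets differ
   by exactly 8, so together with consecutive FP registers the peephole
   can fuse them into one lfq.  */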
13499 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13501 static bool eliminated = false;
13502 rtx ret;
13504 if (mode != SDmode)
13505 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13506 else
13508 rtx mem = cfun->machine->sdmode_stack_slot;
13509 gcc_assert (mem != NULL_RTX);
13511 if (!eliminated)
13513 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13514 cfun->machine->sdmode_stack_slot = mem;
13515 eliminated = true;
13517 ret = mem;
13520 if (TARGET_DEBUG_ADDR)
13522 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13523 GET_MODE_NAME (mode));
13524 if (!ret)
13525 fprintf (stderr, "\tNULL_RTX\n");
13526 else
13527 debug_rtx (ret);
13530 return ret;
13533 static tree
13534 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13536 /* Don't walk into types. */
13537 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13539 *walk_subtrees = 0;
13540 return NULL_TREE;
13543 switch (TREE_CODE (*tp))
13545 case VAR_DECL:
13546 case PARM_DECL:
13547 case FIELD_DECL:
13548 case RESULT_DECL:
13549 case SSA_NAME:
13550 case REAL_CST:
13551 case MEM_REF:
13552 case VIEW_CONVERT_EXPR:
13553 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13554 return *tp;
13555 break;
13556 default:
13557 break;
13560 return NULL_TREE;
13563 enum reload_reg_type {
13564 GPR_REGISTER_TYPE,
13565 VECTOR_REGISTER_TYPE,
13566 OTHER_REGISTER_TYPE
13569 static enum reload_reg_type
13570 rs6000_reload_register_type (enum reg_class rclass)
13572 switch (rclass)
13574 case GENERAL_REGS:
13575 case BASE_REGS:
13576 return GPR_REGISTER_TYPE;
13578 case FLOAT_REGS:
13579 case ALTIVEC_REGS:
13580 case VSX_REGS:
13581 return VECTOR_REGISTER_TYPE;
13583 default:
13584 return OTHER_REGISTER_TYPE;
13588 /* Inform reload about cases where moving X with a mode MODE to a register in
13589 RCLASS requires an extra scratch or immediate register. Return the class
13590 needed for the immediate register.
13592 For VSX and Altivec, we may need a register to convert sp+offset into
13593 reg+sp.
13595 For misaligned 64-bit gpr loads and stores we need a register to
13596 convert an offset address to indirect. */
13598 static reg_class_t
13599 rs6000_secondary_reload (bool in_p,
13600 rtx x,
13601 reg_class_t rclass_i,
13602 enum machine_mode mode,
13603 secondary_reload_info *sri)
13605 enum reg_class rclass = (enum reg_class) rclass_i;
13606 reg_class_t ret = ALL_REGS;
13607 enum insn_code icode;
13608 bool default_p = false;
13610 sri->icode = CODE_FOR_nothing;
13612 /* Convert vector loads and stores into gprs to use an additional base
13613 register. */
13614 icode = rs6000_vector_reload[mode][in_p != false];
13615 if (icode != CODE_FOR_nothing)
13617 ret = NO_REGS;
13618 sri->icode = CODE_FOR_nothing;
13619 sri->extra_cost = 0;
13621 if (GET_CODE (x) == MEM)
13623 rtx addr = XEXP (x, 0);
13625 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
13626 an extra register in that case, but an extra register is needed
13627 if the addressing is reg+reg or (reg+reg)&(-16). */
13628 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13630 if (!legitimate_indirect_address_p (addr, false)
13631 && !rs6000_legitimate_offset_address_p (TImode, addr,
13632 false, true))
13634 sri->icode = icode;
13635 /* Account for splitting the loads, and converting the
13636 address from reg+reg to reg. */
13637 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13638 + ((GET_CODE (addr) == AND) ? 1 : 0));
13641 /* Loads to and stores from vector registers can only do reg+reg
13642 addressing. Altivec registers can also do (reg+reg)&(-16). */
13643 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13644 || rclass == FLOAT_REGS || rclass == NO_REGS)
13646 if (!VECTOR_MEM_ALTIVEC_P (mode)
13647 && GET_CODE (addr) == AND
13648 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13649 && INTVAL (XEXP (addr, 1)) == -16
13650 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13651 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13653 sri->icode = icode;
13654 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13655 ? 2 : 1);
13657 else if (!legitimate_indirect_address_p (addr, false)
13658 && (rclass == NO_REGS
13659 || !legitimate_indexed_address_p (addr, false)))
13661 sri->icode = icode;
13662 sri->extra_cost = 1;
13664 else
13665 icode = CODE_FOR_nothing;
13667 /* Any other loads, including to pseudo registers which haven't been
13668 assigned to a register yet, default to require a scratch
13669 register. */
13670 else
13672 sri->icode = icode;
13673 sri->extra_cost = 2;
13676 else if (REG_P (x))
13678 int regno = true_regnum (x);
13680 icode = CODE_FOR_nothing;
13681 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13682 default_p = true;
13683 else
13685 enum reg_class xclass = REGNO_REG_CLASS (regno);
13686 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13687 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13689 /* If memory is needed, use default_secondary_reload to create the
13690 stack slot. */
13691 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13692 default_p = true;
13693 else
13694 ret = NO_REGS;
13697 else
13698 default_p = true;
13700 else if (TARGET_POWERPC64
13701 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13702 && MEM_P (x)
13703 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
13705 rtx off = address_offset (XEXP (x, 0));
13706 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13708 if (off != NULL_RTX
13709 && (INTVAL (off) & 3) != 0
13710 && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
13712 if (in_p)
13713 sri->icode = CODE_FOR_reload_di_load;
13714 else
13715 sri->icode = CODE_FOR_reload_di_store;
13716 sri->extra_cost = 2;
13717 ret = NO_REGS;
13719 else
13720 default_p = true;
13722 else if (!TARGET_POWERPC64
13723 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13724 && MEM_P (x)
13725 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
13727 rtx off = address_offset (XEXP (x, 0));
13728 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
13730 /* We need a secondary reload only when our legitimate_address_p
13731 says the address is good (as otherwise the entire address
13732 will be reloaded). So for mode sizes of 8 and 16 this will
13733 be when the offset is in the ranges [0x7ffc,0x7fff] and
13734 [0x7ff4,0x7ff7] respectively. Note that the address we see
13735 here may have been manipulated by legitimize_reload_address. */
13736 if (off != NULL_RTX
13737 && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
13738 < UNITS_PER_WORD))
13740 if (in_p)
13741 sri->icode = CODE_FOR_reload_si_load;
13742 else
13743 sri->icode = CODE_FOR_reload_si_store;
13744 sri->extra_cost = 2;
13745 ret = NO_REGS;
13747 else
13748 default_p = true;
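/* Checking the arithmetic above for an 8-byte access: extra == 4, so
   the range test accepts (unsigned) off - 0x7ffc < 4, i.e. offsets
   0x7ffc through 0x7fff, matching the [0x7ffc,0x7fff] window named in
   the comment.  */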
13750 else
13751 default_p = true;
13753 if (default_p)
13754 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13756 gcc_assert (ret != ALL_REGS);
13758 if (TARGET_DEBUG_ADDR)
13760 fprintf (stderr,
13761 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13762 "mode = %s",
13763 reg_class_names[ret],
13764 in_p ? "true" : "false",
13765 reg_class_names[rclass],
13766 GET_MODE_NAME (mode));
13768 if (default_p)
13769 fprintf (stderr, ", default secondary reload");
13771 if (sri->icode != CODE_FOR_nothing)
13772 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13773 insn_data[sri->icode].name, sri->extra_cost);
13774 else
13775 fprintf (stderr, "\n");
13777 debug_rtx (x);
13780 return ret;
13783 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13784 to SP+reg addressing. */
13786 void
13787 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13789 int regno = true_regnum (reg);
13790 enum machine_mode mode = GET_MODE (reg);
13791 enum reg_class rclass;
13792 rtx addr;
13793 rtx and_op2 = NULL_RTX;
13794 rtx addr_op1;
13795 rtx addr_op2;
13796 rtx scratch_or_premodify = scratch;
13797 rtx and_rtx;
13798 rtx cc_clobber;
13800 if (TARGET_DEBUG_ADDR)
13802 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13803 store_p ? "store" : "load");
13804 fprintf (stderr, "reg:\n");
13805 debug_rtx (reg);
13806 fprintf (stderr, "mem:\n");
13807 debug_rtx (mem);
13808 fprintf (stderr, "scratch:\n");
13809 debug_rtx (scratch);
13812 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13813 gcc_assert (GET_CODE (mem) == MEM);
13814 rclass = REGNO_REG_CLASS (regno);
13815 addr = XEXP (mem, 0);
13817 switch (rclass)
13819 /* GPRs can handle reg + small constant; all other addresses need to use
13820 the scratch register. */
13821 case GENERAL_REGS:
13822 case BASE_REGS:
13823 if (GET_CODE (addr) == AND)
13825 and_op2 = XEXP (addr, 1);
13826 addr = XEXP (addr, 0);
13829 if (GET_CODE (addr) == PRE_MODIFY)
13831 scratch_or_premodify = XEXP (addr, 0);
13832 gcc_assert (REG_P (scratch_or_premodify));
13833 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13834 addr = XEXP (addr, 1);
13837 if (GET_CODE (addr) == PLUS
13838 && (and_op2 != NULL_RTX
13839 || !rs6000_legitimate_offset_address_p (TImode, addr,
13840 false, true)))
13842 addr_op1 = XEXP (addr, 0);
13843 addr_op2 = XEXP (addr, 1);
13844 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
13846 if (!REG_P (addr_op2)
13847 && (GET_CODE (addr_op2) != CONST_INT
13848 || !satisfies_constraint_I (addr_op2)))
13850 if (TARGET_DEBUG_ADDR)
13852 fprintf (stderr,
13853 "\nMove plus addr to register %s, mode = %s: ",
13854 rs6000_reg_names[REGNO (scratch)],
13855 GET_MODE_NAME (mode));
13856 debug_rtx (addr_op2);
13858 rs6000_emit_move (scratch, addr_op2, Pmode);
13859 addr_op2 = scratch;
13862 emit_insn (gen_rtx_SET (VOIDmode,
13863 scratch_or_premodify,
13864 gen_rtx_PLUS (Pmode,
13865 addr_op1,
13866 addr_op2)));
13868 addr = scratch_or_premodify;
13869 scratch_or_premodify = scratch;
13871 else if (!legitimate_indirect_address_p (addr, false)
13872 && !rs6000_legitimate_offset_address_p (TImode, addr,
13873 false, true))
13875 if (TARGET_DEBUG_ADDR)
13877 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13878 rs6000_reg_names[REGNO (scratch_or_premodify)],
13879 GET_MODE_NAME (mode));
13880 debug_rtx (addr);
13882 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13883 addr = scratch_or_premodify;
13884 scratch_or_premodify = scratch;
13886 break;
13888 /* Float/Altivec registers can only handle reg+reg addressing. Move
13889 other addresses into a scratch register. */
13890 case FLOAT_REGS:
13891 case VSX_REGS:
13892 case ALTIVEC_REGS:
13894 /* With float regs, we need to handle the AND ourselves, since we can't
13895 use the Altivec instruction with an implicit AND -16. Allow scalar
13896 loads to float registers to use reg+offset even if VSX. */
13897 if (GET_CODE (addr) == AND
13898 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
13899 || GET_CODE (XEXP (addr, 1)) != CONST_INT
13900 || INTVAL (XEXP (addr, 1)) != -16
13901 || !VECTOR_MEM_ALTIVEC_P (mode)))
13903 and_op2 = XEXP (addr, 1);
13904 addr = XEXP (addr, 0);
13907 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13908 as the address later. */
13909 if (GET_CODE (addr) == PRE_MODIFY
13910 && (!VECTOR_MEM_VSX_P (mode)
13911 || and_op2 != NULL_RTX
13912 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
13914 scratch_or_premodify = XEXP (addr, 0);
13915 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
13916 false));
13917 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13918 addr = XEXP (addr, 1);
13921 if (legitimate_indirect_address_p (addr, false) /* reg */
13922 || legitimate_indexed_address_p (addr, false) /* reg+reg */
13923 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
13924 || (GET_CODE (addr) == AND /* Altivec memory */
13925 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13926 && INTVAL (XEXP (addr, 1)) == -16
13927 && VECTOR_MEM_ALTIVEC_P (mode))
13928 || (rclass == FLOAT_REGS /* legacy float mem */
13929 && GET_MODE_SIZE (mode) == 8
13930 && and_op2 == NULL_RTX
13931 && scratch_or_premodify == scratch
13932 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
13935 else if (GET_CODE (addr) == PLUS)
13937 addr_op1 = XEXP (addr, 0);
13938 addr_op2 = XEXP (addr, 1);
13939 gcc_assert (REG_P (addr_op1));
13941 if (TARGET_DEBUG_ADDR)
13943 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
13944 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13945 debug_rtx (addr_op2);
13947 rs6000_emit_move (scratch, addr_op2, Pmode);
13948 emit_insn (gen_rtx_SET (VOIDmode,
13949 scratch_or_premodify,
13950 gen_rtx_PLUS (Pmode,
13951 addr_op1,
13952 scratch)));
13953 addr = scratch_or_premodify;
13954 scratch_or_premodify = scratch;
13957 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
13958 || GET_CODE (addr) == CONST_INT || REG_P (addr))
13960 if (TARGET_DEBUG_ADDR)
13962 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13963 rs6000_reg_names[REGNO (scratch_or_premodify)],
13964 GET_MODE_NAME (mode));
13965 debug_rtx (addr);
13968 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13969 addr = scratch_or_premodify;
13970 scratch_or_premodify = scratch;
13973 else
13974 gcc_unreachable ();
13976 break;
13978 default:
13979 gcc_unreachable ();
13982 /* If the original address involved a pre-modify that we couldn't use with
13983 the VSX update-form memory instruction, and we haven't already taken care of it,
13984 store the address in the pre-modify register and use that as the
13985 address. */
13986 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
13988 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
13989 addr = scratch_or_premodify;
13992 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13993 memory instruction, recreate the AND now, including the clobber which is
13994 generated by the general ANDSI3/ANDDI3 patterns for the
13995 andi. instruction. */
13996 if (and_op2 != NULL_RTX)
13998 if (! legitimate_indirect_address_p (addr, false))
14000 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
14001 addr = scratch;
14004 if (TARGET_DEBUG_ADDR)
14006 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
14007 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
14008 debug_rtx (and_op2);
14011 and_rtx = gen_rtx_SET (VOIDmode,
14012 scratch,
14013 gen_rtx_AND (Pmode,
14014 addr,
14015 and_op2));
14017 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
14018 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14019 gen_rtvec (2, and_rtx, cc_clobber)));
14020 addr = scratch;
14023 /* Adjust the address if it changed. */
14024 if (addr != XEXP (mem, 0))
14026 mem = change_address (mem, mode, addr);
14027 if (TARGET_DEBUG_ADDR)
14028 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
14031 /* Now create the move. */
14032 if (store_p)
14033 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14034 else
14035 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14037 return;
14040 /* Convert reloads involving 64-bit gprs and misaligned offset
14041 addressing, or multiple 32-bit gprs and offsets that are too large,
14042 to use indirect addressing. */
14044 void
14045 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
14047 int regno = true_regnum (reg);
14048 enum reg_class rclass;
14049 rtx addr;
14050 rtx scratch_or_premodify = scratch;
14052 if (TARGET_DEBUG_ADDR)
14054 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
14055 store_p ? "store" : "load");
14056 fprintf (stderr, "reg:\n");
14057 debug_rtx (reg);
14058 fprintf (stderr, "mem:\n");
14059 debug_rtx (mem);
14060 fprintf (stderr, "scratch:\n");
14061 debug_rtx (scratch);
14064 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
14065 gcc_assert (GET_CODE (mem) == MEM);
14066 rclass = REGNO_REG_CLASS (regno);
14067 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
14068 addr = XEXP (mem, 0);
14070 if (GET_CODE (addr) == PRE_MODIFY)
14072 scratch_or_premodify = XEXP (addr, 0);
14073 gcc_assert (REG_P (scratch_or_premodify));
14074 addr = XEXP (addr, 1);
14076 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
14078 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
14080 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
14082 /* Now create the move. */
14083 if (store_p)
14084 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
14085 else
14086 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
14088 return;
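/* E.g. a DImode load from (mem (plus r3 0x7ffe)) on 64-bit reaches here
   via CODE_FOR_reload_di_load: the offset's low bits prevent encoding in
   ld's DS field, so the sum is formed in the scratch register and the
   access becomes a plain register-indirect load.  (Illustrative register
   and offset.)  */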
14091 /* Allocate a 64-bit stack slot to be used for copying SDmode
14092 values through if this function has any SDmode references. */
14094 static void
14095 rs6000_alloc_sdmode_stack_slot (void)
14097 tree t;
14098 basic_block bb;
14099 gimple_stmt_iterator gsi;
14101 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
14103 FOR_EACH_BB (bb)
14104 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
14106 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
14107 if (ret)
14109 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14110 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14111 SDmode, 0);
14112 return;
14116 /* Check for any SDmode parameters of the function. */
14117 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
14119 if (TREE_TYPE (t) == error_mark_node)
14120 continue;
14122 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
14123 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
14125 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
14126 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
14127 SDmode, 0);
14128 return;
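/* So any function touching a _Decimal32 (SDmode) object, even just a
   parameter, gets one DDmode-sized slot up front; the reload hook
   rs6000_secondary_memory_needed_rtx then hands that same slot back for
   every SDmode secondary reload in the function.  */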
14133 static void
14134 rs6000_instantiate_decls (void)
14136 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
14137 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
14140 /* Given an rtx X being reloaded into a reg required to be
14141 in class CLASS, return the class of reg to actually use.
14142 In general this is just CLASS; but on some machines
14143 in some cases it is preferable to use a more restrictive class.
14145 On the RS/6000, we have to return NO_REGS when we want to reload a
14146 floating-point CONST_DOUBLE to force it to be copied to memory.
14148 We also don't want to reload integer values into floating-point
14149 registers if we can at all help it. In fact, this can
14150 cause reload to die if it tries to generate a reload of CTR
14151 into a FP register and discovers it doesn't have the memory location
14152 required.
14154 ??? Would it be a good idea to have reload do the converse, that is
14155 try to reload floating modes into FP registers if possible?
14158 static enum reg_class
14159 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
14161 enum machine_mode mode = GET_MODE (x);
14163 if (VECTOR_UNIT_VSX_P (mode)
14164 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
14165 return rclass;
14167 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
14168 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
14169 && easy_vector_constant (x, mode))
14170 return ALTIVEC_REGS;
14172 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
14173 return NO_REGS;
14175 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
14176 return GENERAL_REGS;
14178 /* For VSX, prefer the traditional registers for 64-bit values because we can
14179 use the non-VSX loads. Prefer the Altivec registers if Altivec is
14180 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
14181 prefer Altivec loads. */
14182 if (rclass == VSX_REGS)
14184 if (GET_MODE_SIZE (mode) <= 8)
14185 return FLOAT_REGS;
14187 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
14188 return ALTIVEC_REGS;
14190 return rclass;
14193 return rclass;
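/* For instance, CONST0_RTX (V2DFmode) may stay in VSX_REGS (a vector
   zero is cheap to materialize there), an easy AltiVec constant is
   steered to ALTIVEC_REGS, and any other constant headed for FLOAT_REGS
   gets NO_REGS so that it is forced to memory instead.  */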
14196 /* Debug version of rs6000_preferred_reload_class. */
14197 static enum reg_class
14198 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
14200 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
14202 fprintf (stderr,
14203 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
14204 "mode = %s, x:\n",
14205 reg_class_names[ret], reg_class_names[rclass],
14206 GET_MODE_NAME (GET_MODE (x)));
14207 debug_rtx (x);
14209 return ret;
14212 /* If we are copying between FP or AltiVec registers and anything else, we need
14213 a memory location. The exception is when we are targeting ppc64 and the
14214 move to/from fpr to gpr instructions are available. Also, under VSX, you
14215 can copy vector registers from the FP register set to the Altivec register
14216 set and vice versa. */
14218 static bool
14219 rs6000_secondary_memory_needed (enum reg_class class1,
14220 enum reg_class class2,
14221 enum machine_mode mode)
14223 if (class1 == class2)
14224 return false;
14226 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14227 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14228 between these classes. But we need memory for other things that can go in
14229 FLOAT_REGS like SFmode. */
14230 if (TARGET_VSX
14231 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14232 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14233 || class1 == FLOAT_REGS))
14234 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14235 && class2 != FLOAT_REGS);
14237 if (class1 == VSX_REGS || class2 == VSX_REGS)
14238 return true;
14240 if (class1 == FLOAT_REGS
14241 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14242 || ((mode != DFmode)
14243 && (mode != DDmode)
14244 && (mode != DImode))))
14245 return true;
14247 if (class2 == FLOAT_REGS
14248 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14249 || ((mode != DFmode)
14250 && (mode != DDmode)
14251 && (mode != DImode))))
14252 return true;
14254 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14255 return true;
14257 return false;
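/* E.g. a DFmode copy between FLOAT_REGS and GENERAL_REGS normally
   answers true (it must bounce through memory), but answers false on a
   64-bit target with direct fpr<->gpr moves (-mmfpgpr); vector copies
   among VSX_REGS, ALTIVEC_REGS and FLOAT_REGS under VSX never need
   memory.  */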
14260 /* Debug version of rs6000_secondary_memory_needed. */
14261 static bool
14262 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14263 enum reg_class class2,
14264 enum machine_mode mode)
14266 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14268 fprintf (stderr,
14269 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14270 "class2 = %s, mode = %s\n",
14271 ret ? "true" : "false", reg_class_names[class1],
14272 reg_class_names[class2], GET_MODE_NAME (mode));
14274 return ret;
14277 /* Return the register class of a scratch register needed to copy IN into
14278 or out of a register in RCLASS in MODE. If it can be done directly,
14279 NO_REGS is returned. */
14281 static enum reg_class
14282 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14283 rtx in)
14285 int regno;
14287 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14288 #if TARGET_MACHO
14289 && MACHOPIC_INDIRECT
14290 #endif
14293 /* We cannot copy a symbolic operand directly into anything
14294 other than BASE_REGS for TARGET_ELF. So indicate that a
14295 register from BASE_REGS is needed as an intermediate
14296 register.
14298 On Darwin, pic addresses require a load from memory, which
14299 needs a base register. */
14300 if (rclass != BASE_REGS
14301 && (GET_CODE (in) == SYMBOL_REF
14302 || GET_CODE (in) == HIGH
14303 || GET_CODE (in) == LABEL_REF
14304 || GET_CODE (in) == CONST))
14305 return BASE_REGS;
14308 if (GET_CODE (in) == REG)
14310 regno = REGNO (in);
14311 if (regno >= FIRST_PSEUDO_REGISTER)
14313 regno = true_regnum (in);
14314 if (regno >= FIRST_PSEUDO_REGISTER)
14315 regno = -1;
14318 else if (GET_CODE (in) == SUBREG)
14320 regno = true_regnum (in);
14321 if (regno >= FIRST_PSEUDO_REGISTER)
14322 regno = -1;
14324 else
14325 regno = -1;
14327 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14328 into anything. */
14329 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14330 || (regno >= 0 && INT_REGNO_P (regno)))
14331 return NO_REGS;
14333 /* Constants, memory, and FP registers can go into FP registers. */
14334 if ((regno == -1 || FP_REGNO_P (regno))
14335 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14336 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14338 /* Memory, and FP/altivec registers can go into fp/altivec registers under
14339 VSX. */
14340 if (TARGET_VSX
14341 && (regno == -1 || VSX_REGNO_P (regno))
14342 && VSX_REG_CLASS_P (rclass))
14343 return NO_REGS;
14345 /* Memory, and AltiVec registers can go into AltiVec registers. */
14346 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14347 && rclass == ALTIVEC_REGS)
14348 return NO_REGS;
14350 /* We can copy among the CR registers. */
14351 if ((rclass == CR_REGS || rclass == CR0_REGS)
14352 && regno >= 0 && CR_REGNO_P (regno))
14353 return NO_REGS;
14355 /* Otherwise, we need GENERAL_REGS. */
14356 return GENERAL_REGS;
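/* E.g. under TARGET_ELF a SYMBOL_REF headed for FLOAT_REGS reports
   BASE_REGS as the needed intermediate, a gpr-to-gpr copy reports
   NO_REGS, and an SDmode constant aimed at FLOAT_REGS asks for
   GENERAL_REGS.  */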
14359 /* Debug version of rs6000_secondary_reload_class. */
14360 static enum reg_class
14361 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14362 enum machine_mode mode, rtx in)
14364 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14365 fprintf (stderr,
14366 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14367 "mode = %s, input rtx:\n",
14368 reg_class_names[ret], reg_class_names[rclass],
14369 GET_MODE_NAME (mode));
14370 debug_rtx (in);
14372 return ret;
14375 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14377 static bool
14378 rs6000_cannot_change_mode_class (enum machine_mode from,
14379 enum machine_mode to,
14380 enum reg_class rclass)
14382 unsigned from_size = GET_MODE_SIZE (from);
14383 unsigned to_size = GET_MODE_SIZE (to);
14385 if (from_size != to_size)
14387 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14388 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14389 && reg_classes_intersect_p (xclass, rclass));
14392 if (TARGET_E500_DOUBLE
14393 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14394 || (((to) == TFmode) + ((from) == TFmode)) == 1
14395 || (((to) == DDmode) + ((from) == DDmode)) == 1
14396 || (((to) == TDmode) + ((from) == TDmode)) == 1
14397 || (((to) == DImode) + ((from) == DImode)) == 1))
14398 return true;
14400 /* Since the VSX register set includes traditional floating point registers
14401 and altivec registers, just check for the size being different instead of
14402 trying to check whether the modes are vector modes. Otherwise it won't
14403 allow, say, DF and DI to change classes. */
14404 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14405 return (from_size != 8 && from_size != 16);
14407 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14408 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14409 return true;
14411 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14412 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14413 return true;
14415 return false;
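/* E.g. SFmode <-> DFmode (4 vs. 8 bytes) is rejected for any class that
   overlaps the FP/VSX registers, while DFmode <-> DImode (both 8 bytes)
   is allowed there under VSX, which is what lets those registers hold
   64-bit integers and doubles interchangeably.  */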
14418 /* Debug version of rs6000_cannot_change_mode_class. */
14419 static bool
14420 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14421 enum machine_mode to,
14422 enum reg_class rclass)
14424 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14426 fprintf (stderr,
14427 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14428 "to = %s, rclass = %s\n",
14429 ret ? "true" : "false",
14430 GET_MODE_NAME (from), GET_MODE_NAME (to),
14431 reg_class_names[rclass]);
14433 return ret;
14436 /* Given a comparison operation, return the bit number in CCR to test. We
14437 know this is a valid comparison.
14439 SCC_P is 1 if this is for an scc. That means that %D will have been
14440 used instead of %C, so the bits will be in different places.
14442 Return -1 if OP isn't a valid comparison for some reason. */
14445 ccr_bit (rtx op, int scc_p)
14447 enum rtx_code code = GET_CODE (op);
14448 enum machine_mode cc_mode;
14449 int cc_regnum;
14450 int base_bit;
14451 rtx reg;
14453 if (!COMPARISON_P (op))
14454 return -1;
14456 reg = XEXP (op, 0);
14458 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14460 cc_mode = GET_MODE (reg);
14461 cc_regnum = REGNO (reg);
14462 base_bit = 4 * (cc_regnum - CR0_REGNO);
14464 validate_condition_mode (code, cc_mode);
14466 /* When generating a sCOND operation, only positive conditions are
14467 allowed. */
14468 gcc_assert (!scc_p
14469 || code == EQ || code == GT || code == LT || code == UNORDERED
14470 || code == GTU || code == LTU);
14472 switch (code)
14474 case NE:
14475 return scc_p ? base_bit + 3 : base_bit + 2;
14476 case EQ:
14477 return base_bit + 2;
14478 case GT: case GTU: case UNLE:
14479 return base_bit + 1;
14480 case LT: case LTU: case UNGE:
14481 return base_bit;
14482 case ORDERED: case UNORDERED:
14483 return base_bit + 3;
14485 case GE: case GEU:
14486 /* If scc, we will have done a cror to put the bit in the
14487 unordered position. So test that bit. For integer, this is ! LT
14488 unless this is an scc insn. */
14489 return scc_p ? base_bit + 3 : base_bit;
14491 case LE: case LEU:
14492 return scc_p ? base_bit + 3 : base_bit + 1;
14494 default:
14495 gcc_unreachable ();
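/* The mapping ccr_bit computes, restated as a minimal standalone sketch
   (illustrative, not part of the compiler): each CR field owns four
   bits -- LT, GT, EQ, SO/UN -- starting at 4 * field. */
#if 0
#include <assert.h>

enum cr_bit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3 };

/* Bit number in the 32-bit CR for a given field and condition bit.  */
static int
cr_field_bit (int field, enum cr_bit which)
{
  return 4 * field + which;
}

int
main (void)
{
  assert (cr_field_bit (0, CR_LT) == 0);  /* blt on cr0 tests bit 0.  */
  assert (cr_field_bit (2, CR_EQ) == 10); /* beq on cr2 tests bit 10.  */
  return 0;
}
#endif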
14499 /* Return the GOT register. */
14501 rtx
14502 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14504 /* The second flow pass currently (June 1999) can't update
14505 regs_ever_live without disturbing other parts of the compiler, so
14506 update it here to make the prolog/epilogue code happy. */
14507 if (!can_create_pseudo_p ()
14508 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14509 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14511 crtl->uses_pic_offset_table = 1;
14513 return pic_offset_table_rtx;
14516 static rs6000_stack_t stack_info;
14518 /* Function to init struct machine_function.
14519 This will be called, via a pointer variable,
14520 from push_function_context. */
14522 static struct machine_function *
14523 rs6000_init_machine_status (void)
14525 stack_info.reload_completed = 0;
14526 return ggc_alloc_cleared_machine_function ();
14529 /* These macros test for integers and extract the low-order bits. */
14530 #define INT_P(X) \
14531 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
14532 && GET_MODE (X) == VOIDmode)
14534 #define INT_LOWPART(X) \
14535 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
14537 int
14538 extract_MB (rtx op)
14540 int i;
14541 unsigned long val = INT_LOWPART (op);
14543 /* If the high bit is zero, the value is the first 1 bit we find
14544 from the left. */
14545 if ((val & 0x80000000) == 0)
14547 gcc_assert (val & 0xffffffff);
14549 i = 1;
14550 while (((val <<= 1) & 0x80000000) == 0)
14551 ++i;
14552 return i;
14555 /* If the high bit is set and the low bit is not, or the mask is all
14556 1's, the value is zero. */
14557 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14558 return 0;
14560 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14561 from the right. */
14562 i = 31;
14563 while (((val >>= 1) & 1) != 0)
14564 --i;
14566 return i;
14569 int
14570 extract_ME (rtx op)
14572 int i;
14573 unsigned long val = INT_LOWPART (op);
14575 /* If the low bit is zero, the value is the first 1 bit we find from
14576 the right. */
14577 if ((val & 1) == 0)
14579 gcc_assert (val & 0xffffffff);
14581 i = 30;
14582 while (((val >>= 1) & 1) == 0)
14583 --i;
14585 return i;
14588 /* If the low bit is set and the high bit is not, or the mask is all
14589 1's, the value is 31. */
14590 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14591 return 31;
14593 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14594 from the left. */
14595 i = 0;
14596 while (((val <<= 1) & 0x80000000) != 0)
14597 ++i;
14599 return i;
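/* extract_MB and extract_ME, restated as a standalone sketch with two
   worked masks (illustrative, not part of the compiler): 0x0ffffff0
   covers IBM bits 4 through 27, so MB = 4 and ME = 27; the wrap-around
   mask 0xff0000ff gives MB = 24 and ME = 7. */
#if 0
#include <assert.h>

static int
mb_of_mask (unsigned long val)
{
  int i;
  if ((val & 0x80000000) == 0)  /* First 1 bit from the left.  */
    {
      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;
  i = 31;                       /* Wrap-around: first 0 from the right.  */
  while (((val >>= 1) & 1) != 0)
    --i;
  return i;
}

int
main (void)
{
  assert (mb_of_mask (0x0ffffff0) == 4);
  assert (mb_of_mask (0xff0000ff) == 24);
  return 0;
}
#endif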
14602 /* Locate some local-dynamic symbol still in use by this function
14603 so that we can print its name in some tls_ld pattern. */
14605 static const char *
14606 rs6000_get_some_local_dynamic_name (void)
14608 rtx insn;
14610 if (cfun->machine->some_ld_name)
14611 return cfun->machine->some_ld_name;
14613 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
14614 if (INSN_P (insn)
14615 && for_each_rtx (&PATTERN (insn),
14616 rs6000_get_some_local_dynamic_name_1, 0))
14617 return cfun->machine->some_ld_name;
14619 gcc_unreachable ();
14622 /* Helper function for rs6000_get_some_local_dynamic_name. */
14624 static int
14625 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14627 rtx x = *px;
14629 if (GET_CODE (x) == SYMBOL_REF)
14631 const char *str = XSTR (x, 0);
14632 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14634 cfun->machine->some_ld_name = str;
14635 return 1;
14639 return 0;
14642 /* Write out a function code label. */
14644 void
14645 rs6000_output_function_entry (FILE *file, const char *fname)
14647 if (fname[0] != '.')
14649 switch (DEFAULT_ABI)
14651 default:
14652 gcc_unreachable ();
14654 case ABI_AIX:
14655 if (DOT_SYMBOLS)
14656 putc ('.', file);
14657 else
14658 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14659 break;
14661 case ABI_V4:
14662 case ABI_DARWIN:
14663 break;
14667 RS6000_OUTPUT_BASENAME (file, fname);
14670 /* Print an operand. Recognize special options, documented below. */
14672 #if TARGET_ELF
14673 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14674 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14675 #else
14676 #define SMALL_DATA_RELOC "sda21"
14677 #define SMALL_DATA_REG 0
14678 #endif
14680 void
14681 print_operand (FILE *file, rtx x, int code)
14683 int i;
14684 unsigned HOST_WIDE_INT uval;
14686 switch (code)
14688 /* %a is output_address. */
14690 case 'b':
14691 /* If constant, low-order 16 bits of constant, unsigned.
14692 Otherwise, write normally. */
14693 if (INT_P (x))
14694 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14695 else
14696 print_operand (file, x, 0);
14697 return;
14699 case 'B':
14700 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14701 for 64-bit mask direction. */
14702 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14703 return;
14705 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14706 output_operand. */
14708 case 'D':
14709 /* Like 'J' but get to the GT bit only. */
14710 gcc_assert (REG_P (x));
14712 /* Bit 1 is GT bit. */
14713 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14715 /* Add one for shift count in rlinm for scc. */
14716 fprintf (file, "%d", i + 1);
14717 return;
14719 case 'E':
14720 /* X is a CR register. Print the number of the EQ bit of the CR. */
14721 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14722 output_operand_lossage ("invalid %%E value");
14723 else
14724 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14725 return;
14727 case 'f':
14728 /* X is a CR register. Print the shift count needed to move it
14729 to the high-order four bits. */
14730 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14731 output_operand_lossage ("invalid %%f value");
14732 else
14733 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14734 return;
14736 case 'F':
14737 /* Similar, but print the count for the rotate in the opposite
14738 direction. */
14739 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14740 output_operand_lossage ("invalid %%F value");
14741 else
14742 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14743 return;
14745 case 'G':
14746 /* X is a constant integer. If it is negative, print "m",
14747 otherwise print "z". This is to make an aze or ame insn. */
14748 if (GET_CODE (x) != CONST_INT)
14749 output_operand_lossage ("invalid %%G value");
14750 else if (INTVAL (x) >= 0)
14751 putc ('z', file);
14752 else
14753 putc ('m', file);
14754 return;
14756 case 'h':
14757 /* If constant, output low-order five bits. Otherwise, write
14758 normally. */
14759 if (INT_P (x))
14760 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14761 else
14762 print_operand (file, x, 0);
14763 return;
14765 case 'H':
14766 /* If constant, output low-order six bits. Otherwise, write
14767 normally. */
14768 if (INT_P (x))
14769 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14770 else
14771 print_operand (file, x, 0);
14772 return;
14774 case 'I':
14775 /* Print `i' if this is a constant, else nothing. */
14776 if (INT_P (x))
14777 putc ('i', file);
14778 return;
14780 case 'j':
14781 /* Write the bit number in CCR for jump. */
14782 i = ccr_bit (x, 0);
14783 if (i == -1)
14784 output_operand_lossage ("invalid %%j code");
14785 else
14786 fprintf (file, "%d", i);
14787 return;
14789 case 'J':
14790 /* Similar, but add one for shift count in rlinm for scc and pass
14791 scc flag to `ccr_bit'. */
14792 i = ccr_bit (x, 1);
14793 if (i == -1)
14794 output_operand_lossage ("invalid %%J code");
14795 else
14796 /* If we want bit 31, write a shift count of zero, not 32. */
14797 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14798 return;
14800 case 'k':
14801 /* X must be a constant. Write the 1's complement of the
14802 constant. */
14803 if (! INT_P (x))
14804 output_operand_lossage ("invalid %%k value");
14805 else
14806 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14807 return;
14809 case 'K':
14810 /* X must be a symbolic constant on ELF. Write an
14811 expression suitable for an 'addi' that adds in the low 16
14812 bits of the MEM. */
14813 if (GET_CODE (x) == CONST)
14815 if (GET_CODE (XEXP (x, 0)) != PLUS
14816 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14817 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14818 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14819 output_operand_lossage ("invalid %%K value");
14821 print_operand_address (file, x);
14822 fputs ("@l", file);
14823 return;
14825 /* %l is output_asm_label. */
14827 case 'L':
14828 /* Write second word of DImode or DFmode reference. Works on register
14829 or non-indexed memory only. */
14830 if (REG_P (x))
14831 fputs (reg_names[REGNO (x) + 1], file);
14832 else if (MEM_P (x))
14834 /* Handle possible auto-increment. Since it is pre-increment and
14835 we have already done it, we can just use an offset of one word. */
14836 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14837 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14838 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14839 UNITS_PER_WORD));
14840 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14841 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14842 UNITS_PER_WORD));
14843 else
14844 output_address (XEXP (adjust_address_nv (x, SImode,
14845 UNITS_PER_WORD),
14846 0));
14848 if (small_data_operand (x, GET_MODE (x)))
14849 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14850 reg_names[SMALL_DATA_REG]);
14852 return;
14854 case 'm':
14855 /* MB value for a mask operand. */
14856 if (! mask_operand (x, SImode))
14857 output_operand_lossage ("invalid %%m value");
14859 fprintf (file, "%d", extract_MB (x));
14860 return;
14862 case 'M':
14863 /* ME value for a mask operand. */
14864 if (! mask_operand (x, SImode))
14865 output_operand_lossage ("invalid %%M value");
14867 fprintf (file, "%d", extract_ME (x));
14868 return;
14870 /* %n outputs the negative of its operand. */
14872 case 'N':
14873 /* Write the number of elements in the vector times 4. */
14874 if (GET_CODE (x) != PARALLEL)
14875 output_operand_lossage ("invalid %%N value");
14876 else
14877 fprintf (file, "%d", XVECLEN (x, 0) * 4);
14878 return;
14880 case 'O':
14881 /* Similar, but subtract 1 first. */
14882 if (GET_CODE (x) != PARALLEL)
14883 output_operand_lossage ("invalid %%O value");
14884 else
14885 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
14886 return;
14888 case 'p':
14889 /* X is a CONST_INT that is a power of two. Output the logarithm. */
14890 if (! INT_P (x)
14891 || INT_LOWPART (x) < 0
14892 || (i = exact_log2 (INT_LOWPART (x))) < 0)
14893 output_operand_lossage ("invalid %%p value");
14894 else
14895 fprintf (file, "%d", i);
14896 return;
14898 case 'P':
14899 /* The operand must be an indirect memory reference. The result
14900 is the register name. */
14901 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
14902 || REGNO (XEXP (x, 0)) >= 32)
14903 output_operand_lossage ("invalid %%P value");
14904 else
14905 fputs (reg_names[REGNO (XEXP (x, 0))], file);
14906 return;
14908 case 'q':
14909 /* This outputs the logical code corresponding to a boolean
14910 expression. The expression may have one or both operands
14911 negated (if one, only the first one). For condition register
14912 logical operations, it will also treat the negated
14913 CR codes as NOTs, but not handle NOTs of them. */
14915 const char *const *t = 0;
14916 const char *s;
14917 enum rtx_code code = GET_CODE (x);
14918 static const char * const tbl[3][3] = {
14919 { "and", "andc", "nor" },
14920 { "or", "orc", "nand" },
14921 { "xor", "eqv", "xor" } };
14923 if (code == AND)
14924 t = tbl[0];
14925 else if (code == IOR)
14926 t = tbl[1];
14927 else if (code == XOR)
14928 t = tbl[2];
14929 else
14930 output_operand_lossage ("invalid %%q value");
14932 if (GET_CODE (XEXP (x, 0)) != NOT)
14933 s = t[0];
14934 else
14936 if (GET_CODE (XEXP (x, 1)) == NOT)
14937 s = t[2];
14938 else
14939 s = t[1];
14942 fputs (s, file);
14944 return;
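	/* Worked examples for %q (illustrative): (and (not r3) r4) selects
	   t[1] of the AND row and prints "andc"; (ior (not r3) (not r4))
	   selects t[2] and prints "nand", by De Morgan ~a | ~b == ~(a & b);
	   (xor (not r3) r4) prints "eqv", since ~a ^ b == ~(a ^ b). */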
14946 case 'Q':
14947 if (!TARGET_MFCRF)
14948 return;
14949 fputc (',', file);
14950 /* FALLTHRU */
14953 case 'R':
14954 /* X is a CR register. Print the mask for `mtcrf'. */
14955 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14956 output_operand_lossage ("invalid %%R value");
14957 else
14958 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
14959 return;
14961 case 's':
14962 /* Low 5 bits of 32 - value */
14963 if (! INT_P (x))
14964 output_operand_lossage ("invalid %%s value");
14965 else
14966 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
14967 return;
14969 case 'S':
14970 /* PowerPC64 mask position. All 0's is excluded.
14971 CONST_INT 32-bit mask is considered sign-extended so any
14972 transition must occur within the CONST_INT, not on the boundary. */
14973 if (! mask64_operand (x, DImode))
14974 output_operand_lossage ("invalid %%S value");
14976 uval = INT_LOWPART (x);
14978 if (uval & 1) /* Clear Left */
14980 #if HOST_BITS_PER_WIDE_INT > 64
14981 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14982 #endif
14983 i = 64;
14985 else /* Clear Right */
14987 uval = ~uval;
14988 #if HOST_BITS_PER_WIDE_INT > 64
14989 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14990 #endif
14991 i = 63;
14993 while (uval != 0)
14994 --i, uval >>= 1;
14995 gcc_assert (i >= 0);
14996 fprintf (file, "%d", i);
14997 return;
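	/* Worked example for %S (illustrative): the rldicl-style mask
	   0x00000000ffffffff has its low bit set, so the "Clear Left" arm
	   counts down from 64 and prints 32 (the MB position); the mask
	   0xffffffff00000000 takes the "Clear Right" arm, counting its
	   complement down from 63 to print 31 (the ME position). */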
14999 case 't':
15000 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
15001 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
15003 /* Bit 3 is OV bit. */
15004 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
15006 /* If we want bit 31, write a shift count of zero, not 32. */
15007 fprintf (file, "%d", i == 31 ? 0 : i + 1);
15008 return;
15010 case 'T':
15011 /* Print the symbolic name of a branch target register. */
15012 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
15013 && REGNO (x) != CTR_REGNO))
15014 output_operand_lossage ("invalid %%T value");
15015 else if (REGNO (x) == LR_REGNO)
15016 fputs ("lr", file);
15017 else
15018 fputs ("ctr", file);
15019 return;
15021 case 'u':
15022 /* High-order 16 bits of constant for use in unsigned operand. */
15023 if (! INT_P (x))
15024 output_operand_lossage ("invalid %%u value");
15025 else
15026 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15027 (INT_LOWPART (x) >> 16) & 0xffff);
15028 return;
15030 case 'v':
15031 /* High-order 16 bits of constant for use in signed operand. */
15032 if (! INT_P (x))
15033 output_operand_lossage ("invalid %%v value");
15034 else
15035 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
15036 (INT_LOWPART (x) >> 16) & 0xffff);
15037 return;
15039 case 'U':
15040 /* Print `u' if this has an auto-increment or auto-decrement. */
15041 if (MEM_P (x)
15042 && (GET_CODE (XEXP (x, 0)) == PRE_INC
15043 || GET_CODE (XEXP (x, 0)) == PRE_DEC
15044 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
15045 putc ('u', file);
15046 return;
15048 case 'V':
15049 /* Print the trap code for this operand. */
15050 switch (GET_CODE (x))
15052 case EQ:
15053 fputs ("eq", file); /* 4 */
15054 break;
15055 case NE:
15056 fputs ("ne", file); /* 24 */
15057 break;
15058 case LT:
15059 fputs ("lt", file); /* 16 */
15060 break;
15061 case LE:
15062 fputs ("le", file); /* 20 */
15063 break;
15064 case GT:
15065 fputs ("gt", file); /* 8 */
15066 break;
15067 case GE:
15068 fputs ("ge", file); /* 12 */
15069 break;
15070 case LTU:
15071 fputs ("llt", file); /* 2 */
15072 break;
15073 case LEU:
15074 fputs ("lle", file); /* 6 */
15075 break;
15076 case GTU:
15077 fputs ("lgt", file); /* 1 */
15078 break;
15079 case GEU:
15080 fputs ("lge", file); /* 5 */
15081 break;
15082 default:
15083 gcc_unreachable ();
15085 break;
15087 case 'w':
15088 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
15089 normally. */
15090 if (INT_P (x))
15091 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
15092 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
15093 else
15094 print_operand (file, x, 0);
15095 return;
15097 case 'W':
15098 /* MB value for a PowerPC64 rldic operand. */
15099 i = clz_hwi (GET_CODE (x) == CONST_INT
15100 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
15102 #if HOST_BITS_PER_WIDE_INT == 32
15103 if (GET_CODE (x) == CONST_INT && i > 0)
15104 i += 32; /* zero-extend high-part was all 0's */
15105 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
15106 i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
15107 #endif
15109 fprintf (file, "%d", i);
15110 return;
15112 case 'x':
15113 /* X is a FPR or Altivec register used in a VSX context. */
15114 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
15115 output_operand_lossage ("invalid %%x value");
15116 else
15118 int reg = REGNO (x);
15119 int vsx_reg = (FP_REGNO_P (reg)
15120 ? reg - 32
15121 : reg - FIRST_ALTIVEC_REGNO + 32);
15123 #ifdef TARGET_REGNAMES
15124 if (TARGET_REGNAMES)
15125 fprintf (file, "%%vs%d", vsx_reg);
15126 else
15127 #endif
15128 fprintf (file, "%d", vsx_reg);
15130 return;
15132 case 'X':
15133 if (MEM_P (x)
15134 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
15135 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
15136 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
15137 putc ('x', file);
15138 return;
15140 case 'Y':
15141 /* Like 'L', for third word of TImode */
15142 if (REG_P (x))
15143 fputs (reg_names[REGNO (x) + 2], file);
15144 else if (MEM_P (x))
15146 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15147 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15148 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15149 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15150 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
15151 else
15152 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
15153 if (small_data_operand (x, GET_MODE (x)))
15154 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15155 reg_names[SMALL_DATA_REG]);
15157 return;
15159 case 'z':
15160 /* X is a SYMBOL_REF. Write out the name preceded by a
15161 period and without any trailing data in brackets. Used for function
15162 names. If we are configured for System V (or the embedded ABI) on
15163 the PowerPC, do not emit the period, since those systems do not use
15164 TOCs and the like. */
15165 gcc_assert (GET_CODE (x) == SYMBOL_REF);
15167 /* Mark the decl as referenced so that cgraph will output the
15168 function. */
15169 if (SYMBOL_REF_DECL (x))
15170 mark_decl_referenced (SYMBOL_REF_DECL (x));
15172 /* For macho, check to see if we need a stub. */
15173 if (TARGET_MACHO)
15175 const char *name = XSTR (x, 0);
15176 #if TARGET_MACHO
15177 if (darwin_emit_branch_islands
15178 && MACHOPIC_INDIRECT
15179 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
15180 name = machopic_indirection_name (x, /*stub_p=*/true);
15181 #endif
15182 assemble_name (file, name);
15184 else if (!DOT_SYMBOLS)
15185 assemble_name (file, XSTR (x, 0));
15186 else
15187 rs6000_output_function_entry (file, XSTR (x, 0));
15188 return;
15190 case 'Z':
15191 /* Like 'L', for last word of TImode. */
15192 if (REG_P (x))
15193 fputs (reg_names[REGNO (x) + 3], file);
15194 else if (MEM_P (x))
15196 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15197 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15198 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15199 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15200 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15201 else
15202 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15203 if (small_data_operand (x, GET_MODE (x)))
15204 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15205 reg_names[SMALL_DATA_REG]);
15207 return;
15209 /* Print AltiVec or SPE memory operand. */
15210 case 'y':
15212 rtx tmp;
15214 gcc_assert (MEM_P (x));
15216 tmp = XEXP (x, 0);
15218 /* Ugly hack because %y is overloaded. */
15219 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15220 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15221 || GET_MODE (x) == TFmode
15222 || GET_MODE (x) == TImode))
15224 /* Handle [reg]. */
15225 if (REG_P (tmp))
15227 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15228 break;
15230 /* Handle [reg+UIMM]. */
15231 else if (GET_CODE (tmp) == PLUS
15232 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15234 int off;
15236 gcc_assert (REG_P (XEXP (tmp, 0)));
15238 off = INTVAL (XEXP (tmp, 1));
15239 fprintf (file, "%d(%s)", off, reg_names[REGNO (XEXP (tmp, 0))]);
15240 break;
15243 /* Fall through. Must be [reg+reg]. */
15245 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15246 && GET_CODE (tmp) == AND
15247 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15248 && INTVAL (XEXP (tmp, 1)) == -16)
15249 tmp = XEXP (tmp, 0);
15250 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15251 && GET_CODE (tmp) == PRE_MODIFY)
15252 tmp = XEXP (tmp, 1);
15253 if (REG_P (tmp))
15254 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15255 else
15257 if (GET_CODE (tmp) != PLUS
15258 || !REG_P (XEXP (tmp, 0))
15259 || !REG_P (XEXP (tmp, 1)))
15261 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15262 break;
15265 if (REGNO (XEXP (tmp, 0)) == 0)
15266 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15267 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15268 else
15269 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15270 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15272 break;
15275 case 0:
15276 if (REG_P (x))
15277 fprintf (file, "%s", reg_names[REGNO (x)]);
15278 else if (MEM_P (x))
15280 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15281 know the width from the mode. */
15282 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15283 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15284 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15285 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15286 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15287 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15288 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15289 output_address (XEXP (XEXP (x, 0), 1));
15290 else
15291 output_address (XEXP (x, 0));
15293 else
15295 if (toc_relative_expr_p (x, false))
15296 /* This hack along with a corresponding hack in
15297 rs6000_output_addr_const_extra arranges to output addends
15298 where the assembler expects to find them. e.g.
15299 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15300 without this hack would be output as "x@toc+4". We
15301 want "x+4@toc". */
15302 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15303 else
15304 output_addr_const (file, x);
15306 return;
15308 case '&':
15309 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15310 return;
15312 default:
15313 output_operand_lossage ("invalid %%xn code");
15317 /* Print the address of an operand. */
15319 void
15320 print_operand_address (FILE *file, rtx x)
15322 if (REG_P (x))
15323 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15324 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15325 || GET_CODE (x) == LABEL_REF)
15327 output_addr_const (file, x);
15328 if (small_data_operand (x, GET_MODE (x)))
15329 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15330 reg_names[SMALL_DATA_REG]);
15331 else
15332 gcc_assert (!TARGET_TOC);
15334 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15335 && REG_P (XEXP (x, 1)))
15337 if (REGNO (XEXP (x, 0)) == 0)
15338 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15339 reg_names[ REGNO (XEXP (x, 0)) ]);
15340 else
15341 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15342 reg_names[ REGNO (XEXP (x, 1)) ]);
15344 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15345 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15346 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15347 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15348 #if TARGET_MACHO
15349 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15350 && CONSTANT_P (XEXP (x, 1)))
15352 fprintf (file, "lo16(");
15353 output_addr_const (file, XEXP (x, 1));
15354 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15356 #endif
15357 #if TARGET_ELF
15358 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15359 && CONSTANT_P (XEXP (x, 1)))
15361 output_addr_const (file, XEXP (x, 1));
15362 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15364 #endif
15365 else if (toc_relative_expr_p (x, false))
15367 /* This hack along with a corresponding hack in
15368 rs6000_output_addr_const_extra arranges to output addends
15369 where the assembler expects to find them. e.g.
15370 (lo_sum (reg 9)
15371 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15372 without this hack would be output as "x@toc+8@l(9)". We
15373 want "x+8@toc@l(9)". */
15374 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15375 if (GET_CODE (x) == LO_SUM)
15376 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15377 else
15378 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15380 else
15381 gcc_unreachable ();
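/* Examples of the address forms handled above (illustrative): a bare
   (reg 9) prints as "0(9)"; (plus (reg 9) (const_int 16)) as "16(9)";
   (plus (reg 9) (reg 10)) as "9,10". For indexed addresses the base
   register is printed first, except that r0 is moved to the second
   slot, since r0 in the base position reads as the constant zero. */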
15384 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15386 static bool
15387 rs6000_output_addr_const_extra (FILE *file, rtx x)
15389 if (GET_CODE (x) == UNSPEC)
15390 switch (XINT (x, 1))
15392 case UNSPEC_TOCREL:
15393 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15394 && REG_P (XVECEXP (x, 0, 1))
15395 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15396 output_addr_const (file, XVECEXP (x, 0, 0));
15397 if (x == tocrel_base && tocrel_offset != const0_rtx)
15399 if (INTVAL (tocrel_offset) >= 0)
15400 fprintf (file, "+");
15401 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15403 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15405 putc ('-', file);
15406 assemble_name (file, toc_label_name);
15408 else if (TARGET_ELF)
15409 fputs ("@toc", file);
15410 return true;
15412 #if TARGET_MACHO
15413 case UNSPEC_MACHOPIC_OFFSET:
15414 output_addr_const (file, XVECEXP (x, 0, 0));
15415 putc ('-', file);
15416 machopic_output_function_base_name (file);
15417 return true;
15418 #endif
15420 return false;
15423 /* Target hook for assembling integer objects. The PowerPC version has
15424 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15425 is defined. It also needs to handle DI-mode objects on 64-bit
15426 targets. */
15428 static bool
15429 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15431 #ifdef RELOCATABLE_NEEDS_FIXUP
15432 /* Special handling for SI values. */
15433 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15435 static int recurse = 0;
15437 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15438 the .fixup section. Since the TOC section is already relocated, we
15439 don't need to mark it here. We used to skip the text section, but it
15440 should never be valid for relocated addresses to be placed in the text
15441 section. */
15442 if (TARGET_RELOCATABLE
15443 && in_section != toc_section
15444 && !recurse
15445 && GET_CODE (x) != CONST_INT
15446 && GET_CODE (x) != CONST_DOUBLE
15447 && CONSTANT_P (x))
15449 char buf[256];
15451 recurse = 1;
15452 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15453 fixuplabelno++;
15454 ASM_OUTPUT_LABEL (asm_out_file, buf);
15455 fprintf (asm_out_file, "\t.long\t(");
15456 output_addr_const (asm_out_file, x);
15457 fprintf (asm_out_file, ")@fixup\n");
15458 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15459 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15460 fprintf (asm_out_file, "\t.long\t");
15461 assemble_name (asm_out_file, buf);
15462 fprintf (asm_out_file, "\n\t.previous\n");
15463 recurse = 0;
15464 return true;
15466 /* Remove initial .'s to turn a -mcall-aixdesc function
15467 address into the address of the descriptor, not the function
15468 itself. */
15469 else if (GET_CODE (x) == SYMBOL_REF
15470 && XSTR (x, 0)[0] == '.'
15471 && DEFAULT_ABI == ABI_AIX)
15473 const char *name = XSTR (x, 0);
15474 while (*name == '.')
15475 name++;
15477 fprintf (asm_out_file, "\t.long\t%s\n", name);
15478 return true;
15481 #endif /* RELOCATABLE_NEEDS_FIXUP */
15482 return default_assemble_integer (x, size, aligned_p);
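/* A sketch of the -mrelocatable fixup stream emitted above for a
   4-byte static initializer "&foo" (illustrative; the exact internal
   label spelling is host-dependent):

	.LCP0:
		.long	(foo)@fixup
		.section ".fixup","aw"
		.align	2
		.long	.LCP0
		.previous
*/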
15485 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15486 /* Emit an assembler directive to set symbol visibility for DECL to
15487 VISIBILITY_TYPE. */
15489 static void
15490 rs6000_assemble_visibility (tree decl, int vis)
15492 /* Functions need to have their entry point symbol visibility set as
15493 well as their descriptor symbol visibility. */
15494 if (DEFAULT_ABI == ABI_AIX
15495 && DOT_SYMBOLS
15496 && TREE_CODE (decl) == FUNCTION_DECL)
15498 static const char * const visibility_types[] = {
15499 NULL, "internal", "hidden", "protected"
15502 const char *name, *type;
15504 name = ((* targetm.strip_name_encoding)
15505 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15506 type = visibility_types[vis];
15508 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15509 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15511 else
15512 default_assemble_visibility (decl, vis);
15514 #endif
15516 enum rtx_code
15517 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15519 /* Reversal of FP compares needs care -- an ordered compare
15520 becomes an unordered compare and vice versa. */
15521 if (mode == CCFPmode
15522 && (!flag_finite_math_only
15523 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15524 || code == UNEQ || code == LTGT))
15525 return reverse_condition_maybe_unordered (code);
15526 else
15527 return reverse_condition (code);
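/* Example (illustrative): under CCFPmode, reversing LT yields UNGE
   rather than GE, because "!(a < b)" is also true when a and b are
   unordered (one of them is a NaN); for the integer CC modes the
   plain GE reversal is correct. */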
15530 /* Generate a compare for CODE. Return a brand-new rtx that
15531 represents the result of the compare. */
15533 static rtx
15534 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15536 enum machine_mode comp_mode;
15537 rtx compare_result;
15538 enum rtx_code code = GET_CODE (cmp);
15539 rtx op0 = XEXP (cmp, 0);
15540 rtx op1 = XEXP (cmp, 1);
15542 if (FLOAT_MODE_P (mode))
15543 comp_mode = CCFPmode;
15544 else if (code == GTU || code == LTU
15545 || code == GEU || code == LEU)
15546 comp_mode = CCUNSmode;
15547 else if ((code == EQ || code == NE)
15548 && unsigned_reg_p (op0)
15549 && (unsigned_reg_p (op1)
15550 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
15551 /* These are unsigned values, perhaps there will be a later
15552 ordering compare that can be shared with this one. */
15553 comp_mode = CCUNSmode;
15554 else
15555 comp_mode = CCmode;
15557 /* If we have an unsigned compare, make sure we don't have a signed value as
15558 an immediate. */
15559 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
15560 && INTVAL (op1) < 0)
15562 op0 = copy_rtx_if_shared (op0);
15563 op1 = force_reg (GET_MODE (op0), op1);
15564 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
15567 /* First, the compare. */
15568 compare_result = gen_reg_rtx (comp_mode);
15570 /* E500 FP compare instructions on the GPRs. Yuck! */
15571 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15572 && FLOAT_MODE_P (mode))
15574 rtx cmp, or_result, compare_result2;
15575 enum machine_mode op_mode = GET_MODE (op0);
15577 if (op_mode == VOIDmode)
15578 op_mode = GET_MODE (op1);
15580 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15581 This explains the following mess. */
15583 switch (code)
15585 case EQ: case UNEQ: case NE: case LTGT:
15586 switch (op_mode)
15588 case SFmode:
15589 cmp = (flag_finite_math_only && !flag_trapping_math)
15590 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15591 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15592 break;
15594 case DFmode:
15595 cmp = (flag_finite_math_only && !flag_trapping_math)
15596 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15597 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15598 break;
15600 case TFmode:
15601 cmp = (flag_finite_math_only && !flag_trapping_math)
15602 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15603 : gen_cmptfeq_gpr (compare_result, op0, op1);
15604 break;
15606 default:
15607 gcc_unreachable ();
15609 break;
15611 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15612 switch (op_mode)
15614 case SFmode:
15615 cmp = (flag_finite_math_only && !flag_trapping_math)
15616 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15617 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15618 break;
15620 case DFmode:
15621 cmp = (flag_finite_math_only && !flag_trapping_math)
15622 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15623 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15624 break;
15626 case TFmode:
15627 cmp = (flag_finite_math_only && !flag_trapping_math)
15628 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15629 : gen_cmptfgt_gpr (compare_result, op0, op1);
15630 break;
15632 default:
15633 gcc_unreachable ();
15635 break;
15637 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15638 switch (op_mode)
15640 case SFmode:
15641 cmp = (flag_finite_math_only && !flag_trapping_math)
15642 ? gen_tstsflt_gpr (compare_result, op0, op1)
15643 : gen_cmpsflt_gpr (compare_result, op0, op1);
15644 break;
15646 case DFmode:
15647 cmp = (flag_finite_math_only && !flag_trapping_math)
15648 ? gen_tstdflt_gpr (compare_result, op0, op1)
15649 : gen_cmpdflt_gpr (compare_result, op0, op1);
15650 break;
15652 case TFmode:
15653 cmp = (flag_finite_math_only && !flag_trapping_math)
15654 ? gen_tsttflt_gpr (compare_result, op0, op1)
15655 : gen_cmptflt_gpr (compare_result, op0, op1);
15656 break;
15658 default:
15659 gcc_unreachable ();
15661 break;
15662 default:
15663 gcc_unreachable ();
15666 /* Synthesize LE and GE from LT/GT || EQ. */
15667 if (code == LE || code == GE || code == LEU || code == GEU)
15669 emit_insn (cmp);
15671 switch (code)
15673 case LE: code = LT; break;
15674 case GE: code = GT; break;
15675 case LEU: code = LT; break;
15676 case GEU: code = GT; break;
15677 default: gcc_unreachable ();
15680 compare_result2 = gen_reg_rtx (CCFPmode);
15682 /* Do the EQ. */
15683 switch (op_mode)
15685 case SFmode:
15686 cmp = (flag_finite_math_only && !flag_trapping_math)
15687 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15688 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15689 break;
15691 case DFmode:
15692 cmp = (flag_finite_math_only && !flag_trapping_math)
15693 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15694 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15695 break;
15697 case TFmode:
15698 cmp = (flag_finite_math_only && !flag_trapping_math)
15699 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15700 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15701 break;
15703 default:
15704 gcc_unreachable ();
15706 emit_insn (cmp);
15708 /* OR them together. */
15709 or_result = gen_reg_rtx (CCFPmode);
15710 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15711 compare_result2);
15712 compare_result = or_result;
15713 code = EQ;
15715 else
15717 if (code == NE || code == LTGT)
15718 code = NE;
15719 else
15720 code = EQ;
15723 emit_insn (cmp);
15725 else
15727 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15728 CLOBBERs to match cmptf_internal2 pattern. */
15729 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15730 && GET_MODE (op0) == TFmode
15731 && !TARGET_IEEEQUAD
15732 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15733 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15734 gen_rtvec (10,
15735 gen_rtx_SET (VOIDmode,
15736 compare_result,
15737 gen_rtx_COMPARE (comp_mode, op0, op1)),
15738 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15739 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15740 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15741 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15742 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15743 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15744 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15745 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15746 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
15747 else if (GET_CODE (op1) == UNSPEC
15748 && XINT (op1, 1) == UNSPEC_SP_TEST)
15750 rtx op1b = XVECEXP (op1, 0, 0);
15751 comp_mode = CCEQmode;
15752 compare_result = gen_reg_rtx (CCEQmode);
15753 if (TARGET_64BIT)
15754 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15755 else
15756 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15758 else
15759 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15760 gen_rtx_COMPARE (comp_mode, op0, op1)));
15763 /* Some kinds of FP comparisons need an OR operation;
15764 under flag_finite_math_only we don't bother. */
15765 if (FLOAT_MODE_P (mode)
15766 && !flag_finite_math_only
15767 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15768 && (code == LE || code == GE
15769 || code == UNEQ || code == LTGT
15770 || code == UNGT || code == UNLT))
15772 enum rtx_code or1, or2;
15773 rtx or1_rtx, or2_rtx, compare2_rtx;
15774 rtx or_result = gen_reg_rtx (CCEQmode);
15776 switch (code)
15778 case LE: or1 = LT; or2 = EQ; break;
15779 case GE: or1 = GT; or2 = EQ; break;
15780 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15781 case LTGT: or1 = LT; or2 = GT; break;
15782 case UNGT: or1 = UNORDERED; or2 = GT; break;
15783 case UNLT: or1 = UNORDERED; or2 = LT; break;
15784 default: gcc_unreachable ();
15786 validate_condition_mode (or1, comp_mode);
15787 validate_condition_mode (or2, comp_mode);
15788 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15789 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15790 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15791 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15792 const_true_rtx);
15793 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15795 compare_result = or_result;
15796 code = EQ;
15799 validate_condition_mode (code, GET_MODE (compare_result));
15801 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
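/* Example of the IOR fixup above (illustrative): a floating-point
   "a <= b" that must honor NaNs has no single CR bit, so the LT and
   EQ bits of the CCFP result are ORed into a fresh CCEQ register
   (a cror at the instruction level) and the caller receives a plain
   EQ test against that register. */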
15805 /* Emit the RTL for an sISEL pattern. */
15807 void
15808 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
15810 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
15813 void
15814 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15816 rtx condition_rtx;
15817 enum machine_mode op_mode;
15818 enum rtx_code cond_code;
15819 rtx result = operands[0];
15821 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15823 rs6000_emit_sISEL (mode, operands);
15824 return;
15827 condition_rtx = rs6000_generate_compare (operands[1], mode);
15828 cond_code = GET_CODE (condition_rtx);
15830 if (FLOAT_MODE_P (mode)
15831 && !TARGET_FPRS && TARGET_HARD_FLOAT)
15833 rtx t;
15835 PUT_MODE (condition_rtx, SImode);
15836 t = XEXP (condition_rtx, 0);
15838 gcc_assert (cond_code == NE || cond_code == EQ);
15840 if (cond_code == NE)
15841 emit_insn (gen_e500_flip_gt_bit (t, t));
15843 emit_insn (gen_move_from_CR_gt_bit (result, t));
15844 return;
15847 if (cond_code == NE
15848 || cond_code == GE || cond_code == LE
15849 || cond_code == GEU || cond_code == LEU
15850 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
15852 rtx not_result = gen_reg_rtx (CCEQmode);
15853 rtx not_op, rev_cond_rtx;
15854 enum machine_mode cc_mode;
15856 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
15858 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
15859 SImode, XEXP (condition_rtx, 0), const0_rtx);
15860 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
15861 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
15862 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
15865 op_mode = GET_MODE (XEXP (operands[1], 0));
15866 if (op_mode == VOIDmode)
15867 op_mode = GET_MODE (XEXP (operands[1], 1));
15869 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
15871 PUT_MODE (condition_rtx, DImode);
15872 convert_move (result, condition_rtx, 0);
15874 else
15876 PUT_MODE (condition_rtx, SImode);
15877 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
15881 /* Emit a branch of kind CODE to location LOC. */
15883 void
15884 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
15886 rtx condition_rtx, loc_ref;
15888 condition_rtx = rs6000_generate_compare (operands[0], mode);
15889 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
15890 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
15891 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
15892 loc_ref, pc_rtx)));
15895 /* Return the string to output a conditional branch to LABEL, which is
15896 the operand number of the label, or -1 if the branch is really a
15897 conditional return.
15899 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
15900 condition code register and its mode specifies what kind of
15901 comparison we made.
15903 REVERSED is nonzero if we should reverse the sense of the comparison.
15905 INSN is the insn. */
15907 char *
15908 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
15910 static char string[64];
15911 enum rtx_code code = GET_CODE (op);
15912 rtx cc_reg = XEXP (op, 0);
15913 enum machine_mode mode = GET_MODE (cc_reg);
15914 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
15915 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
15916 int really_reversed = reversed ^ need_longbranch;
15917 char *s = string;
15918 const char *ccode;
15919 const char *pred;
15920 rtx note;
15922 validate_condition_mode (code, mode);
15924 /* Work out which way this really branches. We could use
15925 reverse_condition_maybe_unordered here always but this
15926 makes the resulting assembler clearer. */
15927 if (really_reversed)
15929 /* Reversal of FP compares needs care -- an ordered compare
15930 becomes an unordered compare and vice versa. */
15931 if (mode == CCFPmode)
15932 code = reverse_condition_maybe_unordered (code);
15933 else
15934 code = reverse_condition (code);
15937 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
15939 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
15940 to the GT bit. */
15941 switch (code)
15943 case EQ:
15944 /* Opposite of GT. */
15945 code = GT;
15946 break;
15948 case NE:
15949 code = UNLE;
15950 break;
15952 default:
15953 gcc_unreachable ();
15957 switch (code)
15959 /* Not all of these are actually distinct opcodes, but
15960 we distinguish them for clarity of the resulting assembler. */
15961 case NE: case LTGT:
15962 ccode = "ne"; break;
15963 case EQ: case UNEQ:
15964 ccode = "eq"; break;
15965 case GE: case GEU:
15966 ccode = "ge"; break;
15967 case GT: case GTU: case UNGT:
15968 ccode = "gt"; break;
15969 case LE: case LEU:
15970 ccode = "le"; break;
15971 case LT: case LTU: case UNLT:
15972 ccode = "lt"; break;
15973 case UNORDERED: ccode = "un"; break;
15974 case ORDERED: ccode = "nu"; break;
15975 case UNGE: ccode = "nl"; break;
15976 case UNLE: ccode = "ng"; break;
15977 default:
15978 gcc_unreachable ();
15981 /* Maybe we have a guess as to how likely the branch is. */
15982 pred = "";
15983 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
15984 if (note != NULL_RTX)
15986 /* PROB is the difference from 50%. */
15987 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
15989 /* Only hint for highly probable/improbable branches on newer
15990 cpus as static prediction overrides processor dynamic
15991 prediction. For older cpus we may as well always hint, but
15992 assume not taken for branches that are very close to 50% as a
15993 mispredicted taken branch is more expensive than a
15994 mispredicted not-taken branch. */
15995 if (rs6000_always_hint
15996 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
15997 && br_prob_note_reliable_p (note)))
15999 if (abs (prob) > REG_BR_PROB_BASE / 20
16000 && ((prob > 0) ^ need_longbranch))
16001 pred = "+";
16002 else
16003 pred = "-";
16007 if (label == NULL)
16008 s += sprintf (s, "b%slr%s ", ccode, pred);
16009 else
16010 s += sprintf (s, "b%s%s ", ccode, pred);
16012 /* We need to escape any '%' characters in the reg_names string.
16013 Assume they'd only be the first character.... */
16014 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
16015 *s++ = '%';
16016 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
16018 if (label != NULL)
16020 /* If the branch distance was too far, we may have to use an
16021 unconditional branch to go the distance. */
16022 if (need_longbranch)
16023 s += sprintf (s, ",$+8\n\tb %s", label);
16024 else
16025 s += sprintf (s, ",%s", label);
16028 return string;
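/* Examples of the strings built above (illustrative): a short,
   likely-taken EQ branch on cr0 comes out as "beq+ 0,.L5"; when the
   target is out of conditional-branch range, the sense is inverted
   and an unconditional branch covers the distance, e.g.
   "bne 0,$+8" followed by "b .L5". */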
16031 /* Return the string to flip the GT bit on a CR. */
16032 char *
16033 output_e500_flip_gt_bit (rtx dst, rtx src)
16035 static char string[64];
16036 int a, b;
16038 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
16039 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
16041 /* GT bit. */
16042 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
16043 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
16045 sprintf (string, "crnot %d,%d", a, b);
16046 return string;
16049 /* Return insn for VSX or Altivec comparisons. */
16051 static rtx
16052 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
16054 rtx mask;
16055 enum machine_mode mode = GET_MODE (op0);
16057 switch (code)
16059 default:
16060 break;
16062 case GE:
16063 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
16064 return NULL_RTX;
16065 /* FALLTHRU for floating-point GE. */
16066 case EQ:
16067 case GT:
16068 case GTU:
16069 case ORDERED:
16070 case UNORDERED:
16071 case UNEQ:
16072 case LTGT:
16073 mask = gen_reg_rtx (mode);
16074 emit_insn (gen_rtx_SET (VOIDmode,
16075 mask,
16076 gen_rtx_fmt_ee (code, mode, op0, op1)));
16077 return mask;
16080 return NULL_RTX;
16083 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
16084 DMODE is expected destination mode. This is a recursive function. */
16086 static rtx
16087 rs6000_emit_vector_compare (enum rtx_code rcode,
16088 rtx op0, rtx op1,
16089 enum machine_mode dmode)
16091 rtx mask;
16092 bool swap_operands = false;
16093 bool try_again = false;
16095 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
16096 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
16098 /* See if the comparison works as is. */
16099 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16100 if (mask)
16101 return mask;
16103 switch (rcode)
16105 case LT:
16106 rcode = GT;
16107 swap_operands = true;
16108 try_again = true;
16109 break;
16110 case LTU:
16111 rcode = GTU;
16112 swap_operands = true;
16113 try_again = true;
16114 break;
16115 case NE:
16116 case UNLE:
16117 case UNLT:
16118 case UNGE:
16119 case UNGT:
16120 /* Invert condition and try again.
16121 e.g., A != B becomes ~(A==B). */
16123 enum rtx_code rev_code;
16124 enum insn_code nor_code;
16125 rtx mask2;
16127 rev_code = reverse_condition_maybe_unordered (rcode);
16128 if (rev_code == UNKNOWN)
16129 return NULL_RTX;
16131 nor_code = optab_handler (one_cmpl_optab, dmode);
16132 if (nor_code == CODE_FOR_nothing)
16133 return NULL_RTX;
16135 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
16136 if (!mask2)
16137 return NULL_RTX;
16139 mask = gen_reg_rtx (dmode);
16140 emit_insn (GEN_FCN (nor_code) (mask, mask2));
16141 return mask;
16143 break;
16144 case GE:
16145 case GEU:
16146 case LE:
16147 case LEU:
16148 /* Try GT/GTU/LT/LTU OR EQ */
16150 rtx c_rtx, eq_rtx;
16151 enum insn_code ior_code;
16152 enum rtx_code new_code;
16154 switch (rcode)
16156 case GE:
16157 new_code = GT;
16158 break;
16160 case GEU:
16161 new_code = GTU;
16162 break;
16164 case LE:
16165 new_code = LT;
16166 break;
16168 case LEU:
16169 new_code = LTU;
16170 break;
16172 default:
16173 gcc_unreachable ();
16176 ior_code = optab_handler (ior_optab, dmode);
16177 if (ior_code == CODE_FOR_nothing)
16178 return NULL_RTX;
16180 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
16181 if (!c_rtx)
16182 return NULL_RTX;
16184 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
16185 if (!eq_rtx)
16186 return NULL_RTX;
16188 mask = gen_reg_rtx (dmode);
16189 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
16190 return mask;
16192 break;
16193 default:
16194 return NULL_RTX;
16197 if (try_again)
16199 if (swap_operands)
16201 rtx tmp;
16202 tmp = op0;
16203 op0 = op1;
16204 op1 = tmp;
16207 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16208 if (mask)
16209 return mask;
16212 /* You only get two chances. */
16213 return NULL_RTX;
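/* Examples of the rewrites above (illustrative): LT is handled by
   swapping the operands of a GT; NE becomes the one's complement of
   an EQ mask via the one_cmpl optab (a vnor of the mask with itself
   on AltiVec); GE becomes GT ORed with EQ. If no rewrite lands on a
   native comparison, NULL_RTX tells the caller to fall back. */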
16216 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16217 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16218 operands for the relation operation COND. */
16220 int
16221 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16222 rtx cond, rtx cc_op0, rtx cc_op1)
16224 enum machine_mode dest_mode = GET_MODE (dest);
16225 enum machine_mode mask_mode = GET_MODE (cc_op0);
16226 enum rtx_code rcode = GET_CODE (cond);
16227 enum machine_mode cc_mode = CCmode;
16228 rtx mask;
16229 rtx cond2;
16230 rtx tmp;
16231 bool invert_move = false;
16233 if (VECTOR_UNIT_NONE_P (dest_mode))
16234 return 0;
16236 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16237 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16239 switch (rcode)
16241 /* Swap operands if we can, and fall back to doing the operation as
16242 specified, and doing a NOR to invert the test. */
16243 case NE:
16244 case UNLE:
16245 case UNLT:
16246 case UNGE:
16247 case UNGT:
16248 /* Invert condition and try again.
16249 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16250 invert_move = true;
16251 rcode = reverse_condition_maybe_unordered (rcode);
16252 if (rcode == UNKNOWN)
16253 return 0;
16254 break;
16256 /* Mark unsigned tests with CCUNSmode. */
16257 case GTU:
16258 case GEU:
16259 case LTU:
16260 case LEU:
16261 cc_mode = CCUNSmode;
16262 break;
16264 default:
16265 break;
16268 /* Get the vector mask for the given relational operations. */
16269 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16271 if (!mask)
16272 return 0;
16274 if (invert_move)
16276 tmp = op_true;
16277 op_true = op_false;
16278 op_false = tmp;
16281 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16282 CONST0_RTX (dest_mode));
16283 emit_insn (gen_rtx_SET (VOIDmode,
16284 dest,
16285 gen_rtx_IF_THEN_ELSE (dest_mode,
16286 cond2,
16287 op_true,
16288 op_false)));
16289 return 1;
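/* Example (illustrative): "A = (B != C) ? D : E" has no native vector
   NE, so the condition is inverted to EQ and the move arms swap --
   "A = (B == C) ? E : D". The EQ mask is materialized and the final
   IF_THEN_ELSE (a vsel/xxsel-style select) picks E in the lanes where
   the mask is all ones. */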
16292 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
16293 operands of the last comparison is nonzero/true, FALSE_COND if it
16294 is zero/false. Return 0 if the hardware has no such operation. */
16296 int
16297 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16299 enum rtx_code code = GET_CODE (op);
16300 rtx op0 = XEXP (op, 0);
16301 rtx op1 = XEXP (op, 1);
16302 REAL_VALUE_TYPE c1;
16303 enum machine_mode compare_mode = GET_MODE (op0);
16304 enum machine_mode result_mode = GET_MODE (dest);
16305 rtx temp;
16306 bool is_against_zero;
16308 /* These modes should always match. */
16309 if (GET_MODE (op1) != compare_mode
16310 /* In the isel case however, we can use a compare immediate, so
16311 op1 may be a small constant. */
16312 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16313 return 0;
16314 if (GET_MODE (true_cond) != result_mode)
16315 return 0;
16316 if (GET_MODE (false_cond) != result_mode)
16317 return 0;
16319 /* Don't allow using floating point comparisons for integer results for
16320 now. */
16321 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
16322 return 0;
16324 /* First, work out if the hardware can do this at all, or
16325 if it's too slow.... */
16326 if (!FLOAT_MODE_P (compare_mode))
16328 if (TARGET_ISEL)
16329 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16330 return 0;
16332 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16333 && SCALAR_FLOAT_MODE_P (compare_mode))
16334 return 0;
16336 is_against_zero = op1 == CONST0_RTX (compare_mode);
16338 /* A floating-point subtract might overflow, underflow, or produce
16339 an inexact result, thus changing the floating-point flags, so it
16340 can't be generated if we care about that. It's safe if one side
16341 of the construct is zero, since then no subtract will be
16342 generated. */
16343 if (SCALAR_FLOAT_MODE_P (compare_mode)
16344 && flag_trapping_math && ! is_against_zero)
16345 return 0;
16347 /* Eliminate half of the comparisons by switching operands; this
16348 makes the remaining code simpler. */
16349 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16350 || code == LTGT || code == LT || code == UNLE)
16352 code = reverse_condition_maybe_unordered (code);
16353 temp = true_cond;
16354 true_cond = false_cond;
16355 false_cond = temp;
16358 /* UNEQ and LTGT take four instructions for a comparison with zero,
16359 so it'll probably be faster to use a branch here too. */
16360 if (code == UNEQ && HONOR_NANS (compare_mode))
16361 return 0;
16363 if (GET_CODE (op1) == CONST_DOUBLE)
16364 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16366 /* We're going to try to implement comparisons by performing
16367 a subtract, then comparing against zero. Unfortunately,
16368 Inf - Inf is NaN which is not zero, and so if we don't
16369 know that the operand is finite and the comparison
16370 would treat EQ different to UNORDERED, we can't do it. */
16371 if (HONOR_INFINITIES (compare_mode)
16372 && code != GT && code != UNGE
16373 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16374 /* Constructs of the form (a OP b ? a : b) are safe. */
16375 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16376 || (! rtx_equal_p (op0, true_cond)
16377 && ! rtx_equal_p (op1, true_cond))))
16378 return 0;
16380 /* At this point we know we can use fsel. */
16382 /* Reduce the comparison to a comparison against zero. */
16383 if (! is_against_zero)
16385 temp = gen_reg_rtx (compare_mode);
16386 emit_insn (gen_rtx_SET (VOIDmode, temp,
16387 gen_rtx_MINUS (compare_mode, op0, op1)));
16388 op0 = temp;
16389 op1 = CONST0_RTX (compare_mode);
16392 /* If we don't care about NaNs we can reduce some of the comparisons
16393 down to faster ones. */
16394 if (! HONOR_NANS (compare_mode))
16395 switch (code)
16397 case GT:
16398 code = LE;
16399 temp = true_cond;
16400 true_cond = false_cond;
16401 false_cond = temp;
16402 break;
16403 case UNGE:
16404 code = GE;
16405 break;
16406 case UNEQ:
16407 code = EQ;
16408 break;
16409 default:
16410 break;
16413 /* Now, reduce everything down to a GE. */
16414 switch (code)
16416 case GE:
16417 break;
16419 case LE:
16420 temp = gen_reg_rtx (compare_mode);
16421 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16422 op0 = temp;
16423 break;
16425 case ORDERED:
16426 temp = gen_reg_rtx (compare_mode);
16427 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16428 op0 = temp;
16429 break;
16431 case EQ:
16432 temp = gen_reg_rtx (compare_mode);
16433 emit_insn (gen_rtx_SET (VOIDmode, temp,
16434 gen_rtx_NEG (compare_mode,
16435 gen_rtx_ABS (compare_mode, op0))));
16436 op0 = temp;
16437 break;
16439 case UNGE:
16440 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16441 temp = gen_reg_rtx (result_mode);
16442 emit_insn (gen_rtx_SET (VOIDmode, temp,
16443 gen_rtx_IF_THEN_ELSE (result_mode,
16444 gen_rtx_GE (VOIDmode,
16445 op0, op1),
16446 true_cond, false_cond)));
16447 false_cond = true_cond;
16448 true_cond = temp;
16450 temp = gen_reg_rtx (compare_mode);
16451 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16452 op0 = temp;
16453 break;
16455 case GT:
16456 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16457 temp = gen_reg_rtx (result_mode);
16458 emit_insn (gen_rtx_SET (VOIDmode, temp,
16459 gen_rtx_IF_THEN_ELSE (result_mode,
16460 gen_rtx_GE (VOIDmode,
16461 op0, op1),
16462 true_cond, false_cond)));
16463 true_cond = false_cond;
16464 false_cond = temp;
16466 temp = gen_reg_rtx (compare_mode);
16467 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16468 op0 = temp;
16469 break;
16471 default:
16472 gcc_unreachable ();
16475 emit_insn (gen_rtx_SET (VOIDmode, dest,
16476 gen_rtx_IF_THEN_ELSE (result_mode,
16477 gen_rtx_GE (VOIDmode,
16478 op0, op1),
16479 true_cond, false_cond)));
16480 return 1;
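/* Worked example (illustrative, assuming the NaN and trapping-math
   checks above permit it): "x = (a <= b) ? p : q" is first reduced to
   a compare against zero, "x = (a - b <= 0) ? p : q", and the LE arm
   then negates the operand so the single fsel-style test emitted is
   "x = (-(a - b) >= 0) ? p : q". */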
16483 /* Same as above, but for ints (isel). */
16485 static int
16486 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16488 rtx condition_rtx, cr;
16489 enum machine_mode mode = GET_MODE (dest);
16490 enum rtx_code cond_code;
16491 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
16492 bool signedp;
16494 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16495 return 0;
16497 /* We still have to do the compare, because isel doesn't do a
16498 compare, it just looks at the CRx bits set by a previous compare
16499 instruction. */
16500 condition_rtx = rs6000_generate_compare (op, mode);
16501 cond_code = GET_CODE (condition_rtx);
16502 cr = XEXP (condition_rtx, 0);
16503 signedp = GET_MODE (cr) == CCmode;
16505 isel_func = (mode == SImode
16506 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
16507 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
16509 switch (cond_code)
16511 case LT: case GT: case LTU: case GTU: case EQ:
16512 /* isel handles these directly. */
16513 break;
16515 default:
16516 /* We need to swap the sense of the comparison. */
16518 rtx t = true_cond;
16519 true_cond = false_cond;
16520 false_cond = t;
16521 PUT_CODE (condition_rtx, reverse_condition (cond_code));
16523 break;
16526 false_cond = force_reg (mode, false_cond);
16527 if (true_cond != const0_rtx)
16528 true_cond = force_reg (mode, true_cond);
16530 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
16532 return 1;
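/* For example, (x < y ? a : b) in SImode expands along the lines of
   (illustrative only):

       cmpw  cr0,x,y        # set the CR bits
       isel  dest,a,b,lt    # dest = CR0[lt] ? a : b

   Since isel tests exactly one CR bit, codes such as GE or NE are
   handled above by reversing the condition and swapping the arms.  */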
16535 const char *
16536 output_isel (rtx *operands)
16538 enum rtx_code code;
16540 code = GET_CODE (operands[1]);
16542 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16544 gcc_assert (GET_CODE (operands[2]) == REG
16545 && GET_CODE (operands[3]) == REG);
16546 PUT_CODE (operands[1], reverse_condition (code));
16547 return "isel %0,%3,%2,%j1";
16550 return "isel %0,%2,%3,%j1";
16553 void
16554 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16556 enum machine_mode mode = GET_MODE (op0);
16557 enum rtx_code c;
16558 rtx target;
16560 /* VSX/altivec have direct min/max insns. */
16561 if ((code == SMAX || code == SMIN)
16562 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16563 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
16565 emit_insn (gen_rtx_SET (VOIDmode,
16566 dest,
16567 gen_rtx_fmt_ee (code, mode, op0, op1)));
16568 return;
16571 if (code == SMAX || code == SMIN)
16572 c = GE;
16573 else
16574 c = GEU;
16576 if (code == SMAX || code == UMAX)
16577 target = emit_conditional_move (dest, c, op0, op1, mode,
16578 op0, op1, mode, 0);
16579 else
16580 target = emit_conditional_move (dest, c, op0, op1, mode,
16581 op1, op0, mode, 0);
16582 gcc_assert (target);
16583 if (target != dest)
16584 emit_move_insn (dest, target);
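/* Restated (illustrative): smax (a, b) is emitted as the conditional
   move (a >= b ? a : b) and smin (a, b) as (a >= b ? b : a), so on
   scalar FP this typically bottoms out in the fsel sequence above.  */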
16587 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16588 COND is true. Mark the jump as unlikely to be taken. */
16590 static void
16591 emit_unlikely_jump (rtx cond, rtx label)
16593 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16594 rtx x;
16596 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16597 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16598 add_reg_note (x, REG_BR_PROB, very_unlikely);
16601 /* A subroutine of the atomic operation splitters. Emit a load-locked
16602 instruction in MODE. */
16604 static void
16605 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16607 rtx (*fn) (rtx, rtx) = NULL;
16609 switch (mode)
16611 case SImode:
16612 fn = gen_load_lockedsi;
16613 break;
16614 case DImode:
16615 fn = gen_load_lockeddi;
16616 break;
16617 default:
16618 gcc_unreachable ();
16620 emit_insn (fn (reg, mem));
16623 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16624 instruction in MODE. */
16626 static void
16627 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16629 rtx (*fn) (rtx, rtx, rtx) = NULL;
16631 switch (mode)
16633 case SImode:
16634 fn = gen_store_conditionalsi;
16635 break;
16636 case DImode:
16637 fn = gen_store_conditionaldi;
16638 break;
16639 default:
16640 gcc_unreachable ();
16643 /* Emit sync before stwcx. to address PPC405 Erratum. */
16644 if (PPC405_ERRATUM77)
16645 emit_insn (gen_hwsync ());
16647 emit_insn (fn (res, mem, val));
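/* The load-locked/store-conditional pair is the building block for
   all of the atomic splitters below; the generated code has roughly
   this shape (illustrative):

       loop: lwarx  rT,0,rADDR     # load word, set reservation
             ...                   # compute the new value
             stwcx. rN,0,rADDR     # store iff reservation still held
             bne-   loop           # reservation lost: retry  */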
16650 /* Expand barriers before and after a load_locked/store_cond sequence. */
16652 static rtx
16653 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
16655 rtx addr = XEXP (mem, 0);
16656 int strict_p = (reload_in_progress || reload_completed);
16658 if (!legitimate_indirect_address_p (addr, strict_p)
16659 && !legitimate_indexed_address_p (addr, strict_p))
16661 addr = force_reg (Pmode, addr);
16662 mem = replace_equiv_address_nv (mem, addr);
16665 switch (model)
16667 case MEMMODEL_RELAXED:
16668 case MEMMODEL_CONSUME:
16669 case MEMMODEL_ACQUIRE:
16670 break;
16671 case MEMMODEL_RELEASE:
16672 case MEMMODEL_ACQ_REL:
16673 emit_insn (gen_lwsync ());
16674 break;
16675 case MEMMODEL_SEQ_CST:
16676 emit_insn (gen_hwsync ());
16677 break;
16678 default:
16679 gcc_unreachable ();
16681 return mem;
16684 static void
16685 rs6000_post_atomic_barrier (enum memmodel model)
16687 switch (model)
16689 case MEMMODEL_RELAXED:
16690 case MEMMODEL_CONSUME:
16691 case MEMMODEL_RELEASE:
16692 break;
16693 case MEMMODEL_ACQUIRE:
16694 case MEMMODEL_ACQ_REL:
16695 case MEMMODEL_SEQ_CST:
16696 emit_insn (gen_isync ());
16697 break;
16698 default:
16699 gcc_unreachable ();
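/* Taken together, the two barrier routines above give the usual
   mapping from the C11 memory models onto PowerPC fences (summary,
   per the switches above):

       relaxed, consume   no fence
       acquire            isync after the sequence
       release            lwsync before
       acq_rel            lwsync before, isync after
       seq_cst            hwsync before, isync after  */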
16703 /* A subroutine of the various atomic expanders. For sub-word operations,
16704 we must adjust things to operate on SImode. Given the original MEM,
16705 return a new aligned memory. Also build and return the quantities by
16706 which to shift and mask. */
16708 static rtx
16709 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
16711 rtx addr, align, shift, mask, mem;
16712 HOST_WIDE_INT shift_mask;
16713 enum machine_mode mode = GET_MODE (orig_mem);
16715 /* For smaller modes, we have to implement this via SImode. */
16716 shift_mask = (mode == QImode ? 0x18 : 0x10);
16718 addr = XEXP (orig_mem, 0);
16719 addr = force_reg (GET_MODE (addr), addr);
16721 /* Aligned memory containing subword. Generate a new memory. We
16722 do not want any of the existing MEM_ATTR data, as we're now
16723 accessing memory outside the original object. */
16724 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
16725 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16726 mem = gen_rtx_MEM (SImode, align);
16727 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
16728 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
16729 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
16731 /* Shift amount for subword relative to aligned word. */
16732 shift = gen_reg_rtx (SImode);
16733 addr = gen_lowpart (SImode, addr);
16734 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
16735 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
16736 shift, 1, OPTAB_LIB_WIDEN);
16737 *pshift = shift;
16739 /* Mask for insertion. */
16740 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
16741 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
16742 *pmask = mask;
16744 return mem;
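/* A worked big-endian example (illustrative): for a HImode access at
   address 0x1002, shift_mask is 0x10 and the containing word is at
   0x1000.  The rlwinm yields (0x1002 << 3) & 0x10 == 0x10, and the
   XOR with shift_mask gives a shift of 0, as the halfword at byte
   offset 2 is the least significant half of the word; *pmask becomes
   0xFFFF << 0.  */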
16747 /* A subroutine of the various atomic expanders. For sub-word operands,
16748 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
16750 static rtx
16751 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
16753 rtx x;
16755 x = gen_reg_rtx (SImode);
16756 emit_insn (gen_rtx_SET (VOIDmode, x,
16757 gen_rtx_AND (SImode,
16758 gen_rtx_NOT (SImode, mask),
16759 oldval)));
16761 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
16763 return x;
16766 /* A subroutine of the various atomic expanders. For sub-word operands,
16767 extract WIDE to NARROW via SHIFT. */
16769 static void
16770 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
16772 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
16773 wide, 1, OPTAB_LIB_WIDEN);
16774 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
16777 /* Expand an atomic compare and swap operation. */
16779 void
16780 rs6000_expand_atomic_compare_and_swap (rtx operands[])
16782 rtx boolval, retval, mem, oldval, newval, cond;
16783 rtx label1, label2, x, mask, shift;
16784 enum machine_mode mode;
16785 enum memmodel mod_s, mod_f;
16786 bool is_weak;
16788 boolval = operands[0];
16789 retval = operands[1];
16790 mem = operands[2];
16791 oldval = operands[3];
16792 newval = operands[4];
16793 is_weak = (INTVAL (operands[5]) != 0);
16794 mod_s = (enum memmodel) INTVAL (operands[6]);
16795 mod_f = (enum memmodel) INTVAL (operands[7]);
16796 mode = GET_MODE (mem);
16798 mask = shift = NULL_RTX;
16799 if (mode == QImode || mode == HImode)
16801 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16804 /* Shift and mask OLDVAL into position within the word. */
16804 oldval = convert_modes (SImode, mode, oldval, 1);
16805 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
16806 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16808 /* Shift and mask NEWVAL into position within the word. */
16809 newval = convert_modes (SImode, mode, newval, 1);
16810 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
16811 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16813 /* Prepare to adjust the return value. */
16814 retval = gen_reg_rtx (SImode);
16815 mode = SImode;
16817 else if (reg_overlap_mentioned_p (retval, oldval))
16818 oldval = copy_to_reg (oldval);
16820 mem = rs6000_pre_atomic_barrier (mem, mod_s);
16822 label1 = NULL_RTX;
16823 if (!is_weak)
16825 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16826 emit_label (XEXP (label1, 0));
16828 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16830 emit_load_locked (mode, retval, mem);
16832 x = retval;
16833 if (mask)
16835 x = expand_simple_binop (SImode, AND, retval, mask,
16836 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16839 cond = gen_reg_rtx (CCmode);
16840 x = gen_rtx_COMPARE (CCmode, x, oldval);
16841 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16843 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16844 emit_unlikely_jump (x, label2);
16846 x = newval;
16847 if (mask)
16848 x = rs6000_mask_atomic_subword (retval, newval, mask);
16850 emit_store_conditional (mode, cond, mem, x);
16852 if (!is_weak)
16854 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16855 emit_unlikely_jump (x, label1);
16858 if (mod_f != MEMMODEL_RELAXED)
16859 emit_label (XEXP (label2, 0));
16861 rs6000_post_atomic_barrier (mod_s);
16863 if (mod_f == MEMMODEL_RELAXED)
16864 emit_label (XEXP (label2, 0));
16866 if (shift)
16867 rs6000_finish_atomic_subword (operands[1], retval, shift);
16869 /* In all cases, CR0 contains EQ on success, and NE on failure. */
16870 x = gen_rtx_EQ (SImode, cond, const0_rtx);
16871 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
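/* For a strong SImode compare-and-swap the expansion above
   corresponds roughly to (illustrative; fences depend on MOD_S/MOD_F):

       loop: lwarx  rRET,0,rMEM
             cmpw   cr0,rRET,rOLD
             bne-   fail
             stwcx. rNEW,0,rMEM
             bne-   loop
       fail:                       # CR0: EQ on success, NE on failure  */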
16874 /* Expand an atomic exchange operation. */
16876 void
16877 rs6000_expand_atomic_exchange (rtx operands[])
16879 rtx retval, mem, val, cond;
16880 enum machine_mode mode;
16881 enum memmodel model;
16882 rtx label, x, mask, shift;
16884 retval = operands[0];
16885 mem = operands[1];
16886 val = operands[2];
16887 model = (enum memmodel) INTVAL (operands[3]);
16888 mode = GET_MODE (mem);
16890 mask = shift = NULL_RTX;
16891 if (mode == QImode || mode == HImode)
16893 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16895 /* Shift and mask VAL into position within the word. */
16896 val = convert_modes (SImode, mode, val, 1);
16897 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16898 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16900 /* Prepare to adjust the return value. */
16901 retval = gen_reg_rtx (SImode);
16902 mode = SImode;
16905 mem = rs6000_pre_atomic_barrier (mem, model);
16907 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16908 emit_label (XEXP (label, 0));
16910 emit_load_locked (mode, retval, mem);
16912 x = val;
16913 if (mask)
16914 x = rs6000_mask_atomic_subword (retval, val, mask);
16916 cond = gen_reg_rtx (CCmode);
16917 emit_store_conditional (mode, cond, mem, x);
16919 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16920 emit_unlikely_jump (x, label);
16922 rs6000_post_atomic_barrier (model);
16924 if (shift)
16925 rs6000_finish_atomic_subword (operands[0], retval, shift);
16928 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
16929 to perform. MEM is the memory on which to operate. VAL is the second
16930 operand of the binary operator. BEFORE and AFTER are optional locations to
16931 return the value of MEM either before or after the operation. MODEL_RTX
16932 is a CONST_INT containing the memory model to use. */
16934 void
16935 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
16936 rtx orig_before, rtx orig_after, rtx model_rtx)
16938 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
16939 enum machine_mode mode = GET_MODE (mem);
16940 rtx label, x, cond, mask, shift;
16941 rtx before = orig_before, after = orig_after;
16943 mask = shift = NULL_RTX;
16944 if (mode == QImode || mode == HImode)
16946 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16948 /* Shift and mask VAL into position within the word. */
16949 val = convert_modes (SImode, mode, val, 1);
16950 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16951 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16953 switch (code)
16955 case IOR:
16956 case XOR:
16957 /* We've already zero-extended VAL. That is sufficient to
16958 make certain that it does not affect other bits. */
16959 mask = NULL;
16960 break;
16962 case AND:
16963 /* If we make certain that all of the other bits in VAL are
16964 set, that will be sufficient to not affect other bits. */
16965 x = gen_rtx_NOT (SImode, mask);
16966 x = gen_rtx_IOR (SImode, x, val);
16967 emit_insn (gen_rtx_SET (VOIDmode, val, x));
16968 mask = NULL;
16969 break;
16971 case NOT:
16972 case PLUS:
16973 case MINUS:
16974 /* These will all affect bits outside the field and need
16975 adjustment via MASK within the loop. */
16976 break;
16978 default:
16979 gcc_unreachable ();
16982 /* Prepare to adjust the return value. */
16983 before = gen_reg_rtx (SImode);
16984 if (after)
16985 after = gen_reg_rtx (SImode);
16986 mode = SImode;
16989 mem = rs6000_pre_atomic_barrier (mem, model);
16991 label = gen_label_rtx ();
16992 emit_label (label);
16993 label = gen_rtx_LABEL_REF (VOIDmode, label);
16995 if (before == NULL_RTX)
16996 before = gen_reg_rtx (mode);
16998 emit_load_locked (mode, before, mem);
17000 if (code == NOT)
17002 x = expand_simple_binop (mode, AND, before, val,
17003 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17004 after = expand_simple_unop (mode, NOT, x, after, 1);
17006 else
17008 after = expand_simple_binop (mode, code, before, val,
17009 after, 1, OPTAB_LIB_WIDEN);
17012 x = after;
17013 if (mask)
17015 x = expand_simple_binop (SImode, AND, after, mask,
17016 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17017 x = rs6000_mask_atomic_subword (before, x, mask);
17020 cond = gen_reg_rtx (CCmode);
17021 emit_store_conditional (mode, cond, mem, x);
17023 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
17024 emit_unlikely_jump (x, label);
17026 rs6000_post_atomic_barrier (model);
17028 if (shift)
17030 if (orig_before)
17031 rs6000_finish_atomic_subword (orig_before, before, shift);
17032 if (orig_after)
17033 rs6000_finish_atomic_subword (orig_after, after, shift);
17035 else if (orig_after && after != orig_after)
17036 emit_move_insn (orig_after, after);
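/* For instance, an SImode atomic fetch-and-add emitted by this
   routine looks in outline like (illustrative; barriers per MODEL):

       loop: lwarx  rBEFORE,0,rMEM
             add    rAFTER,rBEFORE,rVAL
             stwcx. rAFTER,0,rMEM
             bne-   loop  */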
17039 /* Emit instructions to move SRC to DST. Called by splitters for
17040 multi-register moves. It will emit at most one instruction for
17041 each register that is accessed; that is, it won't emit li/lis pairs
17042 (or equivalent for 64-bit code). One of SRC or DST must be a hard
17043 register. */
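/* For example (illustrative), moving a DImode value on 32-bit from
   (r3,r4) to (r4,r5) overlaps destructively: copying r4 <- r3 first
   would clobber the source of r5.  Hence the backwards walk below
   whenever REGNO (src) < REGNO (dst).  */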
17045 void
17046 rs6000_split_multireg_move (rtx dst, rtx src)
17048 /* The register number of the first register being moved. */
17049 int reg;
17050 /* The mode that is to be moved. */
17051 enum machine_mode mode;
17052 /* The mode that the move is being done in, and its size. */
17053 enum machine_mode reg_mode;
17054 int reg_mode_size;
17055 /* The number of registers that will be moved. */
17056 int nregs;
17058 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
17059 mode = GET_MODE (dst);
17060 nregs = hard_regno_nregs[reg][mode];
17061 if (FP_REGNO_P (reg))
17062 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
17063 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
17064 else if (ALTIVEC_REGNO_P (reg))
17065 reg_mode = V16QImode;
17066 else if (TARGET_E500_DOUBLE && mode == TFmode)
17067 reg_mode = DFmode;
17068 else
17069 reg_mode = word_mode;
17070 reg_mode_size = GET_MODE_SIZE (reg_mode);
17072 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
17074 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
17076 /* Move register range backwards, if we might have destructive
17077 overlap. */
17078 int i;
17079 for (i = nregs - 1; i >= 0; i--)
17080 emit_insn (gen_rtx_SET (VOIDmode,
17081 simplify_gen_subreg (reg_mode, dst, mode,
17082 i * reg_mode_size),
17083 simplify_gen_subreg (reg_mode, src, mode,
17084 i * reg_mode_size)));
17086 else
17088 int i;
17089 int j = -1;
17090 bool used_update = false;
17091 rtx restore_basereg = NULL_RTX;
17093 if (MEM_P (src) && INT_REGNO_P (reg))
17095 rtx breg;
17097 if (GET_CODE (XEXP (src, 0)) == PRE_INC
17098 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
17100 rtx delta_rtx;
17101 breg = XEXP (XEXP (src, 0), 0);
17102 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
17103 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
17104 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
17105 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17106 src = replace_equiv_address (src, breg);
17108 else if (! rs6000_offsettable_memref_p (src, reg_mode))
17110 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
17112 rtx basereg = XEXP (XEXP (src, 0), 0);
17113 if (TARGET_UPDATE)
17115 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
17116 emit_insn (gen_rtx_SET (VOIDmode, ndst,
17117 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
17118 used_update = true;
17120 else
17121 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17122 XEXP (XEXP (src, 0), 1)));
17123 src = replace_equiv_address (src, basereg);
17125 else
17127 rtx basereg = gen_rtx_REG (Pmode, reg);
17128 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
17129 src = replace_equiv_address (src, basereg);
17133 breg = XEXP (src, 0);
17134 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
17135 breg = XEXP (breg, 0);
17137 /* If the base register we are using to address memory is
17138 also a destination reg, then change that register last. */
17139 if (REG_P (breg)
17140 && REGNO (breg) >= REGNO (dst)
17141 && REGNO (breg) < REGNO (dst) + nregs)
17142 j = REGNO (breg) - REGNO (dst);
17144 else if (MEM_P (dst) && INT_REGNO_P (reg))
17146 rtx breg;
17148 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
17149 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
17151 rtx delta_rtx;
17152 breg = XEXP (XEXP (dst, 0), 0);
17153 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
17154 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
17155 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
17157 /* We have to update the breg before doing the store.
17158 Use store with update, if available. */
17160 if (TARGET_UPDATE)
17162 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17163 emit_insn (TARGET_32BIT
17164 ? (TARGET_POWERPC64
17165 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
17166 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
17167 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
17168 used_update = true;
17170 else
17171 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
17172 dst = replace_equiv_address (dst, breg);
17174 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
17175 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
17177 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
17179 rtx basereg = XEXP (XEXP (dst, 0), 0);
17180 if (TARGET_UPDATE)
17182 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
17183 emit_insn (gen_rtx_SET (VOIDmode,
17184 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
17185 used_update = true;
17187 else
17188 emit_insn (gen_rtx_SET (VOIDmode, basereg,
17189 XEXP (XEXP (dst, 0), 1)));
17190 dst = replace_equiv_address (dst, basereg);
17192 else
17194 rtx basereg = XEXP (XEXP (dst, 0), 0);
17195 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
17196 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
17197 && REG_P (basereg)
17198 && REG_P (offsetreg)
17199 && REGNO (basereg) != REGNO (offsetreg));
17200 if (REGNO (basereg) == 0)
17202 rtx tmp = offsetreg;
17203 offsetreg = basereg;
17204 basereg = tmp;
17206 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
17207 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
17208 dst = replace_equiv_address (dst, basereg);
17211 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
17212 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
17215 for (i = 0; i < nregs; i++)
17217 /* Calculate index to next subword. */
17218 ++j;
17219 if (j == nregs)
17220 j = 0;
17222 /* If the compiler has already emitted the move of the first word
17223 by store with update, there is no need to do anything. */
17224 if (j == 0 && used_update)
17225 continue;
17227 emit_insn (gen_rtx_SET (VOIDmode,
17228 simplify_gen_subreg (reg_mode, dst, mode,
17229 j * reg_mode_size),
17230 simplify_gen_subreg (reg_mode, src, mode,
17231 j * reg_mode_size)));
17233 if (restore_basereg != NULL_RTX)
17234 emit_insn (restore_basereg);
17239 /* This page contains routines that are used to determine what the
17240 function prologue and epilogue code will do and write them out. */
17242 static inline bool
17243 save_reg_p (int r)
17245 return !call_used_regs[r] && df_regs_ever_live_p (r);
17248 /* Return the first fixed-point register that is required to be
17249 saved. 32 if none. */
17251 int
17252 first_reg_to_save (void)
17254 int first_reg;
17256 /* Find lowest numbered live register. */
17257 for (first_reg = 13; first_reg <= 31; first_reg++)
17258 if (save_reg_p (first_reg))
17259 break;
17261 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17262 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17263 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17264 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17265 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17266 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17268 #if TARGET_MACHO
17269 if (flag_pic
17270 && crtl->uses_pic_offset_table
17271 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17272 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17273 #endif
17275 return first_reg;
17278 /* Similar, for FP regs. */
17280 int
17281 first_fp_reg_to_save (void)
17283 int first_reg;
17285 /* Find lowest numbered live register. */
17286 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17287 if (save_reg_p (first_reg))
17288 break;
17290 return first_reg;
17293 /* Similar, for AltiVec regs. */
17295 static int
17296 first_altivec_reg_to_save (void)
17298 int i;
17300 /* Stack frame remains as is unless we are in AltiVec ABI. */
17301 if (! TARGET_ALTIVEC_ABI)
17302 return LAST_ALTIVEC_REGNO + 1;
17304 /* On Darwin, the unwind routines are compiled without
17305 TARGET_ALTIVEC, and use save_world to save/restore the
17306 altivec registers when necessary. */
17307 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17308 && ! TARGET_ALTIVEC)
17309 return FIRST_ALTIVEC_REGNO + 20;
17311 /* Find lowest numbered live register. */
17312 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17313 if (save_reg_p (i))
17314 break;
17316 return i;
17319 /* Return a 32-bit mask of the AltiVec registers we need to set in
17320 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
17321 the 32-bit word is 0. */
17323 static unsigned int
17324 compute_vrsave_mask (void)
17326 unsigned int i, mask = 0;
17328 /* On Darwin, the unwind routines are compiled without
17329 TARGET_ALTIVEC, and use save_world to save/restore the
17330 call-saved altivec registers when necessary. */
17331 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17332 && ! TARGET_ALTIVEC)
17333 mask |= 0xFFF;
17335 /* First, find out if we use _any_ altivec registers. */
17336 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17337 if (df_regs_ever_live_p (i))
17338 mask |= ALTIVEC_REG_BIT (i);
17340 if (mask == 0)
17341 return mask;
17343 /* Next, remove the argument registers from the set. These must
17344 be in the VRSAVE mask set by the caller, so we don't need to add
17345 them in again. More importantly, the mask we compute here is
17346 used to generate CLOBBERs in the set_vrsave insn, and we do not
17347 wish the argument registers to die. */
17348 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17349 mask &= ~ALTIVEC_REG_BIT (i);
17351 /* Similarly, remove the return value from the set. */
17353 bool yes = false;
17354 diddle_return_value (is_altivec_return_reg, &yes);
17355 if (yes)
17356 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17359 return mask;
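/* Bit layout example (illustrative): with V0 as the most significant
   bit, a function whose only live AltiVec register is V20 produces
   the mask 0x00000800.  */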
17362 /* For a very restricted set of circumstances, we can cut down the
17363 size of prologues/epilogues by calling our own save/restore-the-world
17364 routines. */
17366 static void
17367 compute_save_world_info (rs6000_stack_t *info_ptr)
17369 info_ptr->world_save_p = 1;
17370 info_ptr->world_save_p
17371 = (WORLD_SAVE_P (info_ptr)
17372 && DEFAULT_ABI == ABI_DARWIN
17373 && !cfun->has_nonlocal_label
17374 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17375 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17376 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17377 && info_ptr->cr_save_p);
17379 /* This will not work in conjunction with sibcalls. Make sure there
17380 are none. (This check is expensive, but seldom executed.) */
17381 if (WORLD_SAVE_P (info_ptr))
17383 rtx insn;
17384 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17385 if ( GET_CODE (insn) == CALL_INSN
17386 && SIBLING_CALL_P (insn))
17388 info_ptr->world_save_p = 0;
17389 break;
17393 if (WORLD_SAVE_P (info_ptr))
17395 /* Even if we're not touching VRsave, make sure there's room on the
17396 stack for it, if it looks like we're calling SAVE_WORLD, which
17397 will attempt to save it. */
17398 info_ptr->vrsave_size = 4;
17400 /* If we are going to save the world, we need to save the link register too. */
17401 info_ptr->lr_save_p = 1;
17403 /* "Save" the VRsave register too if we're saving the world. */
17404 if (info_ptr->vrsave_mask == 0)
17405 info_ptr->vrsave_mask = compute_vrsave_mask ();
17407 /* Because the Darwin register save/restore routines only handle
17408 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17409 check. */
17410 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17411 && (info_ptr->first_altivec_reg_save
17412 >= FIRST_SAVED_ALTIVEC_REGNO));
17414 return;
17418 static void
17419 is_altivec_return_reg (rtx reg, void *xyes)
17421 bool *yes = (bool *) xyes;
17422 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17423 *yes = true;
17427 /* Look for user-defined global regs in the range FIRST to LAST-1.
17428 We should not restore these, and so cannot use lmw or out-of-line
17429 restore functions if there are any. We also can't save them
17430 (more precisely, emit frame notes for them), because frame unwinding during
17431 exception handling will restore saved registers. */
17433 static bool
17434 global_regs_p (unsigned first, unsigned last)
17436 while (first < last)
17437 if (global_regs[first++])
17438 return true;
17439 return false;
17442 /* Determine the strategy for saving/restoring registers. */
17444 enum {
17445 SAVRES_MULTIPLE = 0x1,
17446 SAVE_INLINE_FPRS = 0x2,
17447 SAVE_INLINE_GPRS = 0x4,
17448 REST_INLINE_FPRS = 0x8,
17449 REST_INLINE_GPRS = 0x10,
17450 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17451 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17452 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17453 SAVE_INLINE_VRS = 0x100,
17454 REST_INLINE_VRS = 0x200
17457 static int
17458 rs6000_savres_strategy (rs6000_stack_t *info,
17459 bool using_static_chain_p)
17461 int strategy = 0;
17462 bool lr_save_p;
17464 if (TARGET_MULTIPLE
17465 && !TARGET_POWERPC64
17466 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17467 && info->first_gp_reg_save < 31
17468 && !global_regs_p (info->first_gp_reg_save, 32))
17469 strategy |= SAVRES_MULTIPLE;
17471 if (crtl->calls_eh_return
17472 || cfun->machine->ra_need_lr)
17473 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17474 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17475 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17477 if (info->first_fp_reg_save == 64
17478 /* The out-of-line FP routines use double-precision stores;
17479 we can't use those routines if we don't have such stores. */
17480 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17481 || global_regs_p (info->first_fp_reg_save, 64))
17482 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17484 if (info->first_gp_reg_save == 32
17485 || (!(strategy & SAVRES_MULTIPLE)
17486 && global_regs_p (info->first_gp_reg_save, 32)))
17487 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17489 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
17490 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
17491 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17493 /* Define cutoff for using out-of-line functions to save registers. */
17494 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
17496 if (!optimize_size)
17498 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17499 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17500 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17502 else
17504 /* Prefer out-of-line restore if it will exit. */
17505 if (info->first_fp_reg_save > 61)
17506 strategy |= SAVE_INLINE_FPRS;
17507 if (info->first_gp_reg_save > 29)
17509 if (info->first_fp_reg_save == 64)
17510 strategy |= SAVE_INLINE_GPRS;
17511 else
17512 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17514 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
17515 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17518 else if (DEFAULT_ABI == ABI_DARWIN)
17520 if (info->first_fp_reg_save > 60)
17521 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17522 if (info->first_gp_reg_save > 29)
17523 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17524 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17526 else
17528 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
17529 if (info->first_fp_reg_save > 61)
17530 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17531 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17532 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17535 /* Don't bother to try to save things out-of-line if r11 is occupied
17536 by the static chain. It would require too much fiddling and the
17537 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
17538 pointer on Darwin, and AIX uses r1 or r12. */
17539 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
17540 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
17541 | SAVE_INLINE_GPRS
17542 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17544 /* We can only use the out-of-line routines to restore if we've
17545 saved all the registers from first_fp_reg_save in the prologue.
17546 Otherwise, we risk loading garbage. */
17547 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
17549 int i;
17551 for (i = info->first_fp_reg_save; i < 64; i++)
17552 if (!save_reg_p (i))
17554 strategy |= REST_INLINE_FPRS;
17555 break;
17559 /* If we are going to use store multiple, then don't even bother
17560 with the out-of-line routines, since the store-multiple
17561 instruction will always be smaller. */
17562 if ((strategy & SAVRES_MULTIPLE))
17563 strategy |= SAVE_INLINE_GPRS;
17565 /* info->lr_save_p isn't yet set if the only reason lr needs to be
17566 saved is an out-of-line save or restore. Set up the value for
17567 the next test (excluding out-of-line gpr restore). */
17568 lr_save_p = (info->lr_save_p
17569 || !(strategy & SAVE_INLINE_GPRS)
17570 || !(strategy & SAVE_INLINE_FPRS)
17571 || !(strategy & SAVE_INLINE_VRS)
17572 || !(strategy & REST_INLINE_FPRS)
17573 || !(strategy & REST_INLINE_VRS));
17575 /* The situation is more complicated with load multiple. We'd
17576 prefer to use the out-of-line routines for restores, since the
17577 "exit" out-of-line routines can handle the restore of LR and the
17578 frame teardown. However, it doesn't make sense to use the
17579 out-of-line routine if that is the only reason we'd need to save
17580 LR, and we can't use the "exit" out-of-line GPR restore if we
17581 have saved some FPRs. In those cases it is advantageous to use
17582 load multiple when available. */
17583 if ((strategy & SAVRES_MULTIPLE)
17584 && (!lr_save_p
17585 || info->first_fp_reg_save != 64))
17586 strategy |= REST_INLINE_GPRS;
17588 /* Saving CR interferes with the exit routines used on the SPE, so
17589 just punt here. */
17590 if (TARGET_SPE_ABI
17591 && info->spe_64bit_regs_used
17592 && info->cr_save_p)
17593 strategy |= REST_INLINE_GPRS;
17595 /* We can only use load multiple or the out-of-line routines to
17596 restore if we've used store multiple or out-of-line routines
17597 in the prologue, i.e. if we've saved all the registers from
17598 first_gp_reg_save. Otherwise, we risk loading garbage. */
17599 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
17600 == SAVE_INLINE_GPRS)
17602 int i;
17604 for (i = info->first_gp_reg_save; i < 32; i++)
17605 if (!save_reg_p (i))
17607 strategy |= REST_INLINE_GPRS;
17608 break;
17612 if (TARGET_ELF && TARGET_64BIT)
17614 if (!(strategy & SAVE_INLINE_FPRS))
17615 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17616 else if (!(strategy & SAVE_INLINE_GPRS)
17617 && info->first_fp_reg_save == 64)
17618 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
17620 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
17621 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
17623 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
17624 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17626 return strategy;
17629 /* Calculate the stack information for the current function. This is
17630 complicated by having two separate calling sequences, the AIX calling
17631 sequence and the V.4 calling sequence.
17633 AIX (and Darwin/Mac OS X) stack frames look like:
17634 32-bit 64-bit
17635 SP----> +---------------------------------------+
17636 | back chain to caller | 0 0
17637 +---------------------------------------+
17638 | saved CR | 4 8 (8-11)
17639 +---------------------------------------+
17640 | saved LR | 8 16
17641 +---------------------------------------+
17642 | reserved for compilers | 12 24
17643 +---------------------------------------+
17644 | reserved for binders | 16 32
17645 +---------------------------------------+
17646 | saved TOC pointer | 20 40
17647 +---------------------------------------+
17648 | Parameter save area (P) | 24 48
17649 +---------------------------------------+
17650 | Alloca space (A) | 24+P etc.
17651 +---------------------------------------+
17652 | Local variable space (L) | 24+P+A
17653 +---------------------------------------+
17654 | Float/int conversion temporary (X) | 24+P+A+L
17655 +---------------------------------------+
17656 | Save area for AltiVec registers (W) | 24+P+A+L+X
17657 +---------------------------------------+
17658 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17659 +---------------------------------------+
17660 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17661 +---------------------------------------+
17662 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17663 +---------------------------------------+
17664 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17665 +---------------------------------------+
17666 old SP->| back chain to caller's caller |
17667 +---------------------------------------+
17669 The required alignment for AIX configurations is two words (i.e., 8
17670 or 16 bytes).
17673 V.4 stack frames look like:
17675 SP----> +---------------------------------------+
17676 | back chain to caller | 0
17677 +---------------------------------------+
17678 | caller's saved LR | 4
17679 +---------------------------------------+
17680 | Parameter save area (P) | 8
17681 +---------------------------------------+
17682 | Alloca space (A) | 8+P
17683 +---------------------------------------+
17684 | Varargs save area (V) | 8+P+A
17685 +---------------------------------------+
17686 | Local variable space (L) | 8+P+A+V
17687 +---------------------------------------+
17688 | Float/int conversion temporary (X) | 8+P+A+V+L
17689 +---------------------------------------+
17690 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17691 +---------------------------------------+
17692 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17693 +---------------------------------------+
17694 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17695 +---------------------------------------+
17696 | SPE: area for 64-bit GP registers |
17697 +---------------------------------------+
17698 | SPE alignment padding |
17699 +---------------------------------------+
17700 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17701 +---------------------------------------+
17702 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17703 +---------------------------------------+
17704 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17705 +---------------------------------------+
17706 old SP->| back chain to caller's caller |
17707 +---------------------------------------+
17709 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17710 given. (But note below and in sysv4.h that we require only 8 and
17711 may round up the size of our stack frame anyway. The historical
17712 reason is early versions of powerpc-linux which didn't properly
17713 align the stack at program startup. A happy side-effect is that
17714 -mno-eabi libraries can be used with -meabi programs.)
17716 The EABI configuration defaults to the V.4 layout. However,
17717 the stack alignment requirements may differ. If -mno-eabi is not
17718 given, the required stack alignment is 8 bytes; if -mno-eabi is
17719 given, the required alignment is 16 bytes. (But see V.4 comment
17720 above.) */
17722 #ifndef ABI_STACK_BOUNDARY
17723 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17724 #endif
17726 static rs6000_stack_t *
17727 rs6000_stack_info (void)
17729 rs6000_stack_t *info_ptr = &stack_info;
17730 int reg_size = TARGET_32BIT ? 4 : 8;
17731 int ehrd_size;
17732 int save_align;
17733 int first_gp;
17734 HOST_WIDE_INT non_fixed_size;
17735 bool using_static_chain_p;
17737 if (reload_completed && info_ptr->reload_completed)
17738 return info_ptr;
17740 memset (info_ptr, 0, sizeof (*info_ptr));
17741 info_ptr->reload_completed = reload_completed;
17743 if (TARGET_SPE)
17745 /* Cache value so we don't rescan instruction chain over and over. */
17746 if (cfun->machine->insn_chain_scanned_p == 0)
17747 cfun->machine->insn_chain_scanned_p
17748 = spe_func_has_64bit_regs_p () + 1;
17749 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17752 /* Select which calling sequence. */
17753 info_ptr->abi = DEFAULT_ABI;
17755 /* Calculate which registers need to be saved & save area size. */
17756 info_ptr->first_gp_reg_save = first_reg_to_save ();
17757 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17758 even if it currently looks like we won't. Reload may need it to
17759 get at a constant; if so, it will have already created a constant
17760 pool entry for it. */
17761 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17762 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17763 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17764 && crtl->uses_const_pool
17765 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17766 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17767 else
17768 first_gp = info_ptr->first_gp_reg_save;
17770 info_ptr->gp_size = reg_size * (32 - first_gp);
17772 /* For the SPE, we have an additional upper 32-bits on each GPR.
17773 Ideally we should save the entire 64-bits only when the upper
17774 half is used in SIMD instructions. Since we only record
17775 registers live (not the size they are used in), this proves
17776 difficult because we'd have to traverse the instruction chain at
17777 the right time, taking reload into account. This is a real pain,
17778 so we opt to save the GPRs in 64-bits whenever even one register
17779 gets used in 64-bits. Otherwise, all the registers in the frame
17780 get saved in 32-bits.
17782 So, when we save all GPRs (except the SP) in 64-bits, the
17783 traditional GP save area will be empty. */
17784 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17785 info_ptr->gp_size = 0;
17787 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17788 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17790 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17791 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17792 - info_ptr->first_altivec_reg_save);
17794 /* Does this function call anything? */
17795 info_ptr->calls_p = (! crtl->is_leaf
17796 || cfun->machine->ra_needs_full_frame);
17798 /* Determine if we need to save the condition code registers. */
17799 if (df_regs_ever_live_p (CR2_REGNO)
17800 || df_regs_ever_live_p (CR3_REGNO)
17801 || df_regs_ever_live_p (CR4_REGNO))
17803 info_ptr->cr_save_p = 1;
17804 if (DEFAULT_ABI == ABI_V4)
17805 info_ptr->cr_size = reg_size;
17808 /* If the current function calls __builtin_eh_return, then we need
17809 to allocate stack space for registers that will hold data for
17810 the exception handler. */
17811 if (crtl->calls_eh_return)
17813 unsigned int i;
17814 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17815 continue;
17817 /* SPE saves EH registers in 64-bits. */
17818 ehrd_size = i * (TARGET_SPE_ABI
17819 && info_ptr->spe_64bit_regs_used != 0
17820 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17822 else
17823 ehrd_size = 0;
17825 /* Determine various sizes. */
17826 info_ptr->reg_size = reg_size;
17827 info_ptr->fixed_size = RS6000_SAVE_AREA;
17828 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17829 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17830 TARGET_ALTIVEC ? 16 : 8);
17831 if (FRAME_GROWS_DOWNWARD)
17832 info_ptr->vars_size
17833 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
17834 + info_ptr->parm_size,
17835 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
17836 - (info_ptr->fixed_size + info_ptr->vars_size
17837 + info_ptr->parm_size);
17839 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17840 info_ptr->spe_gp_size = 8 * (32 - first_gp);
17841 else
17842 info_ptr->spe_gp_size = 0;
17844 /* Set VRSAVE register if it is saved and restored. */
17845 if (TARGET_ALTIVEC_ABI && TARGET_ALTIVEC_VRSAVE)
17846 info_ptr->vrsave_mask = compute_vrsave_mask ();
17847 else
17848 info_ptr->vrsave_mask = 0;
17850 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
17851 info_ptr->vrsave_size = 4;
17852 else
17853 info_ptr->vrsave_size = 0;
17855 compute_save_world_info (info_ptr);
17857 /* Calculate the offsets. */
17858 switch (DEFAULT_ABI)
17860 case ABI_NONE:
17861 default:
17862 gcc_unreachable ();
17864 case ABI_AIX:
17865 case ABI_DARWIN:
17866 info_ptr->fp_save_offset = - info_ptr->fp_size;
17867 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17869 if (TARGET_ALTIVEC_ABI)
17871 info_ptr->vrsave_save_offset
17872 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
17874 /* Align stack so vector save area is on a quadword boundary.
17875 The padding goes above the vectors. */
17876 if (info_ptr->altivec_size != 0)
17877 info_ptr->altivec_padding_size
17878 = info_ptr->vrsave_save_offset & 0xF;
17879 else
17880 info_ptr->altivec_padding_size = 0;
17882 info_ptr->altivec_save_offset
17883 = info_ptr->vrsave_save_offset
17884 - info_ptr->altivec_padding_size
17885 - info_ptr->altivec_size;
17886 gcc_assert (info_ptr->altivec_size == 0
17887 || info_ptr->altivec_save_offset % 16 == 0);
17889 /* Adjust for AltiVec case. */
17890 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
17892 else
17893 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
17894 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
17895 info_ptr->lr_save_offset = 2*reg_size;
17896 break;
17898 case ABI_V4:
17899 info_ptr->fp_save_offset = - info_ptr->fp_size;
17900 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17901 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
17903 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17905 /* Align stack so SPE GPR save area is aligned on a
17906 double-word boundary. */
17907 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
17908 info_ptr->spe_padding_size
17909 = 8 - (-info_ptr->cr_save_offset % 8);
17910 else
17911 info_ptr->spe_padding_size = 0;
17913 info_ptr->spe_gp_save_offset
17914 = info_ptr->cr_save_offset
17915 - info_ptr->spe_padding_size
17916 - info_ptr->spe_gp_size;
17918 /* Adjust for SPE case. */
17919 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
17921 else if (TARGET_ALTIVEC_ABI)
17923 info_ptr->vrsave_save_offset
17924 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
17926 /* Align stack so vector save area is on a quadword boundary. */
17927 if (info_ptr->altivec_size != 0)
17928 info_ptr->altivec_padding_size
17929 = 16 - (-info_ptr->vrsave_save_offset % 16);
17930 else
17931 info_ptr->altivec_padding_size = 0;
17933 info_ptr->altivec_save_offset
17934 = info_ptr->vrsave_save_offset
17935 - info_ptr->altivec_padding_size
17936 - info_ptr->altivec_size;
17938 /* Adjust for AltiVec case. */
17939 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
17941 else
17942 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
17943 info_ptr->ehrd_offset -= ehrd_size;
17944 info_ptr->lr_save_offset = reg_size;
17945 break;
17948 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
17949 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
17950 + info_ptr->gp_size
17951 + info_ptr->altivec_size
17952 + info_ptr->altivec_padding_size
17953 + info_ptr->spe_gp_size
17954 + info_ptr->spe_padding_size
17955 + ehrd_size
17956 + info_ptr->cr_size
17957 + info_ptr->vrsave_size,
17958 save_align);
17960 non_fixed_size = (info_ptr->vars_size
17961 + info_ptr->parm_size
17962 + info_ptr->save_size);
17964 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
17965 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
17967 /* Determine if we need to save the link register. */
17968 if (info_ptr->calls_p
17969 || (DEFAULT_ABI == ABI_AIX
17970 && crtl->profile
17971 && !TARGET_PROFILE_KERNEL)
17972 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
17973 #ifdef TARGET_RELOCATABLE
17974 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
17975 #endif
17976 || rs6000_ra_ever_killed ())
17977 info_ptr->lr_save_p = 1;
17979 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
17980 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
17981 && call_used_regs[STATIC_CHAIN_REGNUM]);
17982 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
17983 using_static_chain_p);
17985 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
17986 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
17987 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
17988 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
17989 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
17990 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
17991 info_ptr->lr_save_p = 1;
17993 if (info_ptr->lr_save_p)
17994 df_set_regs_ever_live (LR_REGNO, true);
17996 /* Determine if we need to allocate any stack frame:
17998 For AIX we need to push the stack if a frame pointer is needed
17999 (because the stack might be dynamically adjusted), if we are
18000 debugging, if we make calls, or if the sum of fp_save, gp_save,
18001 and local variables are more than the space needed to save all
18002 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
18003 + 18*8 = 288 (GPR13 reserved).
18005 For V.4 we don't have the stack cushion that AIX uses, but assume
18006 that the debugger can handle stackless frames. */
18008 if (info_ptr->calls_p)
18009 info_ptr->push_p = 1;
18011 else if (DEFAULT_ABI == ABI_V4)
18012 info_ptr->push_p = non_fixed_size != 0;
18014 else if (frame_pointer_needed)
18015 info_ptr->push_p = 1;
18017 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
18018 info_ptr->push_p = 1;
18020 else
18021 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
18023 /* Zero offsets if we're not saving those registers. */
18024 if (info_ptr->fp_size == 0)
18025 info_ptr->fp_save_offset = 0;
18027 if (info_ptr->gp_size == 0)
18028 info_ptr->gp_save_offset = 0;
18030 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
18031 info_ptr->altivec_save_offset = 0;
18033 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
18034 info_ptr->vrsave_save_offset = 0;
18036 if (! TARGET_SPE_ABI
18037 || info_ptr->spe_64bit_regs_used == 0
18038 || info_ptr->spe_gp_size == 0)
18039 info_ptr->spe_gp_save_offset = 0;
18041 if (! info_ptr->lr_save_p)
18042 info_ptr->lr_save_offset = 0;
18044 if (! info_ptr->cr_save_p)
18045 info_ptr->cr_save_offset = 0;
18047 return info_ptr;
18050 /* Return true if the current function uses any GPRs in 64-bit SIMD
18051 mode. */
18053 static bool
18054 spe_func_has_64bit_regs_p (void)
18056 rtx insns, insn;
18058 /* Functions that save and restore all the call-saved registers will
18059 need to save/restore the registers in 64-bits. */
18060 if (crtl->calls_eh_return
18061 || cfun->calls_setjmp
18062 || crtl->has_nonlocal_goto)
18063 return true;
18065 insns = get_insns ();
18067 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
18069 if (INSN_P (insn))
18071 rtx i;
18073 /* FIXME: This should be implemented with attributes...
18075 (set_attr "spe64" "true")....then,
18076 if (get_spe64(insn)) return true;
18078 It's the only reliable way to do the stuff below. */
18080 i = PATTERN (insn);
18081 if (GET_CODE (i) == SET)
18083 enum machine_mode mode = GET_MODE (SET_SRC (i));
18085 if (SPE_VECTOR_MODE (mode))
18086 return true;
18087 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
18088 return true;
18093 return false;
18096 static void
18097 debug_stack_info (rs6000_stack_t *info)
18099 const char *abi_string;
18101 if (! info)
18102 info = rs6000_stack_info ();
18104 fprintf (stderr, "\nStack information for function %s:\n",
18105 ((current_function_decl && DECL_NAME (current_function_decl))
18106 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
18107 : "<unknown>"));
18109 switch (info->abi)
18111 default: abi_string = "Unknown"; break;
18112 case ABI_NONE: abi_string = "NONE"; break;
18113 case ABI_AIX: abi_string = "AIX"; break;
18114 case ABI_DARWIN: abi_string = "Darwin"; break;
18115 case ABI_V4: abi_string = "V.4"; break;
18118 fprintf (stderr, "\tABI = %5s\n", abi_string);
18120 if (TARGET_ALTIVEC_ABI)
18121 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
18123 if (TARGET_SPE_ABI)
18124 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
18126 if (info->first_gp_reg_save != 32)
18127 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
18129 if (info->first_fp_reg_save != 64)
18130 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
18132 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
18133 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
18134 info->first_altivec_reg_save);
18136 if (info->lr_save_p)
18137 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
18139 if (info->cr_save_p)
18140 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
18142 if (info->vrsave_mask)
18143 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
18145 if (info->push_p)
18146 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
18148 if (info->calls_p)
18149 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
18151 if (info->gp_save_offset)
18152 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
18154 if (info->fp_save_offset)
18155 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
18157 if (info->altivec_save_offset)
18158 fprintf (stderr, "\taltivec_save_offset = %5d\n",
18159 info->altivec_save_offset);
18161 if (info->spe_gp_save_offset)
18162 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
18163 info->spe_gp_save_offset);
18165 if (info->vrsave_save_offset)
18166 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
18167 info->vrsave_save_offset);
18169 if (info->lr_save_offset)
18170 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
18172 if (info->cr_save_offset)
18173 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
18175 if (info->varargs_save_offset)
18176 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
18178 if (info->total_size)
18179 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18180 info->total_size);
18182 if (info->vars_size)
18183 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
18184 info->vars_size);
18186 if (info->parm_size)
18187 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
18189 if (info->fixed_size)
18190 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
18192 if (info->gp_size)
18193 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
18195 if (info->spe_gp_size)
18196 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
18198 if (info->fp_size)
18199 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
18201 if (info->altivec_size)
18202 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
18204 if (info->vrsave_size)
18205 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
18207 if (info->altivec_padding_size)
18208 fprintf (stderr, "\taltivec_padding_size= %5d\n",
18209 info->altivec_padding_size);
18211 if (info->spe_padding_size)
18212 fprintf (stderr, "\tspe_padding_size = %5d\n",
18213 info->spe_padding_size);
18215 if (info->cr_size)
18216 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
18218 if (info->save_size)
18219 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18221 if (info->reg_size != 4)
18222 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18224 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18226 fprintf (stderr, "\n");
18229 rtx
18230 rs6000_return_addr (int count, rtx frame)
18232 /* Currently we don't optimize very well between prolog and body
18233 code, and for PIC code the result can actually be quite bad, so
18234 don't try to be too clever here. */
18235 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18237 cfun->machine->ra_needs_full_frame = 1;
18239 return
18240 gen_rtx_MEM
18241 (Pmode,
18242 memory_address
18243 (Pmode,
18244 plus_constant (Pmode,
18245 copy_to_reg
18246 (gen_rtx_MEM (Pmode,
18247 memory_address (Pmode, frame))),
18248 RETURN_ADDRESS_OFFSET)));
18251 cfun->machine->ra_need_lr = 1;
18252 return get_hard_reg_initial_val (Pmode, LR_REGNO);
18255 /* Say whether a function is a candidate for sibcall handling or not. */
18257 static bool
18258 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18260 tree fntype;
18262 if (decl)
18263 fntype = TREE_TYPE (decl);
18264 else
18265 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18267 /* We can't do it if the called function has more vector parameters
18268 than the current function; there's nowhere to put the VRsave code. */
18269 if (TARGET_ALTIVEC_ABI
18270 && TARGET_ALTIVEC_VRSAVE
18271 && !(decl && decl == current_function_decl))
18273 function_args_iterator args_iter;
18274 tree type;
18275 int nvreg = 0;
18277 /* Functions with vector parameters are required to have a
18278 prototype, so the argument type info must be available
18279 here. */
18280 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18281 if (TREE_CODE (type) == VECTOR_TYPE
18282 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18283 nvreg++;
18285 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18286 if (TREE_CODE (type) == VECTOR_TYPE
18287 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18288 nvreg--;
18290 if (nvreg > 0)
18291 return false;
18294 /* Under the AIX ABI we can't allow calls to non-local functions,
18295 because the callee may have a different TOC pointer from the
18296 caller and there's no way to ensure we restore the TOC when we
18297 return. With the secure-plt SYSV ABI we can't make non-local
18298 calls when -fpic/PIC because the plt call stubs use r30. */
18299 if (DEFAULT_ABI == ABI_DARWIN
18300 || (DEFAULT_ABI == ABI_AIX
18301 && decl
18302 && !DECL_EXTERNAL (decl)
18303 && (*targetm.binds_local_p) (decl))
18304 || (DEFAULT_ABI == ABI_V4
18305 && (!TARGET_SECURE_PLT
18306 || !flag_pic
18307 || (decl
18308 && (*targetm.binds_local_p) (decl)))))
18310 tree attr_list = TYPE_ATTRIBUTES (fntype);
18312 if (!lookup_attribute ("longcall", attr_list)
18313 || lookup_attribute ("shortcall", attr_list))
18314 return true;
18317 return false;
18320 /* Return NULL if INSN is valid within a low-overhead loop.
18321 Otherwise return a string explaining why doloop cannot be applied.
18322 PowerPC uses the COUNT register for branch on table instructions. */
18324 static const char *
18325 rs6000_invalid_within_doloop (const_rtx insn)
18327 if (CALL_P (insn))
18328 return "Function call in the loop.";
18330 if (JUMP_P (insn)
18331 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
18332 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
18333 return "Computed branch in the loop.";
18335 return NULL;
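/* Background for the two rejections above: the doloop optimization
   rewrites a counted loop to use the COUNT register, roughly

       mtctr rN          # load trip count into CTR
     1: ...
       bdnz 1b           # decrement CTR, branch while nonzero

   while a jump table dispatch (ADDR_VEC/ADDR_DIFF_VEC) also needs CTR:

       mtctr rM
       bctr              # branch to the address in CTR

   and function calls may clobber CTR as well, hence both checks.  */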
18338 static int
18339 rs6000_ra_ever_killed (void)
18341 rtx top;
18342 rtx reg;
18343 rtx insn;
18345 if (cfun->is_thunk)
18346 return 0;
18348 if (cfun->machine->lr_save_state)
18349 return cfun->machine->lr_save_state - 1;
18351 /* regs_ever_live has LR marked as used if any sibcalls are present,
18352 but this should not force saving and restoring in the
18353 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18354 clobbers LR, so that is inappropriate. */
18356 /* Also, the prologue can generate a store into LR that
18357 doesn't really count, like this:
18359 move LR->R0
18360 bcl to set PIC register
18361 move LR->R31
18362 move R0->LR
18364 When we're called from the epilogue, we need to avoid counting
18365 this as a store. */
18367 push_topmost_sequence ();
18368 top = get_insns ();
18369 pop_topmost_sequence ();
18370 reg = gen_rtx_REG (Pmode, LR_REGNO);
18372 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18374 if (INSN_P (insn))
18376 if (CALL_P (insn))
18378 if (!SIBLING_CALL_P (insn))
18379 return 1;
18381 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18382 return 1;
18383 else if (set_of (reg, insn) != NULL_RTX
18384 && !prologue_epilogue_contains (insn))
18385 return 1;
18388 return 0;
18391 /* Emit instructions needed to load the TOC register.
18392 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
18393 and there is a constant pool, or for SVR4 -fpic. */
18395 void
18396 rs6000_emit_load_toc_table (int fromprolog)
18398 rtx dest;
18399 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18401 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18403 char buf[30];
18404 rtx lab, tmp1, tmp2, got;
18406 lab = gen_label_rtx ();
18407 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18408 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18409 if (flag_pic == 2)
18410 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18411 else
18412 got = rs6000_got_sym ();
18413 tmp1 = tmp2 = dest;
18414 if (!fromprolog)
18416 tmp1 = gen_reg_rtx (Pmode);
18417 tmp2 = gen_reg_rtx (Pmode);
18419 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18420 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18421 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18422 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18424 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18426 emit_insn (gen_load_toc_v4_pic_si ());
18427 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18429 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18431 char buf[30];
18432 rtx temp0 = (fromprolog
18433 ? gen_rtx_REG (Pmode, 0)
18434 : gen_reg_rtx (Pmode));
18436 if (fromprolog)
18438 rtx symF, symL;
18440 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18441 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18443 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18444 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18446 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18447 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18448 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18450 else
18452 rtx tocsym, lab;
18454 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18455 lab = gen_label_rtx ();
18456 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18457 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18458 if (TARGET_LINK_STACK)
18459 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18460 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18462 emit_insn (gen_addsi3 (dest, temp0, dest));
18464 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18466 /* This is for AIX code running in non-PIC ELF32. */
18467 char buf[30];
18468 rtx realsym;
18469 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18470 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18472 emit_insn (gen_elf_high (dest, realsym));
18473 emit_insn (gen_elf_low (dest, dest, realsym));
18475 else
18477 gcc_assert (DEFAULT_ABI == ABI_AIX);
18479 if (TARGET_32BIT)
18480 emit_insn (gen_load_toc_aix_si (dest));
18481 else
18482 emit_insn (gen_load_toc_aix_di (dest));
18486 /* Emit instructions to restore the link register after determining where
18487 its value has been stored. */
18489 void
18490 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18492 rs6000_stack_t *info = rs6000_stack_info ();
18493 rtx operands[2];
18495 operands[0] = source;
18496 operands[1] = scratch;
18498 if (info->lr_save_p)
18500 rtx frame_rtx = stack_pointer_rtx;
18501 HOST_WIDE_INT sp_offset = 0;
18502 rtx tmp;
18504 if (frame_pointer_needed
18505 || cfun->calls_alloca
18506 || info->total_size > 32767)
18508 tmp = gen_frame_mem (Pmode, frame_rtx);
18509 emit_move_insn (operands[1], tmp);
18510 frame_rtx = operands[1];
18512 else if (info->push_p)
18513 sp_offset = info->total_size;
18515 tmp = plus_constant (Pmode, frame_rtx,
18516 info->lr_save_offset + sp_offset);
18517 tmp = gen_frame_mem (Pmode, tmp);
18518 emit_move_insn (tmp, operands[0]);
18520 else
18521 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
18523 /* Freeze lr_save_p. We've just emitted rtl that depends on the
18524 state of lr_save_p so any change from here on would be a bug. In
18525 particular, stop rs6000_ra_ever_killed from considering the SET
18526 of lr we may have added just above. */
18527 cfun->machine->lr_save_state = info->lr_save_p + 1;
18530 static GTY(()) alias_set_type set = -1;
18532 alias_set_type
18533 get_TOC_alias_set (void)
18535 if (set == -1)
18536 set = new_alias_set ();
18537 return set;
18540 /* This returns nonzero if the current function uses the TOC. This is
18541 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
18542 is generated by the ABI_V4 load_toc_* patterns. */
18543 #if TARGET_ELF
18544 static int
18545 uses_TOC (void)
18547 rtx insn;
18549 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18550 if (INSN_P (insn))
18552 rtx pat = PATTERN (insn);
18553 int i;
18555 if (GET_CODE (pat) == PARALLEL)
18556 for (i = 0; i < XVECLEN (pat, 0); i++)
18558 rtx sub = XVECEXP (pat, 0, i);
18559 if (GET_CODE (sub) == USE)
18561 sub = XEXP (sub, 0);
18562 if (GET_CODE (sub) == UNSPEC
18563 && XINT (sub, 1) == UNSPEC_TOC)
18564 return 1;
18568 return 0;
18570 #endif
18572 rtx
18573 create_TOC_reference (rtx symbol, rtx largetoc_reg)
18575 rtx tocrel, tocreg, hi;
18577 if (TARGET_DEBUG_ADDR)
18579 if (GET_CODE (symbol) == SYMBOL_REF)
18580 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
18581 XSTR (symbol, 0));
18582 else
18584 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
18585 GET_RTX_NAME (GET_CODE (symbol)));
18586 debug_rtx (symbol);
18590 if (!can_create_pseudo_p ())
18591 df_set_regs_ever_live (TOC_REGISTER, true);
18593 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
18594 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
18595 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
18596 return tocrel;
18598 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
18599 if (largetoc_reg != NULL)
18601 emit_move_insn (largetoc_reg, hi);
18602 hi = largetoc_reg;
18604 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
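/* For reference, with -mcmodel=medium or -mcmodel=large on 64-bit ELF
   the HIGH/LO_SUM pair built here corresponds to an addressing sequence
   along these lines (a sketch, not literal compiler output):

       addis rT,r2,sym@toc@ha     # HIGH part; rT is largetoc_reg if given
       ld    rD,sym@toc@l(rT)     # LO_SUM completes the TOC access

   while for CMODEL_SMALL the bare UNSPEC_TOCREL is returned and a single
   TOC-relative displacement is used instead.  */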
18607 /* Issue assembly directives that create a reference to the given DWARF
18608 FRAME_TABLE_LABEL from the current function section. */
18609 void
18610 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
18612 fprintf (asm_out_file, "\t.ref %s\n",
18613 (* targetm.strip_name_encoding) (frame_table_label));
18616 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18617 and the change to the stack pointer. */
18619 static void
18620 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
18622 rtvec p;
18623 int i;
18624 rtx regs[3];
18626 i = 0;
18627 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18628 if (hard_frame_needed)
18629 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
18630 if (!(REGNO (fp) == STACK_POINTER_REGNUM
18631 || (hard_frame_needed
18632 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
18633 regs[i++] = fp;
18635 p = rtvec_alloc (i);
18636 while (--i >= 0)
18638 rtx mem = gen_frame_mem (BLKmode, regs[i]);
18639 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
18642 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
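/* The tie emitted above is roughly

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                (set (mem:BLK (reg 31)) (const_int 0))])

   i.e. a dummy wild store into frame memory.  Nothing ever matches it
   as a real store; it exists purely so the scheduler sees a dependency
   between the stack pointer update and the frame saves/restores.  */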
18645 /* Emit the correct code for allocating stack space, as insns.
18646 If COPY_REG, leave a copy of the old stack pointer in COPY_REG,
18647 offset by COPY_OFF. The code may use hard register 0 as a temporary. */
18649 static void
18650 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
18652 rtx insn;
18653 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18654 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18655 rtx todec = gen_int_mode (-size, Pmode);
18656 rtx par, set, mem;
18658 if (INTVAL (todec) != -size)
18660 warning (0, "stack frame too large");
18661 emit_insn (gen_trap ());
18662 return;
18665 if (crtl->limit_stack)
18667 if (REG_P (stack_limit_rtx)
18668 && REGNO (stack_limit_rtx) > 1
18669 && REGNO (stack_limit_rtx) <= 31)
18671 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18672 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18673 const0_rtx));
18675 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18676 && TARGET_32BIT
18677 && DEFAULT_ABI == ABI_V4)
18679 rtx toload = gen_rtx_CONST (VOIDmode,
18680 gen_rtx_PLUS (Pmode,
18681 stack_limit_rtx,
18682 GEN_INT (size)));
18684 emit_insn (gen_elf_high (tmp_reg, toload));
18685 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18686 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18687 const0_rtx));
18689 else
18690 warning (0, "stack limit expression is not supported");
18693 if (copy_reg)
18695 if (copy_off != 0)
18696 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
18697 else
18698 emit_move_insn (copy_reg, stack_reg);
18701 if (size > 32767)
18703 /* Need a note here so that try_split doesn't get confused. */
18704 if (get_last_insn () == NULL_RTX)
18705 emit_note (NOTE_INSN_DELETED);
18706 insn = emit_move_insn (tmp_reg, todec);
18707 try_split (PATTERN (insn), insn, 0);
18708 todec = tmp_reg;
18711 insn = emit_insn (TARGET_32BIT
18712 ? gen_movsi_update_stack (stack_reg, stack_reg,
18713 todec, stack_reg)
18714 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18715 todec, stack_reg));
18716 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18717 it now and set the alias set/attributes. The above gen_*_update
18718 calls will generate a PARALLEL with the MEM set being the first
18719 operation. */
18720 par = PATTERN (insn);
18721 gcc_assert (GET_CODE (par) == PARALLEL);
18722 set = XVECEXP (par, 0, 0);
18723 gcc_assert (GET_CODE (set) == SET);
18724 mem = SET_DEST (set);
18725 gcc_assert (MEM_P (mem));
18726 MEM_NOTRAP_P (mem) = 1;
18727 set_mem_alias_set (mem, get_frame_alias_set ());
18729 RTX_FRAME_RELATED_P (insn) = 1;
18730 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18731 gen_rtx_SET (VOIDmode, stack_reg,
18732 gen_rtx_PLUS (Pmode, stack_reg,
18733 GEN_INT (-size))));
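/* As a sketch, on a 32-bit target the store-with-update emitted above
   typically assembles to

       stwu r1,-SIZE(r1)            # SIZE representable in 16 bits

   or, for large frames, via the r0 temporary set up above:

       lis   r0,...                 # build -SIZE in r0
       ori   r0,r0,...
       stwux r1,r1,r0

   (stdu/stdux on 64-bit).  Decrementing SP and storing the back chain
   in one instruction is what keeps the stack walkable at every point.  */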
18736 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
18738 #if PROBE_INTERVAL > 32768
18739 #error Cannot use indexed addressing mode for stack probing
18740 #endif
18742 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
18743 inclusive. These are offsets from the current stack pointer. */
18745 static void
18746 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
18748 /* See if we have a constant small number of probes to generate. If so,
18749 that's the easy case. */
18750 if (first + size <= 32768)
18752 HOST_WIDE_INT i;
18754 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
18755 it exceeds SIZE. If only one probe is needed, this will not
18756 generate any code. Then probe at FIRST + SIZE. */
18757 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
18758 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18759 -(first + i)));
18761 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18762 -(first + size)));
18765 /* Otherwise, do the same as above, but in a loop. Note that we must be
18766 extra careful with variables wrapping around because we might be at
18767 the very top (or the very bottom) of the address space and we have
18768 to be able to handle this case properly; in particular, we use an
18769 equality test for the loop condition. */
18770 else
18772 HOST_WIDE_INT rounded_size;
18773 rtx r12 = gen_rtx_REG (Pmode, 12);
18774 rtx r0 = gen_rtx_REG (Pmode, 0);
18776 /* Sanity check for the addressing mode we're going to use. */
18777 gcc_assert (first <= 32768);
18779 /* Step 1: round SIZE to the previous multiple of the interval. */
18781 rounded_size = size & -PROBE_INTERVAL;
18784 /* Step 2: compute initial and final value of the loop counter. */
18786 /* TEST_ADDR = SP + FIRST. */
18787 emit_insn (gen_rtx_SET (VOIDmode, r12,
18788 plus_constant (Pmode, stack_pointer_rtx,
18789 -first)));
18791 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
18792 if (rounded_size > 32768)
18794 emit_move_insn (r0, GEN_INT (-rounded_size));
18795 emit_insn (gen_rtx_SET (VOIDmode, r0,
18796 gen_rtx_PLUS (Pmode, r12, r0)));
18798 else
18799 emit_insn (gen_rtx_SET (VOIDmode, r0,
18800 plus_constant (Pmode, r12, -rounded_size)));
18803 /* Step 3: the loop
18805 while (TEST_ADDR != LAST_ADDR)
18807 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
18808 probe at TEST_ADDR
18811 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
18812 until it is equal to ROUNDED_SIZE. */
18814 if (TARGET_64BIT)
18815 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
18816 else
18817 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
18820 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
18821 that SIZE is equal to ROUNDED_SIZE. */
18823 if (size != rounded_size)
18824 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
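/* A minimal C model of the logic above, assuming a hypothetical probe()
   helper in place of emit_stack_probe and the default PROBE_INTERVAL of
   4096 (STACK_CHECK_PROBE_INTERVAL_EXP == 12); probe() takes an offset
   relative to the incoming stack pointer:

     extern void probe (long sp_offset);

     void probe_range_model (long first, long size)
     {
       if (first + size <= 32768)
         {
           for (long i = 4096; i < size; i += 4096)
             probe (-(first + i));
           probe (-(first + size));
         }
       else
         {
           long rounded = size & -4096;   // round down to the interval
           long test = -first;            // TEST_ADDR
           while (test != -(first + rounded))
             probe (test -= 4096);        // step, then probe
           if (size != rounded)
             probe (-(first + size));     // final partial probe
         }
     }
 */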
18828 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
18829 absolute addresses. */
18831 const char *
18832 output_probe_stack_range (rtx reg1, rtx reg2)
18834 static int labelno = 0;
18835 char loop_lab[32], end_lab[32];
18836 rtx xops[2];
18838 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
18839 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
18841 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
18843 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
18844 xops[0] = reg1;
18845 xops[1] = reg2;
18846 if (TARGET_64BIT)
18847 output_asm_insn ("cmpd 0,%0,%1", xops);
18848 else
18849 output_asm_insn ("cmpw 0,%0,%1", xops);
18851 fputs ("\tbeq 0,", asm_out_file);
18852 assemble_name_raw (asm_out_file, end_lab);
18853 fputc ('\n', asm_out_file);
18855 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
18856 xops[1] = GEN_INT (-PROBE_INTERVAL);
18857 output_asm_insn ("addi %0,%0,%1", xops);
18859 /* Probe at TEST_ADDR and branch. */
18860 xops[1] = gen_rtx_REG (Pmode, 0);
18861 output_asm_insn ("stw %1,0(%0)", xops);
18862 fprintf (asm_out_file, "\tb ");
18863 assemble_name_raw (asm_out_file, loop_lab);
18864 fputc ('\n', asm_out_file);
18866 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
18868 return "";
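/* With the default 4096-byte interval, the emitted loop looks like this
   on a 32-bit target (labels abbreviated; %0 = test address, %1 = last
   address):

     .LPSRL0:
       cmpw  0,%0,%1          # stop when TEST_ADDR == LAST_ADDR
       beq   0,.LPSRE0
       addi  %0,%0,-4096      # advance to the next probe address
       stw   r0,0(%0)         # the probe itself
       b     .LPSRL0
     .LPSRE0:
 */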
18871 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
18872 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
18873 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
18874 deduce these equivalences by itself so it wasn't necessary to hold
18875 its hand so much. Don't be tempted to always supply d2_f_d_e with
18876 the actual CFA register, i.e. r31 when we are using a hard frame
18877 pointer. That fails when saving regs off r1, and sched moves the
18878 r31 setup past the reg saves. */
18880 static rtx
18881 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
18882 rtx reg2, rtx rreg)
18884 rtx real, temp;
18886 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
18888 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
18889 int i;
18891 gcc_checking_assert (val == 0);
18892 real = PATTERN (insn);
18893 if (GET_CODE (real) == PARALLEL)
18894 for (i = 0; i < XVECLEN (real, 0); i++)
18895 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18897 rtx set = XVECEXP (real, 0, i);
18899 RTX_FRAME_RELATED_P (set) = 1;
18901 RTX_FRAME_RELATED_P (insn) = 1;
18902 return insn;
18905 /* copy_rtx will not make unique copies of registers, so we need to
18906 ensure we don't have unwanted sharing here. */
18907 if (reg == reg2)
18908 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18910 if (reg == rreg)
18911 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18913 real = copy_rtx (PATTERN (insn));
18915 if (reg2 != NULL_RTX)
18916 real = replace_rtx (real, reg2, rreg);
18918 if (REGNO (reg) == STACK_POINTER_REGNUM)
18919 gcc_checking_assert (val == 0);
18920 else
18921 real = replace_rtx (real, reg,
18922 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
18923 STACK_POINTER_REGNUM),
18924 GEN_INT (val)));
18926 /* We expect that 'real' is either a SET or a PARALLEL containing
18927 SETs (and possibly other stuff). In a PARALLEL, all the SETs
18928 are important so they all have to be marked RTX_FRAME_RELATED_P. */
18930 if (GET_CODE (real) == SET)
18932 rtx set = real;
18934 temp = simplify_rtx (SET_SRC (set));
18935 if (temp)
18936 SET_SRC (set) = temp;
18937 temp = simplify_rtx (SET_DEST (set));
18938 if (temp)
18939 SET_DEST (set) = temp;
18940 if (GET_CODE (SET_DEST (set)) == MEM)
18942 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18943 if (temp)
18944 XEXP (SET_DEST (set), 0) = temp;
18947 else
18949 int i;
18951 gcc_assert (GET_CODE (real) == PARALLEL);
18952 for (i = 0; i < XVECLEN (real, 0); i++)
18953 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18955 rtx set = XVECEXP (real, 0, i);
18957 temp = simplify_rtx (SET_SRC (set));
18958 if (temp)
18959 SET_SRC (set) = temp;
18960 temp = simplify_rtx (SET_DEST (set));
18961 if (temp)
18962 SET_DEST (set) = temp;
18963 if (GET_CODE (SET_DEST (set)) == MEM)
18965 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18966 if (temp)
18967 XEXP (SET_DEST (set), 0) = temp;
18969 RTX_FRAME_RELATED_P (set) = 1;
18973 RTX_FRAME_RELATED_P (insn) = 1;
18974 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
18976 return insn;
18979 /* Returns an insn that has a vrsave set operation with the
18980 appropriate CLOBBERs. */
18982 static rtx
18983 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
18985 int nclobs, i;
18986 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
18987 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
18989 clobs[0]
18990 = gen_rtx_SET (VOIDmode,
18991 vrsave,
18992 gen_rtx_UNSPEC_VOLATILE (SImode,
18993 gen_rtvec (2, reg, vrsave),
18994 UNSPECV_SET_VRSAVE));
18996 nclobs = 1;
18998 /* We need to clobber the registers in the mask so the scheduler
18999 does not move sets to VRSAVE before sets of AltiVec registers.
19001 However, if the function receives nonlocal gotos, reload will set
19002 all call saved registers live. We will end up with:
19004 (set (reg 999) (mem))
19005 (parallel [ (set (reg vrsave) (unspec blah))
19006 (clobber (reg 999))])
19008 The clobber will cause the store into reg 999 to be dead, and
19009 flow will attempt to delete an epilogue insn. In this case, we
19010 need an unspec use/set of the register. */
19012 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
19013 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19015 if (!epiloguep || call_used_regs [i])
19016 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
19017 gen_rtx_REG (V4SImode, i));
19018 else
19020 rtx reg = gen_rtx_REG (V4SImode, i);
19022 clobs[nclobs++]
19023 = gen_rtx_SET (VOIDmode,
19024 reg,
19025 gen_rtx_UNSPEC (V4SImode,
19026 gen_rtvec (1, reg), 27));
19030 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
19032 for (i = 0; i < nclobs; ++i)
19033 XVECEXP (insn, 0, i) = clobs[i];
19035 return insn;
19038 static rtx
19039 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
19041 rtx addr, mem;
19043 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
19044 mem = gen_frame_mem (GET_MODE (reg), addr);
19045 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
19048 static rtx
19049 gen_frame_load (rtx reg, rtx frame_reg, int offset)
19051 return gen_frame_set (reg, frame_reg, offset, false);
19054 static rtx
19055 gen_frame_store (rtx reg, rtx frame_reg, int offset)
19057 return gen_frame_set (reg, frame_reg, offset, true);
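/* For example, gen_frame_store for f14 at offset -144 from r1 on a
   32-bit target yields (46 being GCC's internal number for f14):

     (set (mem:DF (plus:SI (reg:SI 1) (const_int -144)))
          (reg:DF 46))

   and gen_frame_load is the same SET with the operands swapped.  */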
19060 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
19061 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
19063 static rtx
19064 emit_frame_save (rtx frame_reg, enum machine_mode mode,
19065 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
19067 rtx reg, insn;
19069 /* These cases would need register indexed addressing; assert that none of them reaches here. */
19070 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
19071 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
19072 || (TARGET_E500_DOUBLE && mode == DFmode)
19073 || (TARGET_SPE_ABI
19074 && SPE_VECTOR_MODE (mode)
19075 && !SPE_CONST_OFFSET_OK (offset))));
19077 reg = gen_rtx_REG (mode, regno);
19078 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
19079 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
19080 NULL_RTX, NULL_RTX);
19083 /* Return an offset memory reference suitable for a frame store,
19084 converting the offset to a valid addressing mode if necessary. */
19086 static rtx
19087 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
19089 rtx int_rtx, offset_rtx;
19091 int_rtx = GEN_INT (offset);
19093 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
19094 || (TARGET_E500_DOUBLE && mode == DFmode))
19096 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
19097 emit_move_insn (offset_rtx, int_rtx);
19099 else
19100 offset_rtx = int_rtx;
19102 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
19105 #ifndef TARGET_FIX_AND_CONTINUE
19106 #define TARGET_FIX_AND_CONTINUE 0
19107 #endif
19109 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; FIRST_SAVRES_REGISTER must be the smallest of these. */
19110 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
19111 #define LAST_SAVRES_REGISTER 31
19112 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
19114 enum {
19115 SAVRES_LR = 0x1,
19116 SAVRES_SAVE = 0x2,
19117 SAVRES_REG = 0x0c,
19118 SAVRES_GPR = 0,
19119 SAVRES_FPR = 4,
19120 SAVRES_VR = 8
19123 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
19125 /* Temporary holding space for an out-of-line register save/restore
19126 routine name. */
19127 static char savres_routine_name[30];
19129 /* Return the name for an out-of-line register save/restore routine.
19130 SEL encodes the register class and direction via the SAVRES_* flags. */
19132 static char *
19133 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
19135 const char *prefix = "";
19136 const char *suffix = "";
19138 /* Different targets are supposed to define
19139 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
19140 routine name could be defined with:
19142 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
19144 This is a nice idea in theory, but in reality things are
19145 complicated in several ways:
19147 - ELF targets have save/restore routines for GPRs.
19149 - SPE targets use different prefixes for 32/64-bit registers, and
19150 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
19152 - PPC64 ELF targets have routines for save/restore of GPRs that
19153 differ in what they do with the link register, so having a set
19154 prefix doesn't work. (We only use one of the save routines at
19155 the moment, though.)
19157 - PPC32 ELF targets have "exit" versions of the restore routines
19158 that restore the link register and can save some extra space.
19159 These require an extra suffix. (There are also "tail" versions
19160 of the restore routines and "GOT" versions of the save routines,
19161 but we don't generate those at present. Same problems apply,
19162 though.)
19164 We deal with all this by synthesizing our own prefix/suffix and
19165 using that for the simple sprintf call shown above. */
19166 if (TARGET_SPE)
19168 /* No floating point saves on the SPE. */
19169 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
19171 if ((sel & SAVRES_SAVE))
19172 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
19173 else
19174 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
19176 if ((sel & SAVRES_LR))
19177 suffix = "_x";
19179 else if (DEFAULT_ABI == ABI_V4)
19181 if (TARGET_64BIT)
19182 goto aix_names;
19184 if ((sel & SAVRES_REG) == SAVRES_GPR)
19185 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
19186 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19187 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
19188 else if ((sel & SAVRES_REG) == SAVRES_VR)
19189 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19190 else
19191 abort ();
19193 if ((sel & SAVRES_LR))
19194 suffix = "_x";
19196 else if (DEFAULT_ABI == ABI_AIX)
19198 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
19199 /* No out-of-line save/restore routines for GPRs on AIX. */
19200 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
19201 #endif
19203 aix_names:
19204 if ((sel & SAVRES_REG) == SAVRES_GPR)
19205 prefix = ((sel & SAVRES_SAVE)
19206 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
19207 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
19208 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19210 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
19211 if ((sel & SAVRES_LR))
19212 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
19213 else
19214 #endif
19216 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
19217 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
19220 else if ((sel & SAVRES_REG) == SAVRES_VR)
19221 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19222 else
19223 abort ();
19226 if (DEFAULT_ABI == ABI_DARWIN)
19228 /* The Darwin approach is (slightly) different, in order to be
19229 compatible with code generated by the system toolchain. There is a
19230 single symbol for the start of the save sequence, and the code here
19231 embeds an offset into that code on the basis of the first register
19232 to be saved. */
19233 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
19234 if ((sel & SAVRES_REG) == SAVRES_GPR)
19235 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
19236 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
19237 (regno - 13) * 4, prefix, regno);
19238 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19239 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
19240 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
19241 else if ((sel & SAVRES_REG) == SAVRES_VR)
19242 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
19243 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
19244 else
19245 abort ();
19247 else
19248 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
19250 return savres_routine_name;
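/* Illustrative names produced by the cases above (assuming the matching
   routines exist in libgcc for the target): 32-bit SVR4 "_savegpr_29"
   and "_restgpr_29_x" (the "_x" form also restores LR); 64-bit/AIX
   "_savegpr0_29" (saves LR) versus "_savegpr1_29"; and Darwin
   "*saveGPR+64 ; save r29-r31", an offset into one shared stub.  */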
19253 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
19254 SEL encodes the register class and direction via the SAVRES_* flags. */
19256 static rtx
19257 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
19259 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
19260 ? info->first_gp_reg_save
19261 : (sel & SAVRES_REG) == SAVRES_FPR
19262 ? info->first_fp_reg_save - 32
19263 : (sel & SAVRES_REG) == SAVRES_VR
19264 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
19265 : -1);
19266 rtx sym;
19267 int select = sel;
19269 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
19270 versions of the gpr routines. */
19271 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
19272 && info->spe_64bit_regs_used)
19273 select ^= SAVRES_FPR ^ SAVRES_GPR;
19275 /* Don't generate bogus routine names. */
19276 gcc_assert (FIRST_SAVRES_REGISTER <= regno
19277 && regno <= LAST_SAVRES_REGISTER
19278 && select >= 0 && select <= 12);
19280 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
19282 if (sym == NULL)
19284 char *name;
19286 name = rs6000_savres_routine_name (info, regno, sel);
19288 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
19289 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
19290 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
19293 return sym;
19296 /* Emit a sequence of insns, including a stack tie if needed, for
19297 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
19298 reset the stack pointer, but move the base of the frame into
19299 reg UPDT_REGNO for use by out-of-line register restore routines. */
19301 static rtx
19302 rs6000_emit_stack_reset (rs6000_stack_t *info,
19303 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
19304 unsigned updt_regno)
19306 rtx updt_reg_rtx;
19308 /* This blockage is needed so that sched doesn't decide to move
19309 the sp change before the register restores. */
19310 if (DEFAULT_ABI == ABI_V4
19311 || (TARGET_SPE_ABI
19312 && info->spe_64bit_regs_used != 0
19313 && info->first_gp_reg_save != 32))
19314 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
19316 /* If we are restoring registers out-of-line, we will be using the
19317 "exit" variants of the restore routines, which will reset the
19318 stack for us. But we do need to point updt_reg into the
19319 right place for those routines. */
19320 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
19322 if (frame_off != 0)
19323 return emit_insn (gen_add3_insn (updt_reg_rtx,
19324 frame_reg_rtx, GEN_INT (frame_off)));
19325 else if (REGNO (frame_reg_rtx) != updt_regno)
19326 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
19328 return NULL_RTX;
19331 /* Return the register number used as a pointer by out-of-line
19332 save/restore functions. */
19334 static inline unsigned
19335 ptr_regno_for_savres (int sel)
19337 if (DEFAULT_ABI == ABI_AIX)
19338 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
19339 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
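/* Quick reference for the two returns above:

     ABI       FPR routines   LR variants   all others
     AIX       r1             r1            r12
     Darwin    r1             r11           r11
     V.4       r11            r11           r11
 */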
19342 /* Construct a parallel rtx describing the effect of a call to an
19343 out-of-line register save/restore routine, and emit the insn
19344 or jump_insn as appropriate. */
19346 static rtx
19347 rs6000_emit_savres_rtx (rs6000_stack_t *info,
19348 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
19349 enum machine_mode reg_mode, int sel)
19351 int i;
19352 int offset, start_reg, end_reg, n_regs, use_reg;
19353 int reg_size = GET_MODE_SIZE (reg_mode);
19354 rtx sym;
19355 rtvec p;
19356 rtx par, insn;
19358 offset = 0;
19359 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19360 ? info->first_gp_reg_save
19361 : (sel & SAVRES_REG) == SAVRES_FPR
19362 ? info->first_fp_reg_save
19363 : (sel & SAVRES_REG) == SAVRES_VR
19364 ? info->first_altivec_reg_save
19365 : -1);
19366 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19367 ? 32
19368 : (sel & SAVRES_REG) == SAVRES_FPR
19369 ? 64
19370 : (sel & SAVRES_REG) == SAVRES_VR
19371 ? LAST_ALTIVEC_REGNO + 1
19372 : -1);
19373 n_regs = end_reg - start_reg;
19374 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
19375 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
19376 + n_regs);
19378 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19379 RTVEC_ELT (p, offset++) = ret_rtx;
19381 RTVEC_ELT (p, offset++)
19382 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
19384 sym = rs6000_savres_routine_sym (info, sel);
19385 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
19387 use_reg = ptr_regno_for_savres (sel);
19388 if ((sel & SAVRES_REG) == SAVRES_VR)
19390 /* Vector regs are saved/restored using [reg+reg] addressing. */
19391 RTVEC_ELT (p, offset++)
19392 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19393 RTVEC_ELT (p, offset++)
19394 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
19396 else
19397 RTVEC_ELT (p, offset++)
19398 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19400 for (i = 0; i < end_reg - start_reg; i++)
19401 RTVEC_ELT (p, i + offset)
19402 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
19403 frame_reg_rtx, save_area_offset + reg_size * i,
19404 (sel & SAVRES_SAVE) != 0);
19406 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19407 RTVEC_ELT (p, i + offset)
19408 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
19410 par = gen_rtx_PARALLEL (VOIDmode, p);
19412 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19414 insn = emit_jump_insn (par);
19415 JUMP_LABEL (insn) = ret_rtx;
19417 else
19418 insn = emit_insn (par);
19419 return insn;
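/* Shape of the PARALLEL built above, e.g. for an out-of-line GPR
   restore that also restores LR (an illustrative sketch):

     (parallel [(return)
                (clobber (reg LR))
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg 11))               ; pointer reg for the routine
                (set (reg 29) (mem (plus (reg 11) (const_int ...))))
                ...one SET per saved register...])
 */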
19422 /* Determine whether the gp REG is really used. */
19424 static bool
19425 rs6000_reg_live_or_pic_offset_p (int reg)
19427 /* If the function calls eh_return, claim as used all the registers that
19428 would otherwise be checked for liveness. This is required for the PIC offset
19429 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
19430 register allocation purposes in this case. */
19432 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
19433 && (!call_used_regs[reg]
19434 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19435 && !TARGET_SINGLE_PIC_BASE
19436 && TARGET_TOC && TARGET_MINIMAL_TOC)))
19437 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19438 && !TARGET_SINGLE_PIC_BASE
19439 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19440 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
19443 /* Emit function prologue as insns. */
19445 void
19446 rs6000_emit_prologue (void)
19448 rs6000_stack_t *info = rs6000_stack_info ();
19449 enum machine_mode reg_mode = Pmode;
19450 int reg_size = TARGET_32BIT ? 4 : 8;
19451 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19452 rtx frame_reg_rtx = sp_reg_rtx;
19453 unsigned int cr_save_regno;
19454 rtx cr_save_rtx = NULL_RTX;
19455 rtx insn;
19456 int strategy;
19457 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19458 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19459 && call_used_regs[STATIC_CHAIN_REGNUM]);
19460 /* Offset to top of frame for frame_reg and sp respectively. */
19461 HOST_WIDE_INT frame_off = 0;
19462 HOST_WIDE_INT sp_off = 0;
19464 #ifdef ENABLE_CHECKING
19465 /* Track and check usage of r0, r11, r12. */
19466 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
19467 #define START_USE(R) do \
19469 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19470 reg_inuse |= 1 << (R); \
19471 } while (0)
19472 #define END_USE(R) do \
19474 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19475 reg_inuse &= ~(1 << (R)); \
19476 } while (0)
19477 #define NOT_INUSE(R) do \
19479 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19480 } while (0)
19481 #else
19482 #define START_USE(R) do {} while (0)
19483 #define END_USE(R) do {} while (0)
19484 #define NOT_INUSE(R) do {} while (0)
19485 #endif
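/* Usage pattern for the checking macros above, as seen throughout the
   prologue code below:

     START_USE (11);     // claim r11; asserts it was free
     ... emit insns using r11 as a temporary ...
     END_USE (11);       // release r11; asserts it was claimed

   NOT_INUSE (0) just asserts that r0 is currently free.  With
   ENABLE_CHECKING off, all three expand to nothing.  */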
19487 if (flag_stack_usage_info)
19488 current_function_static_stack_size = info->total_size;
19490 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
19491 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
19493 if (TARGET_FIX_AND_CONTINUE)
19495 /* GDB on Darwin arranges to forward a function from the old
19496 address by modifying the first 5 instructions of the function
19497 to branch to the overriding function. This is necessary to
19498 permit function pointers that point to the old function to
19499 actually forward to the new function. */
19500 emit_insn (gen_nop ());
19501 emit_insn (gen_nop ());
19502 emit_insn (gen_nop ());
19503 emit_insn (gen_nop ());
19504 emit_insn (gen_nop ());
19507 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
19509 reg_mode = V2SImode;
19510 reg_size = 8;
19513 /* Handle world saves specially here. */
19514 if (WORLD_SAVE_P (info))
19516 int i, j, sz;
19517 rtx treg;
19518 rtvec p;
19519 rtx reg0;
19521 /* save_world expects lr in r0. */
19522 reg0 = gen_rtx_REG (Pmode, 0);
19523 if (info->lr_save_p)
19525 insn = emit_move_insn (reg0,
19526 gen_rtx_REG (Pmode, LR_REGNO));
19527 RTX_FRAME_RELATED_P (insn) = 1;
19530 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
19531 assumptions about the offsets of various bits of the stack
19532 frame. */
19533 gcc_assert (info->gp_save_offset == -220
19534 && info->fp_save_offset == -144
19535 && info->lr_save_offset == 8
19536 && info->cr_save_offset == 4
19537 && info->push_p
19538 && info->lr_save_p
19539 && (!crtl->calls_eh_return
19540 || info->ehrd_offset == -432)
19541 && info->vrsave_save_offset == -224
19542 && info->altivec_save_offset == -416);
19544 treg = gen_rtx_REG (SImode, 11);
19545 emit_move_insn (treg, GEN_INT (-info->total_size));
19547 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
19548 in R11. It also clobbers R12, so beware! */
19550 /* Preserve CR2 for save_world prologues. */
19551 sz = 5;
19552 sz += 32 - info->first_gp_reg_save;
19553 sz += 64 - info->first_fp_reg_save;
19554 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
19555 p = rtvec_alloc (sz);
19556 j = 0;
19557 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
19558 gen_rtx_REG (SImode,
19559 LR_REGNO));
19560 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
19561 gen_rtx_SYMBOL_REF (Pmode,
19562 "*save_world"));
19563 /* We do floats first so that the instruction pattern matches
19564 properly. */
19565 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19566 RTVEC_ELT (p, j++)
19567 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19568 ? DFmode : SFmode,
19569 info->first_fp_reg_save + i),
19570 frame_reg_rtx,
19571 info->fp_save_offset + frame_off + 8 * i);
19572 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
19573 RTVEC_ELT (p, j++)
19574 = gen_frame_store (gen_rtx_REG (V4SImode,
19575 info->first_altivec_reg_save + i),
19576 frame_reg_rtx,
19577 info->altivec_save_offset + frame_off + 16 * i);
19578 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19579 RTVEC_ELT (p, j++)
19580 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19581 frame_reg_rtx,
19582 info->gp_save_offset + frame_off + reg_size * i);
19584 /* CR register traditionally saved as CR2. */
19585 RTVEC_ELT (p, j++)
19586 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
19587 frame_reg_rtx, info->cr_save_offset + frame_off);
19588 /* Describe the store of R0, which holds the old LR. */
19589 if (info->lr_save_p)
19590 RTVEC_ELT (p, j++)
19591 = gen_frame_store (reg0,
19592 frame_reg_rtx, info->lr_save_offset + frame_off);
19593 /* Explain what happens to the stack pointer. */
19595 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
19596 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
19599 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19600 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19601 treg, GEN_INT (-info->total_size));
19602 sp_off = frame_off = info->total_size;
19605 strategy = info->savres_strategy;
19607 /* For V.4, update stack before we do any saving and set back pointer. */
19608 if (! WORLD_SAVE_P (info)
19609 && info->push_p
19610 && (DEFAULT_ABI == ABI_V4
19611 || crtl->calls_eh_return))
19613 bool need_r11 = (TARGET_SPE
19614 ? (!(strategy & SAVE_INLINE_GPRS)
19615 && info->spe_64bit_regs_used == 0)
19616 : (!(strategy & SAVE_INLINE_FPRS)
19617 || !(strategy & SAVE_INLINE_GPRS)
19618 || !(strategy & SAVE_INLINE_VRS)));
19619 int ptr_regno = -1;
19620 rtx ptr_reg = NULL_RTX;
19621 int ptr_off = 0;
19623 if (info->total_size < 32767)
19624 frame_off = info->total_size;
19625 else if (need_r11)
19626 ptr_regno = 11;
19627 else if (info->cr_save_p
19628 || info->lr_save_p
19629 || info->first_fp_reg_save < 64
19630 || info->first_gp_reg_save < 32
19631 || info->altivec_size != 0
19632 || info->vrsave_mask != 0
19633 || crtl->calls_eh_return)
19634 ptr_regno = 12;
19635 else
19637 /* The prologue won't be saving any regs so there is no need
19638 to set up a frame register to access any frame save area.
19639 We also won't be using frame_off anywhere below, but set
19640 the correct value anyway to protect against future
19641 changes to this function. */
19642 frame_off = info->total_size;
19644 if (ptr_regno != -1)
19646 /* Set up the frame offset to that needed by the first
19647 out-of-line save function. */
19648 START_USE (ptr_regno);
19649 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19650 frame_reg_rtx = ptr_reg;
19651 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
19652 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
19653 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
19654 ptr_off = info->gp_save_offset + info->gp_size;
19655 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
19656 ptr_off = info->altivec_save_offset + info->altivec_size;
19657 frame_off = -ptr_off;
19659 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
19660 sp_off = info->total_size;
19661 if (frame_reg_rtx != sp_reg_rtx)
19662 rs6000_emit_stack_tie (frame_reg_rtx, false);
19665 /* If we use the link register, get it into r0. */
19666 if (!WORLD_SAVE_P (info) && info->lr_save_p)
19668 rtx addr, reg, mem;
19670 reg = gen_rtx_REG (Pmode, 0);
19671 START_USE (0);
19672 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
19673 RTX_FRAME_RELATED_P (insn) = 1;
19675 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
19676 | SAVE_NOINLINE_FPRS_SAVES_LR)))
19678 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19679 GEN_INT (info->lr_save_offset + frame_off));
19680 mem = gen_rtx_MEM (Pmode, addr);
19681 /* This should not be of rs6000_sr_alias_set, because of
19682 __builtin_return_address. */
19684 insn = emit_move_insn (mem, reg);
19685 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19686 NULL_RTX, NULL_RTX);
19687 END_USE (0);
19691 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
19692 r12 will be needed by the out-of-line gpr save. */
19693 cr_save_regno = (DEFAULT_ABI == ABI_AIX
19694 && !(strategy & (SAVE_INLINE_GPRS
19695 | SAVE_NOINLINE_GPRS_SAVES_LR))
19696 ? 11 : 12);
19697 if (!WORLD_SAVE_P (info)
19698 && info->cr_save_p
19699 && REGNO (frame_reg_rtx) != cr_save_regno
19700 && !(using_static_chain_p && cr_save_regno == 11))
19702 rtx set;
19704 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
19705 START_USE (cr_save_regno);
19706 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19707 RTX_FRAME_RELATED_P (insn) = 1;
19708 /* Now, there's no way that dwarf2out_frame_debug_expr is going
19709 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
19710 But that's OK. All we have to do is specify that _one_ condition
19711 code register is saved in this stack slot. The thrower's epilogue
19712 will then restore all the call-saved registers.
19713 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
19714 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
19715 gen_rtx_REG (SImode, CR2_REGNO));
19716 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19719 /* Do any required saving of FPRs. If there are only one or two to
19720 save, do it ourselves. Otherwise, call an out-of-line save function. */
19721 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
19723 int i;
19724 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19725 if (save_reg_p (info->first_fp_reg_save + i))
19726 emit_frame_save (frame_reg_rtx,
19727 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19728 ? DFmode : SFmode),
19729 info->first_fp_reg_save + i,
19730 info->fp_save_offset + frame_off + 8 * i,
19731 sp_off - frame_off);
19733 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
19735 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
19736 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
19737 unsigned ptr_regno = ptr_regno_for_savres (sel);
19738 rtx ptr_reg = frame_reg_rtx;
19740 if (REGNO (frame_reg_rtx) == ptr_regno)
19741 gcc_checking_assert (frame_off == 0);
19742 else
19744 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19745 NOT_INUSE (ptr_regno);
19746 emit_insn (gen_add3_insn (ptr_reg,
19747 frame_reg_rtx, GEN_INT (frame_off)));
19749 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19750 info->fp_save_offset,
19751 info->lr_save_offset,
19752 DFmode, sel);
19753 rs6000_frame_related (insn, ptr_reg, sp_off,
19754 NULL_RTX, NULL_RTX);
19755 if (lr)
19756 END_USE (0);
19759 /* Save GPRs. This is done as a PARALLEL if we are using
19760 the store-multiple instructions. */
19761 if (!WORLD_SAVE_P (info)
19762 && TARGET_SPE_ABI
19763 && info->spe_64bit_regs_used != 0
19764 && info->first_gp_reg_save != 32)
19766 int i;
19767 rtx spe_save_area_ptr;
19768 HOST_WIDE_INT save_off;
19769 int ool_adjust = 0;
19771 /* Determine whether we can address all of the registers that need
19772 to be saved with an offset from frame_reg_rtx that fits in
19773 the small const field for SPE memory instructions. */
19774 int spe_regs_addressable
19775 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
19776 + reg_size * (32 - info->first_gp_reg_save - 1))
19777 && (strategy & SAVE_INLINE_GPRS));
19779 if (spe_regs_addressable)
19781 spe_save_area_ptr = frame_reg_rtx;
19782 save_off = frame_off;
19784 else
19786 /* Make r11 point to the start of the SPE save area. We need
19787 to be careful here if r11 is holding the static chain. If
19788 it is, then temporarily save it in r0. */
19789 HOST_WIDE_INT offset;
19791 if (!(strategy & SAVE_INLINE_GPRS))
19792 ool_adjust = 8 * (info->first_gp_reg_save
19793 - (FIRST_SAVRES_REGISTER + 1));
19794 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
19795 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
19796 save_off = frame_off - offset;
19798 if (using_static_chain_p)
19800 rtx r0 = gen_rtx_REG (Pmode, 0);
19802 START_USE (0);
19803 gcc_assert (info->first_gp_reg_save > 11);
19805 emit_move_insn (r0, spe_save_area_ptr);
19807 else if (REGNO (frame_reg_rtx) != 11)
19808 START_USE (11);
19810 emit_insn (gen_addsi3 (spe_save_area_ptr,
19811 frame_reg_rtx, GEN_INT (offset)));
19812 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
19813 frame_off = -info->spe_gp_save_offset + ool_adjust;
19816 if ((strategy & SAVE_INLINE_GPRS))
19818 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19819 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19820 emit_frame_save (spe_save_area_ptr, reg_mode,
19821 info->first_gp_reg_save + i,
19822 (info->spe_gp_save_offset + save_off
19823 + reg_size * i),
19824 sp_off - save_off);
19826 else
19828 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
19829 info->spe_gp_save_offset + save_off,
19830 0, reg_mode,
19831 SAVRES_SAVE | SAVRES_GPR);
19833 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
19834 NULL_RTX, NULL_RTX);
19837 /* Move the static chain pointer back. */
19838 if (!spe_regs_addressable)
19840 if (using_static_chain_p)
19842 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
19843 END_USE (0);
19845 else if (REGNO (frame_reg_rtx) != 11)
19846 END_USE (11);
19849 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
19851 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
19852 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
19853 unsigned ptr_regno = ptr_regno_for_savres (sel);
19854 rtx ptr_reg = frame_reg_rtx;
19855 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
19856 int end_save = info->gp_save_offset + info->gp_size;
19857 int ptr_off;
19859 if (!ptr_set_up)
19860 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19862 /* Need to adjust r11 (r12) if we saved any FPRs. */
19863 if (end_save + frame_off != 0)
19865 rtx offset = GEN_INT (end_save + frame_off);
19867 if (ptr_set_up)
19868 frame_off = -end_save;
19869 else
19870 NOT_INUSE (ptr_regno);
19871 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
19873 else if (!ptr_set_up)
19875 NOT_INUSE (ptr_regno);
19876 emit_move_insn (ptr_reg, frame_reg_rtx);
19878 ptr_off = -end_save;
19879 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19880 info->gp_save_offset + ptr_off,
19881 info->lr_save_offset + ptr_off,
19882 reg_mode, sel);
19883 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
19884 NULL_RTX, NULL_RTX);
19885 if (lr)
19886 END_USE (0);
19888 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
19890 rtvec p;
19891 int i;
19892 p = rtvec_alloc (32 - info->first_gp_reg_save);
19893 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19894 RTVEC_ELT (p, i)
19895 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19896 frame_reg_rtx,
19897 info->gp_save_offset + frame_off + reg_size * i);
19898 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19899 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19900 NULL_RTX, NULL_RTX);
19902 else if (!WORLD_SAVE_P (info))
19904 int i;
19905 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19906 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19907 emit_frame_save (frame_reg_rtx, reg_mode,
19908 info->first_gp_reg_save + i,
19909 info->gp_save_offset + frame_off + reg_size * i,
19910 sp_off - frame_off);
19913 if (crtl->calls_eh_return)
19915 unsigned int i;
19916 rtvec p;
19918 for (i = 0; ; ++i)
19920 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19921 if (regno == INVALID_REGNUM)
19922 break;
19925 p = rtvec_alloc (i);
19927 for (i = 0; ; ++i)
19929 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19930 if (regno == INVALID_REGNUM)
19931 break;
19933 insn
19934 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
19935 sp_reg_rtx,
19936 info->ehrd_offset + sp_off + reg_size * (int) i);
19937 RTVEC_ELT (p, i) = insn;
19938 RTX_FRAME_RELATED_P (insn) = 1;
19941 insn = emit_insn (gen_blockage ());
19942 RTX_FRAME_RELATED_P (insn) = 1;
19943 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
19946 /* In the AIX ABI we need to make sure r2 is really saved. */
19947 if (TARGET_AIX && crtl->calls_eh_return)
19949 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
19950 rtx save_insn, join_insn, note;
19951 long toc_restore_insn;
19953 tmp_reg = gen_rtx_REG (Pmode, 11);
19954 tmp_reg_si = gen_rtx_REG (SImode, 11);
19955 if (using_static_chain_p)
19957 START_USE (0);
19958 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
19960 else
19961 START_USE (11);
19962 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
19963 /* Peek at the instruction to which this function returns. If it's
19964 restoring r2, then we know we've already saved r2. We can't
19965 unconditionally save r2 because the value we have will already
19966 be updated if we arrived at this function via a plt call or
19967 toc adjusting stub. */
19968 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
19969 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
19970 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
19971 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
19972 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
19973 validate_condition_mode (EQ, CCUNSmode);
19974 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
19975 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19976 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
19977 toc_save_done = gen_label_rtx ();
19978 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
19979 gen_rtx_EQ (VOIDmode, compare_result,
19980 const0_rtx),
19981 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
19982 pc_rtx);
19983 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
19984 JUMP_LABEL (jump) = toc_save_done;
19985 LABEL_NUSES (toc_save_done) += 1;
19987 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
19988 TOC_REGNUM, frame_off + 5 * reg_size,
19989 sp_off - frame_off);
19991 emit_label (toc_save_done);
19993 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
19994 have a CFG that has different saves along different paths.
19995 Move the note to a dummy blockage insn, which describes that
19996 R2 is unconditionally saved after the label. */
19997 /* ??? An alternate representation might be a special insn pattern
19998 containing both the branch and the store. That might give the
19999 code that minimizes the number of DW_CFA_advance opcodes more
20000 freedom in placing the annotations. */
20001 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
20002 if (note)
20003 remove_note (save_insn, note);
20004 else
20005 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
20006 copy_rtx (PATTERN (save_insn)), NULL_RTX);
20007 RTX_FRAME_RELATED_P (save_insn) = 0;
20009 join_insn = emit_insn (gen_blockage ());
20010 REG_NOTES (join_insn) = note;
20011 RTX_FRAME_RELATED_P (join_insn) = 1;
20013 if (using_static_chain_p)
20015 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
20016 END_USE (0);
20018 else
20019 END_USE (11);
20022 /* Save CR if we use any that must be preserved. */
20023 if (!WORLD_SAVE_P (info) && info->cr_save_p)
20025 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
20026 GEN_INT (info->cr_save_offset + frame_off));
20027 rtx mem = gen_frame_mem (SImode, addr);
20028 /* See the large comment above about why CR2_REGNO is used. */
20029 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
20031 /* If we didn't copy cr before, do so now using r0. */
20032 if (cr_save_rtx == NULL_RTX)
20034 rtx set;
20036 START_USE (0);
20037 cr_save_rtx = gen_rtx_REG (SImode, 0);
20038 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
20039 RTX_FRAME_RELATED_P (insn) = 1;
20040 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
20041 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
20043 insn = emit_move_insn (mem, cr_save_rtx);
20044 END_USE (REGNO (cr_save_rtx));
20046 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20047 NULL_RTX, NULL_RTX);
20050 /* Update stack and set back pointer unless this is V.4,
20051 for which it was done previously. */
20052 if (!WORLD_SAVE_P (info) && info->push_p
20053 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
20055 rtx ptr_reg = NULL;
20056 int ptr_off = 0;
20058 /* If saving altivec regs we need to be able to address all save
20059 locations using a 16-bit offset. */
20060 if ((strategy & SAVE_INLINE_VRS) == 0
20061 || (info->altivec_size != 0
20062 && (info->altivec_save_offset + info->altivec_size - 16
20063 + info->total_size - frame_off) > 32767)
20064 || (info->vrsave_mask != 0
20065 && (info->vrsave_save_offset
20066 + info->total_size - frame_off) > 32767))
20068 int sel = SAVRES_SAVE | SAVRES_VR;
20069 unsigned ptr_regno = ptr_regno_for_savres (sel);
20071 if (using_static_chain_p
20072 && ptr_regno == STATIC_CHAIN_REGNUM)
20073 ptr_regno = 12;
20074 if (REGNO (frame_reg_rtx) != ptr_regno)
20075 START_USE (ptr_regno);
20076 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20077 frame_reg_rtx = ptr_reg;
20078 ptr_off = info->altivec_save_offset + info->altivec_size;
20079 frame_off = -ptr_off;
20081 else if (REGNO (frame_reg_rtx) == 1)
20082 frame_off = info->total_size;
20083 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
20084 sp_off = info->total_size;
20085 if (frame_reg_rtx != sp_reg_rtx)
20086 rs6000_emit_stack_tie (frame_reg_rtx, false);
20089 /* Set frame pointer, if needed. */
20090 if (frame_pointer_needed)
20092 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
20093 sp_reg_rtx);
20094 RTX_FRAME_RELATED_P (insn) = 1;
20097 /* Save AltiVec registers if needed. Save here because the red zone does
20098 not always include AltiVec registers. */
20099 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20100 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
20102 int end_save = info->altivec_save_offset + info->altivec_size;
20103 int ptr_off;
20104 /* Oddly, the vector save/restore functions point r0 at the end
20105 of the save area, then use r11 or r12 to load offsets for
20106 [reg+reg] addressing. */
20107 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20108 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
20109 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20111 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20112 NOT_INUSE (0);
20113 if (end_save + frame_off != 0)
20115 rtx offset = GEN_INT (end_save + frame_off);
20117 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20119 else
20120 emit_move_insn (ptr_reg, frame_reg_rtx);
20122 ptr_off = -end_save;
20123 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20124 info->altivec_save_offset + ptr_off,
20125 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
20126 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
20127 NULL_RTX, NULL_RTX);
20128 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20130 /* The oddity mentioned above clobbered our frame reg. */
20131 emit_move_insn (frame_reg_rtx, ptr_reg);
20132 frame_off = ptr_off;
20135 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
20136 && info->altivec_size != 0)
20138 int i;
20140 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20141 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20143 rtx areg, savereg, mem;
20144 int offset;
20146 offset = (info->altivec_save_offset + frame_off
20147 + 16 * (i - info->first_altivec_reg_save));
20149 savereg = gen_rtx_REG (V4SImode, i);
20151 NOT_INUSE (0);
20152 areg = gen_rtx_REG (Pmode, 0);
20153 emit_move_insn (areg, GEN_INT (offset));
20155 /* AltiVec addressing mode is [reg+reg]. */
20156 mem = gen_frame_mem (V4SImode,
20157 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
20159 insn = emit_move_insn (mem, savereg);
20161 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
20162 areg, GEN_INT (offset));
20166 /* VRSAVE is a bit vector representing which AltiVec registers
20167 are used. The OS uses this to determine which vector
20168 registers to save on a context switch. We need to save
20169 VRSAVE on the stack frame, add whatever AltiVec registers we
20170 used in this function, and do the corresponding magic in the
20171 epilogue. */
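/* Illustrative note (added; not in the original source): assuming the
   usual rs6000.h definition
   ALTIVEC_REG_BIT (R) == (0x80000000 >> ((R) - FIRST_ALTIVEC_REGNO)),
   the mask uses big-endian bit numbering, with v0 owning the most
   significant bit.  A function touching only v20 and v21 would have

     vrsave_mask = (0x80000000 >> 20) | (0x80000000 >> 21)
                 = 0x00000800 | 0x00000400 = 0x00000c00

   and the code below ORs exactly those bits into the live VRSAVE.  */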
20173 if (!WORLD_SAVE_P (info)
20174 && TARGET_ALTIVEC
20175 && TARGET_ALTIVEC_VRSAVE
20176 && info->vrsave_mask != 0)
20178 rtx reg, vrsave;
20179 int offset;
20180 int save_regno;
20182 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
20183 be using r12 as frame_reg_rtx and r11 as the static chain
20184 pointer for nested functions. */
20185 save_regno = 12;
20186 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
20187 save_regno = 11;
20188 else if (REGNO (frame_reg_rtx) == 12)
20190 save_regno = 11;
20191 if (using_static_chain_p)
20192 save_regno = 0;
20195 NOT_INUSE (save_regno);
20196 reg = gen_rtx_REG (SImode, save_regno);
20197 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
20198 if (TARGET_MACHO)
20199 emit_insn (gen_get_vrsave_internal (reg));
20200 else
20201 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
20203 /* Save VRSAVE. */
20204 offset = info->vrsave_save_offset + frame_off;
20205 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
20207 /* Include the registers in the mask. */
20208 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
20210 insn = emit_insn (generate_set_vrsave (reg, info, 0));
20213 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
20214 if (!TARGET_SINGLE_PIC_BASE
20215 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
20216 || (DEFAULT_ABI == ABI_V4
20217 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
20218 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
20220 /* If emit_load_toc_table will use the link register, we need to save
20221 it. We use R12 for this purpose because emit_load_toc_table
20222 can use register 0. This allows us to use a plain 'blr' to return
20223 from the procedure more often. */
20224 int save_LR_around_toc_setup = (TARGET_ELF
20225 && DEFAULT_ABI != ABI_AIX
20226 && flag_pic
20227 && ! info->lr_save_p
20228 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
20229 if (save_LR_around_toc_setup)
20231 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20232 rtx tmp = gen_rtx_REG (Pmode, 12);
20234 insn = emit_move_insn (tmp, lr);
20235 RTX_FRAME_RELATED_P (insn) = 1;
20237 rs6000_emit_load_toc_table (TRUE);
20239 insn = emit_move_insn (lr, tmp);
20240 add_reg_note (insn, REG_CFA_RESTORE, lr);
20241 RTX_FRAME_RELATED_P (insn) = 1;
20243 else
20244 rs6000_emit_load_toc_table (TRUE);
20247 #if TARGET_MACHO
20248 if (!TARGET_SINGLE_PIC_BASE
20249 && DEFAULT_ABI == ABI_DARWIN
20250 && flag_pic && crtl->uses_pic_offset_table)
20252 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20253 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
20255 /* Save and restore LR locally around this call (in R0). */
20256 if (!info->lr_save_p)
20257 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
20259 emit_insn (gen_load_macho_picbase (src));
20261 emit_move_insn (gen_rtx_REG (Pmode,
20262 RS6000_PIC_OFFSET_TABLE_REGNUM),
20263 lr);
20265 if (!info->lr_save_p)
20266 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
20268 #endif
20270 /* If we need to, save the TOC register after doing the stack setup.
20271 Do not emit eh frame info for this save. The unwinder wants info,
20272 conceptually attached to instructions in this function, about
20273 register values in the caller of this function. This R2 may have
20274 already been changed from the value in the caller.
20275 We don't attempt to write accurate DWARF EH frame info for R2
20276 because code emitted by gcc for a (non-pointer) function call
20277 doesn't save and restore R2. Instead, R2 is managed out-of-line
20278 by a linker generated plt call stub when the function resides in
20279 a shared library. This behaviour is costly to describe in DWARF,
20280 both in terms of the size of DWARF info and the time taken in the
20281 unwinder to interpret it. R2 changes, apart from the
20282 calls_eh_return case earlier in this function, are handled by
20283 linux-unwind.h frob_update_context. */
20284 if (rs6000_save_toc_in_prologue_p ())
20286 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
20287 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
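/* Worked example (added for clarity): under the 64-bit AIX ABI,
   reg_size is 8 and the store above lands at 40(r1), the TOC slot of
   the linkage area (backchain at 0, CR at 8, LR at 16, then the two
   reserved words).  The 32-bit layout halves every offset, giving
   20(r1).  */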
20291 /* Write function prologue. */
20293 static void
20294 rs6000_output_function_prologue (FILE *file,
20295 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20297 rs6000_stack_t *info = rs6000_stack_info ();
20299 if (TARGET_DEBUG_STACK)
20300 debug_stack_info (info);
20302 /* Write .extern for any function we will call to save and restore
20303 fp values. */
20304 if (info->first_fp_reg_save < 64
20305 && !TARGET_MACHO
20306 && !TARGET_ELF)
20308 char *name;
20309 int regno = info->first_fp_reg_save - 32;
20311 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
20313 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20314 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20315 name = rs6000_savres_routine_name (info, regno, sel);
20316 fprintf (file, "\t.extern %s\n", name);
20318 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
20320 bool lr = (info->savres_strategy
20321 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20322 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20323 name = rs6000_savres_routine_name (info, regno, sel);
20324 fprintf (file, "\t.extern %s\n", name);
20328 rs6000_pic_labelno++;
20331 /* Non-zero if vmx regs are restored before the frame pop, zero if
20332 we restore after the pop when possible. */
20333 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
20335 /* Restoring cr is a two step process: loading a reg from the frame
20336 save, then moving the reg to cr. For ABI_V4 we must let the
20337 unwinder know that the stack location is no longer valid at or
20338 before the stack deallocation, but we can't emit a cfa_restore for
20339 cr at the stack deallocation like we do for other registers.
20340 The trouble is that it is possible for the move to cr to be
20341 scheduled after the stack deallocation. So say exactly where cr
20342 is located on each of the two insns. */
20344 static rtx
20345 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
20347 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
20348 rtx reg = gen_rtx_REG (SImode, regno);
20349 rtx insn = emit_move_insn (reg, mem);
20351 if (!exit_func && DEFAULT_ABI == ABI_V4)
20353 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20354 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
20356 add_reg_note (insn, REG_CFA_REGISTER, set);
20357 RTX_FRAME_RELATED_P (insn) = 1;
20359 return reg;
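/* Illustrative sketch (added; the offset is a placeholder): for ABI_V4
   the two insns produced by load_cr_save and restore_saved_cr are
   roughly

     lwz   r12,<cr_save_offset>(r1)  ; REG_CFA_REGISTER: CR lives in r12
     mtcrf 0xff,r12                  ; REG_CFA_RESTORE: CR is restored

   Annotating both insns lets the scheduler move the mtcrf past the
   stack deallocation without losing the unwinder's view of CR.  */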
20362 /* Reload CR from REG. */
20364 static void
20365 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
20367 int count = 0;
20368 int i;
20370 if (using_mfcr_multiple)
20372 for (i = 0; i < 8; i++)
20373 if (save_reg_p (CR0_REGNO + i))
20374 count++;
20375 gcc_assert (count);
20378 if (using_mfcr_multiple && count > 1)
20380 rtvec p;
20381 int ndx;
20383 p = rtvec_alloc (count);
20385 ndx = 0;
20386 for (i = 0; i < 8; i++)
20387 if (save_reg_p (CR0_REGNO + i))
20389 rtvec r = rtvec_alloc (2);
20390 RTVEC_ELT (r, 0) = reg;
20391 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
20392 RTVEC_ELT (p, ndx) =
20393 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
20394 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
20395 ndx++;
20397 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20398 gcc_assert (ndx == count);
20400 else
20401 for (i = 0; i < 8; i++)
20402 if (save_reg_p (CR0_REGNO + i))
20403 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
20404 reg));
20406 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20408 rtx insn = get_last_insn ();
20409 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20411 add_reg_note (insn, REG_CFA_RESTORE, cr);
20412 RTX_FRAME_RELATED_P (insn) = 1;
20416 /* Like cr, the move to lr instruction can be scheduled after the
20417 stack deallocation, but unlike cr, its stack frame save is still
20418 valid. So we only need to emit the cfa_restore on the correct
20419 instruction. */
20421 static void
20422 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
20424 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
20425 rtx reg = gen_rtx_REG (Pmode, regno);
20427 emit_move_insn (reg, mem);
20430 static void
20431 restore_saved_lr (int regno, bool exit_func)
20433 rtx reg = gen_rtx_REG (Pmode, regno);
20434 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20435 rtx insn = emit_move_insn (lr, reg);
20437 if (!exit_func && flag_shrink_wrap)
20439 add_reg_note (insn, REG_CFA_RESTORE, lr);
20440 RTX_FRAME_RELATED_P (insn) = 1;
20444 static rtx
20445 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
20447 if (info->cr_save_p)
20448 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20449 gen_rtx_REG (SImode, CR2_REGNO),
20450 cfa_restores);
20451 if (info->lr_save_p)
20452 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20453 gen_rtx_REG (Pmode, LR_REGNO),
20454 cfa_restores);
20455 return cfa_restores;
20458 /* Return true if OFFSET from stack pointer can be clobbered by signals.
20459 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
20460 below the stack pointer that are not clobbered by signals. */
20462 static inline bool
20463 offset_below_red_zone_p (HOST_WIDE_INT offset)
20465 return offset < (DEFAULT_ABI == ABI_V4
20466 ? 0
20467 : TARGET_32BIT ? -220 : -288);
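/* Example (added for clarity): under the 64-bit AIX ABI the cushion is
   288 bytes, so offset_below_red_zone_p (-300) is true while
   offset_below_red_zone_p (-100) is false; under ABI_V4 every negative
   offset counts as below the (nonexistent) red zone.  */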
20470 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
20472 static void
20473 emit_cfa_restores (rtx cfa_restores)
20475 rtx insn = get_last_insn ();
20476 rtx *loc = &REG_NOTES (insn);
20478 while (*loc)
20479 loc = &XEXP (*loc, 1);
20480 *loc = cfa_restores;
20481 RTX_FRAME_RELATED_P (insn) = 1;
20484 /* Emit function epilogue as insns. */
20486 void
20487 rs6000_emit_epilogue (int sibcall)
20489 rs6000_stack_t *info;
20490 int restoring_GPRs_inline;
20491 int restoring_FPRs_inline;
20492 int using_load_multiple;
20493 int using_mtcr_multiple;
20494 int use_backchain_to_restore_sp;
20495 int restore_lr;
20496 int strategy;
20497 HOST_WIDE_INT frame_off = 0;
20498 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
20499 rtx frame_reg_rtx = sp_reg_rtx;
20500 rtx cfa_restores = NULL_RTX;
20501 rtx insn;
20502 rtx cr_save_reg = NULL_RTX;
20503 enum machine_mode reg_mode = Pmode;
20504 int reg_size = TARGET_32BIT ? 4 : 8;
20505 int i;
20506 bool exit_func;
20507 unsigned ptr_regno;
20509 info = rs6000_stack_info ();
20511 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20513 reg_mode = V2SImode;
20514 reg_size = 8;
20517 strategy = info->savres_strategy;
20518 using_load_multiple = strategy & SAVRES_MULTIPLE;
20519 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
20520 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
20521 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
20522 || rs6000_cpu == PROCESSOR_PPC603
20523 || rs6000_cpu == PROCESSOR_PPC750
20524 || optimize_size);
20525 /* Restore via the backchain when we have a large frame, since this
20526 is more efficient than an addis, addi pair. The second condition
20527 here will not trigger at the moment; we don't actually need a
20528 frame pointer for alloca, but the generic parts of the compiler
20529 give us one anyway. */
20530 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
20531 || (cfun->calls_alloca
20532 && !frame_pointer_needed));
20533 restore_lr = (info->lr_save_p
20534 && (restoring_FPRs_inline
20535 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
20536 && (restoring_GPRs_inline
20537 || info->first_fp_reg_save < 64));
20539 if (WORLD_SAVE_P (info))
20541 int i, j;
20542 char rname[30];
20543 const char *alloc_rname;
20544 rtvec p;
20546 /* eh_rest_world_r10 will return to the location saved in the LR
20547 stack slot (which is not likely to be our caller).
20548 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
20549 rest_world is similar, except any R10 parameter is ignored.
20550 The exception-handling stuff that was here in 2.95 is no
20551 longer necessary. */
20553 p = rtvec_alloc (9
20555 + 32 - info->first_gp_reg_save
20556 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
20557 + 63 + 1 - info->first_fp_reg_save);
20559 strcpy (rname, ((crtl->calls_eh_return) ?
20560 "*eh_rest_world_r10" : "*rest_world"));
20561 alloc_rname = ggc_strdup (rname);
20563 j = 0;
20564 RTVEC_ELT (p, j++) = ret_rtx;
20565 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20566 gen_rtx_REG (Pmode,
20567 LR_REGNO));
20568 RTVEC_ELT (p, j++)
20569 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
20570 /* The instruction pattern requires a clobber here;
20571 it is shared with the restVEC helper. */
20572 RTVEC_ELT (p, j++)
20573 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
20576 /* CR register traditionally saved as CR2. */
20577 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
20578 RTVEC_ELT (p, j++)
20579 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
20580 if (flag_shrink_wrap)
20582 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20583 gen_rtx_REG (Pmode, LR_REGNO),
20584 cfa_restores);
20585 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20589 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20591 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20592 RTVEC_ELT (p, j++)
20593 = gen_frame_load (reg,
20594 frame_reg_rtx, info->gp_save_offset + reg_size * i);
20595 if (flag_shrink_wrap)
20596 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20598 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20600 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
20601 RTVEC_ELT (p, j++)
20602 = gen_frame_load (reg,
20603 frame_reg_rtx, info->altivec_save_offset + 16 * i);
20604 if (flag_shrink_wrap)
20605 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20607 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
20609 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20610 ? DFmode : SFmode),
20611 info->first_fp_reg_save + i);
20612 RTVEC_ELT (p, j++)
20613 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
20614 if (flag_shrink_wrap)
20615 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20617 RTVEC_ELT (p, j++)
20618 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
20619 RTVEC_ELT (p, j++)
20620 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
20621 RTVEC_ELT (p, j++)
20622 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
20623 RTVEC_ELT (p, j++)
20624 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
20625 RTVEC_ELT (p, j++)
20626 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
20627 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
20629 if (flag_shrink_wrap)
20631 REG_NOTES (insn) = cfa_restores;
20632 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20633 RTX_FRAME_RELATED_P (insn) = 1;
20635 return;
20638 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
20639 if (info->push_p)
20640 frame_off = info->total_size;
20642 /* Restore AltiVec registers if we must do so before adjusting the
20643 stack. */
20644 if (TARGET_ALTIVEC_ABI
20645 && info->altivec_size != 0
20646 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20647 || (DEFAULT_ABI != ABI_V4
20648 && offset_below_red_zone_p (info->altivec_save_offset))))
20650 int i;
20651 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20653 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20654 if (use_backchain_to_restore_sp)
20656 int frame_regno = 11;
20658 if ((strategy & REST_INLINE_VRS) == 0)
20660 /* Of r11 and r12, select the one not clobbered by an
20661 out-of-line restore function for the frame register. */
20662 frame_regno = 11 + 12 - scratch_regno;
20664 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
20665 emit_move_insn (frame_reg_rtx,
20666 gen_rtx_MEM (Pmode, sp_reg_rtx));
20667 frame_off = 0;
20669 else if (frame_pointer_needed)
20670 frame_reg_rtx = hard_frame_pointer_rtx;
20672 if ((strategy & REST_INLINE_VRS) == 0)
20674 int end_save = info->altivec_save_offset + info->altivec_size;
20675 int ptr_off;
20676 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20677 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20679 if (end_save + frame_off != 0)
20681 rtx offset = GEN_INT (end_save + frame_off);
20683 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20685 else
20686 emit_move_insn (ptr_reg, frame_reg_rtx);
20688 ptr_off = -end_save;
20689 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20690 info->altivec_save_offset + ptr_off,
20691 0, V4SImode, SAVRES_VR);
20693 else
20695 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20696 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20698 rtx addr, areg, mem, reg;
20700 areg = gen_rtx_REG (Pmode, 0);
20701 emit_move_insn
20702 (areg, GEN_INT (info->altivec_save_offset
20703 + frame_off
20704 + 16 * (i - info->first_altivec_reg_save)));
20706 /* AltiVec addressing mode is [reg+reg]. */
20707 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20708 mem = gen_frame_mem (V4SImode, addr);
20710 reg = gen_rtx_REG (V4SImode, i);
20711 emit_move_insn (reg, mem);
20715 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20716 if (((strategy & REST_INLINE_VRS) == 0
20717 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20718 && (flag_shrink_wrap
20719 || (offset_below_red_zone_p
20720 (info->altivec_save_offset
20721 + 16 * (i - info->first_altivec_reg_save)))))
20723 rtx reg = gen_rtx_REG (V4SImode, i);
20724 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20728 /* Restore VRSAVE if we must do so before adjusting the stack. */
20729 if (TARGET_ALTIVEC
20730 && TARGET_ALTIVEC_VRSAVE
20731 && info->vrsave_mask != 0
20732 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20733 || (DEFAULT_ABI != ABI_V4
20734 && offset_below_red_zone_p (info->vrsave_save_offset))))
20736 rtx reg;
20738 if (frame_reg_rtx == sp_reg_rtx)
20740 if (use_backchain_to_restore_sp)
20742 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20743 emit_move_insn (frame_reg_rtx,
20744 gen_rtx_MEM (Pmode, sp_reg_rtx));
20745 frame_off = 0;
20747 else if (frame_pointer_needed)
20748 frame_reg_rtx = hard_frame_pointer_rtx;
20751 reg = gen_rtx_REG (SImode, 12);
20752 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20753 info->vrsave_save_offset + frame_off));
20755 emit_insn (generate_set_vrsave (reg, info, 1));
20758 insn = NULL_RTX;
20759 /* If we have a large stack frame, restore the old stack pointer
20760 using the backchain. */
20761 if (use_backchain_to_restore_sp)
20763 if (frame_reg_rtx == sp_reg_rtx)
20765 /* Under V.4, don't reset the stack pointer until after we're done
20766 loading the saved registers. */
20767 if (DEFAULT_ABI == ABI_V4)
20768 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20770 insn = emit_move_insn (frame_reg_rtx,
20771 gen_rtx_MEM (Pmode, sp_reg_rtx));
20772 frame_off = 0;
20774 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20775 && DEFAULT_ABI == ABI_V4)
20776 /* frame_reg_rtx has been set up by the altivec restore. */
20777 ;
20778 else
20780 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
20781 frame_reg_rtx = sp_reg_rtx;
20784 /* If we have a frame pointer, we can restore the old stack pointer
20785 from it. */
20786 else if (frame_pointer_needed)
20788 frame_reg_rtx = sp_reg_rtx;
20789 if (DEFAULT_ABI == ABI_V4)
20790 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20791 /* Prevent reordering memory accesses against stack pointer restore. */
20792 else if (cfun->calls_alloca
20793 || offset_below_red_zone_p (-info->total_size))
20794 rs6000_emit_stack_tie (frame_reg_rtx, true);
20796 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
20797 GEN_INT (info->total_size)));
20798 frame_off = 0;
20800 else if (info->push_p
20801 && DEFAULT_ABI != ABI_V4
20802 && !crtl->calls_eh_return)
20804 /* Prevent reordering memory accesses against stack pointer restore. */
20805 if (cfun->calls_alloca
20806 || offset_below_red_zone_p (-info->total_size))
20807 rs6000_emit_stack_tie (frame_reg_rtx, false);
20808 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
20809 GEN_INT (info->total_size)));
20810 frame_off = 0;
20812 if (insn && frame_reg_rtx == sp_reg_rtx)
20814 if (cfa_restores)
20816 REG_NOTES (insn) = cfa_restores;
20817 cfa_restores = NULL_RTX;
20819 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20820 RTX_FRAME_RELATED_P (insn) = 1;
20823 /* Restore AltiVec registers if we have not done so already. */
20824 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20825 && TARGET_ALTIVEC_ABI
20826 && info->altivec_size != 0
20827 && (DEFAULT_ABI == ABI_V4
20828 || !offset_below_red_zone_p (info->altivec_save_offset)))
20830 int i;
20832 if ((strategy & REST_INLINE_VRS) == 0)
20834 int end_save = info->altivec_save_offset + info->altivec_size;
20835 int ptr_off;
20836 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20837 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20838 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20840 if (end_save + frame_off != 0)
20842 rtx offset = GEN_INT (end_save + frame_off);
20844 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20846 else
20847 emit_move_insn (ptr_reg, frame_reg_rtx);
20849 ptr_off = -end_save;
20850 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20851 info->altivec_save_offset + ptr_off,
20852 0, V4SImode, SAVRES_VR);
20853 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20855 /* Frame reg was clobbered by out-of-line save. Restore it
20856 from ptr_reg, and if we are calling out-of-line gpr or
20857 fpr restore set up the correct pointer and offset. */
20858 unsigned newptr_regno = 1;
20859 if (!restoring_GPRs_inline)
20861 bool lr = info->gp_save_offset + info->gp_size == 0;
20862 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20863 newptr_regno = ptr_regno_for_savres (sel);
20864 end_save = info->gp_save_offset + info->gp_size;
20866 else if (!restoring_FPRs_inline)
20868 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
20869 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20870 newptr_regno = ptr_regno_for_savres (sel);
20871 end_save = info->fp_save_offset + info->fp_size;
20874 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
20875 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
20877 if (end_save + ptr_off != 0)
20879 rtx offset = GEN_INT (end_save + ptr_off);
20881 frame_off = -end_save;
20882 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
20884 else
20886 frame_off = ptr_off;
20887 emit_move_insn (frame_reg_rtx, ptr_reg);
20891 else
20893 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20894 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20896 rtx addr, areg, mem, reg;
20898 areg = gen_rtx_REG (Pmode, 0);
20899 emit_move_insn
20900 (areg, GEN_INT (info->altivec_save_offset
20901 + frame_off
20902 + 16 * (i - info->first_altivec_reg_save)));
20904 /* AltiVec addressing mode is [reg+reg]. */
20905 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20906 mem = gen_frame_mem (V4SImode, addr);
20908 reg = gen_rtx_REG (V4SImode, i);
20909 emit_move_insn (reg, mem);
20913 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20914 if (((strategy & REST_INLINE_VRS) == 0
20915 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20916 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20918 rtx reg = gen_rtx_REG (V4SImode, i);
20919 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20923 /* Restore VRSAVE if we have not done so already. */
20924 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20925 && TARGET_ALTIVEC
20926 && TARGET_ALTIVEC_VRSAVE
20927 && info->vrsave_mask != 0
20928 && (DEFAULT_ABI == ABI_V4
20929 || !offset_below_red_zone_p (info->vrsave_save_offset)))
20931 rtx reg;
20933 reg = gen_rtx_REG (SImode, 12);
20934 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20935 info->vrsave_save_offset + frame_off));
20937 emit_insn (generate_set_vrsave (reg, info, 1));
20940 /* If we exit by an out-of-line restore function on ABI_V4 then that
20941 function will deallocate the stack, so we don't need to worry
20942 about the unwinder restoring cr from an invalid stack frame
20943 location. */
20944 exit_func = (!restoring_FPRs_inline
20945 || (!restoring_GPRs_inline
20946 && info->first_fp_reg_save == 64));
20948 /* Get the old lr if we saved it. If we are restoring registers
20949 out-of-line, then the out-of-line routines can do this for us. */
20950 if (restore_lr && restoring_GPRs_inline)
20951 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
20953 /* Get the old cr if we saved it. */
20954 if (info->cr_save_p)
20956 unsigned cr_save_regno = 12;
20958 if (!restoring_GPRs_inline)
20960 /* Ensure we don't use the register used by the out-of-line
20961 gpr register restore below. */
20962 bool lr = info->gp_save_offset + info->gp_size == 0;
20963 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20964 int gpr_ptr_regno = ptr_regno_for_savres (sel);
20966 if (gpr_ptr_regno == 12)
20967 cr_save_regno = 11;
20968 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
20970 else if (REGNO (frame_reg_rtx) == 12)
20971 cr_save_regno = 11;
20973 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
20974 info->cr_save_offset + frame_off,
20975 exit_func);
20978 /* Set LR here to try to overlap restores below. */
20979 if (restore_lr && restoring_GPRs_inline)
20980 restore_saved_lr (0, exit_func);
20982 /* Load exception handler data registers, if needed. */
20983 if (crtl->calls_eh_return)
20985 unsigned int i, regno;
20987 if (TARGET_AIX)
20989 rtx reg = gen_rtx_REG (reg_mode, 2);
20990 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20991 frame_off + 5 * reg_size));
20994 for (i = 0; ; ++i)
20996 rtx mem;
20998 regno = EH_RETURN_DATA_REGNO (i);
20999 if (regno == INVALID_REGNUM)
21000 break;
21002 /* Note: possible use of r0 here to address SPE regs. */
21003 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
21004 info->ehrd_offset + frame_off
21005 + reg_size * (int) i);
21007 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
21011 /* Restore GPRs. This is done as a PARALLEL if we are using
21012 the load-multiple instructions. */
21013 if (TARGET_SPE_ABI
21014 && info->spe_64bit_regs_used
21015 && info->first_gp_reg_save != 32)
21017 /* Determine whether we can address all of the registers that need
21018 to be saved with an offset from frame_reg_rtx that fits in
21019 the small const field for SPE memory instructions. */
21020 int spe_regs_addressable
21021 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
21022 + reg_size * (32 - info->first_gp_reg_save - 1))
21023 && restoring_GPRs_inline);
21025 if (!spe_regs_addressable)
21027 int ool_adjust = 0;
21028 rtx old_frame_reg_rtx = frame_reg_rtx;
21029 /* Make r11 point to the start of the SPE save area. We worried about
21030 not clobbering it when we were saving registers in the prologue.
21031 There's no need to worry here because the static chain is passed
21032 anew to every function. */
21034 if (!restoring_GPRs_inline)
21035 ool_adjust = 8 * (info->first_gp_reg_save
21036 - (FIRST_SAVRES_REGISTER + 1));
21037 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
21038 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
21039 GEN_INT (info->spe_gp_save_offset
21040 + frame_off
21041 - ool_adjust)));
21042 /* Keep the invariant that frame_reg_rtx + frame_off points
21043 at the top of the stack frame. */
21044 frame_off = -info->spe_gp_save_offset + ool_adjust;
21047 if (restoring_GPRs_inline)
21049 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
21051 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21052 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21054 rtx offset, addr, mem, reg;
21056 /* We're doing all this to ensure that the immediate offset
21057 fits into the immediate field of 'evldd'. */
21058 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
21060 offset = GEN_INT (spe_offset + reg_size * i);
21061 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
21062 mem = gen_rtx_MEM (V2SImode, addr);
21063 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
21065 emit_move_insn (reg, mem);
21068 else
21069 rs6000_emit_savres_rtx (info, frame_reg_rtx,
21070 info->spe_gp_save_offset + frame_off,
21071 info->lr_save_offset + frame_off,
21072 reg_mode,
21073 SAVRES_GPR | SAVRES_LR);
21075 else if (!restoring_GPRs_inline)
21077 /* We are jumping to an out-of-line function. */
21078 rtx ptr_reg;
21079 int end_save = info->gp_save_offset + info->gp_size;
21080 bool can_use_exit = end_save == 0;
21081 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
21082 int ptr_off;
21084 /* Emit stack reset code if we need it. */
21085 ptr_regno = ptr_regno_for_savres (sel);
21086 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
21087 if (can_use_exit)
21088 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21089 else if (end_save + frame_off != 0)
21090 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
21091 GEN_INT (end_save + frame_off)));
21092 else if (REGNO (frame_reg_rtx) != ptr_regno)
21093 emit_move_insn (ptr_reg, frame_reg_rtx);
21094 if (REGNO (frame_reg_rtx) == ptr_regno)
21095 frame_off = -end_save;
21097 if (can_use_exit && info->cr_save_p)
21098 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
21100 ptr_off = -end_save;
21101 rs6000_emit_savres_rtx (info, ptr_reg,
21102 info->gp_save_offset + ptr_off,
21103 info->lr_save_offset + ptr_off,
21104 reg_mode, sel);
21106 else if (using_load_multiple)
21108 rtvec p;
21109 p = rtvec_alloc (32 - info->first_gp_reg_save);
21110 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21111 RTVEC_ELT (p, i)
21112 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21113 frame_reg_rtx,
21114 info->gp_save_offset + frame_off + reg_size * i);
21115 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
21117 else
21119 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
21120 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
21121 emit_insn (gen_frame_load
21122 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
21123 frame_reg_rtx,
21124 info->gp_save_offset + frame_off + reg_size * i));
21127 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21129 /* If the frame pointer was used then we can't delay emitting
21130 a REG_CFA_DEF_CFA note. This must happen on the insn that
21131 restores the frame pointer, r31. We may have already emitted
21132 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
21133 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
21134 be harmless if emitted. */
21135 if (frame_pointer_needed)
21137 insn = get_last_insn ();
21138 add_reg_note (insn, REG_CFA_DEF_CFA,
21139 plus_constant (Pmode, frame_reg_rtx, frame_off));
21140 RTX_FRAME_RELATED_P (insn) = 1;
21143 /* Set up cfa_restores. We always need these when
21144 shrink-wrapping. If not shrink-wrapping then we only need
21145 the cfa_restore when the stack location is no longer valid.
21146 The cfa_restores must be emitted on or before the insn that
21147 invalidates the stack, and of course must not be emitted
21148 before the insn that actually does the restore. The latter
21149 is why it is a bad idea to emit the cfa_restores as a group
21150 on the last instruction here that actually does a restore:
21151 That insn may be reordered with respect to others doing
21152 restores. */
21153 if (flag_shrink_wrap
21154 && !restoring_GPRs_inline
21155 && info->first_fp_reg_save == 64)
21156 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21158 for (i = info->first_gp_reg_save; i < 32; i++)
21159 if (!restoring_GPRs_inline
21160 || using_load_multiple
21161 || rs6000_reg_live_or_pic_offset_p (i))
21163 rtx reg = gen_rtx_REG (reg_mode, i);
21165 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21169 if (!restoring_GPRs_inline
21170 && info->first_fp_reg_save == 64)
21172 /* We are jumping to an out-of-line function. */
21173 if (cfa_restores)
21174 emit_cfa_restores (cfa_restores);
21175 return;
21178 if (restore_lr && !restoring_GPRs_inline)
21180 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
21181 restore_saved_lr (0, exit_func);
21184 /* Restore fpr's if we need to do it without calling a function. */
21185 if (restoring_FPRs_inline)
21186 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21187 if (save_reg_p (info->first_fp_reg_save + i))
21189 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
21190 ? DFmode : SFmode),
21191 info->first_fp_reg_save + i);
21192 emit_insn (gen_frame_load (reg, frame_reg_rtx,
21193 info->fp_save_offset + frame_off + 8 * i));
21194 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
21195 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
21198 /* If we saved cr, restore it here. Just those that were used. */
21199 if (info->cr_save_p)
21200 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
21202 /* If this is V.4, unwind the stack pointer after all of the loads
21203 have been done, or set up r11 if we are restoring fp out of line. */
21204 ptr_regno = 1;
21205 if (!restoring_FPRs_inline)
21207 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21208 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
21209 ptr_regno = ptr_regno_for_savres (sel);
21212 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
21213 if (REGNO (frame_reg_rtx) == ptr_regno)
21214 frame_off = 0;
21216 if (insn && restoring_FPRs_inline)
21218 if (cfa_restores)
21220 REG_NOTES (insn) = cfa_restores;
21221 cfa_restores = NULL_RTX;
21223 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21224 RTX_FRAME_RELATED_P (insn) = 1;
21227 if (crtl->calls_eh_return)
21229 rtx sa = EH_RETURN_STACKADJ_RTX;
21230 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21233 if (!sibcall)
21235 rtvec p;
21236 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21237 if (! restoring_FPRs_inline)
21239 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21240 RTVEC_ELT (p, 0) = ret_rtx;
21242 else
21244 if (cfa_restores)
21246 /* We can't hang the cfa_restores off a simple return,
21247 since the shrink-wrap code sometimes uses an existing
21248 return. This means there might be a path from
21249 pre-prologue code to this return, and dwarf2cfi code
21250 wants the eh_frame unwinder state to be the same on
21251 all paths to any point. So we need to emit the
21252 cfa_restores before the return. For -m64 we really
21253 don't need epilogue cfa_restores at all, except for
21254 this irritating dwarf2cfi with shrink-wrap
21255 requirement; the stack red-zone means eh_frame info
21256 from the prologue telling the unwinder to restore
21257 from the stack is perfectly good right to the end of
21258 the function. */
21259 emit_insn (gen_blockage ());
21260 emit_cfa_restores (cfa_restores);
21261 cfa_restores = NULL_RTX;
21263 p = rtvec_alloc (2);
21264 RTVEC_ELT (p, 0) = simple_return_rtx;
21267 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21268 ? gen_rtx_USE (VOIDmode,
21269 gen_rtx_REG (Pmode, LR_REGNO))
21270 : gen_rtx_CLOBBER (VOIDmode,
21271 gen_rtx_REG (Pmode, LR_REGNO)));
21273 /* If we have to restore more than two FP registers, branch to the
21274 restore function. It will return to our caller. */
21275 if (! restoring_FPRs_inline)
21277 int i;
21278 rtx sym;
21280 if (flag_shrink_wrap)
21281 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21283 sym = rs6000_savres_routine_sym (info,
21284 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21285 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21286 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21287 gen_rtx_REG (Pmode,
21288 DEFAULT_ABI == ABI_AIX
21289 ? 1 : 11));
21290 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21292 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21294 RTVEC_ELT (p, i + 4)
21295 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21296 if (flag_shrink_wrap)
21297 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21298 cfa_restores);
21302 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21305 if (cfa_restores)
21307 if (sibcall)
21308 /* Ensure the cfa_restores are hung off an insn that won't
21309 be reordered above other restores. */
21310 emit_insn (gen_blockage ());
21312 emit_cfa_restores (cfa_restores);
21316 /* Write function epilogue. */
21318 static void
21319 rs6000_output_function_epilogue (FILE *file,
21320 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21322 #if TARGET_MACHO
21323 macho_branch_islands ();
21324 /* Mach-O doesn't support labels at the end of objects, so if
21325 it looks like we might want one, insert a NOP. */
21327 rtx insn = get_last_insn ();
21328 rtx deleted_debug_label = NULL_RTX;
21329 while (insn
21330 && NOTE_P (insn)
21331 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21333 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
21334 notes; instead set their CODE_LABEL_NUMBER to -1, as
21335 otherwise there would be code generation differences
21336 between -g and -g0. */
21337 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21338 deleted_debug_label = insn;
21339 insn = PREV_INSN (insn);
21341 if (insn
21342 && (LABEL_P (insn)
21343 || (NOTE_P (insn)
21344 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21345 fputs ("\tnop\n", file);
21346 else if (deleted_debug_label)
21347 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21348 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21349 CODE_LABEL_NUMBER (insn) = -1;
21351 #endif
21353 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21354 on its format.
21356 We don't output a traceback table if -finhibit-size-directive was
21357 used. The documentation for -finhibit-size-directive reads
21358 ``don't output a @code{.size} assembler directive, or anything
21359 else that would cause trouble if the function is split in the
21360 middle, and the two halves are placed at locations far apart in
21361 memory.'' The traceback table has this property, since it
21362 includes the offset from the start of the function to the
21363 traceback table itself.
21365 System V.4 PowerPC targets (and the embedded ABI derived from them) use a
21366 different traceback table. */
21367 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21368 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21370 const char *fname = NULL;
21371 const char *language_string = lang_hooks.name;
21372 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21373 int i;
21374 int optional_tbtab;
21375 rs6000_stack_t *info = rs6000_stack_info ();
21377 if (rs6000_traceback == traceback_full)
21378 optional_tbtab = 1;
21379 else if (rs6000_traceback == traceback_part)
21380 optional_tbtab = 0;
21381 else
21382 optional_tbtab = !optimize_size && !TARGET_ELF;
21384 if (optional_tbtab)
21386 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21387 while (*fname == '.') /* V.4 encodes . in the name */
21388 fname++;
21390 /* Need label immediately before tbtab, so we can compute
21391 its offset from the function start. */
21392 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21393 ASM_OUTPUT_LABEL (file, fname);
21396 /* The .tbtab pseudo-op can only be used for the first eight
21397 expressions, since it can't handle the possibly variable
21398 length fields that follow. However, if you omit the optional
21399 fields, the assembler outputs zeros for all optional fields
21400 anyway, giving each variable length field its minimum length
21401 (as defined in sys/debug.h). Thus we cannot use the .tbtab
21402 pseudo-op at all. */
21404 /* An all-zero word flags the start of the tbtab, for debuggers
21405 that have to find it by searching forward from the entry
21406 point or from the current pc. */
21407 fputs ("\t.long 0\n", file);
21409 /* Tbtab format type. Use format type 0. */
21410 fputs ("\t.byte 0,", file);
21412 /* Language type. Unfortunately, there does not seem to be any
21413 official way to discover the language being compiled, so we
21414 use language_string.
21415 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21416 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21417 a number, so for now use 9. LTO and Go aren't assigned numbers
21418 either, so for now use 0. */
21419 if (! strcmp (language_string, "GNU C")
21420 || ! strcmp (language_string, "GNU GIMPLE")
21421 || ! strcmp (language_string, "GNU Go"))
21422 i = 0;
21423 else if (! strcmp (language_string, "GNU F77")
21424 || ! strcmp (language_string, "GNU Fortran"))
21425 i = 1;
21426 else if (! strcmp (language_string, "GNU Pascal"))
21427 i = 2;
21428 else if (! strcmp (language_string, "GNU Ada"))
21429 i = 3;
21430 else if (! strcmp (language_string, "GNU C++")
21431 || ! strcmp (language_string, "GNU Objective-C++"))
21432 i = 9;
21433 else if (! strcmp (language_string, "GNU Java"))
21434 i = 13;
21435 else if (! strcmp (language_string, "GNU Objective-C"))
21436 i = 14;
21437 else
21438 gcc_unreachable ();
21439 fprintf (file, "%d,", i);
21441 /* 8 single bit fields: global linkage (not set for C extern linkage,
21442 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21443 from start of procedure stored in tbtab, internal function, function
21444 has controlled storage, function has no toc, function uses fp,
21445 function logs/aborts fp operations. */
21446 /* Assume that fp operations are used if any fp reg must be saved. */
21447 fprintf (file, "%d,",
21448 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
21450 /* 6 bitfields: function is interrupt handler, name present in
21451 proc table, function calls alloca, on condition directives
21452 (controls stack walks, 3 bits), saves condition reg, saves
21453 link reg. */
21454 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21455 set up as a frame pointer, even when there is no alloca call. */
21456 fprintf (file, "%d,",
21457 ((optional_tbtab << 6)
21458 | ((optional_tbtab & frame_pointer_needed) << 5)
21459 | (info->cr_save_p << 1)
21460 | (info->lr_save_p)));
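/* Worked example (added for clarity): with optional_tbtab = 1,
   frame_pointer_needed = 1, cr_save_p = 1 and lr_save_p = 1, the byte
   emitted above is (1 << 6) | (1 << 5) | (1 << 1) | 1 = 99.  */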
21462 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21463 (6 bits). */
21464 fprintf (file, "%d,",
21465 (info->push_p << 7) | (64 - info->first_fp_reg_save));
21467 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21468 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21470 if (optional_tbtab)
21472 /* Compute the parameter info from the function decl argument
21473 list. */
21474 tree decl;
21475 int next_parm_info_bit = 31;
21477 for (decl = DECL_ARGUMENTS (current_function_decl);
21478 decl; decl = DECL_CHAIN (decl))
21480 rtx parameter = DECL_INCOMING_RTL (decl);
21481 enum machine_mode mode = GET_MODE (parameter);
21483 if (GET_CODE (parameter) == REG)
21485 if (SCALAR_FLOAT_MODE_P (mode))
21487 int bits;
21489 float_parms++;
21491 switch (mode)
21493 case SFmode:
21494 case SDmode:
21495 bits = 0x2;
21496 break;
21498 case DFmode:
21499 case DDmode:
21500 case TFmode:
21501 case TDmode:
21502 bits = 0x3;
21503 break;
21505 default:
21506 gcc_unreachable ();
21509 /* If only one bit will fit, don't or in this entry. */
21510 if (next_parm_info_bit > 0)
21511 parm_info |= (bits << (next_parm_info_bit - 1));
21512 next_parm_info_bit -= 2;
21514 else
21516 fixed_parms += ((GET_MODE_SIZE (mode)
21517 + (UNITS_PER_WORD - 1))
21518 / UNITS_PER_WORD);
21519 next_parm_info_bit -= 1;
21525 /* Number of fixed point parameters. */
21526 /* This is actually the number of words of fixed point parameters; thus
21527 an 8 byte struct counts as 2, and thus the maximum value is 8. */
21528 fprintf (file, "%d,", fixed_parms);
21530 /* 2 bitfields: number of floating point parameters (7 bits), parameters
21531 all on stack. */
21532 /* This is actually the number of fp registers that hold parameters;
21533 and thus the maximum value is 13. */
21534 /* Set parameters on stack bit if parameters are not in their original
21535 registers, regardless of whether they are on the stack? Xlc
21536 seems to set the bit when not optimizing. */
21537 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
21539 if (! optional_tbtab)
21540 return;
21542 /* Optional fields follow. Some are variable length. */
21544 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
21545 11 double float. */
21546 /* There is an entry for each parameter in a register, in the order that
21547 they occur in the parameter list. Any intervening arguments on the
21548 stack are ignored. If the list overflows a long (max possible length
21549 34 bits) then completely leave off all elements that don't fit. */
21550 /* Only emit this long if there was at least one parameter. */
21551 if (fixed_parms || float_parms)
21552 fprintf (file, "\t.long %d\n", parm_info);
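/* Worked example (added for clarity): for a function taking
   (int, double, float) with all three passed in registers, the loop
   above yields fixed_parms = 1, float_parms = 2, and parm_info read
   left to right as 0 (fixed), 11 (double), 10 (single):
   parm_info = (0x3 << 29) | (0x2 << 27) = 0x70000000.  */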
21554 /* Offset from start of code to tb table. */
21555 fputs ("\t.long ", file);
21556 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21557 RS6000_OUTPUT_BASENAME (file, fname);
21558 putc ('-', file);
21559 rs6000_output_function_entry (file, fname);
21560 putc ('\n', file);
21562 /* Interrupt handler mask. */
21563 /* Omit this long, since we never set the interrupt handler bit
21564 above. */
21566 /* Number of CTL (controlled storage) anchors. */
21567 /* Omit this long, since the has_ctl bit is never set above. */
21569 /* Displacement into stack of each CTL anchor. */
21570 /* Omit this list of longs, because there are no CTL anchors. */
21572 /* Length of function name. */
21573 if (*fname == '*')
21574 ++fname;
21575 fprintf (file, "\t.short %d\n", (int) strlen (fname));
21577 /* Function name. */
21578 assemble_string (fname, strlen (fname));
21580 /* Register for alloca automatic storage; this is always reg 31.
21581 Only emit this if the alloca bit was set above. */
21582 if (frame_pointer_needed)
21583 fputs ("\t.byte 31\n", file);
21585 fputs ("\t.align 2\n", file);
21589 /* A C compound statement that outputs the assembler code for a thunk
21590 function, used to implement C++ virtual function calls with
21591 multiple inheritance. The thunk acts as a wrapper around a virtual
21592 function, adjusting the implicit object parameter before handing
21593 control off to the real function.
21595 First, emit code to add the integer DELTA to the location that
21596 contains the incoming first argument. Assume that this argument
21597 contains a pointer, and is the one used to pass the `this' pointer
21598 in C++. This is the incoming argument *before* the function
21599 prologue, e.g. `%o0' on a sparc. The addition must preserve the
21600 values of all other incoming arguments.
21602 After the addition, emit code to jump to FUNCTION, which is a
21603 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
21604 not touch the return address. Hence returning from FUNCTION will
21605 return to whoever called the current `thunk'.
21607 The effect must be as if FUNCTION had been called directly with the
21608 adjusted first argument. This macro is responsible for emitting
21609 all of the code for a thunk function; output_function_prologue()
21610 and output_function_epilogue() are not invoked.
21612 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
21613 been extracted from it.) It might possibly be useful on some
21614 targets, but probably not.
21616 If you do not define this macro, the target-independent code in the
21617 C++ frontend will generate a less efficient heavyweight thunk that
21618 calls FUNCTION instead of jumping to it. The generic approach does
21619 not support varargs. */
21621 static void
21622 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
21623 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
21624 tree function)
21626 rtx this_rtx, insn, funexp;
21628 reload_completed = 1;
21629 epilogue_completed = 1;
21631 /* Mark the end of the (empty) prologue. */
21632 emit_note (NOTE_INSN_PROLOGUE_END);
21634 /* Find the "this" pointer. If the function returns a structure,
21635 the structure return pointer is in r3. */
21636 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
21637 this_rtx = gen_rtx_REG (Pmode, 4);
21638 else
21639 this_rtx = gen_rtx_REG (Pmode, 3);
21641 /* Apply the constant offset, if required. */
21642 if (delta)
21643 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
21645 /* Apply the offset from the vtable, if required. */
21646 if (vcall_offset)
21648 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
21649 rtx tmp = gen_rtx_REG (Pmode, 12);
21651 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
21652 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
21654 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
21655 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
21657 else
21659 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
21661 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
21663 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
21666 /* Generate a tail call to the target function. */
21667 if (!TREE_USED (function))
21669 assemble_external (function);
21670 TREE_USED (function) = 1;
21672 funexp = XEXP (DECL_RTL (function), 0);
21673 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
21675 #if TARGET_MACHO
21676 if (MACHOPIC_INDIRECT)
21677 funexp = machopic_indirect_call_target (funexp);
21678 #endif
21680 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
21681 generate sibcall RTL explicitly. */
21682 insn = emit_call_insn (
21683 gen_rtx_PARALLEL (VOIDmode,
21684 gen_rtvec (4,
21685 gen_rtx_CALL (VOIDmode,
21686 funexp, const0_rtx),
21687 gen_rtx_USE (VOIDmode, const0_rtx),
21688 gen_rtx_USE (VOIDmode,
21689 gen_rtx_REG (SImode,
21690 LR_REGNO)),
21691 simple_return_rtx)));
21692 SIBLING_CALL_P (insn) = 1;
21693 emit_barrier ();
21695 /* Run just enough of rest_of_compilation to get the insns emitted.
21696 There's not really enough bulk here to make other passes such as
21697 instruction scheduling worth while. Note that use_thunk calls
21698 assemble_start_function and assemble_end_function. */
21699 insn = get_insns ();
21700 shorten_branches (insn);
21701 final_start_function (insn, file, 1);
21702 final (insn, file, 1);
21703 final_end_function ();
21705 reload_completed = 0;
21706 epilogue_completed = 0;
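/* Illustrative C sketch (added; DELTA, VCALL_OFFSET and `function' are
   stand-ins for the arguments above, not real identifiers):

     void *thunk (void *this_ptr)
     {
       char *p = (char *) this_ptr + DELTA;            // constant offset
       if (VCALL_OFFSET)                               // vtable offset
         p += *(ptrdiff_t *) (*(char **) p + VCALL_OFFSET);
       return function (p);   // emitted as a sibcall, so the thunk
     }                        // never appears on the call stack

   When the callee returns an aggregate, r3 carries the return-slot
   pointer and the `this' adjustment happens in r4 instead.  */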
21709 /* A quick summary of the various types of 'constant-pool tables'
21710 under PowerPC:
21712 Target Flags Name One table per
21713 AIX (none) AIX TOC object file
21714 AIX -mfull-toc AIX TOC object file
21715 AIX -mminimal-toc AIX minimal TOC translation unit
21716 SVR4/EABI (none) SVR4 SDATA object file
21717 SVR4/EABI -fpic SVR4 pic object file
21718 SVR4/EABI -fPIC SVR4 PIC translation unit
21719 SVR4/EABI -mrelocatable EABI TOC function
21720 SVR4/EABI -maix AIX TOC object file
21721 SVR4/EABI -maix -mminimal-toc
21722 AIX minimal TOC translation unit
21724 Name Reg. Set by entries contains:
21725 made by addrs? fp? sum?
21727 AIX TOC 2 crt0 as Y option option
21728 AIX minimal TOC 30 prolog gcc Y Y option
21729 SVR4 SDATA 13 crt0 gcc N Y N
21730 SVR4 pic 30 prolog ld Y not yet N
21731 SVR4 PIC 30 prolog gcc Y option option
21732 EABI TOC 30 prolog gcc Y option option
21736 /* Hash functions for the hash table. */
21738 static unsigned
21739 rs6000_hash_constant (rtx k)
21741 enum rtx_code code = GET_CODE (k);
21742 enum machine_mode mode = GET_MODE (k);
21743 unsigned result = (code << 3) ^ mode;
21744 const char *format;
21745 int flen, fidx;
21747 format = GET_RTX_FORMAT (code);
21748 flen = strlen (format);
21749 fidx = 0;
21751 switch (code)
21753 case LABEL_REF:
21754 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
21756 case CONST_DOUBLE:
21757 if (mode != VOIDmode)
21758 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
21759 flen = 2;
21760 break;
21762 case CODE_LABEL:
21763 fidx = 3;
21764 break;
21766 default:
21767 break;
21770 for (; fidx < flen; fidx++)
21771 switch (format[fidx])
21773 case 's':
21775 unsigned i, len;
21776 const char *str = XSTR (k, fidx);
21777 len = strlen (str);
21778 result = result * 613 + len;
21779 for (i = 0; i < len; i++)
21780 result = result * 613 + (unsigned) str[i];
21781 break;
21783 case 'u':
21784 case 'e':
21785 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
21786 break;
21787 case 'i':
21788 case 'n':
21789 result = result * 613 + (unsigned) XINT (k, fidx);
21790 break;
21791 case 'w':
21792 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
21793 result = result * 613 + (unsigned) XWINT (k, fidx);
21794 else
21796 size_t i;
21797 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
21798 result = result * 613 + (unsigned) (XWINT (k, fidx)
21799 >> CHAR_BIT * i);
21801 break;
21802 case '0':
21803 break;
21804 default:
21805 gcc_unreachable ();
21808 return result;
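/* Illustrative example (added): hashing (const_int 42) starts from
   result = (CONST_INT << 3) ^ VOIDmode and then folds in the single
   'w' field.  On the common host where sizeof (unsigned) == 4 and
   sizeof (HOST_WIDE_INT) == 8, the 'w' case above mixes the value in
   two 32-bit chunks:

     result = result * 613 + (unsigned) 42;           -- low half
     result = result * 613 + (unsigned) (42 >> 32);   -- high half, 0  */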
21811 static unsigned
21812 toc_hash_function (const void *hash_entry)
21814 const struct toc_hash_struct *thc =
21815 (const struct toc_hash_struct *) hash_entry;
21816 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
21819 /* Compare H1 and H2 for equivalence. */
21821 static int
21822 toc_hash_eq (const void *h1, const void *h2)
21824 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
21825 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
21827 if (((const struct toc_hash_struct *) h1)->key_mode
21828 != ((const struct toc_hash_struct *) h2)->key_mode)
21829 return 0;
21831 return rtx_equal_p (r1, r2);
21834 /* These are the names given by the C++ front-end to vtables, and
21835 vtable-like objects. Ideally, this logic should not be here;
21836 instead, there should be some programmatic way of inquiring as
21837 to whether or not an object is a vtable. */
21839 #define VTABLE_NAME_P(NAME) \
21840 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
21841 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
21842 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
21843 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
21844 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
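/* Examples (added for clarity): "_ZTV4Base" (a vtable) and "_ZTI4Base"
   (its type_info object) satisfy VTABLE_NAME_P, while an ordinary
   mangled function name such as "_Z3foov" does not.  */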
21846 #ifdef NO_DOLLAR_IN_LABEL
21847 /* Return a GGC-allocated character string translating dollar signs in
21848 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
21850 const char *
21851 rs6000_xcoff_strip_dollar (const char *name)
21853 char *strip, *p;
21854 const char *q;
21855 size_t len;
21857 q = (const char *) strchr (name, '$');
21859 if (q == 0 || q == name)
21860 return name;
21862 len = strlen (name);
21863 strip = XALLOCAVEC (char, len + 1);
21864 strcpy (strip, name);
21865 p = strip + (q - name);
21866 while (p)
21868 *p = '_';
21869 p = strchr (p + 1, '$');
21872 return ggc_alloc_string (strip, len);
21874 #endif
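/* Usage example (added for clarity): rs6000_xcoff_strip_dollar
   ("foo$bar$baz") returns "foo_bar_baz", while a name containing no
   '$', or one that starts with '$', is returned unchanged.  */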
21876 void
21877 rs6000_output_symbol_ref (FILE *file, rtx x)
21879 /* Currently C++ toc references to vtables can be emitted before it
21880 is decided whether the vtable is public or private. If this is
21881 the case, then the linker will eventually complain that there is
21882 a reference to an unknown section. Thus, for vtables only,
21883 we emit the TOC reference to reference the symbol and not the
21884 section. */
21885 const char *name = XSTR (x, 0);
21887 if (VTABLE_NAME_P (name))
21889 RS6000_OUTPUT_BASENAME (file, name);
21891 else
21892 assemble_name (file, name);
21895 /* Output a TOC entry. We derive the entry name from what is being
21896 written. */
21898 void
21899 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
21901 char buf[256];
21902 const char *name = buf;
21903 rtx base = x;
21904 HOST_WIDE_INT offset = 0;
21906 gcc_assert (!TARGET_NO_TOC);
21908 /* When the linker won't eliminate them, don't output duplicate
21909 TOC entries (this happens on AIX if there is any kind of TOC,
21910 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
21911 CODE_LABELs. */
21912 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
21914 struct toc_hash_struct *h;
21915 void * * found;
21917 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
21918 time because GGC is not initialized at that point. */
21919 if (toc_hash_table == NULL)
21920 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
21921 toc_hash_eq, NULL);
21923 h = ggc_alloc_toc_hash_struct ();
21924 h->key = x;
21925 h->key_mode = mode;
21926 h->labelno = labelno;
21928 found = htab_find_slot (toc_hash_table, h, INSERT);
21929 if (*found == NULL)
21930 *found = h;
21931 else /* This is indeed a duplicate.
21932 Set this label equal to that label. */
21934 fputs ("\t.set ", file);
21935 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21936 fprintf (file, "%d,", labelno);
21937 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21938 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
21939 found)->labelno));
21940 return;
21944 /* If we're going to put a double constant in the TOC, make sure it's
21945 aligned properly when strict alignment is on. */
21946 if (GET_CODE (x) == CONST_DOUBLE
21947 && STRICT_ALIGNMENT
21948 && GET_MODE_BITSIZE (mode) >= 64
21949 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
21950 ASM_OUTPUT_ALIGN (file, 3);
21953 (*targetm.asm_out.internal_label) (file, "LC", labelno);
21955 /* Handle FP constants specially. Note that if we have a minimal
21956 TOC, things we put here aren't actually in the TOC, so we can allow
21957 FP constants. */
21958 if (GET_CODE (x) == CONST_DOUBLE
21959 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
21961 REAL_VALUE_TYPE rv;
21962 long k[4];
21964 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21965 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21966 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
21967 else
21968 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
21970 if (TARGET_64BIT)
21972 if (TARGET_MINIMAL_TOC)
21973 fputs (DOUBLE_INT_ASM_OP, file);
21974 else
21975 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21976 k[0] & 0xffffffff, k[1] & 0xffffffff,
21977 k[2] & 0xffffffff, k[3] & 0xffffffff);
21978 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
21979 k[0] & 0xffffffff, k[1] & 0xffffffff,
21980 k[2] & 0xffffffff, k[3] & 0xffffffff);
21981 return;
21983 else
21985 if (TARGET_MINIMAL_TOC)
21986 fputs ("\t.long ", file);
21987 else
21988 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21989 k[0] & 0xffffffff, k[1] & 0xffffffff,
21990 k[2] & 0xffffffff, k[3] & 0xffffffff);
21991 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
21992 k[0] & 0xffffffff, k[1] & 0xffffffff,
21993 k[2] & 0xffffffff, k[3] & 0xffffffff);
21994 return;
21997 else if (GET_CODE (x) == CONST_DOUBLE
21998 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
22000 REAL_VALUE_TYPE rv;
22001 long k[2];
22003 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22005 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22006 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
22007 else
22008 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
22010 if (TARGET_64BIT)
22012 if (TARGET_MINIMAL_TOC)
22013 fputs (DOUBLE_INT_ASM_OP, file);
22014 else
22015 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22016 k[0] & 0xffffffff, k[1] & 0xffffffff);
22017 fprintf (file, "0x%lx%08lx\n",
22018 k[0] & 0xffffffff, k[1] & 0xffffffff);
22019 return;
22021 else
22023 if (TARGET_MINIMAL_TOC)
22024 fputs ("\t.long ", file);
22025 else
22026 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
22027 k[0] & 0xffffffff, k[1] & 0xffffffff);
22028 fprintf (file, "0x%lx,0x%lx\n",
22029 k[0] & 0xffffffff, k[1] & 0xffffffff);
22030 return;
22033 else if (GET_CODE (x) == CONST_DOUBLE
22034 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
22036 REAL_VALUE_TYPE rv;
22037 long l;
22039 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
22040 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
22041 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
22042 else
22043 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
22045 if (TARGET_64BIT)
22047 if (TARGET_MINIMAL_TOC)
22048 fputs (DOUBLE_INT_ASM_OP, file);
22049 else
22050 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22051 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
22052 return;
22054 else
22056 if (TARGET_MINIMAL_TOC)
22057 fputs ("\t.long ", file);
22058 else
22059 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
22060 fprintf (file, "0x%lx\n", l & 0xffffffff);
22061 return;
22064 else if (GET_MODE (x) == VOIDmode
22065 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
22067 unsigned HOST_WIDE_INT low;
22068 HOST_WIDE_INT high;
22070 if (GET_CODE (x) == CONST_DOUBLE)
22072 low = CONST_DOUBLE_LOW (x);
22073 high = CONST_DOUBLE_HIGH (x);
22075 else
22076 #if HOST_BITS_PER_WIDE_INT == 32
22078 low = INTVAL (x);
22079 high = (low & 0x80000000) ? ~0 : 0;
22081 #else
22083 low = INTVAL (x) & 0xffffffff;
22084 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
22086 #endif
22088 /* TOC entries are always Pmode-sized, but since this
22089 is a big-endian machine, if we're putting smaller
22090 integer constants in the TOC we have to pad them.
22091 (This is still a win over putting the constants in
22092 a separate constant pool, because then we'd have
22093 to have both a TOC entry _and_ the actual constant.)
22095 For a 32-bit target, CONST_INT values are loaded and shifted
22096 entirely within `low' and can be stored in one TOC entry. */
22098 /* It would be easy to make this work, but it doesn't now. */
22099 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
22101 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
22103 #if HOST_BITS_PER_WIDE_INT == 32
22104 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
22105 POINTER_SIZE, &low, &high, 0);
22106 #else
22107 low |= high << 32;
22108 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
22109 high = (HOST_WIDE_INT) low >> 32;
22110 low &= 0xffffffff;
22111 #endif
22114 if (TARGET_64BIT)
22116 if (TARGET_MINIMAL_TOC)
22117 fputs (DOUBLE_INT_ASM_OP, file);
22118 else
22119 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22120 (long) high & 0xffffffff, (long) low & 0xffffffff);
22121 fprintf (file, "0x%lx%08lx\n",
22122 (long) high & 0xffffffff, (long) low & 0xffffffff);
22123 return;
22125 else
22127 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
22129 if (TARGET_MINIMAL_TOC)
22130 fputs ("\t.long ", file);
22131 else
22132 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
22133 (long) high & 0xffffffff, (long) low & 0xffffffff);
22134 fprintf (file, "0x%lx,0x%lx\n",
22135 (long) high & 0xffffffff, (long) low & 0xffffffff);
22137 else
22139 if (TARGET_MINIMAL_TOC)
22140 fputs ("\t.long ", file);
22141 else
22142 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
22143 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
22145 return;
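/* Illustrative worked example, not part of the original source:
   padding a QImode constant 0x12 for a 32-bit TOC entry.  With
   POINTER_SIZE == 32 and GET_MODE_BITSIZE (QImode) == 8, the value
   is shifted left by 24 bits, so the non-minimal-TOC case above
   emits

       .tc IS_12000000[TC],0x12000000

   leaving the constant in the most-significant byte, where a
   big-endian load of the narrow mode expects to find it.  */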
22149 if (GET_CODE (x) == CONST)
22151 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
22152 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
22154 base = XEXP (XEXP (x, 0), 0);
22155 offset = INTVAL (XEXP (XEXP (x, 0), 1));
22158 switch (GET_CODE (base))
22160 case SYMBOL_REF:
22161 name = XSTR (base, 0);
22162 break;
22164 case LABEL_REF:
22165 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
22166 CODE_LABEL_NUMBER (XEXP (base, 0)));
22167 break;
22169 case CODE_LABEL:
22170 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
22171 break;
22173 default:
22174 gcc_unreachable ();
22177 if (TARGET_MINIMAL_TOC)
22178 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
22179 else
22181 fputs ("\t.tc ", file);
22182 RS6000_OUTPUT_BASENAME (file, name);
22184 if (offset < 0)
22185 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
22186 else if (offset)
22187 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
22189 fputs ("[TC],", file);
22192 /* Currently C++ toc references to vtables can be emitted before it
22193 is decided whether the vtable is public or private. If this is
22194 the case, then the linker will eventually complain that there is
22195 a TOC reference to an unknown section. Thus, for vtables only,
22196 we emit the TOC reference to reference the symbol and not the
22197 section. */
22198 if (VTABLE_NAME_P (name))
22200 RS6000_OUTPUT_BASENAME (file, name);
22201 if (offset < 0)
22202 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
22203 else if (offset > 0)
22204 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
22206 else
22207 output_addr_const (file, x);
22208 putc ('\n', file);
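/* Illustrative example, not part of the original source: for a
   SYMBOL_REF "foo" wrapped in (const (plus (symbol_ref "foo")
   (const_int -8))) with label number 7, the non-minimal-TOC path
   above emits (assuming the AIX "LC.." internal label prefix)
   roughly:

       LC..7:
               .tc foo.N8[TC],foo-8

   The ".N8" suffix keeps TOC entries for different offsets of the
   same symbol distinct.  */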
22211 /* Output an assembler pseudo-op to write an ASCII string of N characters
22212 starting at P to FILE.
22214 On the RS/6000, we have to do this using the .byte operation and
22215 write out special characters outside the quoted string.
22216 Also, the assembler is broken; very long strings are truncated,
22217 so we must artificially break them up early. */
22219 void
22220 output_ascii (FILE *file, const char *p, int n)
22222 char c;
22223 int i, count_string;
22224 const char *for_string = "\t.byte \"";
22225 const char *for_decimal = "\t.byte ";
22226 const char *to_close = NULL;
22228 count_string = 0;
22229 for (i = 0; i < n; i++)
22231 c = *p++;
22232 if (c >= ' ' && c < 0177)
22234 if (for_string)
22235 fputs (for_string, file);
22236 putc (c, file);
22238 /* Write two quotes to get one. */
22239 if (c == '"')
22241 putc (c, file);
22242 ++count_string;
22245 for_string = NULL;
22246 for_decimal = "\"\n\t.byte ";
22247 to_close = "\"\n";
22248 ++count_string;
22250 if (count_string >= 512)
22252 fputs (to_close, file);
22254 for_string = "\t.byte \"";
22255 for_decimal = "\t.byte ";
22256 to_close = NULL;
22257 count_string = 0;
22260 else
22262 if (for_decimal)
22263 fputs (for_decimal, file);
22264 fprintf (file, "%d", c);
22266 for_string = "\n\t.byte \"";
22267 for_decimal = ", ";
22268 to_close = "\n";
22269 count_string = 0;
22273 /* Now close the string if we have written one. Then end the line. */
22274 if (to_close)
22275 fputs (to_close, file);
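/* Illustrative example, not part of the original source: for the
   five input bytes 'A' 'B' '\n' 'C' '"' the loop above emits

       .byte "AB"
       .byte 10
       .byte "C"""

   where the doubled quote encodes a literal '"' inside the quoted
   form and the bare 10 is the non-printable newline byte.  */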
22278 /* Generate a unique section name for FILENAME for a section type
22279 represented by SECTION_DESC. Output goes into BUF.
22281 SECTION_DESC can be any string, as long as it is different for each
22282 possible section type.
22284 We name the section in the same manner as xlc. The name begins with an
22285 underscore followed by the filename (after stripping any leading directory
22286 names) with the last period replaced by the string SECTION_DESC. If
22287 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22288 the name. */
22290 void
22291 rs6000_gen_section_name (char **buf, const char *filename,
22292 const char *section_desc)
22294 const char *q, *after_last_slash, *last_period = 0;
22295 char *p;
22296 int len;
22298 after_last_slash = filename;
22299 for (q = filename; *q; q++)
22301 if (*q == '/')
22302 after_last_slash = q + 1;
22303 else if (*q == '.')
22304 last_period = q;
22307 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22308 *buf = (char *) xmalloc (len);
22310 p = *buf;
22311 *p++ = '_';
22313 for (q = after_last_slash; *q; q++)
22315 if (q == last_period)
22317 strcpy (p, section_desc);
22318 p += strlen (section_desc);
22319 break;
22322 else if (ISALNUM (*q))
22323 *p++ = *q;
22326 if (last_period == 0)
22327 strcpy (p, section_desc);
22328 else
22329 *p = '\0';
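/* Illustrative worked example, not part of the original source:

       rs6000_gen_section_name (&buf, "src/foo-bar.c", ".bss")

   strips the directory prefix, drops the non-alphanumeric '-', and
   replaces the final period with SECTION_DESC, producing
   "_foobar.bss".  Callers conventionally pass a SECTION_DESC with a
   leading period, which is how the dot survives the replacement.  */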
22332 /* Emit profile function. */
22334 void
22335 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22337 /* Non-standard profiling for kernels, which just saves LR then calls
22338 _mcount without worrying about arg saves. The idea is to change
22339 the function prologue as little as possible as it isn't easy to
22340 account for arg save/restore code added just for _mcount. */
22341 if (TARGET_PROFILE_KERNEL)
22342 return;
22344 if (DEFAULT_ABI == ABI_AIX)
22346 #ifndef NO_PROFILE_COUNTERS
22347 # define NO_PROFILE_COUNTERS 0
22348 #endif
22349 if (NO_PROFILE_COUNTERS)
22350 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22351 LCT_NORMAL, VOIDmode, 0);
22352 else
22354 char buf[30];
22355 const char *label_name;
22356 rtx fun;
22358 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22359 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22360 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22362 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22363 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22366 else if (DEFAULT_ABI == ABI_DARWIN)
22368 const char *mcount_name = RS6000_MCOUNT;
22369 int caller_addr_regno = LR_REGNO;
22371 /* Be conservative and always set this, at least for now. */
22372 crtl->uses_pic_offset_table = 1;
22374 #if TARGET_MACHO
22375 /* For PIC code, set up a stub and collect the caller's address
22376 from r0, which is where the prologue puts it. */
22377 if (MACHOPIC_INDIRECT
22378 && crtl->uses_pic_offset_table)
22379 caller_addr_regno = 0;
22380 #endif
22381 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22382 LCT_NORMAL, VOIDmode, 1,
22383 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22387 /* Write function profiler code. */
22389 void
22390 output_function_profiler (FILE *file, int labelno)
22392 char buf[100];
22394 switch (DEFAULT_ABI)
22396 default:
22397 gcc_unreachable ();
22399 case ABI_V4:
22400 if (!TARGET_32BIT)
22402 warning (0, "no profiling of 64-bit code for this ABI");
22403 return;
22405 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22406 fprintf (file, "\tmflr %s\n", reg_names[0]);
22407 if (NO_PROFILE_COUNTERS)
22409 asm_fprintf (file, "\tstw %s,4(%s)\n",
22410 reg_names[0], reg_names[1]);
22412 else if (TARGET_SECURE_PLT && flag_pic)
22414 if (TARGET_LINK_STACK)
22416 char name[32];
22417 get_ppc476_thunk_name (name);
22418 asm_fprintf (file, "\tbl %s\n", name);
22420 else
22421 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22422 asm_fprintf (file, "\tstw %s,4(%s)\n",
22423 reg_names[0], reg_names[1]);
22424 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22425 asm_fprintf (file, "\taddis %s,%s,",
22426 reg_names[12], reg_names[12]);
22427 assemble_name (file, buf);
22428 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
22429 assemble_name (file, buf);
22430 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22432 else if (flag_pic == 1)
22434 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22435 asm_fprintf (file, "\tstw %s,4(%s)\n",
22436 reg_names[0], reg_names[1]);
22437 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22438 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
22439 assemble_name (file, buf);
22440 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22442 else if (flag_pic > 1)
22444 asm_fprintf (file, "\tstw %s,4(%s)\n",
22445 reg_names[0], reg_names[1]);
22446 /* Now, we need to get the address of the label. */
22447 if (TARGET_LINK_STACK)
22449 char name[32];
22450 get_ppc476_thunk_name (name);
22451 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22452 assemble_name (file, buf);
22453 fputs ("-.\n1:", file);
22454 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22455 asm_fprintf (file, "\taddi %s,%s,4\n",
22456 reg_names[11], reg_names[11]);
22458 else
22460 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22461 assemble_name (file, buf);
22462 fputs ("-.\n1:", file);
22463 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22465 asm_fprintf (file, "\tlwz %s,0(%s)\n",
22466 reg_names[0], reg_names[11]);
22467 asm_fprintf (file, "\tadd %s,%s,%s\n",
22468 reg_names[0], reg_names[0], reg_names[11]);
22470 else
22472 asm_fprintf (file, "\tlis %s,", reg_names[12]);
22473 assemble_name (file, buf);
22474 fputs ("@ha\n", file);
22475 asm_fprintf (file, "\tstw %s,4(%s)\n",
22476 reg_names[0], reg_names[1]);
22477 asm_fprintf (file, "\tla %s,", reg_names[0]);
22478 assemble_name (file, buf);
22479 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
22482 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
22483 fprintf (file, "\tbl %s%s\n",
22484 RS6000_MCOUNT, flag_pic ? "@plt" : "");
22485 break;
22487 case ABI_AIX:
22488 case ABI_DARWIN:
22489 if (!TARGET_PROFILE_KERNEL)
22491 /* Don't do anything, done in output_profile_hook (). */
22493 else
22495 gcc_assert (!TARGET_32BIT);
22497 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
22498 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
22500 if (cfun->static_chain_decl != NULL)
22502 asm_fprintf (file, "\tstd %s,24(%s)\n",
22503 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22504 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22505 asm_fprintf (file, "\tld %s,24(%s)\n",
22506 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22508 else
22509 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22511 break;
22517 /* The following variable value is the last issued insn. */
22519 static rtx last_scheduled_insn;
22521 /* The following variable helps to balance issuing of load and
22522 store instructions */
22524 static int load_store_pendulum;
22526 /* Power4 load update and store update instructions are cracked into a
22527 load or store and an integer insn which are executed in the same cycle.
22528 Branches have their own dispatch slot which does not count against the
22529 GCC issue rate, but it changes the program flow so there are no other
22530 instructions to issue in this cycle. */
22532 static int
22533 rs6000_variable_issue_1 (rtx insn, int more)
22535 last_scheduled_insn = insn;
22536 if (GET_CODE (PATTERN (insn)) == USE
22537 || GET_CODE (PATTERN (insn)) == CLOBBER)
22539 cached_can_issue_more = more;
22540 return cached_can_issue_more;
22543 if (insn_terminates_group_p (insn, current_group))
22545 cached_can_issue_more = 0;
22546 return cached_can_issue_more;
22549 /* If the insn has no reservation, leave the issue count unchanged. */
22550 if (recog_memoized (insn) < 0)
22551 return more;
22553 if (rs6000_sched_groups)
22555 if (is_microcoded_insn (insn))
22556 cached_can_issue_more = 0;
22557 else if (is_cracked_insn (insn))
22558 cached_can_issue_more = more > 2 ? more - 2 : 0;
22559 else
22560 cached_can_issue_more = more - 1;
22562 return cached_can_issue_more;
22565 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
22566 return 0;
22568 cached_can_issue_more = more - 1;
22569 return cached_can_issue_more;
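/* Illustrative numbers, not part of the original source: with the
   POWER4/POWER5 issue rate of 5, a cycle that starts with
   more == 5 has 3 slots left after a cracked insn (two issue
   slots), 4 after an ordinary insn, and 0 after a microcoded insn,
   which ends the dispatch group immediately.  */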
22572 static int
22573 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
22575 int r = rs6000_variable_issue_1 (insn, more);
22576 if (verbose)
22577 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
22578 return r;
22581 /* Adjust the cost of a scheduling dependency. Return the new cost of
22582 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
22584 static int
22585 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22587 enum attr_type attr_type;
22589 if (! recog_memoized (insn))
22590 return 0;
22592 switch (REG_NOTE_KIND (link))
22594 case REG_DEP_TRUE:
22596 /* Data dependency; DEP_INSN writes a register that INSN reads
22597 some cycles later. */
22599 /* Separate a load from a narrower, dependent store. */
22600 if (rs6000_sched_groups
22601 && GET_CODE (PATTERN (insn)) == SET
22602 && GET_CODE (PATTERN (dep_insn)) == SET
22603 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
22604 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
22605 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
22606 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
22607 return cost + 14;
22609 attr_type = get_attr_type (insn);
22611 switch (attr_type)
22613 case TYPE_JMPREG:
22614 /* Tell the first scheduling pass about the latency between
22615 a mtctr and bctr (and mtlr and br/blr). The first
22616 scheduling pass will not know about this latency since
22617 the mtctr instruction, which has the latency associated
22618 to it, will be generated by reload. */
22619 return 4;
22620 case TYPE_BRANCH:
22621 /* Leave some extra cycles between a compare and its
22622 dependent branch, to inhibit expensive mispredicts. */
22623 if ((rs6000_cpu_attr == CPU_PPC603
22624 || rs6000_cpu_attr == CPU_PPC604
22625 || rs6000_cpu_attr == CPU_PPC604E
22626 || rs6000_cpu_attr == CPU_PPC620
22627 || rs6000_cpu_attr == CPU_PPC630
22628 || rs6000_cpu_attr == CPU_PPC750
22629 || rs6000_cpu_attr == CPU_PPC7400
22630 || rs6000_cpu_attr == CPU_PPC7450
22631 || rs6000_cpu_attr == CPU_PPCE5500
22632 || rs6000_cpu_attr == CPU_PPCE6500
22633 || rs6000_cpu_attr == CPU_POWER4
22634 || rs6000_cpu_attr == CPU_POWER5
22635 || rs6000_cpu_attr == CPU_POWER7
22636 || rs6000_cpu_attr == CPU_CELL)
22637 && recog_memoized (dep_insn)
22638 && (INSN_CODE (dep_insn) >= 0))
22640 switch (get_attr_type (dep_insn))
22642 case TYPE_CMP:
22643 case TYPE_COMPARE:
22644 case TYPE_DELAYED_COMPARE:
22645 case TYPE_IMUL_COMPARE:
22646 case TYPE_LMUL_COMPARE:
22647 case TYPE_FPCOMPARE:
22648 case TYPE_CR_LOGICAL:
22649 case TYPE_DELAYED_CR:
22650 return cost + 2;
22651 default:
22652 break;
22654 break;
22656 case TYPE_STORE:
22657 case TYPE_STORE_U:
22658 case TYPE_STORE_UX:
22659 case TYPE_FPSTORE:
22660 case TYPE_FPSTORE_U:
22661 case TYPE_FPSTORE_UX:
22662 if ((rs6000_cpu == PROCESSOR_POWER6)
22663 && recog_memoized (dep_insn)
22664 && (INSN_CODE (dep_insn) >= 0))
22667 if (GET_CODE (PATTERN (insn)) != SET)
22668 /* If this happens, we have to extend this to schedule
22669 optimally. Return default for now. */
22670 return cost;
22672 /* Adjust the cost for the case where the value written
22673 by a fixed point operation is used as the address
22674 gen value on a store. */
22675 switch (get_attr_type (dep_insn))
22677 case TYPE_LOAD:
22678 case TYPE_LOAD_U:
22679 case TYPE_LOAD_UX:
22680 case TYPE_CNTLZ:
22682 if (! store_data_bypass_p (dep_insn, insn))
22683 return 4;
22684 break;
22686 case TYPE_LOAD_EXT:
22687 case TYPE_LOAD_EXT_U:
22688 case TYPE_LOAD_EXT_UX:
22689 case TYPE_VAR_SHIFT_ROTATE:
22690 case TYPE_VAR_DELAYED_COMPARE:
22692 if (! store_data_bypass_p (dep_insn, insn))
22693 return 6;
22694 break;
22696 case TYPE_INTEGER:
22697 case TYPE_COMPARE:
22698 case TYPE_FAST_COMPARE:
22699 case TYPE_EXTS:
22700 case TYPE_SHIFT:
22701 case TYPE_INSERT_WORD:
22702 case TYPE_INSERT_DWORD:
22703 case TYPE_FPLOAD_U:
22704 case TYPE_FPLOAD_UX:
22705 case TYPE_STORE_U:
22706 case TYPE_STORE_UX:
22707 case TYPE_FPSTORE_U:
22708 case TYPE_FPSTORE_UX:
22710 if (! store_data_bypass_p (dep_insn, insn))
22711 return 3;
22712 break;
22714 case TYPE_IMUL:
22715 case TYPE_IMUL2:
22716 case TYPE_IMUL3:
22717 case TYPE_LMUL:
22718 case TYPE_IMUL_COMPARE:
22719 case TYPE_LMUL_COMPARE:
22721 if (! store_data_bypass_p (dep_insn, insn))
22722 return 17;
22723 break;
22725 case TYPE_IDIV:
22727 if (! store_data_bypass_p (dep_insn, insn))
22728 return 45;
22729 break;
22731 case TYPE_LDIV:
22733 if (! store_data_bypass_p (dep_insn, insn))
22734 return 57;
22735 break;
22737 default:
22738 break;
22741 break;
22743 case TYPE_LOAD:
22744 case TYPE_LOAD_U:
22745 case TYPE_LOAD_UX:
22746 case TYPE_LOAD_EXT:
22747 case TYPE_LOAD_EXT_U:
22748 case TYPE_LOAD_EXT_UX:
22749 if ((rs6000_cpu == PROCESSOR_POWER6)
22750 && recog_memoized (dep_insn)
22751 && (INSN_CODE (dep_insn) >= 0))
22754 /* Adjust the cost for the case where the value written
22755 by a fixed point instruction is used within the address
22756 gen portion of a subsequent load(u)(x) */
22757 switch (get_attr_type (dep_insn))
22759 case TYPE_LOAD:
22760 case TYPE_LOAD_U:
22761 case TYPE_LOAD_UX:
22762 case TYPE_CNTLZ:
22764 if (set_to_load_agen (dep_insn, insn))
22765 return 4;
22766 break;
22768 case TYPE_LOAD_EXT:
22769 case TYPE_LOAD_EXT_U:
22770 case TYPE_LOAD_EXT_UX:
22771 case TYPE_VAR_SHIFT_ROTATE:
22772 case TYPE_VAR_DELAYED_COMPARE:
22774 if (set_to_load_agen (dep_insn, insn))
22775 return 6;
22776 break;
22778 case TYPE_INTEGER:
22779 case TYPE_COMPARE:
22780 case TYPE_FAST_COMPARE:
22781 case TYPE_EXTS:
22782 case TYPE_SHIFT:
22783 case TYPE_INSERT_WORD:
22784 case TYPE_INSERT_DWORD:
22785 case TYPE_FPLOAD_U:
22786 case TYPE_FPLOAD_UX:
22787 case TYPE_STORE_U:
22788 case TYPE_STORE_UX:
22789 case TYPE_FPSTORE_U:
22790 case TYPE_FPSTORE_UX:
22792 if (set_to_load_agen (dep_insn, insn))
22793 return 3;
22794 break;
22796 case TYPE_IMUL:
22797 case TYPE_IMUL2:
22798 case TYPE_IMUL3:
22799 case TYPE_LMUL:
22800 case TYPE_IMUL_COMPARE:
22801 case TYPE_LMUL_COMPARE:
22803 if (set_to_load_agen (dep_insn, insn))
22804 return 17;
22805 break;
22807 case TYPE_IDIV:
22809 if (set_to_load_agen (dep_insn, insn))
22810 return 45;
22811 break;
22813 case TYPE_LDIV:
22815 if (set_to_load_agen (dep_insn, insn))
22816 return 57;
22817 break;
22819 default:
22820 break;
22823 break;
22825 case TYPE_FPLOAD:
22826 if ((rs6000_cpu == PROCESSOR_POWER6)
22827 && recog_memoized (dep_insn)
22828 && (INSN_CODE (dep_insn) >= 0)
22829 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
22830 return 2;
22832 default:
22833 break;
22836 /* Fall out to return default cost. */
22838 break;
22840 case REG_DEP_OUTPUT:
22841 /* Output dependency; DEP_INSN writes a register that INSN writes some
22842 cycles later. */
22843 if ((rs6000_cpu == PROCESSOR_POWER6)
22844 && recog_memoized (dep_insn)
22845 && (INSN_CODE (dep_insn) >= 0))
22847 attr_type = get_attr_type (insn);
22849 switch (attr_type)
22851 case TYPE_FP:
22852 if (get_attr_type (dep_insn) == TYPE_FP)
22853 return 1;
22854 break;
22855 case TYPE_FPLOAD:
22856 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
22857 return 2;
22858 break;
22859 default:
22860 break;
22863 case REG_DEP_ANTI:
22864 /* Anti dependency; DEP_INSN reads a register that INSN writes some
22865 cycles later. */
22866 return 0;
22868 default:
22869 gcc_unreachable ();
22872 return cost;
22875 /* Debug version of rs6000_adjust_cost. */
22877 static int
22878 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22880 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
22882 if (ret != cost)
22884 const char *dep;
22886 switch (REG_NOTE_KIND (link))
22888 default: dep = "unknown dependency"; break;
22889 case REG_DEP_TRUE: dep = "data dependency"; break;
22890 case REG_DEP_OUTPUT: dep = "output dependency"; break;
22891 case REG_DEP_ANTI: dep = "anti dependency"; break;
22894 fprintf (stderr,
22895 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
22896 "%s, insn:\n", ret, cost, dep);
22898 debug_rtx (insn);
22901 return ret;
22904 /* Return true if INSN is microcoded.
22905 Return false otherwise. */
22907 static bool
22908 is_microcoded_insn (rtx insn)
22910 if (!insn || !NONDEBUG_INSN_P (insn)
22911 || GET_CODE (PATTERN (insn)) == USE
22912 || GET_CODE (PATTERN (insn)) == CLOBBER)
22913 return false;
22915 if (rs6000_cpu_attr == CPU_CELL)
22916 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
22918 if (rs6000_sched_groups)
22920 enum attr_type type = get_attr_type (insn);
22921 if (type == TYPE_LOAD_EXT_U
22922 || type == TYPE_LOAD_EXT_UX
22923 || type == TYPE_LOAD_UX
22924 || type == TYPE_STORE_UX
22925 || type == TYPE_MFCR)
22926 return true;
22929 return false;
22932 /* Return true if INSN is cracked into 2 instructions
22933 by the processor (and therefore occupies 2 issue slots). */
22935 static bool
22936 is_cracked_insn (rtx insn)
22938 if (!insn || !NONDEBUG_INSN_P (insn)
22939 || GET_CODE (PATTERN (insn)) == USE
22940 || GET_CODE (PATTERN (insn)) == CLOBBER)
22941 return false;
22943 if (rs6000_sched_groups)
22945 enum attr_type type = get_attr_type (insn);
22946 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
22947 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
22948 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
22949 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
22950 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
22951 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
22952 || type == TYPE_IDIV || type == TYPE_LDIV
22953 || type == TYPE_INSERT_WORD)
22954 return true;
22957 return false;
22960 /* Return true if INSN can be issued only from
22961 the branch slot. */
22963 static bool
22964 is_branch_slot_insn (rtx insn)
22966 if (!insn || !NONDEBUG_INSN_P (insn)
22967 || GET_CODE (PATTERN (insn)) == USE
22968 || GET_CODE (PATTERN (insn)) == CLOBBER)
22969 return false;
22971 if (rs6000_sched_groups)
22973 enum attr_type type = get_attr_type (insn);
22974 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
22975 return true;
22976 return false;
22979 return false;
22982 /* Return true if OUT_INSN sets a value that is
22983 used in the address generation computation of IN_INSN. */
22984 static bool
22985 set_to_load_agen (rtx out_insn, rtx in_insn)
22987 rtx out_set, in_set;
22989 /* For performance reasons, only handle the simple case where
22990 both loads are a single_set. */
22991 out_set = single_set (out_insn);
22992 if (out_set)
22994 in_set = single_set (in_insn);
22995 if (in_set)
22996 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
22999 return false;
23002 /* Try to determine base/offset/size parts of the given MEM.
23003 Return true if successful, false if all the values couldn't
23004 be determined.
23006 This function only looks for REG or REG+CONST address forms.
23007 REG+REG address form will return false. */
23009 static bool
23010 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
23011 HOST_WIDE_INT *size)
23013 rtx addr_rtx;
23014 if (MEM_SIZE_KNOWN_P (mem))
23015 *size = MEM_SIZE (mem);
23016 else
23017 return false;
23019 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
23020 addr_rtx = XEXP (XEXP (mem, 0), 1);
23021 else
23022 addr_rtx = (XEXP (mem, 0));
23024 if (GET_CODE (addr_rtx) == REG)
23026 *base = addr_rtx;
23027 *offset = 0;
23029 else if (GET_CODE (addr_rtx) == PLUS
23030 && CONST_INT_P (XEXP (addr_rtx, 1)))
23032 *base = XEXP (addr_rtx, 0);
23033 *offset = INTVAL (XEXP (addr_rtx, 1));
23035 else
23036 return false;
23038 return true;
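/* Illustrative examples, not part of the original source, assuming
   MEM_SIZE is known for each MEM:

       (mem:DI (reg r9))                       -> base r9, offset 0,  size 8
       (mem:SI (plus (reg r9) (const_int 16))) -> base r9, offset 16, size 4
       (mem:SI (plus (reg r9) (reg r10)))      -> false (REG+REG form)  */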
23041 /* Return true if the target storage location of MEM1 is
23042 adjacent to the target storage location of MEM2. */
23045 static bool
23046 adjacent_mem_locations (rtx mem1, rtx mem2)
23048 rtx reg1, reg2;
23049 HOST_WIDE_INT off1, size1, off2, size2;
23051 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23052 && get_memref_parts (mem2, &reg2, &off2, &size2))
23053 return ((REGNO (reg1) == REGNO (reg2))
23054 && ((off1 + size1 == off2)
23055 || (off2 + size2 == off1)));
23057 return false;
23060 /* This function returns true if it can be determined that the two MEM
23061 locations overlap by at least 1 byte based on base reg/offset/size. */
23063 static bool
23064 mem_locations_overlap (rtx mem1, rtx mem2)
23066 rtx reg1, reg2;
23067 HOST_WIDE_INT off1, size1, off2, size2;
23069 if (get_memref_parts (mem1, &reg1, &off1, &size1)
23070 && get_memref_parts (mem2, &reg2, &off2, &size2))
23071 return ((REGNO (reg1) == REGNO (reg2))
23072 && (((off1 <= off2) && (off1 + size1 > off2))
23073 || ((off2 <= off1) && (off2 + size2 > off1))));
23075 return false;
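/* Equivalent formulation, not part of the original source: the two
   half-open intervals [off1, off1 + size1) and [off2, off2 + size2)
   overlap iff off1 < off2 + size2 && off2 < off1 + size1; the test
   above is that condition split on which interval starts first.  */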
23078 /* A C statement (sans semicolon) to update the integer scheduling
23080 priority INSN_PRIORITY (INSN). Increase the priority to execute
23081 INSN earlier; reduce the priority to execute INSN later. Do not
23081 define this macro if you do not need to adjust the scheduling
23082 priorities of insns. */
23084 static int
23085 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
23087 rtx load_mem, str_mem;
23088 /* On machines (like the 750) which have asymmetric integer units,
23089 where one integer unit can do multiply and divides and the other
23090 can't, reduce the priority of multiply/divide so it is scheduled
23091 before other integer operations. */
23093 #if 0
23094 if (! INSN_P (insn))
23095 return priority;
23097 if (GET_CODE (PATTERN (insn)) == USE)
23098 return priority;
23100 switch (rs6000_cpu_attr) {
23101 case CPU_PPC750:
23102 switch (get_attr_type (insn))
23104 default:
23105 break;
23107 case TYPE_IMUL:
23108 case TYPE_IDIV:
23109 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
23110 priority, priority);
23111 if (priority >= 0 && priority < 0x01000000)
23112 priority >>= 3;
23113 break;
23116 #endif
23118 if (insn_must_be_first_in_group (insn)
23119 && reload_completed
23120 && current_sched_info->sched_max_insns_priority
23121 && rs6000_sched_restricted_insns_priority)
23124 /* Prioritize insns that can be dispatched only in the first
23125 dispatch slot. */
23126 if (rs6000_sched_restricted_insns_priority == 1)
23127 /* Attach highest priority to insn. This means that in
23128 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
23129 precede 'priority' (critical path) considerations. */
23130 return current_sched_info->sched_max_insns_priority;
23131 else if (rs6000_sched_restricted_insns_priority == 2)
23132 /* Increase priority of insn by a minimal amount. This means that in
23133 haifa-sched.c:ready_sort(), only 'priority' (critical path)
23134 considerations precede dispatch-slot restriction considerations. */
23135 return (priority + 1);
23138 if (rs6000_cpu == PROCESSOR_POWER6
23139 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
23140 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
23141 /* Attach highest priority to insn if the scheduler has just issued two
23142 stores and this instruction is a load, or two loads and this instruction
23143 is a store. Power6 wants loads and stores scheduled alternately
23144 when possible. */
23145 return current_sched_info->sched_max_insns_priority;
23147 return priority;
23150 /* Return true if the instruction is nonpipelined on the Cell. */
23151 static bool
23152 is_nonpipeline_insn (rtx insn)
23154 enum attr_type type;
23155 if (!insn || !NONDEBUG_INSN_P (insn)
23156 || GET_CODE (PATTERN (insn)) == USE
23157 || GET_CODE (PATTERN (insn)) == CLOBBER)
23158 return false;
23160 type = get_attr_type (insn);
23161 if (type == TYPE_IMUL
23162 || type == TYPE_IMUL2
23163 || type == TYPE_IMUL3
23164 || type == TYPE_LMUL
23165 || type == TYPE_IDIV
23166 || type == TYPE_LDIV
23167 || type == TYPE_SDIV
23168 || type == TYPE_DDIV
23169 || type == TYPE_SSQRT
23170 || type == TYPE_DSQRT
23171 || type == TYPE_MFCR
23172 || type == TYPE_MFCRF
23173 || type == TYPE_MFJMPR)
23175 return true;
23177 return false;
23181 /* Return how many instructions the machine can issue per cycle. */
23183 static int
23184 rs6000_issue_rate (void)
23186 /* Unless scheduling for register pressure, use issue rate of 1 for
23187 first scheduling pass to decrease degradation. */
23188 if (!reload_completed && !flag_sched_pressure)
23189 return 1;
23191 switch (rs6000_cpu_attr) {
23192 case CPU_RS64A:
23193 case CPU_PPC601: /* ? */
23194 case CPU_PPC7450:
23195 return 3;
23196 case CPU_PPC440:
23197 case CPU_PPC603:
23198 case CPU_PPC750:
23199 case CPU_PPC7400:
23200 case CPU_PPC8540:
23201 case CPU_PPC8548:
23202 case CPU_CELL:
23203 case CPU_PPCE300C2:
23204 case CPU_PPCE300C3:
23205 case CPU_PPCE500MC:
23206 case CPU_PPCE500MC64:
23207 case CPU_PPCE5500:
23208 case CPU_PPCE6500:
23209 case CPU_TITAN:
23210 return 2;
23211 case CPU_PPC476:
23212 case CPU_PPC604:
23213 case CPU_PPC604E:
23214 case CPU_PPC620:
23215 case CPU_PPC630:
23216 return 4;
23217 case CPU_POWER4:
23218 case CPU_POWER5:
23219 case CPU_POWER6:
23220 case CPU_POWER7:
23221 return 5;
23222 default:
23223 return 1;
23227 /* Return how many instructions to look ahead for better insn
23228 scheduling. */
23230 static int
23231 rs6000_use_sched_lookahead (void)
23233 switch (rs6000_cpu_attr)
23235 case CPU_PPC8540:
23236 case CPU_PPC8548:
23237 return 4;
23239 case CPU_CELL:
23240 return (reload_completed ? 8 : 0);
23242 default:
23243 return 0;
23247 /* We are choosing insn from the ready queue. Return nonzero if INSN can be chosen. */
23248 static int
23249 rs6000_use_sched_lookahead_guard (rtx insn)
23251 if (rs6000_cpu_attr != CPU_CELL)
23252 return 1;
23254 if (insn == NULL_RTX || !INSN_P (insn))
23255 abort ();
23257 if (!reload_completed
23258 || is_nonpipeline_insn (insn)
23259 || is_microcoded_insn (insn))
23260 return 0;
23262 return 1;
23265 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23266 and return true. */
23268 static bool
23269 find_mem_ref (rtx pat, rtx *mem_ref)
23271 const char * fmt;
23272 int i, j;
23274 /* stack_tie does not produce any real memory traffic. */
23275 if (tie_operand (pat, VOIDmode))
23276 return false;
23278 if (GET_CODE (pat) == MEM)
23280 *mem_ref = pat;
23281 return true;
23284 /* Recursively process the pattern. */
23285 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23287 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23289 if (fmt[i] == 'e')
23291 if (find_mem_ref (XEXP (pat, i), mem_ref))
23292 return true;
23294 else if (fmt[i] == 'E')
23295 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23297 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23298 return true;
23302 return false;
23305 /* Determine if PAT is a PATTERN of a load insn. */
23307 static bool
23308 is_load_insn1 (rtx pat, rtx *load_mem)
23310 if (pat == NULL_RTX)
23311 return false;
23313 if (GET_CODE (pat) == SET)
23314 return find_mem_ref (SET_SRC (pat), load_mem);
23316 if (GET_CODE (pat) == PARALLEL)
23318 int i;
23320 for (i = 0; i < XVECLEN (pat, 0); i++)
23321 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23322 return true;
23325 return false;
23328 /* Determine if INSN loads from memory. */
23330 static bool
23331 is_load_insn (rtx insn, rtx *load_mem)
23333 if (!insn || !INSN_P (insn))
23334 return false;
23336 if (GET_CODE (insn) == CALL_INSN)
23337 return false;
23339 return is_load_insn1 (PATTERN (insn), load_mem);
23342 /* Determine if PAT is a PATTERN of a store insn. */
23344 static bool
23345 is_store_insn1 (rtx pat, rtx *str_mem)
23347 if (pat == NULL_RTX)
23348 return false;
23350 if (GET_CODE (pat) == SET)
23351 return find_mem_ref (SET_DEST (pat), str_mem);
23353 if (GET_CODE (pat) == PARALLEL)
23355 int i;
23357 for (i = 0; i < XVECLEN (pat, 0); i++)
23358 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23359 return true;
23362 return false;
23365 /* Determine if INSN stores to memory. */
23367 static bool
23368 is_store_insn (rtx insn, rtx *str_mem)
23370 if (!insn || !INSN_P (insn))
23371 return false;
23373 return is_store_insn1 (PATTERN (insn), str_mem);
23376 /* Returns whether the dependence between INSN and NEXT is considered
23377 costly by the given target. */
23379 static bool
23380 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23382 rtx insn;
23383 rtx next;
23384 rtx load_mem, str_mem;
23386 /* If the flag is not enabled - no dependence is considered costly;
23387 allow all dependent insns in the same group.
23388 This is the most aggressive option. */
23389 if (rs6000_sched_costly_dep == no_dep_costly)
23390 return false;
23392 /* If the flag is set to 1 - a dependence is always considered costly;
23393 do not allow dependent instructions in the same group.
23394 This is the most conservative option. */
23395 if (rs6000_sched_costly_dep == all_deps_costly)
23396 return true;
23398 insn = DEP_PRO (dep);
23399 next = DEP_CON (dep);
23401 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23402 && is_load_insn (next, &load_mem)
23403 && is_store_insn (insn, &str_mem))
23404 /* Prevent load after store in the same group. */
23405 return true;
23407 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23408 && is_load_insn (next, &load_mem)
23409 && is_store_insn (insn, &str_mem)
23410 && DEP_TYPE (dep) == REG_DEP_TRUE
23411 && mem_locations_overlap(str_mem, load_mem))
23412 /* Prevent load after store in the same group if it is a true
23413 dependence. */
23414 return true;
23416 /* The flag is set to X; dependences with latency >= X are considered costly,
23417 and will not be scheduled in the same group. */
23418 if (rs6000_sched_costly_dep <= max_dep_latency
23419 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23420 return true;
23422 return false;
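/* Illustrative example, not part of the original source: with
   -msched-costly-dep=3, a dependence of cost 4 at distance 0 gives
   cost - distance == 4 >= 3, so the two insns are kept in separate
   dispatch groups, while a cost-2 dependence may still share a
   group.  */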
23425 /* Return the next insn after INSN that is found before TAIL is reached,
23426 skipping any "non-active" insns - insns that will not actually occupy
23427 an issue slot. Return NULL_RTX if such an insn is not found. */
23429 static rtx
23430 get_next_active_insn (rtx insn, rtx tail)
23432 if (insn == NULL_RTX || insn == tail)
23433 return NULL_RTX;
23435 while (1)
23437 insn = NEXT_INSN (insn);
23438 if (insn == NULL_RTX || insn == tail)
23439 return NULL_RTX;
23441 if (CALL_P (insn)
23442 || JUMP_P (insn)
23443 || (NONJUMP_INSN_P (insn)
23444 && GET_CODE (PATTERN (insn)) != USE
23445 && GET_CODE (PATTERN (insn)) != CLOBBER
23446 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23447 break;
23449 return insn;
23452 /* We are about to begin issuing insns for this clock cycle. */
23454 static int
23455 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23456 rtx *ready ATTRIBUTE_UNUSED,
23457 int *pn_ready ATTRIBUTE_UNUSED,
23458 int clock_var ATTRIBUTE_UNUSED)
23460 int n_ready = *pn_ready;
23462 if (sched_verbose)
23463 fprintf (dump, "// rs6000_sched_reorder :\n");
23465 /* Reorder the ready list, if the second to last ready insn
23466 is a non-pipelined insn. */
23467 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23469 if (is_nonpipeline_insn (ready[n_ready - 1])
23470 && (recog_memoized (ready[n_ready - 2]) > 0))
23471 /* Simply swap first two insns. */
23473 rtx tmp = ready[n_ready - 1];
23474 ready[n_ready - 1] = ready[n_ready - 2];
23475 ready[n_ready - 2] = tmp;
23479 if (rs6000_cpu == PROCESSOR_POWER6)
23480 load_store_pendulum = 0;
23482 return rs6000_issue_rate ();
23485 /* Like rs6000_sched_reorder, but called after issuing each insn. */
23487 static int
23488 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
23489 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
23491 if (sched_verbose)
23492 fprintf (dump, "// rs6000_sched_reorder2 :\n");
23494 /* For Power6, we need to handle some special cases to try and keep the
23495 store queue from overflowing and triggering expensive flushes.
23497 This code monitors how load and store instructions are being issued
23498 and skews the ready list one way or the other to increase the likelihood
23499 that a desired instruction is issued at the proper time.
23501 A couple of things are done. First, we maintain a "load_store_pendulum"
23502 to track the current state of load/store issue.
23504 - If the pendulum is at zero, then no loads or stores have been
23505 issued in the current cycle so we do nothing.
23507 - If the pendulum is 1, then a single load has been issued in this
23508 cycle and we attempt to locate another load in the ready list to
23509 issue with it.
23511 - If the pendulum is -2, then two stores have already been
23512 issued in this cycle, so we increase the priority of the first load
23513 in the ready list to increase its likelihood of being chosen first
23514 in the next cycle.
23516 - If the pendulum is -1, then a single store has been issued in this
23517 cycle and we attempt to locate another store in the ready list to
23518 issue with it, preferring a store to an adjacent memory location to
23519 facilitate store pairing in the store queue.
23521 - If the pendulum is 2, then two loads have already been
23522 issued in this cycle, so we increase the priority of the first store
23523 in the ready list to increase its likelihood of being chosen first
23524 in the next cycle.
23526 - If the pendulum < -2 or > 2, then do nothing.
23528 Note: This code covers the most common scenarios. There exist
23529 non-load/store instructions which make use of the LSU and which
23530 would need to be accounted for to strictly model the behavior
23531 of the machine. Those instructions are currently unaccounted
23532 for to help minimize compile time overhead of this code. */
23534 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
23536 int pos;
23537 int i;
23538 rtx tmp, load_mem, str_mem;
23540 if (is_store_insn (last_scheduled_insn, &str_mem))
23541 /* Issuing a store, swing the load_store_pendulum to the left */
23542 load_store_pendulum--;
23543 else if (is_load_insn (last_scheduled_insn, &load_mem))
23544 /* Issuing a load, swing the load_store_pendulum to the right */
23545 load_store_pendulum++;
23546 else
23547 return cached_can_issue_more;
23549 /* If the pendulum is balanced, or there is only one instruction on
23550 the ready list, then all is well, so return. */
23551 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
23552 return cached_can_issue_more;
23554 if (load_store_pendulum == 1)
23556 /* A load has been issued in this cycle. Scan the ready list
23557 for another load to issue with it */
23558 pos = *pn_ready - 1;
23560 while (pos >= 0)
23562 if (is_load_insn (ready[pos], &load_mem))
23564 /* Found a load. Move it to the head of the ready list,
23565 and adjust its priority so that it is more likely to
23566 stay there */
23567 tmp = ready[pos];
23568 for (i = pos; i < *pn_ready - 1; i++)
23569 ready[i] = ready[i + 1];
23570 ready[*pn_ready - 1] = tmp;
23572 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23573 INSN_PRIORITY (tmp)++;
23574 break;
23576 pos--;
23579 else if (load_store_pendulum == -2)
23581 /* Two stores have been issued in this cycle. Increase the
23582 priority of the first load in the ready list to favor it for
23583 issuing in the next cycle. */
23584 pos = *pn_ready - 1;
23586 while (pos >= 0)
23588 if (is_load_insn (ready[pos], &load_mem)
23589 && !sel_sched_p ()
23590 && INSN_PRIORITY_KNOWN (ready[pos]))
23592 INSN_PRIORITY (ready[pos])++;
23594 /* Adjust the pendulum to account for the fact that a load
23595 was found and increased in priority. This is to prevent
23596 increasing the priority of multiple loads */
23597 load_store_pendulum--;
23599 break;
23601 pos--;
23604 else if (load_store_pendulum == -1)
23606 /* A store has been issued in this cycle. Scan the ready list for
23607 another store to issue with it, preferring a store to an adjacent
23608 memory location */
23609 int first_store_pos = -1;
23611 pos = *pn_ready - 1;
23613 while (pos >= 0)
23615 if (is_store_insn (ready[pos], &str_mem))
23617 rtx str_mem2;
23618 /* Maintain the index of the first store found on the
23619 list */
23620 if (first_store_pos == -1)
23621 first_store_pos = pos;
23623 if (is_store_insn (last_scheduled_insn, &str_mem2)
23624 && adjacent_mem_locations (str_mem, str_mem2))
23626 /* Found an adjacent store. Move it to the head of the
23627 ready list, and adjust its priority so that it is
23628 more likely to stay there */
23629 tmp = ready[pos];
23630 for (i = pos; i < *pn_ready - 1; i++)
23631 ready[i] = ready[i + 1];
23632 ready[*pn_ready - 1] = tmp;
23634 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23635 INSN_PRIORITY (tmp)++;
23637 first_store_pos = -1;
23639 break;
23642 pos--;
23645 if (first_store_pos >= 0)
23647 /* An adjacent store wasn't found, but a non-adjacent store was,
23648 so move the non-adjacent store to the front of the ready
23649 list, and adjust its priority so that it is more likely to
23650 stay there. */
23651 tmp = ready[first_store_pos];
23652 for (i = first_store_pos; i < *pn_ready - 1; i++)
23653 ready[i] = ready[i + 1];
23654 ready[*pn_ready - 1] = tmp;
23655 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23656 INSN_PRIORITY (tmp)++;
23659 else if (load_store_pendulum == 2)
23661 /* Two loads have been issued in this cycle. Increase the priority
23662 of the first store in the ready list to favor it for issuing in
23663 the next cycle. */
23664 pos = *pn_ready - 1;
23666 while (pos >= 0)
23668 if (is_store_insn (ready[pos], &str_mem)
23669 && !sel_sched_p ()
23670 && INSN_PRIORITY_KNOWN (ready[pos]))
23672 INSN_PRIORITY (ready[pos])++;
23674 /* Adjust the pendulum to account for the fact that a store
23675 was found and increased in priority. This is to prevent
23676 increasing the priority of multiple stores */
23677 load_store_pendulum++;
23679 break;
23681 pos--;
23686 return cached_can_issue_more;
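/* Illustrative pendulum trace, not part of the original source:
   issuing two stores in one cycle moves load_store_pendulum
   0 -> -1 -> -2.  At -1 the code above hunts for a second
   (preferably adjacent) store to pair with the first; at -2 it
   bumps the priority of the first load on the ready list so that a
   load is favored in the next cycle.  */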
23689 /* Return whether the presence of INSN causes a dispatch group termination
23690 of group WHICH_GROUP.
23692 If WHICH_GROUP == current_group, this function will return true if INSN
23693 causes the termination of the current group (i.e., the dispatch group to
23694 which INSN belongs). This means that INSN will be the last insn in the
23695 group it belongs to.
23697 If WHICH_GROUP == previous_group, this function will return true if INSN
23698 causes the termination of the previous group (i.e., the dispatch group that
23699 precedes the group to which INSN belongs). This means that INSN will be
23700 the first insn in the group it belongs to. */
23702 static bool
23703 insn_terminates_group_p (rtx insn, enum group_termination which_group)
23705 bool first, last;
23707 if (! insn)
23708 return false;
23710 first = insn_must_be_first_in_group (insn);
23711 last = insn_must_be_last_in_group (insn);
23713 if (first && last)
23714 return true;
23716 if (which_group == current_group)
23717 return last;
23718 else if (which_group == previous_group)
23719 return first;
23721 return false;
23725 static bool
23726 insn_must_be_first_in_group (rtx insn)
23728 enum attr_type type;
23730 if (!insn
23731 || GET_CODE (insn) == NOTE
23732 || DEBUG_INSN_P (insn)
23733 || GET_CODE (PATTERN (insn)) == USE
23734 || GET_CODE (PATTERN (insn)) == CLOBBER)
23735 return false;
23737 switch (rs6000_cpu)
23739 case PROCESSOR_POWER5:
23740 if (is_cracked_insn (insn))
23741 return true;
/* FALLTHRU */
23742 case PROCESSOR_POWER4:
23743 if (is_microcoded_insn (insn))
23744 return true;
23746 if (!rs6000_sched_groups)
23747 return false;
23749 type = get_attr_type (insn);
23751 switch (type)
23753 case TYPE_MFCR:
23754 case TYPE_MFCRF:
23755 case TYPE_MTCR:
23756 case TYPE_DELAYED_CR:
23757 case TYPE_CR_LOGICAL:
23758 case TYPE_MTJMPR:
23759 case TYPE_MFJMPR:
23760 case TYPE_IDIV:
23761 case TYPE_LDIV:
23762 case TYPE_LOAD_L:
23763 case TYPE_STORE_C:
23764 case TYPE_ISYNC:
23765 case TYPE_SYNC:
23766 return true;
23767 default:
23768 break;
23770 break;
23771 case PROCESSOR_POWER6:
23772 type = get_attr_type (insn);
23774 switch (type)
23776 case TYPE_INSERT_DWORD:
23777 case TYPE_EXTS:
23778 case TYPE_CNTLZ:
23779 case TYPE_SHIFT:
23780 case TYPE_VAR_SHIFT_ROTATE:
23781 case TYPE_TRAP:
23782 case TYPE_IMUL:
23783 case TYPE_IMUL2:
23784 case TYPE_IMUL3:
23785 case TYPE_LMUL:
23786 case TYPE_IDIV:
23787 case TYPE_INSERT_WORD:
23788 case TYPE_DELAYED_COMPARE:
23789 case TYPE_IMUL_COMPARE:
23790 case TYPE_LMUL_COMPARE:
23791 case TYPE_FPCOMPARE:
23792 case TYPE_MFCR:
23793 case TYPE_MTCR:
23794 case TYPE_MFJMPR:
23795 case TYPE_MTJMPR:
23796 case TYPE_ISYNC:
23797 case TYPE_SYNC:
23798 case TYPE_LOAD_L:
23799 case TYPE_STORE_C:
23800 case TYPE_LOAD_U:
23801 case TYPE_LOAD_UX:
23802 case TYPE_LOAD_EXT_UX:
23803 case TYPE_STORE_U:
23804 case TYPE_STORE_UX:
23805 case TYPE_FPLOAD_U:
23806 case TYPE_FPLOAD_UX:
23807 case TYPE_FPSTORE_U:
23808 case TYPE_FPSTORE_UX:
23809 return true;
23810 default:
23811 break;
23813 break;
23814 case PROCESSOR_POWER7:
23815 type = get_attr_type (insn);
23817 switch (type)
23819 case TYPE_CR_LOGICAL:
23820 case TYPE_MFCR:
23821 case TYPE_MFCRF:
23822 case TYPE_MTCR:
23823 case TYPE_IDIV:
23824 case TYPE_LDIV:
23825 case TYPE_COMPARE:
23826 case TYPE_DELAYED_COMPARE:
23827 case TYPE_VAR_DELAYED_COMPARE:
23828 case TYPE_ISYNC:
23829 case TYPE_LOAD_L:
23830 case TYPE_STORE_C:
23831 case TYPE_LOAD_U:
23832 case TYPE_LOAD_UX:
23833 case TYPE_LOAD_EXT:
23834 case TYPE_LOAD_EXT_U:
23835 case TYPE_LOAD_EXT_UX:
23836 case TYPE_STORE_U:
23837 case TYPE_STORE_UX:
23838 case TYPE_FPLOAD_U:
23839 case TYPE_FPLOAD_UX:
23840 case TYPE_FPSTORE_U:
23841 case TYPE_FPSTORE_UX:
23842 case TYPE_MFJMPR:
23843 case TYPE_MTJMPR:
23844 return true;
23845 default:
23846 break;
23848 break;
23849 default:
23850 break;
23853 return false;
23856 static bool
23857 insn_must_be_last_in_group (rtx insn)
23859 enum attr_type type;
23861 if (!insn
23862 || GET_CODE (insn) == NOTE
23863 || DEBUG_INSN_P (insn)
23864 || GET_CODE (PATTERN (insn)) == USE
23865 || GET_CODE (PATTERN (insn)) == CLOBBER)
23866 return false;
23868 switch (rs6000_cpu) {
23869 case PROCESSOR_POWER4:
23870 case PROCESSOR_POWER5:
23871 if (is_microcoded_insn (insn))
23872 return true;
23874 if (is_branch_slot_insn (insn))
23875 return true;
23877 break;
23878 case PROCESSOR_POWER6:
23879 type = get_attr_type (insn);
23881 switch (type)
23883 case TYPE_EXTS:
23884 case TYPE_CNTLZ:
23885 case TYPE_SHIFT:
23886 case TYPE_VAR_SHIFT_ROTATE:
23887 case TYPE_TRAP:
23888 case TYPE_IMUL:
23889 case TYPE_IMUL2:
23890 case TYPE_IMUL3:
23891 case TYPE_LMUL:
23892 case TYPE_IDIV:
23893 case TYPE_DELAYED_COMPARE:
23894 case TYPE_IMUL_COMPARE:
23895 case TYPE_LMUL_COMPARE:
23896 case TYPE_FPCOMPARE:
23897 case TYPE_MFCR:
23898 case TYPE_MTCR:
23899 case TYPE_MFJMPR:
23900 case TYPE_MTJMPR:
23901 case TYPE_ISYNC:
23902 case TYPE_SYNC:
23903 case TYPE_LOAD_L:
23904 case TYPE_STORE_C:
23905 return true;
23906 default:
23907 break;
23909 break;
23910 case PROCESSOR_POWER7:
23911 type = get_attr_type (insn);
23913 switch (type)
23915 case TYPE_ISYNC:
23916 case TYPE_SYNC:
23917 case TYPE_LOAD_L:
23918 case TYPE_STORE_C:
23919 case TYPE_LOAD_EXT_U:
23920 case TYPE_LOAD_EXT_UX:
23921 case TYPE_STORE_UX:
23922 return true;
23923 default:
23924 break;
23926 break;
23927 default:
23928 break;
23931 return false;
23934 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
23935 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
23937 static bool
23938 is_costly_group (rtx *group_insns, rtx next_insn)
23940 int i;
23941 int issue_rate = rs6000_issue_rate ();
23943 for (i = 0; i < issue_rate; i++)
23945 sd_iterator_def sd_it;
23946 dep_t dep;
23947 rtx insn = group_insns[i];
23949 if (!insn)
23950 continue;
23952 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
23954 rtx next = DEP_CON (dep);
23956 if (next == next_insn
23957 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
23958 return true;
23962 return false;
23965 /* Utility of the function redefine_groups.
23966 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
23967 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
23968 to keep it "far" (in a separate group) from GROUP_INSNS, following
23969 one of the following schemes, depending on the value of the flag
23970 -minsert-sched-nops = X:
23971 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
23972 in order to force NEXT_INSN into a separate group.
23973 (2) X < sched_finish_regroup_exact: insert exactly X nops.
23974 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
23975 insertion (has a group just ended, how many vacant issue slots remain in the
23976 last group, and how many dispatch groups were encountered so far). */
23978 static int
23979 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
23980 rtx next_insn, bool *group_end, int can_issue_more,
23981 int *group_count)
23983 rtx nop;
23984 bool force;
23985 int issue_rate = rs6000_issue_rate ();
23986 bool end = *group_end;
23987 int i;
23989 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
23990 return can_issue_more;
23992 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
23993 return can_issue_more;
23995 force = is_costly_group (group_insns, next_insn);
23996 if (!force)
23997 return can_issue_more;
23999 if (sched_verbose > 6)
24000 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
24001 *group_count ,can_issue_more);
24003 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
24005 if (*group_end)
24006 can_issue_more = 0;
24008 /* Since only a branch can be issued in the last issue_slot, it is
24009 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
24010 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
24011 in this case the last nop will start a new group and the branch
24012 will be forced to the new group. */
24013 if (can_issue_more && !is_branch_slot_insn (next_insn))
24014 can_issue_more--;
24016 /* Power6 and Power7 have special group ending nop. */
24017 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
24019 nop = gen_group_ending_nop ();
24020 emit_insn_before (nop, next_insn);
24021 can_issue_more = 0;
24023 else
24024 while (can_issue_more > 0)
24026 nop = gen_nop ();
24027 emit_insn_before (nop, next_insn);
24028 can_issue_more--;
24031 *group_end = true;
24032 return 0;
24035 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
24037 int n_nops = rs6000_sched_insert_nops;
24039 /* Nops can't be issued from the branch slot, so the effective
24040 issue_rate for nops is 'issue_rate - 1'. */
24041 if (can_issue_more == 0)
24042 can_issue_more = issue_rate;
24043 can_issue_more--;
24044 if (can_issue_more == 0)
24046 can_issue_more = issue_rate - 1;
24047 (*group_count)++;
24048 end = true;
24049 for (i = 0; i < issue_rate; i++)
24051 group_insns[i] = 0;
24055 while (n_nops > 0)
24057 nop = gen_nop ();
24058 emit_insn_before (nop, next_insn);
24059 if (can_issue_more == issue_rate - 1) /* new group begins */
24060 end = false;
24061 can_issue_more--;
24062 if (can_issue_more == 0)
24064 can_issue_more = issue_rate - 1;
24065 (*group_count)++;
24066 end = true;
24067 for (i = 0; i < issue_rate; i++)
24069 group_insns[i] = 0;
24072 n_nops--;
24075 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
24076 can_issue_more++;
24078 /* Is next_insn going to start a new group? */
24079 *group_end
24080 = (end
24081 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24082 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24083 || (can_issue_more < issue_rate &&
24084 insn_terminates_group_p (next_insn, previous_group)));
24085 if (*group_end && end)
24086 (*group_count)--;
24088 if (sched_verbose > 6)
24089 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
24090 *group_count, can_issue_more);
24091 return can_issue_more;
24094 return can_issue_more;
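
/* Illustrative sketch (not part of the original source): the nop count the
   sched_finish_regroup_exact scheme above produces on CPUs without a
   group-ending nop.  The function and argument names are hypothetical
   stand-ins for the state force_new_group tracks.  */

static int
example_exact_scheme_nop_count (int can_issue_more, bool group_just_ended,
                                bool next_is_branch)
{
  if (group_just_ended)
    can_issue_more = 0;               /* Nothing to pad.  */
  if (can_issue_more && !next_is_branch)
    can_issue_more--;                 /* The branch slot can't take a nop.  */
  return can_issue_more;              /* One nop per remaining slot.  */
}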
24097 /* This function tries to synch the dispatch groups that the compiler "sees"
24098 with the dispatch groups that the processor dispatcher is expected to
24099 form in practice. It tries to achieve this synchronization by forcing the
24100 estimated processor grouping on the compiler (as opposed to the function
24101 'pad_groups' which tries to force the scheduler's grouping on the processor).
24103 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
24104 examines the (estimated) dispatch groups that will be formed by the processor
24105 dispatcher. It marks these group boundaries to reflect the estimated
24106 processor grouping, overriding the grouping that the scheduler had marked.
24107 Depending on the value of the flag '-minsert-sched-nops' this function can
24108 force certain insns into separate groups or force a certain distance between
24109 them by inserting nops, for example, if there exists a "costly dependence"
24110 between the insns.
24112 The function estimates the group boundaries that the processor will form as
24113 follows: It keeps track of how many vacant issue slots are available after
24114 each insn. A subsequent insn will start a new group if one of the following
24115 4 cases applies:
24116 - no more vacant issue slots remain in the current dispatch group.
24117 - only the last issue slot, which is the branch slot, is vacant, but the next
24118 insn is not a branch.
24119 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
24120 which means that a cracked insn (which occupies two issue slots) can't be
24121 issued in this group.
24122 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
24123 start a new group.  (A standalone sketch of this predicate appears after
24124 redefine_groups below.) */
24125 static int
24126 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24128 rtx insn, next_insn;
24129 int issue_rate;
24130 int can_issue_more;
24131 int slot, i;
24132 bool group_end;
24133 int group_count = 0;
24134 rtx *group_insns;
24136 /* Initialize. */
24137 issue_rate = rs6000_issue_rate ();
24138 group_insns = XALLOCAVEC (rtx, issue_rate);
24139 for (i = 0; i < issue_rate; i++)
24141 group_insns[i] = 0;
24143 can_issue_more = issue_rate;
24144 slot = 0;
24145 insn = get_next_active_insn (prev_head_insn, tail);
24146 group_end = false;
24148 while (insn != NULL_RTX)
24150 slot = (issue_rate - can_issue_more);
24151 group_insns[slot] = insn;
24152 can_issue_more =
24153 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24154 if (insn_terminates_group_p (insn, current_group))
24155 can_issue_more = 0;
24157 next_insn = get_next_active_insn (insn, tail);
24158 if (next_insn == NULL_RTX)
24159 return group_count + 1;
24161 /* Is next_insn going to start a new group? */
24162 group_end
24163 = (can_issue_more == 0
24164 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
24165 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
24166 || (can_issue_more < issue_rate &&
24167 insn_terminates_group_p (next_insn, previous_group)));
24169 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
24170 next_insn, &group_end, can_issue_more,
24171 &group_count);
24173 if (group_end)
24175 group_count++;
24176 can_issue_more = 0;
24177 for (i = 0; i < issue_rate; i++)
24179 group_insns[i] = 0;
24183 if (GET_MODE (next_insn) == TImode && can_issue_more)
24184 PUT_MODE (next_insn, VOIDmode);
24185 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
24186 PUT_MODE (next_insn, TImode);
24188 insn = next_insn;
24189 if (can_issue_more == 0)
24190 can_issue_more = issue_rate;
24191 } /* while */
24193 return group_count;
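
/* Illustrative sketch (not part of the original source): the four
   group-boundary tests listed before redefine_groups, as a standalone
   predicate over plain values.  The *_p arguments stand in for
   is_branch_slot_insn, is_cracked_insn and insn_terminates_group_p
   applied to the next insn.  */

static bool
example_starts_new_group_p (int can_issue_more, int issue_rate,
                            bool branch_slot_p, bool cracked_p,
                            bool terminates_group_p)
{
  return (can_issue_more == 0
          || (can_issue_more == 1 && !branch_slot_p)
          || (can_issue_more <= 2 && cracked_p)
          || (can_issue_more < issue_rate && terminates_group_p));
}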
24196 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
24197 dispatch group boundaries that the scheduler had marked. Pad with nops
24198 any dispatch groups which have vacant issue slots, in order to force the
24199 scheduler's grouping on the processor dispatcher. The function
24200 returns the number of dispatch groups found. */
24202 static int
24203 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
24205 rtx insn, next_insn;
24206 rtx nop;
24207 int issue_rate;
24208 int can_issue_more;
24209 int group_end;
24210 int group_count = 0;
24212 /* Initialize issue_rate. */
24213 issue_rate = rs6000_issue_rate ();
24214 can_issue_more = issue_rate;
24216 insn = get_next_active_insn (prev_head_insn, tail);
24217 next_insn = get_next_active_insn (insn, tail);
24219 while (insn != NULL_RTX)
24221 can_issue_more =
24222 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24224 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24226 if (next_insn == NULL_RTX)
24227 break;
24229 if (group_end)
24231 /* If the scheduler had marked group termination at this location
24232 (between insn and next_insn), and neither insn nor next_insn will
24233 force group termination, pad the group with nops to force group
24234 termination. */
24235 if (can_issue_more
24236 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24237 && !insn_terminates_group_p (insn, current_group)
24238 && !insn_terminates_group_p (next_insn, previous_group))
24240 if (!is_branch_slot_insn (next_insn))
24241 can_issue_more--;
24243 while (can_issue_more)
24245 nop = gen_nop ();
24246 emit_insn_before (nop, next_insn);
24247 can_issue_more--;
24251 can_issue_more = issue_rate;
24252 group_count++;
24255 insn = next_insn;
24256 next_insn = get_next_active_insn (insn, tail);
24259 return group_count;
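
/* Illustrative sketch (not part of the original source): the number of nops
   pad_groups emits at a scheduler-marked boundary with CAN_ISSUE_MORE vacant
   slots.  Only a branch can occupy the last issue slot, so when the next
   insn is not a branch the group ends one nop earlier.  The helper name is
   hypothetical.  */

static int
example_pad_nop_count (int can_issue_more, bool next_is_branch_slot_insn)
{
  if (!next_is_branch_slot_insn)
    can_issue_more--;
  return can_issue_more;
}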
24262 /* We're beginning a new block. Initialize data structures as necessary. */
24264 static void
24265 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24266 int sched_verbose ATTRIBUTE_UNUSED,
24267 int max_ready ATTRIBUTE_UNUSED)
24269 last_scheduled_insn = NULL_RTX;
24270 load_store_pendulum = 0;
24273 /* The following function is called at the end of scheduling BB.
24274 After reload, it inserts nops to enforce the insn group bundling. */
24276 static void
24277 rs6000_sched_finish (FILE *dump, int sched_verbose)
24279 int n_groups;
24281 if (sched_verbose)
24282 fprintf (dump, "=== Finishing schedule.\n");
24284 if (reload_completed && rs6000_sched_groups)
24286 /* Do not run the sched_finish hook when selective scheduling is enabled. */
24287 if (sel_sched_p ())
24288 return;
24290 if (rs6000_sched_insert_nops == sched_finish_none)
24291 return;
24293 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24294 n_groups = pad_groups (dump, sched_verbose,
24295 current_sched_info->prev_head,
24296 current_sched_info->next_tail);
24297 else
24298 n_groups = redefine_groups (dump, sched_verbose,
24299 current_sched_info->prev_head,
24300 current_sched_info->next_tail);
24302 if (sched_verbose >= 6)
24304 fprintf (dump, "ngroups = %d\n", n_groups);
24305 print_rtl (dump, current_sched_info->prev_head);
24306 fprintf (dump, "Done finish_sched\n");
24311 struct _rs6000_sched_context
24313 short cached_can_issue_more;
24314 rtx last_scheduled_insn;
24315 int load_store_pendulum;
24318 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24319 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24321 /* Allocate store for new scheduling context. */
24322 static void *
24323 rs6000_alloc_sched_context (void)
24325 return xmalloc (sizeof (rs6000_sched_context_def));
24328 /* If CLEAN_P is true, initialize _SC with clean data;
24329 otherwise, initialize it from the global context. */
24330 static void
24331 rs6000_init_sched_context (void *_sc, bool clean_p)
24333 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24335 if (clean_p)
24337 sc->cached_can_issue_more = 0;
24338 sc->last_scheduled_insn = NULL_RTX;
24339 sc->load_store_pendulum = 0;
24341 else
24343 sc->cached_can_issue_more = cached_can_issue_more;
24344 sc->last_scheduled_insn = last_scheduled_insn;
24345 sc->load_store_pendulum = load_store_pendulum;
24349 /* Sets the global scheduling context to the one pointed to by _SC. */
24350 static void
24351 rs6000_set_sched_context (void *_sc)
24353 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24355 gcc_assert (sc != NULL);
24357 cached_can_issue_more = sc->cached_can_issue_more;
24358 last_scheduled_insn = sc->last_scheduled_insn;
24359 load_store_pendulum = sc->load_store_pendulum;
24362 /* Free _SC. */
24363 static void
24364 rs6000_free_sched_context (void *_sc)
24366 gcc_assert (_sc != NULL);
24368 free (_sc);
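
/* Illustrative sketch (not part of the original source): the save/restore
   protocol the selective scheduler follows with the four context hooks
   above.  The wrapper function itself is hypothetical.  */

static void
example_sched_context_roundtrip (void)
{
  void *ctx = rs6000_alloc_sched_context ();  /* Allocate storage.       */
  rs6000_init_sched_context (ctx, false);     /* Snapshot the globals.   */
  /* ... scheduling state is mutated while exploring an alternative ...  */
  rs6000_set_sched_context (ctx);             /* Restore the snapshot.   */
  rs6000_free_sched_context (ctx);            /* Release the storage.    */
}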
24372 /* Length in units of the trampoline for entering a nested function. */
24375 rs6000_trampoline_size (void)
24377 int ret = 0;
24379 switch (DEFAULT_ABI)
24381 default:
24382 gcc_unreachable ();
24384 case ABI_AIX:
24385 ret = (TARGET_32BIT) ? 12 : 24;
24386 break;
24388 case ABI_DARWIN:
24389 case ABI_V4:
24390 ret = (TARGET_32BIT) ? 40 : 48;
24391 break;
24394 return ret;
24397 /* Emit RTL insns to initialize the variable parts of a trampoline.
24398 FNADDR is an RTX for the address of the function's pure code.
24399 CXT is an RTX for the static chain value for the function. */
24401 static void
24402 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24404 int regsize = (TARGET_32BIT) ? 4 : 8;
24405 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24406 rtx ctx_reg = force_reg (Pmode, cxt);
24407 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24409 switch (DEFAULT_ABI)
24411 default:
24412 gcc_unreachable ();
24414 /* Under AIX, just build the 3-word function descriptor. */
24415 case ABI_AIX:
24417 rtx fnmem, fn_reg, toc_reg;
24419 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24420 error ("You cannot take the address of a nested function if you use "
24421 "the -mno-pointers-to-nested-functions option.");
24423 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24424 fn_reg = gen_reg_rtx (Pmode);
24425 toc_reg = gen_reg_rtx (Pmode);
24427 /* Macro to shorten the code expansions below. */
24428 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24430 m_tramp = replace_equiv_address (m_tramp, addr);
24432 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24433 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24434 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24435 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24436 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24438 # undef MEM_PLUS
24440 break;
24442 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24443 case ABI_DARWIN:
24444 case ABI_V4:
24445 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24446 LCT_NORMAL, VOIDmode, 4,
24447 addr, Pmode,
24448 GEN_INT (rs6000_trampoline_size ()), SImode,
24449 fnaddr, Pmode,
24450 ctx_reg, Pmode);
24451 break;
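
/* Illustrative sketch (not part of the original source): the three-word AIX
   function descriptor the ABI_AIX case above builds, shown as a plain
   struct with hypothetical field names.  Each field occupies one REGSIZE
   word (4 bytes for -m32, 8 for -m64), matching the MEM_PLUS stores at
   offsets 0, regsize and 2*regsize.  */

struct example_aix_descriptor
{
  void *code_entry;    /* Copied from word 0 of FNADDR's descriptor.  */
  void *toc_pointer;   /* Copied from word 1 of FNADDR's descriptor.  */
  void *static_chain;  /* CXT, the nested function's context.         */
};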
24456 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24457 identifier as an argument, so the front end shouldn't look it up. */
24459 static bool
24460 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24462 return is_attribute_p ("altivec", attr_id);
24465 /* Handle the "altivec" attribute. The attribute may have
24466 arguments as follows:
24468 __attribute__((altivec(vector__)))
24469 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24470 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24472 and may appear more than once (e.g., 'vector bool char') in a
24473 given declaration. */
24475 static tree
24476 rs6000_handle_altivec_attribute (tree *node,
24477 tree name ATTRIBUTE_UNUSED,
24478 tree args,
24479 int flags ATTRIBUTE_UNUSED,
24480 bool *no_add_attrs)
24482 tree type = *node, result = NULL_TREE;
24483 enum machine_mode mode;
24484 int unsigned_p;
24485 char altivec_type
24486 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
24487 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
24488 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
24489 : '?');
24491 while (POINTER_TYPE_P (type)
24492 || TREE_CODE (type) == FUNCTION_TYPE
24493 || TREE_CODE (type) == METHOD_TYPE
24494 || TREE_CODE (type) == ARRAY_TYPE)
24495 type = TREE_TYPE (type);
24497 mode = TYPE_MODE (type);
24499 /* Check for invalid AltiVec type qualifiers. */
24500 if (type == long_double_type_node)
24501 error ("use of %<long double%> in AltiVec types is invalid");
24502 else if (type == boolean_type_node)
24503 error ("use of boolean types in AltiVec types is invalid");
24504 else if (TREE_CODE (type) == COMPLEX_TYPE)
24505 error ("use of %<complex%> in AltiVec types is invalid");
24506 else if (DECIMAL_FLOAT_MODE_P (mode))
24507 error ("use of decimal floating point types in AltiVec types is invalid");
24508 else if (!TARGET_VSX)
24510 if (type == long_unsigned_type_node || type == long_integer_type_node)
24512 if (TARGET_64BIT)
24513 error ("use of %<long%> in AltiVec types is invalid for "
24514 "64-bit code without -mvsx");
24515 else if (rs6000_warn_altivec_long)
24516 warning (0, "use of %<long%> in AltiVec types is deprecated; "
24517 "use %<int%>");
24519 else if (type == long_long_unsigned_type_node
24520 || type == long_long_integer_type_node)
24521 error ("use of %<long long%> in AltiVec types is invalid without "
24522 "-mvsx");
24523 else if (type == double_type_node)
24524 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
24527 switch (altivec_type)
24529 case 'v':
24530 unsigned_p = TYPE_UNSIGNED (type);
24531 switch (mode)
24533 case DImode:
24534 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
24535 break;
24536 case SImode:
24537 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
24538 break;
24539 case HImode:
24540 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
24541 break;
24542 case QImode:
24543 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
24544 break;
24545 case SFmode: result = V4SF_type_node; break;
24546 case DFmode: result = V2DF_type_node; break;
24547 /* If the user says 'vector int bool', we may be handed the 'bool'
24548 attribute _before_ the 'vector' attribute, and so select the
24549 proper type in the 'b' case below. */
24550 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
24551 case V2DImode: case V2DFmode:
24552 result = type;
24553 default: break;
24555 break;
24556 case 'b':
24557 switch (mode)
24559 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
24560 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
24561 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
24562 case QImode: case V16QImode: result = bool_V16QI_type_node;
24563 default: break;
24565 break;
24566 case 'p':
24567 switch (mode)
24569 case V8HImode: result = pixel_V8HI_type_node;
24570 default: break;
24572 default: break;
24575 /* Propagate qualifiers attached to the element type
24576 onto the vector type. */
24577 if (result && result != type && TYPE_QUALS (type))
24578 result = build_qualified_type (result, TYPE_QUALS (type));
24580 *no_add_attrs = true; /* No need to hang on to the attribute. */
24582 if (result)
24583 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
24585 return NULL_TREE;
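
/* Illustrative sketch (not part of the original source): spellings of the
   attribute handled above, roughly what the 'vector', 'bool' and 'pixel'
   keywords expand to.  These typedefs assume an AltiVec-enabled target;
   the typedef names are hypothetical.  */

typedef int example_v4si __attribute__ ((altivec (vector__)));
typedef unsigned example_bool_v4si __attribute__ ((altivec (bool__)));
typedef unsigned short example_pixel __attribute__ ((altivec (pixel__)));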
24588 /* AltiVec defines four built-in scalar types that serve as vector
24589 elements; we must teach the compiler how to mangle them. */
24591 static const char *
24592 rs6000_mangle_type (const_tree type)
24594 type = TYPE_MAIN_VARIANT (type);
24596 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24597 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24598 return NULL;
24600 if (type == bool_char_type_node) return "U6__boolc";
24601 if (type == bool_short_type_node) return "U6__bools";
24602 if (type == pixel_type_node) return "u7__pixel";
24603 if (type == bool_int_type_node) return "U6__booli";
24604 if (type == bool_long_type_node) return "U6__booll";
24606 /* Mangle IBM extended float long double as `g' (__float128) on
24607 powerpc*-linux where long-double-64 previously was the default. */
24608 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
24609 && TARGET_ELF
24610 && TARGET_LONG_DOUBLE_128
24611 && !TARGET_IEEEQUAD)
24612 return "g";
24614 /* For all other types, use normal C++ mangling. */
24615 return NULL;
24618 /* Handle a "longcall" or "shortcall" attribute; arguments as in
24619 struct attribute_spec.handler. */
24621 static tree
24622 rs6000_handle_longcall_attribute (tree *node, tree name,
24623 tree args ATTRIBUTE_UNUSED,
24624 int flags ATTRIBUTE_UNUSED,
24625 bool *no_add_attrs)
24627 if (TREE_CODE (*node) != FUNCTION_TYPE
24628 && TREE_CODE (*node) != FIELD_DECL
24629 && TREE_CODE (*node) != TYPE_DECL)
24631 warning (OPT_Wattributes, "%qE attribute only applies to functions",
24632 name);
24633 *no_add_attrs = true;
24636 return NULL_TREE;
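
/* Illustrative sketch (not part of the original source): declarations using
   the attributes validated above.  'longcall' forces the call to go through
   a register (see rs6000_longcall_ref below); 'shortcall' exempts a single
   function when -mlongcall is the default.  Function names are
   hypothetical.  */

extern void example_far_fn (void) __attribute__ ((longcall));
extern void example_near_fn (void) __attribute__ ((shortcall));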
24639 /* Set longcall attributes on all functions declared when
24640 rs6000_default_long_calls is true. */
24641 static void
24642 rs6000_set_default_type_attributes (tree type)
24644 if (rs6000_default_long_calls
24645 && (TREE_CODE (type) == FUNCTION_TYPE
24646 || TREE_CODE (type) == METHOD_TYPE))
24647 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
24648 NULL_TREE,
24649 TYPE_ATTRIBUTES (type));
24651 #if TARGET_MACHO
24652 darwin_set_default_type_attributes (type);
24653 #endif
24656 /* Return a reference suitable for calling a function with the
24657 longcall attribute. */
24660 rs6000_longcall_ref (rtx call_ref)
24662 const char *call_name;
24663 tree node;
24665 if (GET_CODE (call_ref) != SYMBOL_REF)
24666 return call_ref;
24668 /* System V adds '.' to the internal name, so skip them. */
24669 call_name = XSTR (call_ref, 0);
24670 if (*call_name == '.')
24672 while (*call_name == '.')
24673 call_name++;
24675 node = get_identifier (call_name);
24676 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
24679 return force_reg (Pmode, call_ref);
24682 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
24683 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
24684 #endif
24686 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24687 struct attribute_spec.handler. */
24688 static tree
24689 rs6000_handle_struct_attribute (tree *node, tree name,
24690 tree args ATTRIBUTE_UNUSED,
24691 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24693 tree *type = NULL;
24694 if (DECL_P (*node))
24696 if (TREE_CODE (*node) == TYPE_DECL)
24697 type = &TREE_TYPE (*node);
24699 else
24700 type = node;
24702 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24703 || TREE_CODE (*type) == UNION_TYPE)))
24705 warning (OPT_Wattributes, "%qE attribute ignored", name);
24706 *no_add_attrs = true;
24709 else if ((is_attribute_p ("ms_struct", name)
24710 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24711 || ((is_attribute_p ("gcc_struct", name)
24712 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24714 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
24715 name);
24716 *no_add_attrs = true;
24719 return NULL_TREE;
24722 static bool
24723 rs6000_ms_bitfield_layout_p (const_tree record_type)
24725 return (TARGET_USE_MS_BITFIELD_LAYOUT
24726 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24727 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
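
/* Illustrative sketch (not part of the original source): user-level
   spellings of the struct-layout attributes checked above.  Roughly, MS
   layout does not merge adjacent bit-fields whose declared types differ in
   size, while GCC layout may pack them together.  Struct names are
   hypothetical.  */

struct example_ms_layout
{
  char a : 4;
  int b : 4;   /* MS rules start a new storage unit here.  */
} __attribute__ ((ms_struct));

struct example_gcc_layout
{
  char a : 4;
  int b : 4;   /* GCC rules may pack both nibbles together.  */
} __attribute__ ((gcc_struct));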
24730 #ifdef USING_ELFOS_H
24732 /* A get_unnamed_section callback, used for switching to toc_section. */
24734 static void
24735 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24737 if (DEFAULT_ABI == ABI_AIX
24738 && TARGET_MINIMAL_TOC
24739 && !TARGET_RELOCATABLE)
24741 if (!toc_initialized)
24743 toc_initialized = 1;
24744 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24745 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
24746 fprintf (asm_out_file, "\t.tc ");
24747 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
24748 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24749 fprintf (asm_out_file, "\n");
24751 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24752 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24753 fprintf (asm_out_file, " = .+32768\n");
24755 else
24756 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24758 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
24759 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24760 else
24762 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24763 if (!toc_initialized)
24765 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24766 fprintf (asm_out_file, " = .+32768\n");
24767 toc_initialized = 1;
24772 /* Implement TARGET_ASM_INIT_SECTIONS. */
24774 static void
24775 rs6000_elf_asm_init_sections (void)
24777 toc_section
24778 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
24780 sdata2_section
24781 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
24782 SDATA2_SECTION_ASM_OP);
24785 /* Implement TARGET_SELECT_RTX_SECTION. */
24787 static section *
24788 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
24789 unsigned HOST_WIDE_INT align)
24791 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
24792 return toc_section;
24793 else
24794 return default_elf_select_rtx_section (mode, x, align);
24797 /* For a SYMBOL_REF, set generic flags and then perform some
24798 target-specific processing.
24800 When the AIX ABI is requested on a non-AIX system, replace the
24801 function name with the real name (with a leading .) rather than the
24802 function descriptor name. This saves a lot of overriding code to
24803 read the prefixes. */
24805 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
24806 static void
24807 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
24809 default_encode_section_info (decl, rtl, first);
24811 if (first
24812 && TREE_CODE (decl) == FUNCTION_DECL
24813 && !TARGET_AIX
24814 && DEFAULT_ABI == ABI_AIX)
24816 rtx sym_ref = XEXP (rtl, 0);
24817 size_t len = strlen (XSTR (sym_ref, 0));
24818 char *str = XALLOCAVEC (char, len + 2);
24819 str[0] = '.';
24820 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
24821 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
24825 static inline bool
24826 compare_section_name (const char *section, const char *templ)
24828 int len;
24830 len = strlen (templ);
24831 return (strncmp (section, templ, len) == 0
24832 && (section[len] == 0 || section[len] == '.'));
24835 bool
24836 rs6000_elf_in_small_data_p (const_tree decl)
24838 if (rs6000_sdata == SDATA_NONE)
24839 return false;
24841 /* We want to merge strings, so we never consider them small data. */
24842 if (TREE_CODE (decl) == STRING_CST)
24843 return false;
24845 /* Functions are never in the small data area. */
24846 if (TREE_CODE (decl) == FUNCTION_DECL)
24847 return false;
24849 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
24851 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
24852 if (compare_section_name (section, ".sdata")
24853 || compare_section_name (section, ".sdata2")
24854 || compare_section_name (section, ".gnu.linkonce.s")
24855 || compare_section_name (section, ".sbss")
24856 || compare_section_name (section, ".sbss2")
24857 || compare_section_name (section, ".gnu.linkonce.sb")
24858 || strcmp (section, ".PPC.EMB.sdata0") == 0
24859 || strcmp (section, ".PPC.EMB.sbss0") == 0)
24860 return true;
24862 else
24864 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
24866 if (size > 0
24867 && size <= g_switch_value
24868 /* If it's not public, and we're not going to reference it there,
24869 there's no need to put it in the small data section. */
24870 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
24871 return true;
24874 return false;
24877 #endif /* USING_ELFOS_H */
24879 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
24881 static bool
24882 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
24884 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
24887 /* Return a REG that occurs in ADDR with coefficient 1.
24888 ADDR can be effectively incremented by incrementing REG.
24890 r0 is special and we must not select it as an address
24891 register by this routine since our caller will try to
24892 increment the returned register via an "la" instruction. */
24895 find_addr_reg (rtx addr)
24897 while (GET_CODE (addr) == PLUS)
24899 if (GET_CODE (XEXP (addr, 0)) == REG
24900 && REGNO (XEXP (addr, 0)) != 0)
24901 addr = XEXP (addr, 0);
24902 else if (GET_CODE (XEXP (addr, 1)) == REG
24903 && REGNO (XEXP (addr, 1)) != 0)
24904 addr = XEXP (addr, 1);
24905 else if (CONSTANT_P (XEXP (addr, 0)))
24906 addr = XEXP (addr, 1);
24907 else if (CONSTANT_P (XEXP (addr, 1)))
24908 addr = XEXP (addr, 0);
24909 else
24910 gcc_unreachable ();
24912 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
24913 return addr;
24916 void
24917 rs6000_fatal_bad_address (rtx op)
24919 fatal_insn ("bad address", op);
24922 #if TARGET_MACHO
24924 typedef struct branch_island_d {
24925 tree function_name;
24926 tree label_name;
24927 int line_number;
24928 } branch_island;
24930 DEF_VEC_O(branch_island);
24931 DEF_VEC_ALLOC_O(branch_island,gc);
24933 static VEC(branch_island,gc) *branch_islands;
24935 /* Remember to generate a branch island for far calls to the given
24936 function. */
24938 static void
24939 add_compiler_branch_island (tree label_name, tree function_name,
24940 int line_number)
24942 branch_island bi = {function_name, label_name, line_number};
24943 VEC_safe_push (branch_island, gc, branch_islands, bi);
24946 /* Generate far-jump branch islands for everything recorded in
24947 branch_islands. Invoked immediately after the last instruction of
24948 the epilogue has been emitted; the branch islands must be appended
24949 to, and contiguous with, the function body. Mach-O stubs are
24950 generated in machopic_output_stub(). */
24952 static void
24953 macho_branch_islands (void)
24955 char tmp_buf[512];
24957 while (!VEC_empty (branch_island, branch_islands))
24959 branch_island *bi = &VEC_last (branch_island, branch_islands);
24960 const char *label = IDENTIFIER_POINTER (bi->label_name);
24961 const char *name = IDENTIFIER_POINTER (bi->function_name);
24962 char name_buf[512];
24963 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
24964 if (name[0] == '*' || name[0] == '&')
24965 strcpy (name_buf, name+1);
24966 else
24968 name_buf[0] = '_';
24969 strcpy (name_buf+1, name);
24971 strcpy (tmp_buf, "\n");
24972 strcat (tmp_buf, label);
24973 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
24974 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
24975 dbxout_stabd (N_SLINE, bi->line_number);
24976 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
24977 if (flag_pic)
24979 if (TARGET_LINK_STACK)
24981 char name[32];
24982 get_ppc476_thunk_name (name);
24983 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
24984 strcat (tmp_buf, name);
24985 strcat (tmp_buf, "\n");
24986 strcat (tmp_buf, label);
24987 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24989 else
24991 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
24992 strcat (tmp_buf, label);
24993 strcat (tmp_buf, "_pic\n");
24994 strcat (tmp_buf, label);
24995 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24998 strcat (tmp_buf, "\taddis r11,r11,ha16(");
24999 strcat (tmp_buf, name_buf);
25000 strcat (tmp_buf, " - ");
25001 strcat (tmp_buf, label);
25002 strcat (tmp_buf, "_pic)\n");
25004 strcat (tmp_buf, "\tmtlr r0\n");
25006 strcat (tmp_buf, "\taddi r12,r11,lo16(");
25007 strcat (tmp_buf, name_buf);
25008 strcat (tmp_buf, " - ");
25009 strcat (tmp_buf, label);
25010 strcat (tmp_buf, "_pic)\n");
25012 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
25014 else
25016 strcat (tmp_buf, ":\nlis r12,hi16(");
25017 strcat (tmp_buf, name_buf);
25018 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
25019 strcat (tmp_buf, name_buf);
25020 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
25022 output_asm_insn (tmp_buf, 0);
25023 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
25024 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
25025 dbxout_stabd (N_SLINE, bi->line_number);
25026 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
25027 VEC_pop (branch_island, branch_islands);
25031 /* NO_PREVIOUS_DEF checks whether the function name is already in the
25032 branch_islands list. */
25034 static int
25035 no_previous_def (tree function_name)
25037 branch_island *bi;
25038 unsigned ix;
25040 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25041 if (function_name == bi->function_name)
25042 return 0;
25043 return 1;
25046 /* GET_PREV_LABEL gets the label name from the previous definition of
25047 the function. */
25049 static tree
25050 get_prev_label (tree function_name)
25052 branch_island *bi;
25053 unsigned ix;
25055 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
25056 if (function_name == bi->function_name)
25057 return bi->label_name;
25058 return NULL_TREE;
25061 /* INSN is either a function call or a millicode call. It may have an
25062 unconditional jump in its delay slot.
25064 CALL_DEST is the routine we are calling. */
25066 char *
25067 output_call (rtx insn, rtx *operands, int dest_operand_number,
25068 int cookie_operand_number)
25070 static char buf[256];
25071 if (darwin_emit_branch_islands
25072 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
25073 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
25075 tree labelname;
25076 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
25078 if (no_previous_def (funname))
25080 rtx label_rtx = gen_label_rtx ();
25081 char *label_buf, temp_buf[256];
25082 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
25083 CODE_LABEL_NUMBER (label_rtx));
25084 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
25085 labelname = get_identifier (label_buf);
25086 add_compiler_branch_island (labelname, funname, insn_line (insn));
25088 else
25089 labelname = get_prev_label (funname);
25091 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
25092 instruction will reach 'foo', otherwise link as 'bl L42'".
25093 "L42" should be a 'branch island', that will do a far jump to
25094 'foo'. Branch islands are generated in
25095 macho_branch_islands(). */
25096 sprintf (buf, "jbsr %%z%d,%.246s",
25097 dest_operand_number, IDENTIFIER_POINTER (labelname));
25099 else
25100 sprintf (buf, "bl %%z%d", dest_operand_number);
25101 return buf;
25104 /* Generate PIC and indirect symbol stubs. */
25106 void
25107 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25109 unsigned int length;
25110 char *symbol_name, *lazy_ptr_name;
25111 char *local_label_0;
25112 static int label = 0;
25114 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25115 symb = (*targetm.strip_name_encoding) (symb);
25118 length = strlen (symb);
25119 symbol_name = XALLOCAVEC (char, length + 32);
25120 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25122 lazy_ptr_name = XALLOCAVEC (char, length + 32);
25123 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
25125 if (flag_pic == 2)
25126 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
25127 else
25128 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
25130 if (flag_pic == 2)
25132 fprintf (file, "\t.align 5\n");
25134 fprintf (file, "%s:\n", stub);
25135 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25137 label++;
25138 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
25139 sprintf (local_label_0, "\"L%011d$spb\"", label);
25141 fprintf (file, "\tmflr r0\n");
25142 if (TARGET_LINK_STACK)
25144 char name[32];
25145 get_ppc476_thunk_name (name);
25146 fprintf (file, "\tbl %s\n", name);
25147 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25149 else
25151 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
25152 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
25154 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
25155 lazy_ptr_name, local_label_0);
25156 fprintf (file, "\tmtlr r0\n");
25157 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
25158 (TARGET_64BIT ? "ldu" : "lwzu"),
25159 lazy_ptr_name, local_label_0);
25160 fprintf (file, "\tmtctr r12\n");
25161 fprintf (file, "\tbctr\n");
25163 else
25165 fprintf (file, "\t.align 4\n");
25167 fprintf (file, "%s:\n", stub);
25168 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25170 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
25171 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
25172 (TARGET_64BIT ? "ldu" : "lwzu"),
25173 lazy_ptr_name);
25174 fprintf (file, "\tmtctr r12\n");
25175 fprintf (file, "\tbctr\n");
25178 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25179 fprintf (file, "%s:\n", lazy_ptr_name);
25180 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25181 fprintf (file, "%sdyld_stub_binding_helper\n",
25182 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
25185 /* Legitimize PIC addresses. If the address is already
25186 position-independent, we return ORIG. Newly generated
25187 position-independent addresses go into a reg. This is REG if
25188 nonzero; otherwise we allocate register(s) as necessary. */
25190 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
25193 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
25194 rtx reg)
25196 rtx base, offset;
25198 if (reg == NULL && ! reload_in_progress && ! reload_completed)
25199 reg = gen_reg_rtx (Pmode);
25201 if (GET_CODE (orig) == CONST)
25203 rtx reg_temp;
25205 if (GET_CODE (XEXP (orig, 0)) == PLUS
25206 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
25207 return orig;
25209 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
25211 /* Use a different reg for the intermediate value, as
25212 it will be marked UNCHANGING. */
25213 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25214 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25215 Pmode, reg_temp);
25216 offset =
25217 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25218 Pmode, reg);
25220 if (GET_CODE (offset) == CONST_INT)
25222 if (SMALL_INT (offset))
25223 return plus_constant (Pmode, base, INTVAL (offset));
25224 else if (! reload_in_progress && ! reload_completed)
25225 offset = force_reg (Pmode, offset);
25226 else
25228 rtx mem = force_const_mem (Pmode, orig);
25229 return machopic_legitimize_pic_address (mem, Pmode, reg);
25232 return gen_rtx_PLUS (Pmode, base, offset);
25235 /* Fall back on generic machopic code. */
25236 return machopic_legitimize_pic_address (orig, mode, reg);
25239 /* Output a .machine directive for the Darwin assembler, and call
25240 the generic start_file routine. */
25242 static void
25243 rs6000_darwin_file_start (void)
25245 static const struct
25247 const char *arg;
25248 const char *name;
25249 int if_set;
25250 } mapping[] = {
25251 { "ppc64", "ppc64", MASK_64BIT },
25252 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25253 { "power4", "ppc970", 0 },
25254 { "G5", "ppc970", 0 },
25255 { "7450", "ppc7450", 0 },
25256 { "7400", "ppc7400", MASK_ALTIVEC },
25257 { "G4", "ppc7400", 0 },
25258 { "750", "ppc750", 0 },
25259 { "740", "ppc750", 0 },
25260 { "G3", "ppc750", 0 },
25261 { "604e", "ppc604e", 0 },
25262 { "604", "ppc604", 0 },
25263 { "603e", "ppc603", 0 },
25264 { "603", "ppc603", 0 },
25265 { "601", "ppc601", 0 },
25266 { NULL, "ppc", 0 } };
25267 const char *cpu_id = "";
25268 size_t i;
25270 rs6000_file_start ();
25271 darwin_file_start ();
25273 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25275 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25276 cpu_id = rs6000_default_cpu;
25278 if (global_options_set.x_rs6000_cpu_index)
25279 cpu_id = processor_target_table[rs6000_cpu_index].name;
25281 /* Look through the mapping array. Pick the first name that either
25282 matches the argument, has a bit set in IF_SET that is also set
25283 in the target flags, or has a NULL name. */
25285 i = 0;
25286 while (mapping[i].arg != NULL
25287 && strcmp (mapping[i].arg, cpu_id) != 0
25288 && (mapping[i].if_set & target_flags) == 0)
25289 i++;
25291 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
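
/* Illustrative sketch (not part of the original source): the lookup rule
   above applied to a miniature copy of the mapping table.  A name match
   wins first, then a set IF_SET bit, then the NULL-name fallback.  The
   helper and the constant 1 (standing in for MASK_64BIT) are
   hypothetical.  */

static const char *
example_machine_name (const char *cpu_id, int flags)
{
  static const struct { const char *arg; const char *name; int if_set; }
    map[] = { { "ppc64", "ppc64",   1 },
              { "G4",    "ppc7400", 0 },
              { NULL,    "ppc",     0 } };
  size_t i = 0;

  while (map[i].arg != NULL
         && strcmp (map[i].arg, cpu_id) != 0
         && (map[i].if_set & flags) == 0)
    i++;
  return map[i].name;
}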
25294 #endif /* TARGET_MACHO */
25296 #if TARGET_ELF
25297 static int
25298 rs6000_elf_reloc_rw_mask (void)
25300 if (flag_pic)
25301 return 3;
25302 else if (DEFAULT_ABI == ABI_AIX)
25303 return 2;
25304 else
25305 return 0;
25308 /* Record an element in the table of global constructors. SYMBOL is
25309 a SYMBOL_REF of the function to be called; PRIORITY is a number
25310 between 0 and MAX_INIT_PRIORITY.
25312 This differs from default_named_section_asm_out_constructor in
25313 that we have special handling for -mrelocatable. */
25315 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25316 static void
25317 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25319 const char *section = ".ctors";
25320 char buf[16];
25322 if (priority != DEFAULT_INIT_PRIORITY)
25324 sprintf (buf, ".ctors.%.5u",
25325 /* Invert the numbering so the linker puts us in the proper
25326 order; constructors are run from right to left, and the
25327 linker sorts in increasing order. */
25328 MAX_INIT_PRIORITY - priority);
25329 section = buf;
25332 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25333 assemble_align (POINTER_SIZE);
25335 if (TARGET_RELOCATABLE)
25337 fputs ("\t.long (", asm_out_file);
25338 output_addr_const (asm_out_file, symbol);
25339 fputs (")@fixup\n", asm_out_file);
25341 else
25342 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25345 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25346 static void
25347 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25349 const char *section = ".dtors";
25350 char buf[16];
25352 if (priority != DEFAULT_INIT_PRIORITY)
25354 sprintf (buf, ".dtors.%.5u",
25355 /* Invert the numbering so the linker puts us in the proper
25356 order; constructors are run from right to left, and the
25357 linker sorts in increasing order. */
25358 MAX_INIT_PRIORITY - priority);
25359 section = buf;
25362 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25363 assemble_align (POINTER_SIZE);
25365 if (TARGET_RELOCATABLE)
25367 fputs ("\t.long (", asm_out_file);
25368 output_addr_const (asm_out_file, symbol);
25369 fputs (")@fixup\n", asm_out_file);
25371 else
25372 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25375 void
25376 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25378 if (TARGET_64BIT)
25380 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25381 ASM_OUTPUT_LABEL (file, name);
25382 fputs (DOUBLE_INT_ASM_OP, file);
25383 rs6000_output_function_entry (file, name);
25384 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25385 if (DOT_SYMBOLS)
25387 fputs ("\t.size\t", file);
25388 assemble_name (file, name);
25389 fputs (",24\n\t.type\t.", file);
25390 assemble_name (file, name);
25391 fputs (",@function\n", file);
25392 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25394 fputs ("\t.globl\t.", file);
25395 assemble_name (file, name);
25396 putc ('\n', file);
25399 else
25400 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25401 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25402 rs6000_output_function_entry (file, name);
25403 fputs (":\n", file);
25404 return;
25407 if (TARGET_RELOCATABLE
25408 && !TARGET_SECURE_PLT
25409 && (get_pool_size () != 0 || crtl->profile)
25410 && uses_TOC ())
25412 char buf[256];
25414 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25416 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25417 fprintf (file, "\t.long ");
25418 assemble_name (file, buf);
25419 putc ('-', file);
25420 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25421 assemble_name (file, buf);
25422 putc ('\n', file);
25425 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25426 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25428 if (DEFAULT_ABI == ABI_AIX)
25430 const char *desc_name, *orig_name;
25432 orig_name = (*targetm.strip_name_encoding) (name);
25433 desc_name = orig_name;
25434 while (*desc_name == '.')
25435 desc_name++;
25437 if (TREE_PUBLIC (decl))
25438 fprintf (file, "\t.globl %s\n", desc_name);
25440 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25441 fprintf (file, "%s:\n", desc_name);
25442 fprintf (file, "\t.long %s\n", orig_name);
25443 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25444 if (DEFAULT_ABI == ABI_AIX)
25445 fputs ("\t.long 0\n", file);
25446 fprintf (file, "\t.previous\n");
25448 ASM_OUTPUT_LABEL (file, name);
25451 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25452 static void
25453 rs6000_elf_file_end (void)
25455 #ifdef HAVE_AS_GNU_ATTRIBUTE
25456 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
25458 if (rs6000_passes_float)
25459 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25460 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25461 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25462 : 2));
25463 if (rs6000_passes_vector)
25464 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25465 (TARGET_ALTIVEC_ABI ? 2
25466 : TARGET_SPE_ABI ? 3
25467 : 1));
25468 if (rs6000_returns_struct)
25469 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
25470 aix_struct_return ? 2 : 1);
25472 #endif
25473 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25474 if (TARGET_32BIT)
25475 file_end_indicate_exec_stack ();
25476 #endif
25478 #endif
25480 #if TARGET_XCOFF
25481 static void
25482 rs6000_xcoff_asm_output_anchor (rtx symbol)
25484 char buffer[100];
25486 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
25487 SYMBOL_REF_BLOCK_OFFSET (symbol));
25488 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
25491 static void
25492 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
25494 fputs (GLOBAL_ASM_OP, stream);
25495 RS6000_OUTPUT_BASENAME (stream, name);
25496 putc ('\n', stream);
25499 /* A get_unnamed_section callback, used for read-only sections.
25500 DIRECTIVE points to the section string variable. */
25502 static void
25503 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
25505 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
25506 *(const char *const *) directive,
25507 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25510 /* Likewise for read-write sections. */
25512 static void
25513 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
25515 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
25516 *(const char *const *) directive,
25517 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25520 /* A get_unnamed_section callback, used for switching to toc_section. */
25522 static void
25523 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25525 if (TARGET_MINIMAL_TOC)
25527 /* toc_section is always selected at least once from
25528 rs6000_xcoff_file_start, so this is guaranteed to
25529 always be defined once and only once in each file. */
25530 if (!toc_initialized)
25532 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
25533 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
25534 toc_initialized = 1;
25536 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
25537 (TARGET_32BIT ? "" : ",3"));
25539 else
25540 fputs ("\t.toc\n", asm_out_file);
25543 /* Implement TARGET_ASM_INIT_SECTIONS. */
25545 static void
25546 rs6000_xcoff_asm_init_sections (void)
25548 read_only_data_section
25549 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25550 &xcoff_read_only_section_name);
25552 private_data_section
25553 = get_unnamed_section (SECTION_WRITE,
25554 rs6000_xcoff_output_readwrite_section_asm_op,
25555 &xcoff_private_data_section_name);
25557 read_only_private_data_section
25558 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25559 &xcoff_private_data_section_name);
25561 toc_section
25562 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
25564 readonly_data_section = read_only_data_section;
25565 exception_section = data_section;
25568 static int
25569 rs6000_xcoff_reloc_rw_mask (void)
25571 return 3;
25574 static void
25575 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
25576 tree decl ATTRIBUTE_UNUSED)
25578 int smclass;
25579 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
25581 if (flags & SECTION_CODE)
25582 smclass = 0;
25583 else if (flags & SECTION_TLS)
25584 smclass = 3;
25585 else if (flags & SECTION_WRITE)
25586 smclass = 2;
25587 else
25588 smclass = 1;
25590 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
25591 (flags & SECTION_CODE) ? "." : "",
25592 name, suffix[smclass], flags & SECTION_ENTSIZE);
25595 static section *
25596 rs6000_xcoff_select_section (tree decl, int reloc,
25597 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25599 if (decl_readonly_section (decl, reloc))
25601 if (TREE_PUBLIC (decl))
25602 return read_only_data_section;
25603 else
25604 return read_only_private_data_section;
25606 else
25608 if (TREE_PUBLIC (decl))
25609 return data_section;
25610 else
25611 return private_data_section;
25615 static void
25616 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
25618 const char *name;
25620 /* Use select_section for private and uninitialized data. */
25621 if (!TREE_PUBLIC (decl)
25622 || DECL_COMMON (decl)
25623 || DECL_INITIAL (decl) == NULL_TREE
25624 || DECL_INITIAL (decl) == error_mark_node
25625 || (flag_zero_initialized_in_bss
25626 && initializer_zerop (DECL_INITIAL (decl))))
25627 return;
25629 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
25630 name = (*targetm.strip_name_encoding) (name);
25631 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
25634 /* Select section for constant in constant pool.
25636 On RS/6000, all constants are in the private read-only data area.
25637 However, if this is being placed in the TOC it must be output as a
25638 toc entry. */
25640 static section *
25641 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
25642 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25644 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25645 return toc_section;
25646 else
25647 return read_only_private_data_section;
25650 /* Remove any trailing [DS] or the like from the symbol name. */
25652 static const char *
25653 rs6000_xcoff_strip_name_encoding (const char *name)
25655 size_t len;
25656 if (*name == '*')
25657 name++;
25658 len = strlen (name);
25659 if (name[len - 1] == ']')
25660 return ggc_alloc_string (name, len - 4);
25661 else
25662 return name;
25665 /* Section attributes. AIX is always PIC. */
25667 static unsigned int
25668 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
25670 unsigned int align;
25671 unsigned int flags = default_section_type_flags (decl, name, reloc);
25673 /* Align to at least UNIT size. */
25674 if (flags & SECTION_CODE || !decl)
25675 align = MIN_UNITS_PER_WORD;
25676 else
25677 /* Increase alignment of large objects if not already stricter. */
25678 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
25679 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
25680 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
25682 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
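
/* Illustrative sketch (not part of the original source): the alignment
   encoding used above.  The log2 of the byte alignment is stored in the
   SECTION_ENTSIZE bits of the flags, so e.g. an 8-byte alignment is encoded
   as 3.  ALIGN is assumed to be a power of two; the helper name is
   hypothetical.  */

static unsigned int
example_encode_alignment (unsigned int align, unsigned int entsize_mask)
{
  unsigned int lg = 0;
  while ((1u << lg) < align)    /* Integer log2 of a power of two.  */
    lg++;
  return lg & entsize_mask;
}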
25685 /* Output at beginning of assembler file.
25687 Initialize the section names for the RS/6000 at this point.
25689 Specify filename, including full path, to assembler.
25691 We want to go into the TOC section so at least one .toc will be emitted.
25692 Also, in order to output proper .bs/.es pairs, we need at least one static
25693 [RW] section emitted.
25695 Finally, declare mcount when profiling to make the assembler happy. */
25697 static void
25698 rs6000_xcoff_file_start (void)
25700 rs6000_gen_section_name (&xcoff_bss_section_name,
25701 main_input_filename, ".bss_");
25702 rs6000_gen_section_name (&xcoff_private_data_section_name,
25703 main_input_filename, ".rw_");
25704 rs6000_gen_section_name (&xcoff_read_only_section_name,
25705 main_input_filename, ".ro_");
25707 fputs ("\t.file\t", asm_out_file);
25708 output_quoted_string (asm_out_file, main_input_filename);
25709 fputc ('\n', asm_out_file);
25710 if (write_symbols != NO_DEBUG)
25711 switch_to_section (private_data_section);
25712 switch_to_section (text_section);
25713 if (profile_flag)
25714 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
25715 rs6000_file_start ();
25718 /* Output at end of assembler file.
25719 On the RS/6000, referencing data should automatically pull in text. */
25721 static void
25722 rs6000_xcoff_file_end (void)
25724 switch_to_section (text_section);
25725 fputs ("_section_.text:\n", asm_out_file);
25726 switch_to_section (data_section);
25727 fputs (TARGET_32BIT
25728 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
25729 asm_out_file);
25731 #endif /* TARGET_XCOFF */
25733 /* Compute a (partial) cost for rtx X. Return true if the complete
25734 cost has been computed, and false if subexpressions should be
25735 scanned. In either case, *TOTAL contains the cost result. */
25737 static bool
25738 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
25739 int *total, bool speed)
25741 enum machine_mode mode = GET_MODE (x);
25743 switch (code)
25745 /* On the RS/6000, if it is valid in the insn, it is free. */
25746 case CONST_INT:
25747 if (((outer_code == SET
25748 || outer_code == PLUS
25749 || outer_code == MINUS)
25750 && (satisfies_constraint_I (x)
25751 || satisfies_constraint_L (x)))
25752 || (outer_code == AND
25753 && (satisfies_constraint_K (x)
25754 || (mode == SImode
25755 ? satisfies_constraint_L (x)
25756 : satisfies_constraint_J (x))
25757 || mask_operand (x, mode)
25758 || (mode == DImode
25759 && mask64_operand (x, DImode))))
25760 || ((outer_code == IOR || outer_code == XOR)
25761 && (satisfies_constraint_K (x)
25762 || (mode == SImode
25763 ? satisfies_constraint_L (x)
25764 : satisfies_constraint_J (x))))
25765 || outer_code == ASHIFT
25766 || outer_code == ASHIFTRT
25767 || outer_code == LSHIFTRT
25768 || outer_code == ROTATE
25769 || outer_code == ROTATERT
25770 || outer_code == ZERO_EXTRACT
25771 || (outer_code == MULT
25772 && satisfies_constraint_I (x))
25773 || ((outer_code == DIV || outer_code == UDIV
25774 || outer_code == MOD || outer_code == UMOD)
25775 && exact_log2 (INTVAL (x)) >= 0)
25776 || (outer_code == COMPARE
25777 && (satisfies_constraint_I (x)
25778 || satisfies_constraint_K (x)))
25779 || ((outer_code == EQ || outer_code == NE)
25780 && (satisfies_constraint_I (x)
25781 || satisfies_constraint_K (x)
25782 || (mode == SImode
25783 ? satisfies_constraint_L (x)
25784 : satisfies_constraint_J (x))))
25785 || (outer_code == GTU
25786 && satisfies_constraint_I (x))
25787 || (outer_code == LTU
25788 && satisfies_constraint_P (x)))
25790 *total = 0;
25791 return true;
25793 else if ((outer_code == PLUS
25794 && reg_or_add_cint_operand (x, VOIDmode))
25795 || (outer_code == MINUS
25796 && reg_or_sub_cint_operand (x, VOIDmode))
25797 || ((outer_code == SET
25798 || outer_code == IOR
25799 || outer_code == XOR)
25800 && (INTVAL (x)
25801 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
25803 *total = COSTS_N_INSNS (1);
25804 return true;
25806 /* FALLTHRU */
25808 case CONST_DOUBLE:
25809 if (mode == DImode && code == CONST_DOUBLE)
25811 if ((outer_code == IOR || outer_code == XOR)
25812 && CONST_DOUBLE_HIGH (x) == 0
25813 && (CONST_DOUBLE_LOW (x)
25814 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
25816 *total = 0;
25817 return true;
25819 else if ((outer_code == AND && and64_2_operand (x, DImode))
25820 || ((outer_code == SET
25821 || outer_code == IOR
25822 || outer_code == XOR)
25823 && CONST_DOUBLE_HIGH (x) == 0))
25825 *total = COSTS_N_INSNS (1);
25826 return true;
25829 /* FALLTHRU */
25831 case CONST:
25832 case HIGH:
25833 case SYMBOL_REF:
25834 case MEM:
25835 /* When optimizing for size, MEM should be slightly more expensive
25836 than generating the address, e.g., (plus (reg) (const)).
25837 L1 cache latency is about two instructions. */
25838 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
25839 return true;
25841 case LABEL_REF:
25842 *total = 0;
25843 return true;
25845 case PLUS:
25846 case MINUS:
25847 if (FLOAT_MODE_P (mode))
25848 *total = rs6000_cost->fp;
25849 else
25850 *total = COSTS_N_INSNS (1);
25851 return false;
25853 case MULT:
25854 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25855 && satisfies_constraint_I (XEXP (x, 1)))
25857 if (INTVAL (XEXP (x, 1)) >= -256
25858 && INTVAL (XEXP (x, 1)) <= 255)
25859 *total = rs6000_cost->mulsi_const9;
25860 else
25861 *total = rs6000_cost->mulsi_const;
25863 else if (mode == SFmode)
25864 *total = rs6000_cost->fp;
25865 else if (FLOAT_MODE_P (mode))
25866 *total = rs6000_cost->dmul;
25867 else if (mode == DImode)
25868 *total = rs6000_cost->muldi;
25869 else
25870 *total = rs6000_cost->mulsi;
25871 return false;
25873 case FMA:
25874 if (mode == SFmode)
25875 *total = rs6000_cost->fp;
25876 else
25877 *total = rs6000_cost->dmul;
25878 break;
25880 case DIV:
25881 case MOD:
25882 if (FLOAT_MODE_P (mode))
25884 *total = mode == DFmode ? rs6000_cost->ddiv
25885 : rs6000_cost->sdiv;
25886 return false;
25888 /* FALLTHRU */
25890 case UDIV:
25891 case UMOD:
25892 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25893 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
25895 if (code == DIV || code == MOD)
25896 /* Shift, addze */
25897 *total = COSTS_N_INSNS (2);
25898 else
25899 /* Shift */
25900 *total = COSTS_N_INSNS (1);
25902 else
25904 if (GET_MODE (XEXP (x, 1)) == DImode)
25905 *total = rs6000_cost->divdi;
25906 else
25907 *total = rs6000_cost->divsi;
25909 /* Add in shift and subtract for MOD. */
25910 if (code == MOD || code == UMOD)
25911 *total += COSTS_N_INSNS (2);
25912 return false;
25914 case CTZ:
25915 case FFS:
25916 *total = COSTS_N_INSNS (4);
25917 return false;
25919 case POPCOUNT:
25920 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
25921 return false;
25923 case PARITY:
25924 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
25925 return false;
25927 case NOT:
25928 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
25930 *total = 0;
25931 return false;
25933 /* FALLTHRU */
25935 case AND:
25936 case CLZ:
25937 case IOR:
25938 case XOR:
25939 case ZERO_EXTRACT:
25940 *total = COSTS_N_INSNS (1);
25941 return false;
25943 case ASHIFT:
25944 case ASHIFTRT:
25945 case LSHIFTRT:
25946 case ROTATE:
25947 case ROTATERT:
25948 /* Handle mul_highpart. */
25949 if (outer_code == TRUNCATE
25950 && GET_CODE (XEXP (x, 0)) == MULT)
25952 if (mode == DImode)
25953 *total = rs6000_cost->muldi;
25954 else
25955 *total = rs6000_cost->mulsi;
25956 return true;
25958 else if (outer_code == AND)
25959 *total = 0;
25960 else
25961 *total = COSTS_N_INSNS (1);
25962 return false;
25964 case SIGN_EXTEND:
25965 case ZERO_EXTEND:
25966 if (GET_CODE (XEXP (x, 0)) == MEM)
25967 *total = 0;
25968 else
25969 *total = COSTS_N_INSNS (1);
25970 return false;
25972 case COMPARE:
25973 case NEG:
25974 case ABS:
25975 if (!FLOAT_MODE_P (mode))
25977 *total = COSTS_N_INSNS (1);
25978 return false;
25980 /* FALLTHRU */
25982 case FLOAT:
25983 case UNSIGNED_FLOAT:
25984 case FIX:
25985 case UNSIGNED_FIX:
25986 case FLOAT_TRUNCATE:
25987 *total = rs6000_cost->fp;
25988 return false;
25990 case FLOAT_EXTEND:
25991 if (mode == DFmode)
25992 *total = 0;
25993 else
25994 *total = rs6000_cost->fp;
25995 return false;
25997 case UNSPEC:
25998 switch (XINT (x, 1))
26000 case UNSPEC_FRSP:
26001 *total = rs6000_cost->fp;
26002 return true;
26004 default:
26005 break;
26007 break;
26009 case CALL:
26010 case IF_THEN_ELSE:
26011 if (!speed)
26013 *total = COSTS_N_INSNS (1);
26014 return true;
26016 else if (FLOAT_MODE_P (mode)
26017 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
26019 *total = rs6000_cost->fp;
26020 return false;
26022 break;
26024 case EQ:
26025 case GTU:
26026 case LTU:
26027 /* Carry bit requires mode == Pmode.
26028 NEG or PLUS already counted so only add one. */
26029 if (mode == Pmode
26030 && (outer_code == NEG || outer_code == PLUS))
26032 *total = COSTS_N_INSNS (1);
26033 return true;
26035 if (outer_code == SET)
26037 if (XEXP (x, 1) == const0_rtx)
26039 if (TARGET_ISEL && !TARGET_MFCRF)
26040 *total = COSTS_N_INSNS (8);
26041 else
26042 *total = COSTS_N_INSNS (2);
26043 return true;
26045 else if (mode == Pmode)
26047 *total = COSTS_N_INSNS (3);
26048 return false;
26051 /* FALLTHRU */
26053 case GT:
26054 case LT:
26055 case UNORDERED:
26056 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
26058 if (TARGET_ISEL && !TARGET_MFCRF)
26059 *total = COSTS_N_INSNS (8);
26060 else
26061 *total = COSTS_N_INSNS (2);
26062 return true;
26064 /* CC COMPARE. */
26065 if (outer_code == COMPARE)
26067 *total = 0;
26068 return true;
26070 break;
26072 default:
26073 break;
26076 return false;
26079 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
26081 static bool
26082 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
26083 bool speed)
26085 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
26087 fprintf (stderr,
26088 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
26089 "opno = %d, total = %d, speed = %s, x:\n",
26090 ret ? "complete" : "scan inner",
26091 GET_RTX_NAME (code),
26092 GET_RTX_NAME (outer_code),
26093 opno,
26094 *total,
26095 speed ? "true" : "false");
26097 debug_rtx (x);
26099 return ret;
26102 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
26104 static int
26105 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
26106 addr_space_t as, bool speed)
26108 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
26110 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
26111 ret, speed ? "true" : "false");
26112 debug_rtx (x);
26114 return ret;
26118 /* A C expression returning the cost of moving data from a register of class
26119 CLASS1 to one of CLASS2. */
26121 static int
26122 rs6000_register_move_cost (enum machine_mode mode,
26123 reg_class_t from, reg_class_t to)
26125 int ret;
26127 if (TARGET_DEBUG_COST)
26128 dbg_cost_ctrl++;
26130 /* Moves from/to GENERAL_REGS. */
26131 if (reg_classes_intersect_p (to, GENERAL_REGS)
26132 || reg_classes_intersect_p (from, GENERAL_REGS))
26134 reg_class_t rclass = from;
26136 if (! reg_classes_intersect_p (to, GENERAL_REGS))
26137 rclass = to;
26139 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
26140 ret = (rs6000_memory_move_cost (mode, rclass, false)
26141 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
26143 /* It's more expensive to move CR_REGS than CR0_REGS because of the
26144 shift. */
26145 else if (rclass == CR_REGS)
26146 ret = 4;
26148 /* For those processors that have slow LR/CTR moves, make them more
26149 expensive than memory in order to bias spills to memory. */
26150 else if ((rs6000_cpu == PROCESSOR_POWER6
26151 || rs6000_cpu == PROCESSOR_POWER7)
26152 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
26153 ret = 6 * hard_regno_nregs[0][mode];
26155 else
26156 /* A move will cost one instruction per GPR moved. */
26157 ret = 2 * hard_regno_nregs[0][mode];
26160 /* If we have VSX, we can easily move between FPR or Altivec registers. */
26161 else if (VECTOR_UNIT_VSX_P (mode)
26162 && reg_classes_intersect_p (to, VSX_REGS)
26163 && reg_classes_intersect_p (from, VSX_REGS))
26164 ret = 2 * hard_regno_nregs[32][mode];
26166 /* Moving between two similar registers is just one instruction. */
26167 else if (reg_classes_intersect_p (to, from))
26168 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
26170 /* Everything else has to go through GENERAL_REGS. */
26171 else
26172 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
26173 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
26175 if (TARGET_DEBUG_COST)
26177 if (dbg_cost_ctrl == 1)
26178 fprintf (stderr,
26179 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
26180 ret, GET_MODE_NAME (mode), reg_class_names[from],
26181 reg_class_names[to]);
26182 dbg_cost_ctrl--;
26185 return ret;
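/* E.g. a DFmode copy between FLOAT_REGS and GENERAL_REGS is costed
   above as a round trip through memory (a store plus a load), since
   there is no cheap direct path, while a GPR-to-GPR copy costs 2 per
   hard register occupied and a move within one register file is a
   single instruction (4 for the two-register TFmode/TDmode case).  */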
26188 /* A C expression returning the cost of moving data of MODE from a register to
26189 or from memory. */
26191 static int
26192 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
26193 bool in ATTRIBUTE_UNUSED)
26195 int ret;
26197 if (TARGET_DEBUG_COST)
26198 dbg_cost_ctrl++;
26200 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
26201 ret = 4 * hard_regno_nregs[0][mode];
26202 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
26203 ret = 4 * hard_regno_nregs[32][mode];
26204 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
26205 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
26206 else
26207 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
26209 if (TARGET_DEBUG_COST)
26211 if (dbg_cost_ctrl == 1)
26212 fprintf (stderr,
26213 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
26214 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
26215 dbg_cost_ctrl--;
26218 return ret;
26221 /* Returns a code for a target-specific builtin that implements
26222 reciprocal of the function, or NULL_TREE if not available. */
26224 static tree
26225 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
26226 bool sqrt ATTRIBUTE_UNUSED)
26228 if (optimize_insn_for_size_p ())
26229 return NULL_TREE;
26231 if (md_fn)
26232 switch (fn)
26234 case VSX_BUILTIN_XVSQRTDP:
26235 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
26236 return NULL_TREE;
26238 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
26240 case VSX_BUILTIN_XVSQRTSP:
26241 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
26242 return NULL_TREE;
26244 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
26246 default:
26247 return NULL_TREE;
26250 else
26251 switch (fn)
26253 case BUILT_IN_SQRT:
26254 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
26255 return NULL_TREE;
26257 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
26259 case BUILT_IN_SQRTF:
26260 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
26261 return NULL_TREE;
26263 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
26265 default:
26266 return NULL_TREE;
26270 /* Load up a constant. If the mode is a vector mode, splat the value across
26271 all of the vector elements. */
26273 static rtx
26274 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
26276 rtx reg;
26278 if (mode == SFmode || mode == DFmode)
26280 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
26281 reg = force_reg (mode, d);
26283 else if (mode == V4SFmode)
26285 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
26286 rtvec v = gen_rtvec (4, d, d, d, d);
26287 reg = gen_reg_rtx (mode);
26288 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26290 else if (mode == V2DFmode)
26292 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
26293 rtvec v = gen_rtvec (2, d, d);
26294 reg = gen_reg_rtx (mode);
26295 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26297 else
26298 gcc_unreachable ();
26300 return reg;
26303 /* Generate an FMA instruction. */
26305 static void
26306 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
26308 enum machine_mode mode = GET_MODE (target);
26309 rtx dst;
26311 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26312 gcc_assert (dst != NULL);
26314 if (dst != target)
26315 emit_move_insn (target, dst);
26318 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
26320 static void
26321 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
26323 enum machine_mode mode = GET_MODE (target);
26324 rtx dst;
26326 /* Altivec does not support fms directly;
26327 generate in terms of fma in that case. */
26328 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
26329 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
26330 else
26332 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
26333 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26335 gcc_assert (dst != NULL);
26337 if (dst != target)
26338 emit_move_insn (target, dst);
26341 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
26343 static void
26344 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
26346 enum machine_mode mode = GET_MODE (dst);
26347 rtx r;
26349 /* This is a tad more complicated, since the fnma_optab is for
26350 a different expression: fma(-m1, m2, a), which is the same
26351 thing except in the case of signed zeros.
26353 Fortunately we know that if FMA is supported, then FNMSUB is
26354 also supported in the ISA. Just expand it directly. */
26356 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
26358 r = gen_rtx_NEG (mode, a);
26359 r = gen_rtx_FMA (mode, m1, m2, r);
26360 r = gen_rtx_NEG (mode, r);
26361 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
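/* An informal illustration of the signed-zero issue mentioned above:
   with m1 * m2 == +0.0 and a == +0.0, fma (-m1, m2, a) yields
   -0.0 + 0.0 == +0.0, while -fma (m1, m2, -a) yields
   -(+0.0 + -0.0) == -0.0, so the two forms are not interchangeable
   when signed zeros are honored.  */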
26364 /* Newton-Raphson approximation of floating point divide with just 2 passes
26365 (either single precision floating point, or newer machines with higher
26366 accuracy estimates). Support both scalar and vector divide. Assumes no
26367 trapping math and finite arguments. */
26369 static void
26370 rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
26372 enum machine_mode mode = GET_MODE (dst);
26373 rtx x0, e0, e1, y1, u0, v0;
26374 enum insn_code code = optab_handler (smul_optab, mode);
26375 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26376 rtx one = rs6000_load_constant_and_splat (mode, dconst1);
26378 gcc_assert (code != CODE_FOR_nothing);
26380 /* x0 = 1./d estimate */
26381 x0 = gen_reg_rtx (mode);
26382 emit_insn (gen_rtx_SET (VOIDmode, x0,
26383 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26384 UNSPEC_FRES)));
26386 e0 = gen_reg_rtx (mode);
26387 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - (d * x0) */
26389 e1 = gen_reg_rtx (mode);
26390 rs6000_emit_madd (e1, e0, e0, e0); /* e1 = (e0 * e0) + e0 */
26392 y1 = gen_reg_rtx (mode);
26393 rs6000_emit_madd (y1, e1, x0, x0); /* y1 = (e1 * x0) + x0 */
26395 u0 = gen_reg_rtx (mode);
26396 emit_insn (gen_mul (u0, n, y1)); /* u0 = n * y1 */
26398 v0 = gen_reg_rtx (mode);
26399 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - (d * u0) */
26401 rs6000_emit_madd (dst, v0, y1, u0); /* dst = (v0 * y1) + u0 */
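/* Sketch of the math behind the sequence above: writing e0 = 1 - d*x0,
   the exact reciprocal is x0 / (1 - e0) = x0 * (1 + e0 + e0^2 + ...).
   Since e1 = e0 + e0^2, y1 = x0 + e1*x0 = x0 * (1 + e0 + e0^2), i.e.
   the series truncated after e0^2.  The last two steps then refine the
   quotient itself: u0 = n*y1, and n/d ~ u0 + (n - d*u0)*y1, which
   roughly squares the remaining error.  */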
26404 /* Newton-Raphson approximation of floating point divide that has a low
26405 precision estimate. Assumes no trapping math and finite arguments. */
26407 static void
26408 rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
26410 enum machine_mode mode = GET_MODE (dst);
26411 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
26412 enum insn_code code = optab_handler (smul_optab, mode);
26413 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26415 gcc_assert (code != CODE_FOR_nothing);
26417 one = rs6000_load_constant_and_splat (mode, dconst1);
26419 /* x0 = 1./d estimate */
26420 x0 = gen_reg_rtx (mode);
26421 emit_insn (gen_rtx_SET (VOIDmode, x0,
26422 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26423 UNSPEC_FRES)));
26425 e0 = gen_reg_rtx (mode);
26426 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - d * x0 */
26428 y1 = gen_reg_rtx (mode);
26429 rs6000_emit_madd (y1, e0, x0, x0); /* y1 = x0 + e0 * x0 */
26431 e1 = gen_reg_rtx (mode);
26432 emit_insn (gen_mul (e1, e0, e0)); /* e1 = e0 * e0 */
26434 y2 = gen_reg_rtx (mode);
26435 rs6000_emit_madd (y2, e1, y1, y1); /* y2 = y1 + e1 * y1 */
26437 e2 = gen_reg_rtx (mode);
26438 emit_insn (gen_mul (e2, e1, e1)); /* e2 = e1 * e1 */
26440 y3 = gen_reg_rtx (mode);
26441 rs6000_emit_madd (y3, e2, y2, y2); /* y3 = y2 + e2 * y2 */
26443 u0 = gen_reg_rtx (mode);
26444 emit_insn (gen_mul (u0, n, y3)); /* u0 = n * y3 */
26446 v0 = gen_reg_rtx (mode);
26447 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - d * u0 */
26449 rs6000_emit_madd (dst, v0, y3, u0); /* dst = u0 + v0 * y3 */
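/* Sketch of the sequence above: here e1 = e0^2 and e2 = e0^4, so
   y3 = x0 * (1 + e0) * (1 + e0^2) * (1 + e0^4)
      = x0 * (1 + e0 + e0^2 + ... + e0^7);
   each pass doubles the number of series terms, making up for the
   less accurate initial estimate before the same final u0/v0
   correction as in the high-precision variant.  */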
26452 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
26453 add a reg_note saying that this was a division. Support both scalar and
26454 vector divide. Assumes no trapping math and finite arguments. */
26456 void
26457 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
26459 enum machine_mode mode = GET_MODE (dst);
26461 if (RS6000_RECIP_HIGH_PRECISION_P (mode))
26462 rs6000_emit_swdiv_high_precision (dst, n, d);
26463 else
26464 rs6000_emit_swdiv_low_precision (dst, n, d);
26466 if (note_p)
26467 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
26470 /* Newton-Raphson approximation of single/double-precision floating point
26471 rsqrt. Assumes no trapping math and finite arguments. */
26473 void
26474 rs6000_emit_swrsqrt (rtx dst, rtx src)
26476 enum machine_mode mode = GET_MODE (src);
26477 rtx x0 = gen_reg_rtx (mode);
26478 rtx y = gen_reg_rtx (mode);
26479 int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
26480 REAL_VALUE_TYPE dconst3_2;
26481 int i;
26482 rtx halfthree;
26483 enum insn_code code = optab_handler (smul_optab, mode);
26484 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26486 gcc_assert (code != CODE_FOR_nothing);
26488 /* Load up the constant 1.5 either as a scalar, or as a vector. */
26489 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
26490 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
26492 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
26494 /* x0 = rsqrt estimate */
26495 emit_insn (gen_rtx_SET (VOIDmode, x0,
26496 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
26497 UNSPEC_RSQRT)));
26499 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
26500 rs6000_emit_msub (y, src, halfthree, src);
26502 for (i = 0; i < passes; i++)
26504 rtx x1 = gen_reg_rtx (mode);
26505 rtx u = gen_reg_rtx (mode);
26506 rtx v = gen_reg_rtx (mode);
26508 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
26509 emit_insn (gen_mul (u, x0, x0));
26510 rs6000_emit_nmsub (v, y, u, halfthree);
26511 emit_insn (gen_mul (x1, x0, v));
26512 x0 = x1;
26515 emit_move_insn (dst, x0);
26516 return;
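/* Sketch of the loop above: y holds 0.5 * src (computed as
   1.5*src - src so that only the 1.5 constant needs loading), and
   each pass computes x1 = x0 * (1.5 - y * x0 * x0), the Newton step
   for f(x) = 1/x^2 - src.  Each step roughly doubles the number of
   correct bits, which is why the more accurate hardware estimate
   under TARGET_RECIP_PRECISION saves one of the three passes.  */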
26519 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
26520 (Power7) targets. DST is the target, and SRC is the argument operand. */
26522 void
26523 rs6000_emit_popcount (rtx dst, rtx src)
26525 enum machine_mode mode = GET_MODE (dst);
26526 rtx tmp1, tmp2;
26528 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
26529 if (TARGET_POPCNTD)
26531 if (mode == SImode)
26532 emit_insn (gen_popcntdsi2 (dst, src));
26533 else
26534 emit_insn (gen_popcntddi2 (dst, src));
26535 return;
26538 tmp1 = gen_reg_rtx (mode);
26540 if (mode == SImode)
26542 emit_insn (gen_popcntbsi2 (tmp1, src));
26543 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
26544 NULL_RTX, 0);
26545 tmp2 = force_reg (SImode, tmp2);
26546 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
26548 else
26550 emit_insn (gen_popcntbdi2 (tmp1, src));
26551 tmp2 = expand_mult (DImode, tmp1,
26552 GEN_INT ((HOST_WIDE_INT)
26553 0x01010101 << 32 | 0x01010101),
26554 NULL_RTX, 0);
26555 tmp2 = force_reg (DImode, tmp2);
26556 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
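/* An informal sketch of the fallback above, for SImode: after popcntb
   each byte of tmp1 holds the population count of the corresponding
   byte of src, each at most 8, so

     tmp2 = tmp1 * 0x01010101;

   accumulates the sum of all four byte counts into the top byte
   without overflow, and the shift right by 24 extracts it.  The
   DImode path does the same with 0x0101010101010101 and a shift
   of 56.  */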
26561 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
26562 target, and SRC is the argument operand. */
26564 void
26565 rs6000_emit_parity (rtx dst, rtx src)
26567 enum machine_mode mode = GET_MODE (dst);
26568 rtx tmp;
26570 tmp = gen_reg_rtx (mode);
26572 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
26573 if (TARGET_CMPB)
26575 if (mode == SImode)
26577 emit_insn (gen_popcntbsi2 (tmp, src));
26578 emit_insn (gen_paritysi2_cmpb (dst, tmp));
26580 else
26582 emit_insn (gen_popcntbdi2 (tmp, src));
26583 emit_insn (gen_paritydi2_cmpb (dst, tmp));
26585 return;
26588 if (mode == SImode)
26590 /* Is mult+shift >= shift+xor+shift+xor? */
26591 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
26593 rtx tmp1, tmp2, tmp3, tmp4;
26595 tmp1 = gen_reg_rtx (SImode);
26596 emit_insn (gen_popcntbsi2 (tmp1, src));
26598 tmp2 = gen_reg_rtx (SImode);
26599 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
26600 tmp3 = gen_reg_rtx (SImode);
26601 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
26603 tmp4 = gen_reg_rtx (SImode);
26604 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
26605 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
26607 else
26608 rs6000_emit_popcount (tmp, src);
26609 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
26611 else
26613 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
26614 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
26616 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
26618 tmp1 = gen_reg_rtx (DImode);
26619 emit_insn (gen_popcntbdi2 (tmp1, src));
26621 tmp2 = gen_reg_rtx (DImode);
26622 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
26623 tmp3 = gen_reg_rtx (DImode);
26624 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
26626 tmp4 = gen_reg_rtx (DImode);
26627 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
26628 tmp5 = gen_reg_rtx (DImode);
26629 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
26631 tmp6 = gen_reg_rtx (DImode);
26632 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
26633 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
26635 else
26636 rs6000_emit_popcount (tmp, src);
26637 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
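/* A sketch of the xor ladder above: parity is the low bit of the
   total population count, and the low bit of a sum is preserved by
   xor, so folding the popcntb byte counts in halves (32->16->8 bits,
   or 64->32->16->8) leaves the parity in the low byte, where the
   final AND with 1 extracts it.  */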
26641 /* Expand an Altivec constant permutation. Return true if we match
26642 an efficient implementation; false to fall back to VPERM. */
26644 bool
26645 altivec_expand_vec_perm_const (rtx operands[4])
26647 struct altivec_perm_insn {
26648 enum insn_code impl;
26649 unsigned char perm[16];
26651 static const struct altivec_perm_insn patterns[] = {
26652 { CODE_FOR_altivec_vpkuhum,
26653 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
26654 { CODE_FOR_altivec_vpkuwum,
26655 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
26656 { CODE_FOR_altivec_vmrghb,
26657 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
26658 { CODE_FOR_altivec_vmrghh,
26659 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
26660 { CODE_FOR_altivec_vmrghw,
26661 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
26662 { CODE_FOR_altivec_vmrglb,
26663 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
26664 { CODE_FOR_altivec_vmrglh,
26665 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
26666 { CODE_FOR_altivec_vmrglw,
26667 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
26670 unsigned int i, j, elt, which;
26671 unsigned char perm[16];
26672 rtx target, op0, op1, sel, x;
26673 bool one_vec;
26675 target = operands[0];
26676 op0 = operands[1];
26677 op1 = operands[2];
26678 sel = operands[3];
26680 /* Unpack the constant selector. */
26681 for (i = which = 0; i < 16; ++i)
26683 rtx e = XVECEXP (sel, 0, i);
26684 elt = INTVAL (e) & 31;
26685 which |= (elt < 16 ? 1 : 2);
26686 perm[i] = elt;
26689 /* Simplify the constant selector based on operands. */
26690 switch (which)
26692 default:
26693 gcc_unreachable ();
26695 case 3:
26696 one_vec = false;
26697 if (!rtx_equal_p (op0, op1))
26698 break;
26699 /* FALLTHRU */
26701 case 2:
26702 for (i = 0; i < 16; ++i)
26703 perm[i] &= 15;
26704 op0 = op1;
26705 one_vec = true;
26706 break;
26708 case 1:
26709 op1 = op0;
26710 one_vec = true;
26711 break;
26714 /* Look for splat patterns. */
26715 if (one_vec)
26717 elt = perm[0];
26719 for (i = 0; i < 16; ++i)
26720 if (perm[i] != elt)
26721 break;
26722 if (i == 16)
26724 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
26725 return true;
26728 if (elt % 2 == 0)
26730 for (i = 0; i < 16; i += 2)
26731 if (perm[i] != elt || perm[i + 1] != elt + 1)
26732 break;
26733 if (i == 16)
26735 x = gen_reg_rtx (V8HImode);
26736 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
26737 GEN_INT (elt / 2)));
26738 emit_move_insn (target, gen_lowpart (V16QImode, x));
26739 return true;
26743 if (elt % 4 == 0)
26745 for (i = 0; i < 16; i += 4)
26746 if (perm[i] != elt
26747 || perm[i + 1] != elt + 1
26748 || perm[i + 2] != elt + 2
26749 || perm[i + 3] != elt + 3)
26750 break;
26751 if (i == 16)
26753 x = gen_reg_rtx (V4SImode);
26754 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
26755 GEN_INT (elt / 4)));
26756 emit_move_insn (target, gen_lowpart (V16QImode, x));
26757 return true;
26762 /* Look for merge and pack patterns. */
26763 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
26765 bool swapped;
26767 elt = patterns[j].perm[0];
26768 if (perm[0] == elt)
26769 swapped = false;
26770 else if (perm[0] == elt + 16)
26771 swapped = true;
26772 else
26773 continue;
26774 for (i = 1; i < 16; ++i)
26776 elt = patterns[j].perm[i];
26777 if (swapped)
26778 elt = (elt >= 16 ? elt - 16 : elt + 16);
26779 else if (one_vec && elt >= 16)
26780 elt -= 16;
26781 if (perm[i] != elt)
26782 break;
26784 if (i == 16)
26786 enum insn_code icode = patterns[j].impl;
26787 enum machine_mode omode = insn_data[icode].operand[0].mode;
26788 enum machine_mode imode = insn_data[icode].operand[1].mode;
26790 if (swapped)
26791 x = op0, op0 = op1, op1 = x;
26792 if (imode != V16QImode)
26794 op0 = gen_lowpart (imode, op0);
26795 op1 = gen_lowpart (imode, op1);
26797 if (omode == V16QImode)
26798 x = target;
26799 else
26800 x = gen_reg_rtx (omode);
26801 emit_insn (GEN_FCN (icode) (x, op0, op1));
26802 if (omode != V16QImode)
26803 emit_move_insn (target, gen_lowpart (V16QImode, x));
26804 return true;
26808 return false;
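/* Two informal examples of selectors matched above: a constant
   selector of sixteen 5s becomes a single vspltb with elt 5, and the
   selector { 0, 16, 1, 17, ..., 7, 23 } is exactly the vmrghb
   pattern, so one merge-high instruction replaces the generic VPERM
   fallback (which would also need the selector loaded into a
   register).  */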
26811 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
26812 Return true if we match an efficient implementation. */
26814 static bool
26815 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
26816 unsigned char perm0, unsigned char perm1)
26818 rtx x;
26820 /* If both selectors come from the same operand, fold to single op. */
26821 if ((perm0 & 2) == (perm1 & 2))
26823 if (perm0 & 2)
26824 op0 = op1;
26825 else
26826 op1 = op0;
26828 /* If both operands are equal, fold to simpler permutation. */
26829 if (rtx_equal_p (op0, op1))
26831 perm0 = perm0 & 1;
26832 perm1 = (perm1 & 1) + 2;
26834 /* If the first selector comes from the second operand, swap. */
26835 else if (perm0 & 2)
26837 if (perm1 & 2)
26838 return false;
26839 perm0 -= 2;
26840 perm1 += 2;
26841 x = op0, op0 = op1, op1 = x;
26843 /* If the second selector does not come from the second operand, fail. */
26844 else if ((perm1 & 2) == 0)
26845 return false;
26847 /* Success! */
26848 if (target != NULL)
26850 enum machine_mode vmode, dmode;
26851 rtvec v;
26853 vmode = GET_MODE (target);
26854 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
26855 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
26857 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
26858 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
26859 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
26860 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26862 return true;
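/* For instance, perm0 == 1 and perm1 == 2 selects element 1 of op0
   and element 0 of op1; none of the folds above apply, so it is
   emitted directly as a two-element vec_select of the double-wide
   vec_concat of the operands.  */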
26865 bool
26866 rs6000_expand_vec_perm_const (rtx operands[4])
26868 rtx target, op0, op1, sel;
26869 unsigned char perm0, perm1;
26871 target = operands[0];
26872 op0 = operands[1];
26873 op1 = operands[2];
26874 sel = operands[3];
26876 /* Unpack the constant selector. */
26877 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
26878 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
26880 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
26883 /* Test whether a constant permutation is supported. */
26885 static bool
26886 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
26887 const unsigned char *sel)
26889 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
26890 if (TARGET_ALTIVEC)
26891 return true;
26893 /* Check for ps_merge* or evmerge* insns. */
26894 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
26895 || (TARGET_SPE && vmode == V2SImode))
26897 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
26898 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
26899 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
26902 return false;
26905 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
26907 static void
26908 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
26909 enum machine_mode vmode, unsigned nelt, rtx perm[])
26911 enum machine_mode imode;
26912 rtx x;
26914 imode = vmode;
26915 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
26917 imode = GET_MODE_INNER (vmode);
26918 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
26919 imode = mode_for_vector (imode, nelt);
26922 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
26923 x = expand_vec_perm (vmode, op0, op1, x, target);
26924 if (x != target)
26925 emit_move_insn (target, x);
26928 /* Expand an extract even operation. */
26930 void
26931 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
26933 enum machine_mode vmode = GET_MODE (target);
26934 unsigned i, nelt = GET_MODE_NUNITS (vmode);
26935 rtx perm[16];
26937 for (i = 0; i < nelt; i++)
26938 perm[i] = GEN_INT (i * 2);
26940 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
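/* E.g. for a V4SImode target the loop above builds the selector
   { 0, 2, 4, 6 }, i.e. the even-numbered elements of the op0/op1
   concatenation.  */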
26943 /* Expand a vector interleave operation. */
26945 void
26946 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
26948 enum machine_mode vmode = GET_MODE (target);
26949 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
26950 rtx perm[16];
26952 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
26953 for (i = 0; i < nelt / 2; i++)
26955 perm[i * 2] = GEN_INT (i + high);
26956 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
26959 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
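/* E.g. for V4SImode with HIGHP set on a big-endian target, high == 0
   and the selector built above is { 0, 4, 1, 5 }, pairing the high
   halves of op0 and op1; with HIGHP clear it is { 2, 6, 3, 7 }.  */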
26962 /* Return an RTX representing where to find the function value of a
26963 function returning MODE. */
26964 static rtx
26965 rs6000_complex_function_value (enum machine_mode mode)
26967 unsigned int regno;
26968 rtx r1, r2;
26969 enum machine_mode inner = GET_MODE_INNER (mode);
26970 unsigned int inner_bytes = GET_MODE_SIZE (inner);
26972 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26973 regno = FP_ARG_RETURN;
26974 else
26976 regno = GP_ARG_RETURN;
26978 /* 32-bit is OK since it'll go in r3/r4. */
26979 if (TARGET_32BIT && inner_bytes >= 4)
26980 return gen_rtx_REG (mode, regno);
26983 if (inner_bytes >= 8)
26984 return gen_rtx_REG (mode, regno);
26986 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
26987 const0_rtx);
26988 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
26989 GEN_INT (inner_bytes));
26990 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
26993 /* Target hook for TARGET_FUNCTION_VALUE.
26995 On the SPE, both FPs and vectors are returned in r3.
26997 On RS/6000 an integer value is in r3 and a floating-point value is in
26998 fp1, unless -msoft-float. */
27000 static rtx
27001 rs6000_function_value (const_tree valtype,
27002 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
27003 bool outgoing ATTRIBUTE_UNUSED)
27005 enum machine_mode mode;
27006 unsigned int regno;
27008 /* Special handling for structs in darwin64. */
27009 if (TARGET_MACHO
27010 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
27012 CUMULATIVE_ARGS valcum;
27013 rtx valret;
27015 valcum.words = 0;
27016 valcum.fregno = FP_ARG_MIN_REG;
27017 valcum.vregno = ALTIVEC_ARG_MIN_REG;
27018 /* Do a trial code generation as if this were going to be passed as
27019 an argument; if any part goes in memory, we return NULL. */
27020 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
27021 if (valret)
27022 return valret;
27023 /* Otherwise fall through to standard ABI rules. */
27026 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
27028 /* A long long return value needs to be split for -mpowerpc64 with the 32-bit ABI. */
27029 return gen_rtx_PARALLEL (DImode,
27030 gen_rtvec (2,
27031 gen_rtx_EXPR_LIST (VOIDmode,
27032 gen_rtx_REG (SImode, GP_ARG_RETURN),
27033 const0_rtx),
27034 gen_rtx_EXPR_LIST (VOIDmode,
27035 gen_rtx_REG (SImode,
27036 GP_ARG_RETURN + 1),
27037 GEN_INT (4))));
27039 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
27041 return gen_rtx_PARALLEL (DCmode,
27042 gen_rtvec (4,
27043 gen_rtx_EXPR_LIST (VOIDmode,
27044 gen_rtx_REG (SImode, GP_ARG_RETURN),
27045 const0_rtx),
27046 gen_rtx_EXPR_LIST (VOIDmode,
27047 gen_rtx_REG (SImode,
27048 GP_ARG_RETURN + 1),
27049 GEN_INT (4)),
27050 gen_rtx_EXPR_LIST (VOIDmode,
27051 gen_rtx_REG (SImode,
27052 GP_ARG_RETURN + 2),
27053 GEN_INT (8)),
27054 gen_rtx_EXPR_LIST (VOIDmode,
27055 gen_rtx_REG (SImode,
27056 GP_ARG_RETURN + 3),
27057 GEN_INT (12))));
27060 mode = TYPE_MODE (valtype);
27061 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
27062 || POINTER_TYPE_P (valtype))
27063 mode = TARGET_32BIT ? SImode : DImode;
27065 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27066 /* _Decimal128 must use an even/odd register pair. */
27067 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27068 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
27069 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
27070 regno = FP_ARG_RETURN;
27071 else if (TREE_CODE (valtype) == COMPLEX_TYPE
27072 && targetm.calls.split_complex_arg)
27073 return rs6000_complex_function_value (mode);
27074 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27075 return register is used in both cases, and we won't see V2DImode/V2DFmode
27076 for pure altivec, combine the two cases. */
27077 else if (TREE_CODE (valtype) == VECTOR_TYPE
27078 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
27079 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
27080 regno = ALTIVEC_ARG_RETURN;
27081 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27082 && (mode == DFmode || mode == DCmode
27083 || mode == TFmode || mode == TCmode))
27084 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27085 else
27086 regno = GP_ARG_RETURN;
27088 return gen_rtx_REG (mode, regno);
27091 /* Define how to find the value returned by a library function
27092 assuming the value has mode MODE. */
27093 rtx
27094 rs6000_libcall_value (enum machine_mode mode)
27096 unsigned int regno;
27098 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
27100 /* A long long return value needs to be split for -mpowerpc64 with the 32-bit ABI. */
27101 return gen_rtx_PARALLEL (DImode,
27102 gen_rtvec (2,
27103 gen_rtx_EXPR_LIST (VOIDmode,
27104 gen_rtx_REG (SImode, GP_ARG_RETURN),
27105 const0_rtx),
27106 gen_rtx_EXPR_LIST (VOIDmode,
27107 gen_rtx_REG (SImode,
27108 GP_ARG_RETURN + 1),
27109 GEN_INT (4))));
27112 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
27113 /* _Decimal128 must use an even/odd register pair. */
27114 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
27115 else if (SCALAR_FLOAT_MODE_P (mode)
27116 && TARGET_HARD_FLOAT && TARGET_FPRS
27117 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
27118 regno = FP_ARG_RETURN;
27119 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
27120 return register is used in both cases, and we won't see V2DImode/V2DFmode
27121 for pure altivec, combine the two cases. */
27122 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
27123 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
27124 regno = ALTIVEC_ARG_RETURN;
27125 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
27126 return rs6000_complex_function_value (mode);
27127 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
27128 && (mode == DFmode || mode == DCmode
27129 || mode == TFmode || mode == TCmode))
27130 return spe_build_register_parallel (mode, GP_ARG_RETURN);
27131 else
27132 regno = GP_ARG_RETURN;
27134 return gen_rtx_REG (mode, regno);
27138 /* Given FROM and TO register numbers, say whether this elimination is allowed.
27139 Frame pointer elimination is automatically handled.
27141 For the RS/6000, if frame pointer elimination is being done, we would like
27142 to convert ap into fp, not sp.
27144 We need r30 if -mminimal-toc was specified, and there are constant pool
27145 references. */
27147 static bool
27148 rs6000_can_eliminate (const int from, const int to)
27150 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
27151 ? ! frame_pointer_needed
27152 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
27153 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
27154 : true);
27157 /* Define the offset between two registers, FROM to be eliminated and its
27158 replacement TO, at the start of a routine. */
27159 HOST_WIDE_INT
27160 rs6000_initial_elimination_offset (int from, int to)
27162 rs6000_stack_t *info = rs6000_stack_info ();
27163 HOST_WIDE_INT offset;
27165 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27166 offset = info->push_p ? 0 : -info->total_size;
27167 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27169 offset = info->push_p ? 0 : -info->total_size;
27170 if (FRAME_GROWS_DOWNWARD)
27171 offset += info->fixed_size + info->vars_size + info->parm_size;
27173 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27174 offset = FRAME_GROWS_DOWNWARD
27175 ? info->fixed_size + info->vars_size + info->parm_size
27176 : 0;
27177 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
27178 offset = info->total_size;
27179 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
27180 offset = info->push_p ? info->total_size : 0;
27181 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
27182 offset = 0;
27183 else
27184 gcc_unreachable ();
27186 return offset;
27189 static rtx
27190 rs6000_dwarf_register_span (rtx reg)
27192 rtx parts[8];
27193 int i, words;
27194 unsigned regno = REGNO (reg);
27195 enum machine_mode mode = GET_MODE (reg);
27197 if (TARGET_SPE
27198 && regno < 32
27199 && (SPE_VECTOR_MODE (GET_MODE (reg))
27200 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
27201 && mode != SFmode && mode != SDmode && mode != SCmode)))
27203 else
27204 return NULL_RTX;
27206 regno = REGNO (reg);
27208 /* The duality of the SPE register size wreaks all kinds of havoc.
27209 This is a way of distinguishing r0 in 32-bits from r0 in
27210 64-bits. */
27211 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
27212 gcc_assert (words <= 4);
27213 for (i = 0; i < words; i++, regno++)
27215 if (BYTES_BIG_ENDIAN)
27217 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
27218 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
27220 else
27222 parts[2 * i] = gen_rtx_REG (SImode, regno);
27223 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
27227 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
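/* For example, a DFmode value in r5 under TARGET_E500_DOUBLE occupies
   one 64-bit word, so on a big-endian target the span built above is
   (parallel [(reg:SI 1205) (reg:SI 5)]); the 1200-based pseudo number
   names the otherwise-invisible high half for the unwinder.  */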
27230 /* Fill in sizes for SPE register high parts in table used by unwinder. */
27232 static void
27233 rs6000_init_dwarf_reg_sizes_extra (tree address)
27235 if (TARGET_SPE)
27237 int i;
27238 enum machine_mode mode = TYPE_MODE (char_type_node);
27239 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
27240 rtx mem = gen_rtx_MEM (BLKmode, addr);
27241 rtx value = gen_int_mode (4, mode);
27243 for (i = 1201; i < 1232; i++)
27245 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
27246 HOST_WIDE_INT offset
27247 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
27249 emit_move_insn (adjust_address (mem, mode, offset), value);
27254 /* Map internal gcc register numbers to DWARF2 register numbers. */
27256 unsigned int
27257 rs6000_dbx_register_number (unsigned int regno)
27259 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
27260 return regno;
27261 if (regno == LR_REGNO)
27262 return 108;
27263 if (regno == CTR_REGNO)
27264 return 109;
27265 if (CR_REGNO_P (regno))
27266 return regno - CR0_REGNO + 86;
27267 if (regno == CA_REGNO)
27268 return 101; /* XER */
27269 if (ALTIVEC_REGNO_P (regno))
27270 return regno - FIRST_ALTIVEC_REGNO + 1124;
27271 if (regno == VRSAVE_REGNO)
27272 return 356;
27273 if (regno == VSCR_REGNO)
27274 return 67;
27275 if (regno == SPE_ACC_REGNO)
27276 return 99;
27277 if (regno == SPEFSCR_REGNO)
27278 return 612;
27279 /* SPE high reg number. We get these values of regno from
27280 rs6000_dwarf_register_span. */
27281 gcc_assert (regno >= 1200 && regno < 1232);
27282 return regno;
27285 /* Target hook for eh_return_filter_mode. */
27286 static enum machine_mode
27287 rs6000_eh_return_filter_mode (void)
27289 return TARGET_32BIT ? SImode : word_mode;
27292 /* Target hook for scalar_mode_supported_p. */
27293 static bool
27294 rs6000_scalar_mode_supported_p (enum machine_mode mode)
27296 if (DECIMAL_FLOAT_MODE_P (mode))
27297 return default_decimal_float_supported_p ();
27298 else
27299 return default_scalar_mode_supported_p (mode);
27302 /* Target hook for vector_mode_supported_p. */
27303 static bool
27304 rs6000_vector_mode_supported_p (enum machine_mode mode)
27307 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
27308 return true;
27310 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
27311 return true;
27313 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
27314 return true;
27316 else
27317 return false;
27320 /* Target hook for invalid_arg_for_unprototyped_fn. */
27321 static const char *
27322 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
27324 return (!rs6000_darwin64_abi
27325 && typelist == 0
27326 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
27327 && (funcdecl == NULL_TREE
27328 || (TREE_CODE (funcdecl) == FUNCTION_DECL
27329 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
27330 ? N_("AltiVec argument passed to unprototyped function")
27331 : NULL;
27334 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
27335 setup by using __stack_chk_fail_local hidden function instead of
27336 calling __stack_chk_fail directly. Otherwise it is better to call
27337 __stack_chk_fail directly. */
27339 static tree ATTRIBUTE_UNUSED
27340 rs6000_stack_protect_fail (void)
27342 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
27343 ? default_hidden_stack_protect_fail ()
27344 : default_external_stack_protect_fail ();
27347 void
27348 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
27349 int num_operands ATTRIBUTE_UNUSED)
27351 if (rs6000_warn_cell_microcode)
27353 const char *temp;
27354 int insn_code_number = recog_memoized (insn);
27355 location_t location = INSN_LOCATION (insn);
27357 /* Punt on insns we cannot recognize. */
27358 if (insn_code_number < 0)
27359 return;
27361 temp = get_insn_template (insn_code_number, insn);
27363 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
27364 warning_at (location, OPT_mwarn_cell_microcode,
27365 "emitting microcode insn %s\t[%s] #%d",
27366 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27367 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
27368 warning_at (location, OPT_mwarn_cell_microcode,
27369 "emitting conditional microcode insn %s\t[%s] #%d",
27370 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27375 /* Mask options that we want to support inside attribute((target)) and
27376 #pragma GCC target operations. Note that we do not include things like
27377 64/32-bit, endianness, hard/soft floating point, etc. that would have
27378 different calling sequences. */
27380 struct rs6000_opt_mask {
27381 const char *name; /* option name */
27382 int mask; /* mask to set */
27383 bool invert; /* invert sense of mask */
27384 bool valid_target; /* option is a target option */
27387 static struct rs6000_opt_mask const rs6000_opt_masks[] =
27389 { "altivec", MASK_ALTIVEC, false, true },
27390 { "cmpb", MASK_CMPB, false, true },
27391 { "dlmzb", MASK_DLMZB, false, true },
27392 { "fprnd", MASK_FPRND, false, true },
27393 { "hard-dfp", MASK_DFP, false, true },
27394 { "isel", MASK_ISEL, false, true },
27395 { "mfcrf", MASK_MFCRF, false, true },
27396 { "mfpgpr", MASK_MFPGPR, false, true },
27397 { "mulhw", MASK_MULHW, false, true },
27398 { "multiple", MASK_MULTIPLE, false, true },
27399 { "update", MASK_NO_UPDATE, true , true },
27400 { "popcntb", MASK_POPCNTB, false, true },
27401 { "popcntd", MASK_POPCNTD, false, true },
27402 { "powerpc-gfxopt", MASK_PPC_GFXOPT, false, true },
27403 { "powerpc-gpopt", MASK_PPC_GPOPT, false, true },
27404 { "recip-precision", MASK_RECIP_PRECISION, false, true },
27405 { "string", MASK_STRING, false, true },
27406 { "vsx", MASK_VSX, false, true },
27407 #ifdef MASK_64BIT
27408 #if TARGET_AIX_OS
27409 { "aix64", MASK_64BIT, false, false },
27410 { "aix32", MASK_64BIT, true, false },
27411 #else
27412 { "64", MASK_64BIT, false, false },
27413 { "32", MASK_64BIT, true, false },
27414 #endif
27415 #endif
27416 #ifdef MASK_EABI
27417 { "eabi", MASK_EABI, false, false },
27418 #endif
27419 #ifdef MASK_LITTLE_ENDIAN
27420 { "little", MASK_LITTLE_ENDIAN, false, false },
27421 { "big", MASK_LITTLE_ENDIAN, true, false },
27422 #endif
27423 #ifdef MASK_RELOCATABLE
27424 { "relocatable", MASK_RELOCATABLE, false, false },
27425 #endif
27426 #ifdef MASK_STRICT_ALIGN
27427 { "strict-align", MASK_STRICT_ALIGN, false, false },
27428 #endif
27429 { "soft-float", MASK_SOFT_FLOAT, false, false },
27430 { "string", MASK_STRING, false, false },
27433 /* Builtin mask mapping for printing the flags. */
27434 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
27436 { "altivec", RS6000_BTM_ALTIVEC, false, false },
27437 { "vsx", RS6000_BTM_VSX, false, false },
27438 { "spe", RS6000_BTM_SPE, false, false },
27439 { "paired", RS6000_BTM_PAIRED, false, false },
27440 { "fre", RS6000_BTM_FRE, false, false },
27441 { "fres", RS6000_BTM_FRES, false, false },
27442 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
27443 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
27444 { "popcntd", RS6000_BTM_POPCNTD, false, false },
27445 { "cell", RS6000_BTM_CELL, false, false },
27448 /* Option variables that we want to support inside attribute((target)) and
27449 #pragma GCC target operations. */
27451 struct rs6000_opt_var {
27452 const char *name; /* option name */
27453 size_t global_offset; /* offset of the option in global_options. */
27454 size_t target_offset; /* offset of the option in target options. */
27457 static struct rs6000_opt_var const rs6000_opt_vars[] =
27459 { "friz",
27460 offsetof (struct gcc_options, x_TARGET_FRIZ),
27461 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
27462 { "avoid-indexed-addresses",
27463 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
27464 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
27465 { "paired",
27466 offsetof (struct gcc_options, x_rs6000_paired_float),
27467 offsetof (struct cl_target_option, x_rs6000_paired_float), },
27468 { "longcall",
27469 offsetof (struct gcc_options, x_rs6000_default_long_calls),
27470 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
27473 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
27474 parsing. Return true if there were no errors. */
27476 static bool
27477 rs6000_inner_target_options (tree args, bool attr_p)
27479 bool ret = true;
27481 if (args == NULL_TREE)
27484 else if (TREE_CODE (args) == STRING_CST)
27486 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27487 char *q;
27489 while ((q = strtok (p, ",")) != NULL)
27491 bool error_p = false;
27492 bool not_valid_p = false;
27493 const char *cpu_opt = NULL;
27495 p = NULL;
27496 if (strncmp (q, "cpu=", 4) == 0)
27498 int cpu_index = rs6000_cpu_name_lookup (q+4);
27499 if (cpu_index >= 0)
27500 rs6000_cpu_index = cpu_index;
27501 else
27503 error_p = true;
27504 cpu_opt = q+4;
27507 else if (strncmp (q, "tune=", 5) == 0)
27509 int tune_index = rs6000_cpu_name_lookup (q+5);
27510 if (tune_index >= 0)
27511 rs6000_tune_index = tune_index;
27512 else
27514 error_p = true;
27515 cpu_opt = q+5;
27518 else
27520 size_t i;
27521 bool invert = false;
27522 char *r = q;
27524 error_p = true;
27525 if (strncmp (r, "no-", 3) == 0)
27527 invert = true;
27528 r += 3;
27531 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27532 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
27534 int mask = rs6000_opt_masks[i].mask;
27536 if (!rs6000_opt_masks[i].valid_target)
27537 not_valid_p = true;
27538 else
27540 error_p = false;
27541 target_flags_explicit |= mask;
27543 /* VSX needs altivec, so -mvsx automagically sets
27544 altivec. */
27545 if (mask == MASK_VSX && !invert)
27546 mask |= MASK_ALTIVEC;
27548 if (rs6000_opt_masks[i].invert)
27549 invert = !invert;
27551 if (invert)
27552 target_flags &= ~mask;
27553 else
27554 target_flags |= mask;
27556 break;
27559 if (error_p && !not_valid_p)
27561 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27562 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
27564 size_t j = rs6000_opt_vars[i].global_offset;
27565 *((int *) ((char *)&global_options + j)) = !invert;
27566 error_p = false;
27567 break;
27572 if (error_p)
27574 const char *eprefix, *esuffix;
27576 ret = false;
27577 if (attr_p)
27579 eprefix = "__attribute__((__target__(";
27580 esuffix = ")))";
27582 else
27584 eprefix = "#pragma GCC target ";
27585 esuffix = "";
27588 if (cpu_opt)
27589 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
27590 q, esuffix);
27591 else if (not_valid_p)
27592 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
27593 else
27594 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
27599 else if (TREE_CODE (args) == TREE_LIST)
27603 tree value = TREE_VALUE (args);
27604 if (value)
27606 bool ret2 = rs6000_inner_target_options (value, attr_p);
27607 if (!ret2)
27608 ret = false;
27610 args = TREE_CHAIN (args);
27612 while (args != NULL_TREE);
27615 else
27616 gcc_unreachable ();
27618 return ret;
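/* For reference, the strings accepted above are comma-separated
   tokens of the forms used in, e.g.:

     #pragma GCC target ("cpu=power7,tune=power7")
     __attribute__((__target__("vsx,no-update")))

   where each token is either cpu=/tune= followed by a processor name,
   or an option from rs6000_opt_masks / rs6000_opt_vars with an
   optional "no-" prefix.  */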
27621 /* Print out the target options as a list for -mdebug=target. */
27623 static void
27624 rs6000_debug_target_options (tree args, const char *prefix)
27626 if (args == NULL_TREE)
27627 fprintf (stderr, "%s<NULL>", prefix);
27629 else if (TREE_CODE (args) == STRING_CST)
27631 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27632 char *q;
27634 while ((q = strtok (p, ",")) != NULL)
27636 p = NULL;
27637 fprintf (stderr, "%s\"%s\"", prefix, q);
27638 prefix = ", ";
27642 else if (TREE_CODE (args) == TREE_LIST)
27646 tree value = TREE_VALUE (args);
27647 if (value)
27649 rs6000_debug_target_options (value, prefix);
27650 prefix = ", ";
27652 args = TREE_CHAIN (args);
27654 while (args != NULL_TREE);
27657 else
27658 gcc_unreachable ();
27660 return;
27664 /* Hook to validate attribute((target("..."))). */
27666 static bool
27667 rs6000_valid_attribute_p (tree fndecl,
27668 tree ARG_UNUSED (name),
27669 tree args,
27670 int flags)
27672 struct cl_target_option cur_target;
27673 bool ret;
27674 tree old_optimize = build_optimization_node ();
27675 tree new_target, new_optimize;
27676 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27678 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
27680 if (TARGET_DEBUG_TARGET)
27682 tree tname = DECL_NAME (fndecl);
27683 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
27684 if (tname)
27685 fprintf (stderr, "function: %.*s\n",
27686 (int) IDENTIFIER_LENGTH (tname),
27687 IDENTIFIER_POINTER (tname));
27688 else
27689 fprintf (stderr, "function: unknown\n");
27691 fprintf (stderr, "args:");
27692 rs6000_debug_target_options (args, " ");
27693 fprintf (stderr, "\n");
27695 if (flags)
27696 fprintf (stderr, "flags: 0x%x\n", flags);
27698 fprintf (stderr, "--------------------\n");
27701 old_optimize = build_optimization_node ();
27702 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27704 /* If the function changed the optimization levels as well as setting target
27705 options, start with the optimizations specified. */
27706 if (func_optimize && func_optimize != old_optimize)
27707 cl_optimization_restore (&global_options,
27708 TREE_OPTIMIZATION (func_optimize));
27710 /* The target attributes may also change some optimization flags, so update
27711 the optimization options if necessary. */
27712 cl_target_option_save (&cur_target, &global_options);
27713 rs6000_cpu_index = rs6000_tune_index = -1;
27714 ret = rs6000_inner_target_options (args, true);
27716 /* Set up any additional state. */
27717 if (ret)
27719 ret = rs6000_option_override_internal (false);
27720 new_target = build_target_option_node ();
27722 else
27723 new_target = NULL;
27725 new_optimize = build_optimization_node ();
27727 if (!new_target)
27728 ret = false;
27730 else if (fndecl)
27732 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
27734 if (old_optimize != new_optimize)
27735 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
27738 cl_target_option_restore (&global_options, &cur_target);
27740 if (old_optimize != new_optimize)
27741 cl_optimization_restore (&global_options,
27742 TREE_OPTIMIZATION (old_optimize));
27744 return ret;
27748 /* Hook to validate the current #pragma GCC target and set the state, and
27749 update the macros based on what was changed. If ARGS is NULL, then
27750 POP_TARGET is used to reset the options. */
27752 bool
27753 rs6000_pragma_target_parse (tree args, tree pop_target)
27755 tree prev_tree = build_target_option_node ();
27756 tree cur_tree;
27757 struct cl_target_option *prev_opt, *cur_opt;
27758 unsigned prev_bumask, cur_bumask, diff_bumask;
27759 int prev_flags, cur_flags, diff_flags;
27761 if (TARGET_DEBUG_TARGET)
27763 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
27764 fprintf (stderr, "args:");
27765 rs6000_debug_target_options (args, " ");
27766 fprintf (stderr, "\n");
27768 if (pop_target)
27770 fprintf (stderr, "pop_target:\n");
27771 debug_tree (pop_target);
27773 else
27774 fprintf (stderr, "pop_target: <NULL>\n");
27776 fprintf (stderr, "--------------------\n");
27779 if (! args)
27781 cur_tree = ((pop_target)
27782 ? pop_target
27783 : target_option_default_node);
27784 cl_target_option_restore (&global_options,
27785 TREE_TARGET_OPTION (cur_tree));
27787 else
27789 rs6000_cpu_index = rs6000_tune_index = -1;
27790 if (!rs6000_inner_target_options (args, false)
27791 || !rs6000_option_override_internal (false)
27792 || (cur_tree = build_target_option_node ()) == NULL_TREE)
27794 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
27795 fprintf (stderr, "invalid pragma\n");
27797 return false;
27801 target_option_current_node = cur_tree;
27803 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
27804 change the macros that are defined. */
27805 if (rs6000_target_modify_macros_ptr)
27807 prev_opt = TREE_TARGET_OPTION (prev_tree);
27808 prev_bumask = prev_opt->x_rs6000_builtin_mask;
27809 prev_flags = prev_opt->x_target_flags;
27811 cur_opt = TREE_TARGET_OPTION (cur_tree);
27812 cur_flags = cur_opt->x_target_flags;
27813 cur_bumask = cur_opt->x_rs6000_builtin_mask;
27815 diff_bumask = (prev_bumask ^ cur_bumask);
27816 diff_flags = (prev_flags ^ cur_flags);
27818 if ((diff_flags != 0) || (diff_bumask != 0))
27820 /* Delete old macros. */
27821 rs6000_target_modify_macros_ptr (false,
27822 prev_flags & diff_flags,
27823 prev_bumask & diff_bumask);
27825 /* Define new macros. */
27826 rs6000_target_modify_macros_ptr (true,
27827 cur_flags & diff_flags,
27828 cur_bumask & diff_bumask);
27832 return true;
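/* The xor arithmetic above is the usual change mask: bits set only in
   prev_flags (prev_flags & diff_flags) select macros to delete, bits
   set only in cur_flags (cur_flags & diff_flags) select macros to
   define, and options that did not change never touch the
   preprocessor state.  */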
27836 /* Remember the last target of rs6000_set_current_function. */
27837 static GTY(()) tree rs6000_previous_fndecl;
27839 /* Establish appropriate back-end context for processing the function
27840 FNDECL. The argument might be NULL to indicate processing at top
27841 level, outside of any function scope. */
27842 static void
27843 rs6000_set_current_function (tree fndecl)
27845 tree old_tree = (rs6000_previous_fndecl
27846 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
27847 : NULL_TREE);
27849 tree new_tree = (fndecl
27850 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
27851 : NULL_TREE);
27853 if (TARGET_DEBUG_TARGET)
27855 bool print_final = false;
27856 fprintf (stderr, "\n==================== rs6000_set_current_function");
27858 if (fndecl)
27859 fprintf (stderr, ", fndecl %s (%p)",
27860 (DECL_NAME (fndecl)
27861 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
27862 : "<unknown>"), (void *)fndecl);
27864 if (rs6000_previous_fndecl)
27865 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
27867 fprintf (stderr, "\n");
27868 if (new_tree)
27870 fprintf (stderr, "\nnew fndecl target specific options:\n");
27871 debug_tree (new_tree);
27872 print_final = true;
27875 if (old_tree)
27877 fprintf (stderr, "\nold fndecl target specific options:\n");
27878 debug_tree (old_tree);
27879 print_final = true;
27882 if (print_final)
27883 fprintf (stderr, "--------------------\n");
27886 /* Only change the context if the function changes. This hook is called
27887 several times in the course of compiling a function, and we don't want to
27888 slow things down too much or call target_reinit when it isn't safe. */
27889 if (fndecl && fndecl != rs6000_previous_fndecl)
27891 rs6000_previous_fndecl = fndecl;
27892 if (old_tree == new_tree)
27895 else if (new_tree)
27897 cl_target_option_restore (&global_options,
27898 TREE_TARGET_OPTION (new_tree));
27899 target_reinit ();
27902 else if (old_tree)
27904 struct cl_target_option *def
27905 = TREE_TARGET_OPTION (target_option_current_node);
27907 cl_target_option_restore (&global_options, def);
27908 target_reinit ();
27914 /* Save the current options. */
27916 static void
27917 rs6000_function_specific_save (struct cl_target_option *ptr)
27919 ptr->rs6000_target_flags_explicit = target_flags_explicit;
27922 /* Restore the current options. */
27924 static void
27925 rs6000_function_specific_restore (struct cl_target_option *ptr)
27927 target_flags_explicit = ptr->rs6000_target_flags_explicit;
27928 (void) rs6000_option_override_internal (false);
27931 /* Print the current options. */
27933 static void
27934 rs6000_function_specific_print (FILE *file, int indent,
27935 struct cl_target_option *ptr)
27937 size_t i;
27938 int flags = ptr->x_target_flags;
27939 unsigned bu_mask = ptr->x_rs6000_builtin_mask;
27941 /* Print the various mask options. */
27942 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27943 if ((flags & rs6000_opt_masks[i].mask) != 0)
27945 flags &= ~ rs6000_opt_masks[i].mask;
27946 fprintf (file, "%*s-m%s%s\n", indent, "",
27947 rs6000_opt_masks[i].invert ? "no-" : "",
27948 rs6000_opt_masks[i].name);
27951 /* Print the various options that are variables. */
27952 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27954 size_t j = rs6000_opt_vars[i].target_offset;
27955 if (((signed char *) ptr)[j])
27956 fprintf (file, "%*s-m%s\n", indent, "",
27957 rs6000_opt_vars[i].name);
27960 /* Print the various builtin flags. */
27961 fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
27962 for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
27963 if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
27965 fprintf (file, "%*s%s builtins supported\n", indent, "",
27966 rs6000_builtin_mask_names[i].name);
27971 /* Hook to determine if one function can safely inline another. */
27973 static bool
27974 rs6000_can_inline_p (tree caller, tree callee)
27976 bool ret = false;
27977 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
27978 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
27980 /* If callee has no option attributes, then it is ok to inline. */
27981 if (!callee_tree)
27982 ret = true;
27984 /* If caller has no option attributes, but callee does then it is not ok to
27985 inline. */
27986 else if (!caller_tree)
27987 ret = false;
27989 else
27991 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
27992 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
27994 /* Callee's options should a subset of the caller's, i.e. a vsx function
27995 can inline an altivec function but a non-vsx function can't inline a
27996 vsx function. */
27997 if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
27998 == callee_opts->x_target_flags)
27999 ret = true;
28002 if (TARGET_DEBUG_TARGET)
28003 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
28004 (DECL_NAME (caller)
28005 ? IDENTIFIER_POINTER (DECL_NAME (caller))
28006 : "<unknown>"),
28007 (DECL_NAME (callee)
28008 ? IDENTIFIER_POINTER (DECL_NAME (callee))
28009 : "<unknown>"),
28010 (ret ? "can" : "cannot"));
28012 return ret;
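
/* A minimal source-level sketch of the subset rule above, kept disabled
   because it is illustrative only (hypothetical function names; assumes
   the vsx target flag implies the altivec one):  */
#if 0
__attribute__ ((__target__ ("altivec")))
static inline vector double
load_vec (vector double *p)
{
  return *p;
}

__attribute__ ((__target__ ("vsx")))
vector double
use_vec (vector double *p)
{
  /* OK to inline: the altivec callee's flags are a subset of the vsx
     caller's.  A caller without the vsx flag could not inline use_vec in
     turn, since the vsx bit would be missing from its x_target_flags.  */
  return load_vec (p);
}
#endif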

/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
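
/* A minimal usage sketch, kept disabled because it is illustrative only
   (some_df_value is a hypothetical rtx): request an offsettable DFmode
   slot so the temp can be used with D-form (reg+offset) loads and
   stores.  */
#if 0
{
  rtx tmp = rs6000_allocate_stack_temp (DFmode, true, false);
  emit_move_insn (tmp, some_df_value);
}
#endif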

/* Given a memory reference, if it does not use reg or reg+reg addressing,
   convert it to such a form to deal with memory reference instructions
   like STFIWX that only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
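
/* For illustration (hand-written RTL, not from a real dump), an
   auto-increment reference such as

       (mem:SI (pre_inc:SI (reg:SI 3)))

   is rewritten above into an explicit update plus a plain indirect
   reference,

       (set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 4)))
       (mem:SI (reg:SI 3))

   which instructions such as STFIWX, limited to reg and reg+reg
   addresses, can then consume.  */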

/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert it to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
          && !legitimate_indirect_address_p (addr, strict_p))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
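
/* Masking the address with -16 clears its low four bits, e.g.
   0x1007 & -16 == 0x1000, mirroring the 16-byte alignment that the
   AltiVec vector load/store instructions enforce by ignoring those
   bits.  */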

/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable; most won't be
   valid for particular insns, though.  Only easy FP constants are
   acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}
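
/* For example, (const_int 42) is always legitimate here even though few
   insns accept it directly, a CONST_DOUBLE such as 1.5 is legitimate
   only if easy_fp_constant accepts it, and any constant referring to a
   thread-local symbol is rejected up front.  */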

/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */
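
/* The data area described above, written out as a C sketch.  It is kept
   disabled because it is illustrative only; the compiler accesses the
   words through Pmode memory references rather than a struct type.  */
#if 0
struct aix_func_desc
{
  void *code;          /* word 0: actual address of the function */
  void *toc;           /* word 1: pointer to the function's TOC */
  void *static_chain;  /* word 2: value to place in r11 */
};
#endif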

void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx func_addr;
  rtx toc_reg;
  rtx sc_reg;
  rtx stack_ptr;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix32bit;
          call_value_func = gen_call_value_indirect_aix32bit;
        }
      else
        {
          call_func = gen_call_indirect_aix32bit_nor11;
          call_value_func = gen_call_value_indirect_aix32bit_nor11;
        }
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix64bit;
          call_value_func = gen_call_value_indirect_aix64bit;
        }
      else
        {
          call_func = gen_call_indirect_aix64bit_nor11;
          call_value_func = gen_call_value_indirect_aix64bit_nor11;
        }
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_ptr,
                                               stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
                              gen_rtx_PLUS (Pmode,
                                            func_desc,
                                            func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      func_sc_mem = gen_rtx_MEM (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               func_desc,
                                               func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
                            stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}
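
/* Roughly, the resulting 64-bit call sequence looks like the following
   pseudo-assembly (illustrative only; the exact offsets come from the
   TOC_SAVE_OFFSET_* and AIX_FUNC_DESC_* macros, and the callee's TOC
   load from the descriptor is deferred to the post-reload split):

       ld    r12,0(desc)        # word 0: real function address
       std   r2,TOC_SAVE(r1)    # save our TOC, unless done in prologue
       ld    r11,SC_OFF(desc)   # word 2: static chain, if used
       mtctr r12
       bctrl                    # call through CTR
       ld    r2,TOC_SAVE(r1)    # restore our TOC  */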

/* Return whether we always need to update the saved TOC pointer when we
   update the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
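
/* Illustrative results: with GAS .hidden support the name is always
   "__ppc476.get_thunk"; otherwise ASM_GENERATE_INTERNAL_LABEL yields a
   local label along the lines of "*LPPC476_0", the exact spelling
   depending on the target's LOCAL_LABEL_PREFIX.  */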

/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

#if RS6000_WEAK
  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
#endif
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
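
/* For reference, the thunk body is a single blr; with
   USE_HIDDEN_LINKONCE the emitted assembly looks roughly like this
   (illustrative only, details vary by assembler and options):

       .weak   __ppc476.get_thunk
       .hidden __ppc476.get_thunk
   __ppc476.get_thunk:
       blr  */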

/* Add r30 to the hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"