[RS6000] Built-in __PCREL__ define
[official-gcc.git] / gcc / config / rs6000 / rs6000-c.c
blobcc1e997524e3b01f9dc65744f087b2284622e229
1 /* Subroutines for the C front end on the PowerPC architecture.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4 Contributed by Zack Weinberg <zack@codesourcery.com>
5 and Paolo Bonzini <bonzini@gnu.org>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published
11 by the Free Software Foundation; either version 3, or (at your
12 option) any later version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #define IN_TARGET_CODE 1
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "target.h"
29 #include "c-family/c-common.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "stor-layout.h"
34 #include "c-family/c-pragma.h"
35 #include "langhooks.h"
36 #include "c/c-tree.h"
40 /* Handle the machine specific pragma longcall. Its syntax is
42 # pragma longcall ( TOGGLE )
44 where TOGGLE is either 0 or 1.
46 rs6000_default_long_calls is set to the value of TOGGLE, changing
47 whether or not new function declarations receive a longcall
48 attribute by default. */
50 void
51 rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
53 #define SYNTAX_ERROR(gmsgid) do { \
54 warning (OPT_Wpragmas, gmsgid); \
55 warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>"); \
56 return; \
57 } while (0)
61 tree x, n;
63 /* If we get here, generic code has already scanned the directive
64 leader and the word "longcall". */
66 if (pragma_lex (&x) != CPP_OPEN_PAREN)
67 SYNTAX_ERROR ("missing open paren");
68 if (pragma_lex (&n) != CPP_NUMBER)
69 SYNTAX_ERROR ("missing number");
70 if (pragma_lex (&x) != CPP_CLOSE_PAREN)
71 SYNTAX_ERROR ("missing close paren");
73 if (n != integer_zero_node && n != integer_one_node)
74 SYNTAX_ERROR ("number must be 0 or 1");
76 if (pragma_lex (&x) != CPP_EOF)
77 warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");
79 rs6000_default_long_calls = (n == integer_one_node);
/* Handle defining many CPP flags based on TARGET_xxx.  As a general
   policy, rather than trying to guess what flags a user might want a
   #define for, it's better to define a flag for everything.  */

/* Shorthands used throughout rs6000_cpu_cpp_builtins below; both
   expand to libcpp calls and rely on a cpp_reader named PFILE being
   in scope at the point of use.  */
#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)
/* Keep the AltiVec keywords handy for fast comparisons.  These are
   filled in by init_vector_keywords () and consulted from
   rs6000_macro_to_expand () / altivec_categorize_keyword ().  */
static GTY(()) tree __vector_keyword;
static GTY(()) tree vector_keyword;
static GTY(()) tree __pixel_keyword;
static GTY(()) tree pixel_keyword;
static GTY(()) tree __bool_keyword;
static GTY(()) tree bool_keyword;
static GTY(()) tree _Bool_keyword;
/* Identifiers for __int128_t/__uint128_t; only set when the target has
   ISA 2.07 quad support (TARGET_VADDUQM) — see init_vector_keywords.  */
static GTY(()) tree __int128_type;
static GTY(()) tree __uint128_type;

/* Preserved across calls.  */
static tree expand_bool_pixel;
103 static cpp_hashnode *
104 altivec_categorize_keyword (const cpp_token *tok)
106 if (tok->type == CPP_NAME)
108 cpp_hashnode *ident = tok->val.node.node;
110 if (ident == C_CPP_HASHNODE (vector_keyword))
111 return C_CPP_HASHNODE (__vector_keyword);
113 if (ident == C_CPP_HASHNODE (pixel_keyword))
114 return C_CPP_HASHNODE (__pixel_keyword);
116 if (ident == C_CPP_HASHNODE (bool_keyword))
117 return C_CPP_HASHNODE (__bool_keyword);
119 if (ident == C_CPP_HASHNODE (_Bool_keyword))
120 return C_CPP_HASHNODE (__bool_keyword);
122 return ident;
125 return 0;
128 static void
129 init_vector_keywords (void)
131 /* Keywords without two leading underscores are context-sensitive, and hence
132 implemented as conditional macros, controlled by the
133 rs6000_macro_to_expand() function below. If we have ISA 2.07 64-bit
134 support, record the __int128_t and __uint128_t types. */
136 __vector_keyword = get_identifier ("__vector");
137 C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
139 __pixel_keyword = get_identifier ("__pixel");
140 C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;
142 __bool_keyword = get_identifier ("__bool");
143 C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;
145 vector_keyword = get_identifier ("vector");
146 C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
148 pixel_keyword = get_identifier ("pixel");
149 C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;
151 bool_keyword = get_identifier ("bool");
152 C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;
154 _Bool_keyword = get_identifier ("_Bool");
155 C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;
157 if (TARGET_VADDUQM)
159 __int128_type = get_identifier ("__int128_t");
160 __uint128_type = get_identifier ("__uint128_t");
164 /* Helper function to find out which RID_INT_N_* code is the one for
165 __int128, if any. Returns RID_MAX+1 if none apply, which is safe
166 (for our purposes, since we always expect to have __int128) to
167 compare against. */
168 static int
169 rid_int128(void)
171 int i;
173 for (i = 0; i < NUM_INT_N_ENTS; i ++)
174 if (int_n_enabled_p[i]
175 && int_n_data[i].bitsize == 128)
176 return RID_INT_N_0 + i;
178 return RID_MAX + 1;
181 /* Called to decide whether a conditional macro should be expanded.
182 Since we have exactly one such macro (i.e, 'vector'), we do not
183 need to examine the 'tok' parameter. */
185 static cpp_hashnode *
186 rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
188 cpp_hashnode *expand_this = tok->val.node.node;
189 cpp_hashnode *ident;
191 /* If the current machine does not have altivec, don't look for the
192 keywords. */
193 if (!TARGET_ALTIVEC)
194 return NULL;
196 ident = altivec_categorize_keyword (tok);
198 if (ident != expand_this)
199 expand_this = NULL;
201 if (ident == C_CPP_HASHNODE (__vector_keyword))
203 int idx = 0;
205 tok = cpp_peek_token (pfile, idx++);
206 while (tok->type == CPP_PADDING);
207 ident = altivec_categorize_keyword (tok);
209 if (ident == C_CPP_HASHNODE (__pixel_keyword))
211 expand_this = C_CPP_HASHNODE (__vector_keyword);
212 expand_bool_pixel = __pixel_keyword;
214 else if (ident == C_CPP_HASHNODE (__bool_keyword))
216 expand_this = C_CPP_HASHNODE (__vector_keyword);
217 expand_bool_pixel = __bool_keyword;
219 /* The boost libraries have code with Iterator::vector vector in it. If
220 we allow the normal handling, this module will be called recursively,
221 and the vector will be skipped.; */
222 else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
224 enum rid rid_code = (enum rid)(ident->rid_code);
225 bool is_macro = cpp_macro_p (ident);
227 /* If there is a function-like macro, check if it is going to be
228 invoked with or without arguments. Without following ( treat
229 it like non-macro, otherwise the following cpp_get_token eats
230 what should be preserved. */
231 if (is_macro && cpp_fun_like_macro_p (ident))
233 int idx2 = idx;
235 tok = cpp_peek_token (pfile, idx2++);
236 while (tok->type == CPP_PADDING);
237 if (tok->type != CPP_OPEN_PAREN)
238 is_macro = false;
241 if (is_macro)
244 (void) cpp_get_token (pfile);
245 while (--idx > 0);
247 tok = cpp_peek_token (pfile, idx++);
248 while (tok->type == CPP_PADDING);
249 ident = altivec_categorize_keyword (tok);
250 if (ident == C_CPP_HASHNODE (__pixel_keyword))
252 expand_this = C_CPP_HASHNODE (__vector_keyword);
253 expand_bool_pixel = __pixel_keyword;
254 rid_code = RID_MAX;
256 else if (ident == C_CPP_HASHNODE (__bool_keyword))
258 expand_this = C_CPP_HASHNODE (__vector_keyword);
259 expand_bool_pixel = __bool_keyword;
260 rid_code = RID_MAX;
262 else if (ident)
263 rid_code = (enum rid)(ident->rid_code);
266 if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
267 || rid_code == RID_SHORT || rid_code == RID_SIGNED
268 || rid_code == RID_INT || rid_code == RID_CHAR
269 || rid_code == RID_FLOAT
270 || (rid_code == RID_DOUBLE && TARGET_VSX)
271 || (rid_code == rid_int128 () && TARGET_VADDUQM))
273 expand_this = C_CPP_HASHNODE (__vector_keyword);
274 /* If the next keyword is bool or pixel, it
275 will need to be expanded as well. */
277 tok = cpp_peek_token (pfile, idx++);
278 while (tok->type == CPP_PADDING);
279 ident = altivec_categorize_keyword (tok);
281 if (ident == C_CPP_HASHNODE (__pixel_keyword))
282 expand_bool_pixel = __pixel_keyword;
283 else if (ident == C_CPP_HASHNODE (__bool_keyword))
284 expand_bool_pixel = __bool_keyword;
285 else
287 /* Try two tokens down, too. */
289 tok = cpp_peek_token (pfile, idx++);
290 while (tok->type == CPP_PADDING);
291 ident = altivec_categorize_keyword (tok);
292 if (ident == C_CPP_HASHNODE (__pixel_keyword))
293 expand_bool_pixel = __pixel_keyword;
294 else if (ident == C_CPP_HASHNODE (__bool_keyword))
295 expand_bool_pixel = __bool_keyword;
299 /* Support vector __int128_t, but we don't need to worry about bool
300 or pixel on this type. */
301 else if (TARGET_VADDUQM
302 && (ident == C_CPP_HASHNODE (__int128_type)
303 || ident == C_CPP_HASHNODE (__uint128_type)))
304 expand_this = C_CPP_HASHNODE (__vector_keyword);
307 else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
309 expand_this = C_CPP_HASHNODE (__pixel_keyword);
310 expand_bool_pixel = 0;
312 else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
314 expand_this = C_CPP_HASHNODE (__bool_keyword);
315 expand_bool_pixel = 0;
318 return expand_this;
322 /* Define or undefine a single macro. */
324 static void
325 rs6000_define_or_undefine_macro (bool define_p, const char *name)
327 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
328 fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);
330 if (define_p)
331 cpp_define (parse_in, name);
332 else
333 cpp_undef (parse_in, name);
336 /* Define or undefine macros based on the current target. If the user does
337 #pragma GCC target, we need to adjust the macros dynamically. Note, some of
338 the options needed for builtins have been moved to separate variables, so
339 have both the target flags and the builtin flags as arguments. */
341 void
342 rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
343 HOST_WIDE_INT bu_mask)
345 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
346 fprintf (stderr,
347 "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
348 ", " HOST_WIDE_INT_PRINT_HEX ")\n",
349 (define_p) ? "define" : "undef",
350 flags, bu_mask);
352 /* Each of the flags mentioned below controls whether certain
353 preprocessor macros will be automatically defined when
354 preprocessing source files for compilation by this compiler.
355 While most of these flags can be enabled or disabled
356 explicitly by specifying certain command-line options when
357 invoking the compiler, there are also many ways in which these
358 flags are enabled or disabled implicitly, based on compiler
359 defaults, configuration choices, and on the presence of certain
360 related command-line options. Many, but not all, of these
361 implicit behaviors can be found in file "rs6000.c", the
362 rs6000_option_override_internal() function.
364 In general, each of the flags may be automatically enabled in
365 any of the following conditions:
367 1. If no -mcpu target is specified on the command line and no
368 --with-cpu target is specified to the configure command line
369 and the TARGET_DEFAULT macro for this default cpu host
370 includes the flag, and the flag has not been explicitly disabled
371 by command-line options.
373 2. If the target specified with -mcpu=target on the command line, or
374 in the absence of a -mcpu=target command-line option, if the
375 target specified using --with-cpu=target on the configure
376 command line, is disqualified because the associated binary
377 tools (e.g. the assembler) lack support for the requested cpu,
378 and the TARGET_DEFAULT macro for this default cpu host
379 includes the flag, and the flag has not been explicitly disabled
380 by command-line options.
382 3. If either of the above two conditions apply except that the
383 TARGET_DEFAULT macro is defined to equal zero, and
384 TARGET_POWERPC64 and
385 a) BYTES_BIG_ENDIAN and the flag to be enabled is either
386 MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
387 target), or
388 b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
389 MASK_POWERPC64 or it is one of the flags included in
390 ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).
392 4. If a cpu has been requested with a -mcpu=target command-line option
393 and this cpu has not been disqualified due to shortcomings of the
394 binary tools, and the set of flags associated with the requested cpu
395 include the flag to be enabled. See rs6000-cpus.def for macro
396 definitions that represent various ABI standards
397 (e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
398 the specific flags that are associated with each of the cpu
399 choices that can be specified as the target of a -mcpu=target
400 compile option, or as the target of a --with-cpu=target
401 configure option. Target flags that are specified in either
402 of these two ways are considered "implicit" since the flags
403 are not mentioned specifically by name.
405 Additional documentation describing behavior specific to
406 particular flags is provided below, immediately preceding the
407 use of each relevant flag.
409 5. If there is no -mcpu=target command-line option, and the cpu
410 requested by a --with-cpu=target command-line option has not
411 been disqualified due to shortcomings of the binary tools, and
412 the set of flags associated with the specified target include
413 the flag to be enabled. See the notes immediately above for a
414 summary of the flags associated with particular cpu
415 definitions. */
417 /* rs6000_isa_flags based options. */
418 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
419 if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
420 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
421 if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
422 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
423 if ((flags & OPTION_MASK_POWERPC64) != 0)
424 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
425 if ((flags & OPTION_MASK_MFCRF) != 0)
426 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
427 if ((flags & OPTION_MASK_POPCNTB) != 0)
428 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
429 if ((flags & OPTION_MASK_FPRND) != 0)
430 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
431 if ((flags & OPTION_MASK_CMPB) != 0)
432 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
433 if ((flags & OPTION_MASK_POPCNTD) != 0)
434 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
435 /* Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
436 turned on in the following condition:
437 1. TARGET_P8_VECTOR is enabled and OPTION_MASK_DIRECT_MOVE is not
438 explicitly disabled.
439 Hereafter, the OPTION_MASK_DIRECT_MOVE flag is considered to
440 have been turned on explicitly.
441 Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
442 turned off in any of the following conditions:
443 1. TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX is explicitly
444 disabled and OPTION_MASK_DIRECT_MOVE was not explicitly
445 enabled.
446 2. TARGET_VSX is off. */
447 if ((flags & OPTION_MASK_DIRECT_MOVE) != 0)
448 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
449 if ((flags & OPTION_MASK_MODULO) != 0)
450 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
451 if ((flags & OPTION_MASK_POWER10) != 0)
452 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
453 if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
454 rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
455 if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
456 rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
457 /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
458 in any of the following conditions:
459 1. The operating system is Darwin and it is configured for 64
460 bit. (See darwin_rs6000_override_options.)
461 2. The operating system is Darwin and the operating system
462 version is 10.5 or higher and the user has not explicitly
463 disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
464 the compiler is not producing code for integration within the
465 kernel. (See darwin_rs6000_override_options.)
466 Note that the OPTION_MASK_ALTIVEC flag is automatically turned
467 off in any of the following conditions:
468 1. The operating system does not support saving of AltiVec
469 registers (OS_MISSING_ALTIVEC).
470 2. If an inner context (as introduced by
471 __attribute__((__target__())) or #pragma GCC target()
472 requests a target that normally enables the
473 OPTION_MASK_ALTIVEC flag but the outer-most "main target"
474 does not support the rs6000_altivec_abi, this flag is
475 turned off for the inner context unless OPTION_MASK_ALTIVEC
476 was explicitly enabled for the inner context. */
477 if ((flags & OPTION_MASK_ALTIVEC) != 0)
479 const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
480 rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
481 rs6000_define_or_undefine_macro (define_p, vec_str);
483 /* Define this when supporting context-sensitive keywords. */
484 if (!flag_iso)
485 rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
487 /* Note that the OPTION_MASK_VSX flag is automatically turned on in
488 the following conditions:
489 1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
490 was not explicitly turned off. Hereafter, the OPTION_MASK_VSX
491 flag is considered to have been explicitly turned on.
492 Note that the OPTION_MASK_VSX flag is automatically turned off in
493 the following conditions:
494 1. The operating system does not support saving of AltiVec
495 registers (OS_MISSING_ALTIVEC).
496 2. If the option TARGET_HARD_FLOAT is turned off. Hereafter, the
497 OPTION_MASK_VSX flag is considered to have been turned off
498 explicitly.
499 3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
500 compilation context, or if it is turned on by any means in an
501 inner compilation context. Hereafter, the OPTION_MASK_VSX
502 flag is considered to have been turned off explicitly.
503 4. If TARGET_ALTIVEC was explicitly disabled. Hereafter, the
504 OPTION_MASK_VSX flag is considered to have been turned off
505 explicitly.
506 5. If an inner context (as introduced by
507 __attribute__((__target__())) or #pragma GCC target()
508 requests a target that normally enables the
509 OPTION_MASK_VSX flag but the outer-most "main target"
510 does not support the rs6000_altivec_abi, this flag is
511 turned off for the inner context unless OPTION_MASK_VSX
512 was explicitly enabled for the inner context. */
513 if ((flags & OPTION_MASK_VSX) != 0)
514 rs6000_define_or_undefine_macro (define_p, "__VSX__");
515 if ((flags & OPTION_MASK_HTM) != 0)
517 rs6000_define_or_undefine_macro (define_p, "__HTM__");
518 /* Tell the user that our HTM insn patterns act as memory barriers. */
519 rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
521 /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
522 on in the following conditions:
523 1. TARGET_P9_VECTOR is explicitly turned on and
524 OPTION_MASK_P8_VECTOR is not explicitly turned off.
525 Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
526 have been turned off explicitly.
527 Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
528 off in the following conditions:
529 1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
530 were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
531 not turned on explicitly.
532 2. If TARGET_ALTIVEC is turned off. Hereafter, the
533 OPTION_MASK_P8_VECTOR flag is considered to have been turned off
534 explicitly.
535 3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
536 explicitly enabled. If TARGET_VSX is explicitly enabled, the
537 OPTION_MASK_P8_VECTOR flag is hereafter also considered to
538 have been turned off explicitly. */
539 if ((flags & OPTION_MASK_P8_VECTOR) != 0)
540 rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
541 /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
542 off in the following conditions:
543 1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
544 not turned on explicitly. Hereafter, if OPTION_MASK_P8_VECTOR
545 was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
546 also considered to have been turned off explicitly.
547 Note that the OPTION_MASK_P9_VECTOR is automatically turned on
548 in the following conditions:
549 1. If TARGET_P9_MINMAX was turned on explicitly.
550 Hereafter, THE OPTION_MASK_P9_VECTOR flag is considered to
551 have been turned on explicitly. */
552 if ((flags & OPTION_MASK_P9_VECTOR) != 0)
553 rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
554 /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
555 turned off in the following conditions:
556 1. If TARGET_POWERPC64 is turned off.
557 2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
558 load/store are disabled on little endian). */
559 if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
560 rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
561 /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
562 turned off in the following conditions:
563 1. If TARGET_POWERPC64 is turned off.
564 Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
565 automatically turned on in the following conditions:
566 1. If TARGET_QUAD_MEMORY and this flag was not explicitly
567 disabled. */
568 if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
569 rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
570 /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
571 in the following conditions:
572 1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
573 are turned off explicitly and OPTION_MASK_CRYPTO is not turned
574 on explicitly.
575 2. If TARGET_ALTIVEC is turned off. */
576 if ((flags & OPTION_MASK_CRYPTO) != 0)
577 rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
578 if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
580 rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
581 if (define_p)
582 rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
583 else
584 rs6000_define_or_undefine_macro (false, "__float128");
586 /* OPTION_MASK_FLOAT128_HARDWARE can be turned on if -mcpu=power9 is used or
587 via the target attribute/pragma. */
588 if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
589 rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");
591 /* options from the builtin masks. */
592 /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
593 PROCESSOR_CELL) (e.g. -mcpu=cell). */
594 if ((bu_mask & RS6000_BTM_CELL) != 0)
595 rs6000_define_or_undefine_macro (define_p, "__PPU__");
597 /* Tell the user if we support the MMA instructions. */
598 if ((flags & OPTION_MASK_MMA) != 0)
599 rs6000_define_or_undefine_macro (define_p, "__MMA__");
600 /* Whether pc-relative code is being generated. */
601 if ((flags & OPTION_MASK_PCREL) != 0)
602 rs6000_define_or_undefine_macro (define_p, "__PCREL__");
605 void
606 rs6000_cpu_cpp_builtins (cpp_reader *pfile)
608 /* Define all of the common macros. */
609 rs6000_target_modify_macros (true, rs6000_isa_flags,
610 rs6000_builtin_mask_calculate ());
612 if (TARGET_FRE)
613 builtin_define ("__RECIP__");
614 if (TARGET_FRES)
615 builtin_define ("__RECIPF__");
616 if (TARGET_FRSQRTE)
617 builtin_define ("__RSQRTE__");
618 if (TARGET_FRSQRTES)
619 builtin_define ("__RSQRTEF__");
620 if (TARGET_FLOAT128_TYPE)
621 builtin_define ("__FLOAT128_TYPE__");
622 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
623 builtin_define ("__BUILTIN_CPU_SUPPORTS__");
624 #endif
626 if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
628 /* Define the AltiVec syntactic elements. */
629 builtin_define ("__vector=__attribute__((altivec(vector__)))");
630 builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
631 builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
633 if (!flag_iso)
635 builtin_define ("vector=vector");
636 builtin_define ("pixel=pixel");
637 builtin_define ("bool=bool");
638 builtin_define ("_Bool=_Bool");
639 init_vector_keywords ();
641 /* Enable context-sensitive macros. */
642 cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
645 if (!TARGET_HARD_FLOAT)
646 builtin_define ("_SOFT_DOUBLE");
647 /* Used by lwarx/stwcx. errata work-around. */
648 if (rs6000_cpu == PROCESSOR_PPC405)
649 builtin_define ("__PPC405__");
650 /* Used by libstdc++. */
651 if (TARGET_NO_LWSYNC)
652 builtin_define ("__NO_LWSYNC__");
654 if (TARGET_EXTRA_BUILTINS)
656 /* For the VSX builtin functions identical to Altivec functions, just map
657 the altivec builtin into the vsx version (the altivec functions
658 generate VSX code if -mvsx). */
659 builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
660 builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
661 builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
662 builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
663 builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
664 builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
665 builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");
667 /* Also map the a and m versions of the multiply/add instructions to the
668 builtin for people blindly going off the instruction manual. */
669 builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
670 builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
671 builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
672 builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
673 builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
674 builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
675 builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
676 builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
677 builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
678 builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
679 builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
680 builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
681 builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
682 builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
683 builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
684 builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
687 /* Map the old _Float128 'q' builtins into the new 'f128' builtins. */
688 if (TARGET_FLOAT128_TYPE)
690 builtin_define ("__builtin_fabsq=__builtin_fabsf128");
691 builtin_define ("__builtin_copysignq=__builtin_copysignf128");
692 builtin_define ("__builtin_nanq=__builtin_nanf128");
693 builtin_define ("__builtin_nansq=__builtin_nansf128");
694 builtin_define ("__builtin_infq=__builtin_inff128");
695 builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
698 /* Tell users they can use __builtin_bswap{16,64}. */
699 builtin_define ("__HAVE_BSWAP__");
701 /* May be overridden by target configuration. */
702 RS6000_CPU_CPP_ENDIAN_BUILTINS();
704 if (TARGET_LONG_DOUBLE_128)
706 builtin_define ("__LONG_DOUBLE_128__");
707 builtin_define ("__LONGDOUBLE128");
709 if (TARGET_IEEEQUAD)
711 /* Older versions of GLIBC used __attribute__((__KC__)) to create the
712 IEEE 128-bit floating point complex type for C++ (which does not
713 support _Float128 _Complex). If the default for long double is
714 IEEE 128-bit mode, the library would need to use
715 __attribute__((__TC__)) instead. Defining __KF__ and __KC__
716 is a stop-gap to build with the older libraries, until we
717 get an updated library. */
718 builtin_define ("__LONG_DOUBLE_IEEE128__");
719 builtin_define ("__KF__=__TF__");
720 builtin_define ("__KC__=__TC__");
722 else
723 builtin_define ("__LONG_DOUBLE_IBM128__");
726 switch (TARGET_CMODEL)
728 /* Deliberately omit __CMODEL_SMALL__ since that was the default
729 before --mcmodel support was added. */
730 case CMODEL_MEDIUM:
731 builtin_define ("__CMODEL_MEDIUM__");
732 break;
733 case CMODEL_LARGE:
734 builtin_define ("__CMODEL_LARGE__");
735 break;
736 default:
737 break;
740 switch (rs6000_current_abi)
742 case ABI_V4:
743 builtin_define ("_CALL_SYSV");
744 break;
745 case ABI_AIX:
746 builtin_define ("_CALL_AIXDESC");
747 builtin_define ("_CALL_AIX");
748 builtin_define ("_CALL_ELF=1");
749 break;
750 case ABI_ELFv2:
751 builtin_define ("_CALL_ELF=2");
752 break;
753 case ABI_DARWIN:
754 builtin_define ("_CALL_DARWIN");
755 break;
756 default:
757 break;
760 /* Vector element order. */
761 if (BYTES_BIG_ENDIAN)
762 builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
763 else
764 builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");
766 /* Let the compiled code know if 'f' class registers will not be available. */
767 if (TARGET_SOFT_FLOAT)
768 builtin_define ("__NO_FPRS__");
770 /* Whether aggregates passed by value are aligned to a 16 byte boundary
771 if their alignment is 16 bytes or larger. */
772 if ((TARGET_MACHO && rs6000_darwin64_abi)
773 || DEFAULT_ABI == ABI_ELFv2
774 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
775 builtin_define ("__STRUCT_PARM_ALIGN__=16");
780 /* Convert a type stored into a struct altivec_builtin_types as ID,
781 into a tree. The types are in rs6000_builtin_types: negative values
782 create a pointer type for the type associated to ~ID. Note it is
783 a logical NOT, rather than a negation, otherwise you cannot represent
784 a pointer type for ID 0. */
786 static inline tree
787 rs6000_builtin_type (int id)
789 tree t;
790 t = rs6000_builtin_types[id < 0 ? ~id : id];
791 return id < 0 ? build_pointer_type (t) : t;
794 /* Check whether the type of an argument, T, is compatible with a type ID
795 stored into a struct altivec_builtin_types. Integer types are considered
796 compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
797 the decision. Also allow long double and _Float128 to be compatible if
798 -mabi=ieeelongdouble. */
800 static inline bool
801 is_float128_p (tree t)
803 return (t == float128_type_node
804 || (TARGET_IEEEQUAD
805 && TARGET_LONG_DOUBLE_128
806 && t == long_double_type_node));
809 static inline bool
810 rs6000_builtin_type_compatible (tree t, int id)
812 tree builtin_type;
813 builtin_type = rs6000_builtin_type (id);
814 if (t == error_mark_node)
815 return false;
816 if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
817 return true;
818 else if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
819 && is_float128_p (t) && is_float128_p (builtin_type))
820 return true;
821 else
822 return lang_hooks.types_compatible_p (t, builtin_type);
826 /* In addition to calling fold_convert for EXPR of type TYPE, also
827 call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
828 hiding there (PR47197). */
830 static tree
831 fully_fold_convert (tree type, tree expr)
833 tree result = fold_convert (type, expr);
834 bool maybe_const = true;
836 if (!c_dialect_cxx ())
837 result = c_fully_fold (result, false, &maybe_const);
839 return result;
842 /* Build a tree for a function call to an Altivec non-overloaded builtin.
843 The overloaded builtin that matched the types and args is described
844 by DESC. The N arguments are given in ARGS, respectively.
846 Actually the only thing it does is calling fold_convert on ARGS, with
847 a small exception for vec_{all,any}_{ge,le} predicates. */
849 static tree
850 altivec_build_resolved_builtin (tree *args, int n,
851 const struct altivec_builtin_types *desc)
853 tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
854 tree ret_type = rs6000_builtin_type (desc->ret_type);
855 tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
856 tree arg_type[4];
857 tree call;
859 int i;
860 for (i = 0; i < n; i++)
861 arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);
863 /* The AltiVec overloading implementation is overall gross, but this
864 is particularly disgusting. The vec_{all,any}_{ge,le} builtins
865 are completely different for floating-point vs. integer vector
866 types, because the former has vcmpgefp, but the latter should use
867 vcmpgtXX.
869 In practice, the second and third arguments are swapped, and the
870 condition (LT vs. EQ, which is recognizable by bit 1 of the first
871 argument) is reversed. Patch the arguments here before building
872 the resolved CALL_EXPR. */
873 if (n == 3
874 && desc->code == ALTIVEC_BUILTIN_VEC_VCMPGE_P
875 && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P
876 && desc->overloaded_code != VSX_BUILTIN_XVCMPGEDP_P)
878 std::swap (args[1], args[2]);
879 std::swap (arg_type[1], arg_type[2]);
881 args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
882 build_int_cst (NULL_TREE, 2));
885 switch (n)
887 case 0:
888 call = build_call_expr (impl_fndecl, 0);
889 break;
890 case 1:
891 call = build_call_expr (impl_fndecl, 1,
892 fully_fold_convert (arg_type[0], args[0]));
893 break;
894 case 2:
895 call = build_call_expr (impl_fndecl, 2,
896 fully_fold_convert (arg_type[0], args[0]),
897 fully_fold_convert (arg_type[1], args[1]));
898 break;
899 case 3:
900 call = build_call_expr (impl_fndecl, 3,
901 fully_fold_convert (arg_type[0], args[0]),
902 fully_fold_convert (arg_type[1], args[1]),
903 fully_fold_convert (arg_type[2], args[2]));
904 break;
905 case 4:
906 call = build_call_expr (impl_fndecl, 4,
907 fully_fold_convert (arg_type[0], args[0]),
908 fully_fold_convert (arg_type[1], args[1]),
909 fully_fold_convert (arg_type[2], args[2]),
910 fully_fold_convert (arg_type[3], args[3]));
911 break;
912 default:
913 gcc_unreachable ();
915 return fold_convert (ret_type, call);
918 /* Implementation of the resolve_overloaded_builtin target hook, to
919 support Altivec's overloaded builtins. */
/* Implementation of the resolve_overloaded_builtin target hook, to
   support Altivec's overloaded builtins.

   FNDECL is the overloaded builtin, LOC its call location, and
   PASSED_ARGLIST the actual arguments.  Returns NULL_TREE to let the
   generic code handle the call (e.g. wrong argument count against the
   prototype), error_mark_node after diagnosing a bad call, or the
   resolved call tree.  Several special cases are open-coded here
   before the generic overload-table scan at the bottom.  */
tree
altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
				    void *passed_arglist)
{
  vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
  unsigned int nargs = vec_safe_length (arglist);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl);
  tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  tree types[4], args[4];
  const struct altivec_builtin_types *desc;
  unsigned int n;

  /* Only overloaded builtins are resolved here; everything else goes
     through the generic machinery untouched.  */
  if (!rs6000_overloaded_builtin_p (fcode))
    return NULL_TREE;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
	     (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));

  /* vec_lvsl and vec_lvsr are deprecated for use with LE element order.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_LVSL && !BYTES_BIG_ENDIAN)
    warning (OPT_Wdeprecated,
	     "%<vec_lvsl%> is deprecated for little endian; use "
	     "assignment for unaligned loads and stores");
  else if (fcode == ALTIVEC_BUILTIN_VEC_LVSR && !BYTES_BIG_ENDIAN)
    warning (OPT_Wdeprecated,
	     "%<vec_lvsr%> is deprecated for little endian; use "
	     "assignment for unaligned loads and stores");

  if (fcode == ALTIVEC_BUILTIN_VEC_MUL)
    {
      /* vec_mul needs to be special cased because there are no instructions
	 for it for the {un}signed char, {un}signed short, and {un}signed int
	 types.  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_mul");
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);

      /* Both arguments must be vectors and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	case E_QImode:
	case E_HImode:
	case E_SImode:
	case E_DImode:
	case E_TImode:
	  {
	    /* For scalar types just use a multiply expression.  */
	    return fold_build2_loc (loc, MULT_EXPR, TREE_TYPE (arg0), arg0,
				    fold_convert (TREE_TYPE (arg0), arg1));
	  }
	case E_SFmode:
	  {
	    /* For floats use the xvmulsp instruction directly.  */
	    tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULSP];
	    return build_call_expr (call, 2, arg0, arg1);
	  }
	case E_DFmode:
	  {
	    /* For doubles use the xvmuldp instruction directly.  */
	    tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULDP];
	    return build_call_expr (call, 2, arg0, arg1);
	  }
	/* Other types are errors.  */
	default:
	  goto bad;
	}
    }

  if (fcode == ALTIVEC_BUILTIN_VEC_CMPNE)
    {
      /* vec_cmpne needs to be special cased because there are no instructions
	 for it (prior to power 9).  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_cmpne");
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);

      /* Both arguments must be vectors and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
	goto bad;

      /* Power9 instructions provide the most efficient implementation of
	 ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
	 or SFmode or DFmode.  */
      if (!TARGET_P9_VECTOR
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DImode)
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == TImode)
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == SFmode)
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DFmode))
	{
	  switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	    {
	    /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
	       vec_cmpeq (va, vb)).  */
	    /* Note:  vec_nand also works but opt changes vec_nand's
	       to vec_nor's anyway.  */
	    case E_QImode:
	    case E_HImode:
	    case E_SImode:
	    case E_DImode:
	    case E_TImode:
	    case E_SFmode:
	    case E_DFmode:
	      {
		/* call = vec_cmpeq (va, vb)
		   result = vec_nor (call, call).  */
		vec<tree, va_gc> *params = make_tree_vector ();
		vec_safe_push (params, arg0);
		vec_safe_push (params, arg1);
		/* Recursively resolve vec_cmpeq on the same operands.  */
		tree call = altivec_resolve_overloaded_builtin
		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_CMPEQ],
		   params);
		/* Use save_expr to ensure that operands used more than once
		   that may have side effects (like calls) are only evaluated
		   once.  */
		call = save_expr (call);
		params = make_tree_vector ();
		vec_safe_push (params, call);
		vec_safe_push (params, call);
		return altivec_resolve_overloaded_builtin
		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_NOR], params);
	      }
	    /* Other types are errors.  */
	    default:
	      goto bad;
	    }
	}
      /* else, fall through and process the Power9 alternative below */
    }

  if (fcode == ALTIVEC_BUILTIN_VEC_ADDE
      || fcode == ALTIVEC_BUILTIN_VEC_SUBE)
    {
      /* vec_adde needs to be special cased because there is no instruction
	 for the {un}signed int version.  */
      if (nargs != 3)
	{
	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDE ?
	    "vec_adde": "vec_sube";
	  error ("builtin %qs only accepts 3 arguments", name);
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);
      tree arg2 = (*arglist)[2];
      tree arg2_type = TREE_TYPE (arg2);

      /* All 3 arguments must be vectors of (signed or unsigned) (int or
	 __int128) and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	/* For {un}signed ints,
	   vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
						 vec_and (carryv, 1)).
	   vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
						 vec_and (carryv, 1)).  */
	case E_SImode:
	  {
	    tree add_sub_builtin;

	    vec<tree, va_gc> *params = make_tree_vector ();
	    vec_safe_push (params, arg0);
	    vec_safe_push (params, arg1);

	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
	      add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
	    else
	      add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];

	    tree call = altivec_resolve_overloaded_builtin (loc,
							    add_sub_builtin,
							    params);
	    tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
	    tree ones_vector = build_vector_from_val (arg0_type, const1);
	    tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
					     arg2, ones_vector);
	    params = make_tree_vector ();
	    vec_safe_push (params, call);
	    vec_safe_push (params, and_expr);
	    return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
						       params);
	  }
	/* For {un}signed __int128s use the vaddeuqm instruction
	   directly.  */
	case E_TImode:
	  {
	    tree bii;

	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
	      bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
	    else
	      bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBEUQM];

	    return altivec_resolve_overloaded_builtin (loc, bii, arglist);
	  }
	/* Types other than {un}signed int and {un}signed __int128
	   are errors.  */
	default:
	  goto bad;
	}
    }

  if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC
      || fcode == ALTIVEC_BUILTIN_VEC_SUBEC)
    {
      /* vec_addec and vec_subec needs to be special cased because there is
	 no instruction for the {un}signed int version.  */
      if (nargs != 3)
	{
	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDEC ?
	    "vec_addec": "vec_subec";
	  error ("builtin %qs only accepts 3 arguments", name);
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);
      tree arg2 = (*arglist)[2];
      tree arg2_type = TREE_TYPE (arg2);

      /* All 3 arguments must be vectors of (signed or unsigned) (int or
	 __int128) and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	/* For {un}signed ints,
	   vec_addec (va, vb, carryv) ==
	     vec_or (vec_addc (va, vb),
		     vec_addc (vec_add (va, vb),
			       vec_and (carryv, 0x1))).  */
	case E_SImode:
	  {
	    /* Use save_expr to ensure that operands used more than once
	       that may have side effects (like calls) are only evaluated
	       once.  */
	    tree as_builtin;
	    tree as_c_builtin;

	    arg0 = save_expr (arg0);
	    arg1 = save_expr (arg1);
	    vec<tree, va_gc> *params = make_tree_vector ();
	    vec_safe_push (params, arg0);
	    vec_safe_push (params, arg1);

	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
	      as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC];
	    else
	      as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUBC];

	    /* call1 = vec_addc/vec_subc (va, vb).  */
	    tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
							     params);
	    params = make_tree_vector ();
	    vec_safe_push (params, arg0);
	    vec_safe_push (params, arg1);

	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
	      as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
	    else
	      as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];

	    /* call2 = vec_add/vec_sub (va, vb).  */
	    tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
							     params);
	    tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
	    tree ones_vector = build_vector_from_val (arg0_type, const1);
	    tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
					     arg2, ones_vector);
	    /* call2 = vec_addc/vec_subc (call2, carryv & 1).  */
	    params = make_tree_vector ();
	    vec_safe_push (params, call2);
	    vec_safe_push (params, and_expr);
	    call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
							params);
	    /* Result is vec_or (call1, call2).  */
	    params = make_tree_vector ();
	    vec_safe_push (params, call1);
	    vec_safe_push (params, call2);
	    tree or_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_OR];
	    return altivec_resolve_overloaded_builtin (loc, or_builtin,
						       params);
	  }
	/* For {un}signed __int128s use the vaddecuq/vsubbecuq
	   instructions.  */
	case E_TImode:
	  {
	    tree bii;

	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
	      bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ];
	    else
	      bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBECUQ];

	    return altivec_resolve_overloaded_builtin (loc, bii, arglist);
	  }
	/* Types other than {un}signed int and {un}signed __int128
	   are errors.  */
	default:
	  goto bad;
	}
    }

  /* For now treat vec_splats and vec_promote as the same.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS
      || fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)
    {
      tree type, arg;
      int size;
      int i;
      bool unsigned_p;
      vec<constructor_elt, va_gc> *vec;
      const char *name = fcode == ALTIVEC_BUILTIN_VEC_SPLATS ? "vec_splats": "vec_promote";

      if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS && nargs != 1)
	{
	  error ("builtin %qs only accepts 1 argument", name);
	  return error_mark_node;
	}
      if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE && nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", name);
	  return error_mark_node;
	}
      /* Ignore promote's element argument.  */
      if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
	  && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
	goto bad;

      arg = (*arglist)[0];
      type = TREE_TYPE (arg);
      if (!SCALAR_FLOAT_TYPE_P (type)
	  && !INTEGRAL_TYPE_P (type))
	goto bad;
      unsigned_p = TYPE_UNSIGNED (type);
      /* Pick the vector type (and its element count) matching the
	 scalar argument's mode and signedness.  */
      switch (TYPE_MODE (type))
	{
	case E_TImode:
	  type = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
	  size = 1;
	  break;
	case E_DImode:
	  type = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
	  size = 2;
	  break;
	case E_SImode:
	  type = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
	  size = 4;
	  break;
	case E_HImode:
	  type = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
	  size = 8;
	  break;
	case E_QImode:
	  type = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
	  size = 16;
	  break;
	case E_SFmode: type = V4SF_type_node; size = 4; break;
	case E_DFmode: type = V2DF_type_node; size = 2; break;
	default:
	  goto bad;
	}
      /* Build a CONSTRUCTOR replicating the (converted) scalar into
	 every element; save_expr so the scalar is evaluated once.  */
      arg = save_expr (fold_convert (TREE_TYPE (type), arg));
      vec_alloc (vec, size);
      for (i = 0; i < size; i++)
	{
	  constructor_elt elt = {NULL_TREE, arg};
	  vec->quick_push (elt);
	}
      return build_constructor (type, vec);
    }

  /* For now use pointer tricks to do the extraction, unless we are on VSX
     extracting a double from a constant offset.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_EXTRACT)
    {
      tree arg1;
      tree arg1_type;
      tree arg2;
      tree arg1_inner_type;
      tree decl, stmt;
      tree innerptrtype;
      machine_mode mode;

      /* No second argument.  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_extract");
	  return error_mark_node;
	}

      arg2 = (*arglist)[1];
      arg1 = (*arglist)[0];
      arg1_type = TREE_TYPE (arg1);

      if (TREE_CODE (arg1_type) != VECTOR_TYPE)
	goto bad;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
	goto bad;

      /* See if we can optimize vec_extracts with the current VSX instruction
	 set.  */
      mode = TYPE_MODE (arg1_type);
      if (VECTOR_MEM_VSX_P (mode))
	{
	  tree call = NULL_TREE;
	  int nunits = GET_MODE_NUNITS (mode);

	  arg2 = fold_for_warn (arg2);

	  /* If the second argument is an integer constant, generate
	     the built-in code if we can.  We need 64-bit and direct
	     move to extract the small integer vectors.  */
	  if (TREE_CODE (arg2) == INTEGER_CST)
	    {
	      /* Reduce the selector modulo the element count, matching
		 hardware behavior for out-of-range indices.  */
	      wide_int selector = wi::to_wide (arg2);
	      selector = wi::umod_trunc (selector, nunits);
	      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	      switch (mode)
		{
		default:
		  break;

		case E_V1TImode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V1TI];
		  break;

		case E_V2DFmode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
		  break;

		case E_V2DImode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
		  break;

		case E_V4SFmode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
		  break;

		case E_V4SImode:
		  if (TARGET_DIRECT_MOVE_64BIT)
		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
		  break;

		case E_V8HImode:
		  if (TARGET_DIRECT_MOVE_64BIT)
		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
		  break;

		case E_V16QImode:
		  if (TARGET_DIRECT_MOVE_64BIT)
		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
		  break;
		}
	    }

	  /* If the second argument is variable, we can optimize it if we are
	     generating 64-bit code on a machine with direct move.  */
	  else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
	    {
	      switch (mode)
		{
		default:
		  break;

		case E_V2DFmode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
		  break;

		case E_V2DImode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
		  break;

		case E_V4SFmode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
		  break;

		case E_V4SImode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
		  break;

		case E_V8HImode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
		  break;

		case E_V16QImode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
		  break;
		}
	    }

	  if (call)
	    {
	      tree result = build_call_expr (call, 2, arg1, arg2);
	      /* Coerce the result to vector element type.  May be no-op.  */
	      arg1_inner_type = TREE_TYPE (arg1_type);
	      result = fold_convert (arg1_inner_type, result);
	      return result;
	    }
	}

      /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2).  */
      arg1_inner_type = TREE_TYPE (arg1_type);
      /* Mask the index to the element count (power of two), matching
	 the modulo behavior of the builtin path above.  */
      arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
			      build_int_cst (TREE_TYPE (arg2),
					     TYPE_VECTOR_SUBPARTS (arg1_type)
					     - 1), 0);
      decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
      DECL_EXTERNAL (decl) = 0;
      TREE_PUBLIC (decl) = 0;
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_USED (decl) = 1;
      TREE_TYPE (decl) = arg1_type;
      TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
      if (c_dialect_cxx ())
	{
	  /* C++ has no COMPOUND_LITERAL_EXPR; use a TARGET_EXPR.  */
	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
			 NULL_TREE, NULL_TREE);
	  SET_EXPR_LOCATION (stmt, loc);
	}
      else
	{
	  DECL_INITIAL (decl) = arg1;
	  stmt = build1 (DECL_EXPR, arg1_type, decl);
	  TREE_ADDRESSABLE (decl) = 1;
	  SET_EXPR_LOCATION (stmt, loc);
	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
	}

      innerptrtype = build_pointer_type (arg1_inner_type);

      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
      stmt = convert (innerptrtype, stmt);
      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
      stmt = build_indirect_ref (loc, stmt, RO_NULL);

      /* PR83660: We mark this as having side effects so that
	 downstream in fold_build_cleanup_point_expr () it will get a
	 CLEANUP_POINT_EXPR.  If it does not we can run into an ICE
	 later in gimplify_cleanup_point_expr ().  Potentially this
	 causes missed optimization because the actually is no side
	 effect.  */
      if (c_dialect_cxx ())
	TREE_SIDE_EFFECTS (stmt) = 1;

      return stmt;
    }

  /* For now use pointer tricks to do the insertion, unless we are on VSX
     inserting a double to a constant offset..  */
  if (fcode == ALTIVEC_BUILTIN_VEC_INSERT)
    {
      tree arg0;
      tree arg1;
      tree arg2;
      tree arg1_type;
      tree arg1_inner_type;
      tree decl, stmt;
      tree innerptrtype;
      machine_mode mode;

      /* No second or third arguments.  */
      if (nargs != 3)
	{
	  error ("builtin %qs only accepts 3 arguments", "vec_insert");
	  return error_mark_node;
	}

      arg0 = (*arglist)[0];
      arg1 = (*arglist)[1];
      arg1_type = TREE_TYPE (arg1);
      arg2 = fold_for_warn ((*arglist)[2]);

      if (TREE_CODE (arg1_type) != VECTOR_TYPE)
	goto bad;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
	goto bad;

      /* If we can use the VSX xxpermdi instruction, use that for insert.  */
      mode = TYPE_MODE (arg1_type);
      if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
	  && TREE_CODE (arg2) == INTEGER_CST)
	{
	  wide_int selector = wi::to_wide (arg2);
	  selector = wi::umod_trunc (selector, 2);
	  tree call = NULL_TREE;

	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	  if (mode == V2DFmode)
	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DF];
	  else if (mode == V2DImode)
	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DI];

	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
	     reversed.  */
	  if (call)
	    return build_call_expr (call, 3, arg1, arg0, arg2);
	}
      else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
	       && TREE_CODE (arg2) == INTEGER_CST)
	{
	  tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
	  /* A one-element vector has only index 0.  */
	  wide_int selector = wi::zero(32);

	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
	     reversed.  */
	  return build_call_expr (call, 3, arg1, arg0, arg2);
	}

      /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0.  */
      arg1_inner_type = TREE_TYPE (arg1_type);
      if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
	arg2 = build_int_cst (TREE_TYPE (arg2), 0);
      else
	arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
				build_int_cst (TREE_TYPE (arg2),
					       TYPE_VECTOR_SUBPARTS (arg1_type)
					       - 1), 0);
      decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
      DECL_EXTERNAL (decl) = 0;
      TREE_PUBLIC (decl) = 0;
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_USED (decl) = 1;
      TREE_TYPE (decl) = arg1_type;
      TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
      if (c_dialect_cxx ())
	{
	  /* C++ has no COMPOUND_LITERAL_EXPR; use a TARGET_EXPR.  */
	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
			 NULL_TREE, NULL_TREE);
	  SET_EXPR_LOCATION (stmt, loc);
	}
      else
	{
	  DECL_INITIAL (decl) = arg1;
	  stmt = build1 (DECL_EXPR, arg1_type, decl);
	  TREE_ADDRESSABLE (decl) = 1;
	  SET_EXPR_LOCATION (stmt, loc);
	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
	}

      innerptrtype = build_pointer_type (arg1_inner_type);

      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
      stmt = convert (innerptrtype, stmt);
      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
      stmt = build_indirect_ref (loc, stmt, RO_NULL);
      stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
		     convert (TREE_TYPE (stmt), arg0));
      /* The whole expression yields the modified vector.  */
      stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
      return stmt;
    }

  /* Walk the actual arguments in parallel with the prototype,
     normalizing each argument's type for the overload-table match
     below (default promotions, const-stripping on pointers).  */
  for (n = 0;
       !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
       fnargs = TREE_CHAIN (fnargs), n++)
    {
      tree decl_type = TREE_VALUE (fnargs);
      tree arg = (*arglist)[n];
      tree type;

      if (arg == error_mark_node)
	return error_mark_node;

      if (n >= 4)
        abort ();

      arg = default_conversion (arg);

      /* The C++ front-end converts float * to const void * using
	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)).  */
      type = TREE_TYPE (arg);
      if (POINTER_TYPE_P (type)
	  && TREE_CODE (arg) == NOP_EXPR
	  && lang_hooks.types_compatible_p (TREE_TYPE (arg),
					    const_ptr_type_node)
	  && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
					    ptr_type_node))
	{
	  arg = TREE_OPERAND (arg, 0);
	  type = TREE_TYPE (arg);
	}

      /* Remove the const from the pointers to simplify the overload
	 matching further down.  */
      if (POINTER_TYPE_P (decl_type)
	  && POINTER_TYPE_P (type)
	  && TYPE_QUALS (TREE_TYPE (type)) != 0)
	{
	  if (TYPE_READONLY (TREE_TYPE (type))
	      && !TYPE_READONLY (TREE_TYPE (decl_type)))
	    warning (0, "passing argument %d of %qE discards qualifiers from "
		     "pointer target type", n + 1, fndecl);
	  type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
							   0));
	  arg = fold_convert (type, arg);
	}

      /* For P9V_BUILTIN_VEC_LXVL, convert any const * to its non constant
	 equivalent to simplify the overload matching below.  */
      if (fcode == P9V_BUILTIN_VEC_LXVL)
	{
	  if (POINTER_TYPE_P (type)
	      && TYPE_READONLY (TREE_TYPE (type)))
	    {
	      type = build_pointer_type (build_qualified_type (
						TREE_TYPE (type),0));
	      arg = fold_convert (type, arg);
	    }
	}

      args[n] = arg;
      types[n] = type;
    }

  /* If the number of arguments did not match the prototype, return NULL
     and the generic code will issue the appropriate error message.  */
  if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs)
    return NULL;

  if (n == 0)
    abort ();

  if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
    {
      if (TREE_CODE (types[0]) != VECTOR_TYPE)
	goto bad;

      /* vec_step is simply the element count of the vector type.  */
      return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
    }

  {
    bool unsupported_builtin = false;
    enum rs6000_builtins overloaded_code;
    tree result = NULL;
    /* Skip ahead to the first table entry for this overloaded code;
       entries for one code are contiguous.  */
    for (desc = altivec_overloaded_builtins;
	 desc->code && desc->code != fcode; desc++)
      continue;

    /* Need to special case __builtin_cmpb because the overloaded forms
       of this function take (unsigned int, unsigned int) or (unsigned
       long long int, unsigned long long int).  Since C conventions
       allow the respective argument types to be implicitly coerced into
       each other, the default handling does not provide adequate
       discrimination between the desired forms of the function.  */
    if (fcode == P6_OV_BUILTIN_CMPB)
      {
	machine_mode arg1_mode = TYPE_MODE (types[0]);
	machine_mode arg2_mode = TYPE_MODE (types[1]);

	if (nargs != 2)
	  {
	    error ("builtin %qs only accepts 2 arguments", "__builtin_cmpb");
	    return error_mark_node;
	  }

	/* If any supplied arguments are wider than 32 bits, resolve to
	   64-bit variant of built-in function.  */
	if ((GET_MODE_PRECISION (arg1_mode) > 32)
	    || (GET_MODE_PRECISION (arg2_mode) > 32))
	  {
	    /* Assure all argument and result types are compatible with
	       the built-in function represented by P6_BUILTIN_CMPB.  */
	    overloaded_code = P6_BUILTIN_CMPB;
	  }
	else
	  {
	    /* Assure all argument and result types are compatible with
	       the built-in function represented by P6_BUILTIN_CMPB_32.  */
	    overloaded_code = P6_BUILTIN_CMPB_32;
	  }

	while (desc->code && desc->code == fcode
	       && desc->overloaded_code != overloaded_code)
	  desc++;

	if (desc->code && (desc->code == fcode)
	    && rs6000_builtin_type_compatible (types[0], desc->op1)
	    && rs6000_builtin_type_compatible (types[1], desc->op2))
	  {
	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
	      {
		result = altivec_build_resolved_builtin (args, n, desc);
		/* overloaded_code is set above */
		if (!rs6000_builtin_is_supported_p (overloaded_code))
		  unsupported_builtin = true;
		else
		  return result;
	      }
	    else
	      unsupported_builtin = true;
	  }
      }
    else if (fcode == P9V_BUILTIN_VEC_VSIEDP)
      {
	machine_mode arg1_mode = TYPE_MODE (types[0]);

	if (nargs != 2)
	  {
	    error ("builtin %qs only accepts 2 arguments",
		   "scalar_insert_exp");
	    return error_mark_node;
	  }

	/* If supplied first argument is wider than 64 bits, resolve to
	   128-bit variant of built-in function.  */
	if (GET_MODE_PRECISION (arg1_mode) > 64)
	  {
	    /* If first argument is of float variety, choose variant
	       that expects __ieee128 argument.  Otherwise, expect
	       __int128 argument.  */
	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
	      overloaded_code = P9V_BUILTIN_VSIEQPF;
	    else
	      overloaded_code = P9V_BUILTIN_VSIEQP;
	  }
	else
	  {
	    /* If first argument is of float variety, choose variant
	       that expects double argument.  Otherwise, expect
	       long long int argument.  */
	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
	      overloaded_code = P9V_BUILTIN_VSIEDPF;
	    else
	      overloaded_code = P9V_BUILTIN_VSIEDP;
	  }

	while (desc->code && desc->code == fcode
	       && desc->overloaded_code != overloaded_code)
	  desc++;

	if (desc->code && (desc->code == fcode)
	    && rs6000_builtin_type_compatible (types[0], desc->op1)
	    && rs6000_builtin_type_compatible (types[1], desc->op2))
	  {
	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
	      {
		result = altivec_build_resolved_builtin (args, n, desc);
		/* overloaded_code is set above.  */
		if (!rs6000_builtin_is_supported_p (overloaded_code))
		  unsupported_builtin = true;
		else
		  return result;
	      }
	    else
	      unsupported_builtin = true;
	  }
      }
    else if ((fcode == P10_BUILTIN_VEC_XXEVAL)
	     || (fcode == P10V_BUILTIN_VXXPERMX))
      {
	signed char op3_type;

	/* Need to special case P10_BUILTIN_VEC_XXEVAL and
	   P10V_BUILTIN_VXXPERMX because they take 4 arguments and the
	   existing infrastructure only handles three.  */
	if (nargs != 4)
	  {
	    const char *name = fcode == P10_BUILTIN_VEC_XXEVAL ?
	      "__builtin_vec_xxeval":"__builtin_vec_xxpermx";

	    error ("builtin %qs requires 4 arguments", name);
	    return error_mark_node;
	  }

	for ( ; desc->code == fcode; desc++)
	  {
	    if (fcode == P10_BUILTIN_VEC_XXEVAL)
	      op3_type = desc->op3;
	    else  /* P10V_BUILTIN_VXXPERMX */
	      op3_type = RS6000_BTI_V16QI;

	    /* The fourth argument is always an unsigned int literal.  */
	    if (rs6000_builtin_type_compatible (types[0], desc->op1)
		&& rs6000_builtin_type_compatible (types[1], desc->op2)
		&& rs6000_builtin_type_compatible (types[2], desc->op3)
		&& rs6000_builtin_type_compatible (types[2], op3_type)
		&& rs6000_builtin_type_compatible (types[3],
						   RS6000_BTI_UINTSI))
	      {
		if (rs6000_builtin_decls[desc->overloaded_code] == NULL_TREE)
		  unsupported_builtin = true;
		else
		  {
		    result = altivec_build_resolved_builtin (args, n, desc);
		    if (rs6000_builtin_is_supported_p (desc->overloaded_code))
		      return result;
		    /* Allow loop to continue in case a different
		       definition is supported.  */
		    overloaded_code = desc->overloaded_code;
		    unsupported_builtin = true;
		  }
	      }
	  }
      }
    else
      {
	/* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in
	   the opX fields.  */
	for (; desc->code == fcode; desc++)
	  {
	    if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
		 || rs6000_builtin_type_compatible (types[0], desc->op1))
		&& (desc->op2 == RS6000_BTI_NOT_OPAQUE
		    || rs6000_builtin_type_compatible (types[1], desc->op2))
		&& (desc->op3 == RS6000_BTI_NOT_OPAQUE
		    || rs6000_builtin_type_compatible (types[2], desc->op3)))
	      {
		if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
		  {
		    result = altivec_build_resolved_builtin (args, n, desc);
		    if (!rs6000_builtin_is_supported_p (desc->overloaded_code))
		      {
			/* Allow loop to continue in case a different
			   definition is supported.  */
			overloaded_code = desc->overloaded_code;
			unsupported_builtin = true;
		      }
		    else
		      return result;
		  }
		else
		  unsupported_builtin = true;
	      }
	  }
      }

    if (unsupported_builtin)
      {
	const char *name = rs6000_overloaded_builtin_name (fcode);
	if (result != NULL)
	  {
	    const char *internal_name
	      = rs6000_overloaded_builtin_name (overloaded_code);
	    /* An error message making reference to the name of the
	       non-overloaded function has already been issued.  Add
	       clarification of the previous message.  */
	    rich_location richloc (line_table, input_location);
	    inform (&richloc, "builtin %qs requires builtin %qs",
		    name, internal_name);
	  }
	else
	  error ("%qs is not supported in this compiler configuration", name);
	/* If an error-representing  result tree was returned from
	   altivec_build_resolved_builtin above, use it.  */
	return (result != NULL) ? result : error_mark_node;
      }
  }
 bad:
  {
    const char *name = rs6000_overloaded_builtin_name (fcode);
    error ("invalid parameter combination for AltiVec intrinsic %qs", name);
    return error_mark_node;
  }
}