gcc/optabs-tree.cc
/* Tree-based target query functions relating to optabs
   Copyright (C) 1987-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "optabs.h"
#include "optabs-tree.h"
#include "stor-layout.h"
/* Return the optab used for computing the operation given by the tree code,
   CODE and the tree EXP.  This function is not always usable (for example, it
   cannot give complete results for multiplication or division) but probably
   ought to be relied on more widely throughout the expander.  */

optab
optab_for_tree_code (enum tree_code code, const_tree type,
                     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case MULT_HIGHPART_EXPR:
      return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;

    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      /* {s,u}mod_optab implements TRUNC_MOD_EXPR.  For scalar modes,
         expansion has code to adjust TRUNC_MOD_EXPR into the desired other
         modes, but for vector modes it does not.  The adjustment code
         should instead be emitted in tree-vect-patterns.cc.  */
      if (TREE_CODE (type) == VECTOR_TYPE)
        return unknown_optab;
      /* FALLTHRU */
    case TRUNC_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
      /* {,u}{s,u}div_optab implements {TRUNC,EXACT}_DIV_EXPR or RDIV_EXPR.
         For scalar modes, expansion has code to adjust TRUNC_DIV_EXPR
         into the desired other modes, but for vector modes it does not.
         The adjustment code should instead be emitted in
         tree-vect-patterns.cc.  */
      if (TREE_CODE (type) == VECTOR_TYPE)
        return unknown_optab;
      /* FALLTHRU */
    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_SATURATING (type) ? unknown_optab : vashl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotl_optab;

    case RROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if (subtype == optab_vector)
            return vrotr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotr_optab;
    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      {
        if (subtype == optab_vector_mixed_sign)
          return usdot_prod_optab;

        return (TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab);
      }

    case SAD_EXPR:
      return TYPE_UNSIGNED (type) ? usad_optab : ssad_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmadd_widen_optab : umadd_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmsub_widen_optab : umsub_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmsub_widen_optab : smsub_widen_optab));

    case VEC_WIDEN_MULT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab);

    case VEC_WIDEN_MULT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab);

    case VEC_WIDEN_MULT_EVEN_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_even_optab : vec_widen_smult_even_optab);

    case VEC_WIDEN_MULT_ODD_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab);

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab);

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab);

    case VEC_WIDEN_PLUS_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_uaddl_lo_optab : vec_widen_saddl_lo_optab);

    case VEC_WIDEN_PLUS_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_uaddl_hi_optab : vec_widen_saddl_hi_optab);

    case VEC_WIDEN_MINUS_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_usubl_lo_optab : vec_widen_ssubl_lo_optab);

    case VEC_WIDEN_MINUS_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_usubl_hi_optab : vec_widen_ssubl_hi_optab);

    case VEC_UNPACK_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_hi_optab : vec_unpacks_hi_optab);

    case VEC_UNPACK_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_lo_optab : vec_unpacks_lo_optab);

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab);

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab);

    case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpack_ufix_trunc_hi_optab
              : vec_unpack_sfix_trunc_hi_optab);

    case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpack_ufix_trunc_lo_optab
              : vec_unpack_sfix_trunc_lo_optab);

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab);

    case VEC_PACK_FLOAT_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_packu_float_optab : vec_packs_float_optab);

    case VEC_DUPLICATE_EXPR:
      return vec_duplicate_optab;

    case VEC_SERIES_EXPR:
      return vec_series_optab;

    default:
      break;
    }
  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case POINTER_DIFF_EXPR:
    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case ABSU_EXPR:
      return abs_optab;

    default:
      return unknown_optab;
    }
}
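/* As a usage sketch (VECTYPE below is only a placeholder), a caller
   typically pairs this function with optab_handler to ask whether the
   target can perform the operation on a given mode:

     optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
     if (op != unknown_optab
         && optab_handler (op, TYPE_MODE (vectype)) != CODE_FOR_nothing)
       ... the target has an instruction for the vector addition ...  */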
/* Check whether an operation represented by CODE is a 'half' widening operation
   in which the input vector type has half the number of bits of the output
   vector type e.g. V8QI->V8HI.

   This is handled by widening the inputs using NOP_EXPRs then using a
   non-widening stmt e.g. MINUS_EXPR.  RTL fusing converts these to the
   widening hardware instructions if supported.

   The more typical case (handled in supportable_widening_operation) is where
   the input vector type has the same number of bits as the output vector type.
   In this case half the elements of the input vectors must be processed at a
   time into respective vector outputs with elements twice as wide i.e. a
   'hi'/'lo' pair using codes such as VEC_WIDEN_MINUS_HI/LO.

   Supported widening operations:
    WIDEN_MINUS_EXPR
    WIDEN_PLUS_EXPR
    WIDEN_MULT_EXPR
    WIDEN_LSHIFT_EXPR

   Output:
   - CODE1 - The non-widened code, which will be used after the inputs are
     converted to the wide type.  */
bool
supportable_half_widening_operation (enum tree_code code, tree vectype_out,
                                     tree vectype_in, enum tree_code *code1)
{
  machine_mode m1, m2;
  enum tree_code dummy_code;
  optab op;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in),
                TYPE_VECTOR_SUBPARTS (vectype_out)))
    return false;

  switch (code)
    {
    case WIDEN_LSHIFT_EXPR:
      *code1 = LSHIFT_EXPR;
      break;
    case WIDEN_MINUS_EXPR:
      *code1 = MINUS_EXPR;
      break;
    case WIDEN_PLUS_EXPR:
      *code1 = PLUS_EXPR;
      break;
    case WIDEN_MULT_EXPR:
      *code1 = MULT_EXPR;
      break;
    default:
      return false;
    }

  if (!supportable_convert_operation (NOP_EXPR, vectype_out, vectype_in,
                                      &dummy_code))
    return false;

  op = optab_for_tree_code (*code1, vectype_out, optab_vector);
  return (optab_handler (op, TYPE_MODE (vectype_out)) != CODE_FOR_nothing);
}
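/* For instance (the V8QI/V8HI type names here are only illustrative), a
   caller wanting to open-code a V8QI -> V8HI widening subtract could check:

     enum tree_code code1;
     if (supportable_half_widening_operation (WIDEN_MINUS_EXPR, v8hi_type,
                                              v8qi_type, &code1))
       ... convert both inputs to v8hi_type with NOP_EXPR and then emit a
           plain code1 (here MINUS_EXPR) statement ...  */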
/* Function supportable_convert_operation

   Check whether an operation represented by the code CODE is a
   convert operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
   This function checks if these operations are supported
   by the target platform directly (via vector tree-codes).

   Output:
   - CODE1 is the code of the vector operation to be used when
     vectorizing the operation, if available.  */
bool
supportable_convert_operation (enum tree_code code,
                               tree vectype_out, tree vectype_in,
                               enum tree_code *code1)
{
  machine_mode m1, m2;
  bool truncp;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  /* First check if we can do the conversion directly.  */
  if ((code == FIX_TRUNC_EXPR
       && can_fix_p (m1, m2, TYPE_UNSIGNED (vectype_out), &truncp)
          != CODE_FOR_nothing)
      || (code == FLOAT_EXPR
          && can_float_p (m1, m2, TYPE_UNSIGNED (vectype_in))
             != CODE_FOR_nothing))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) > GET_MODE_UNIT_PRECISION (m2)
      && can_extend_p (m1, m2, TYPE_UNSIGNED (vectype_in)))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) < GET_MODE_UNIT_PRECISION (m2)
      && convert_optab_handler (trunc_optab, m1, m2) != CODE_FOR_nothing)
    {
      *code1 = code;
      return true;
    }

  return false;
}
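/* For example, before generating a vector int -> float conversion a caller
   might check (v4sf_type and v4si_type stand for whatever vector types are
   actually involved):

     enum tree_code code1;
     if (supportable_convert_operation (FLOAT_EXPR, v4sf_type, v4si_type,
                                        &code1))
       ... emit a single statement with code code1 ...  */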
/* Return true iff vec_cmp_optab/vec_cmpu_optab can handle a vector comparison
   for code CODE, comparing operands of type VALUE_TYPE and producing a result
   of type MASK_TYPE.  */

static bool
vec_cmp_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (value_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vec_cmp_compare_p (rcode, TYPE_MODE (value_type),
                                TYPE_MODE (mask_type));
}
/* Return true iff vec_cmpeq_optab can handle a vector comparison for code
   CODE, comparing operands of type VALUE_TYPE and producing a result of type
   MASK_TYPE.  */

static bool
vec_cmp_eq_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vec_cmp_eq_icode (TYPE_MODE (value_type), TYPE_MODE (mask_type))
         != CODE_FOR_nothing;
}
/* Return TRUE if an appropriate vector insn is available
   for a vector comparison expr with vector type VALUE_TYPE
   and resulting mask with MASK_TYPE.  */

bool
expand_vec_cmp_expr_p (tree value_type, tree mask_type, enum tree_code code)
{
  return vec_cmp_icode_p (value_type, mask_type, code)
         || vec_cmp_eq_icode_p (value_type, mask_type, code);
}
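/* A vector comparison producing a mask, e.g. MASK = A < B, can be checked
   roughly like this (VECTYPE and MASK_TYPE are placeholders for the actual
   operand and result types):

     if (expand_vec_cmp_expr_p (vectype, mask_type, LT_EXPR))
       ... the comparison can be expanded directly ...  */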
/* Return true iff vcond_optab/vcondu_optab can handle a vector
   comparison for code CODE, comparing operands of type CMP_OP_TYPE and
   producing a result of type VALUE_TYPE.  */

static bool
vcond_icode_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (cmp_op_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vcond_compare_p (rcode, TYPE_MODE (value_type),
                              TYPE_MODE (cmp_op_type));
}
/* Return true iff vcondeq_optab can handle a vector comparison for code CODE,
   comparing operands of type CMP_OP_TYPE and producing a result of type
   VALUE_TYPE.  */

static bool
vcond_eq_icode_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vcond_eq_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type))
         != CODE_FOR_nothing;
}
/* Return TRUE iff appropriate vector insns are available
   for a vector cond expr with vector type VALUE_TYPE and a comparison
   with operand vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  machine_mode value_mode = TYPE_MODE (value_type);
  machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (VECTOR_BOOLEAN_TYPE_P (cmp_op_type)
      && get_vcond_mask_icode (TYPE_MODE (value_type),
                               TYPE_MODE (cmp_op_type)) != CODE_FOR_nothing)
    return true;

  if (maybe_ne (GET_MODE_NUNITS (value_mode), GET_MODE_NUNITS (cmp_op_mode)))
    return false;

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    /* This may happen, for example, if code == SSA_NAME, in which case we
       cannot be certain whether a vector insn is available.  */
    return false;

  return vcond_icode_p (value_type, cmp_op_type, code)
         || vcond_eq_icode_p (value_type, cmp_op_type, code);
}
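/* As a sketch, a caller deciding whether a VEC_COND_EXPR such as
   A < B ? X : Y can be expanded might check (data_vectype and cmp_vectype
   are placeholders for the value and comparison operand types):

     if (expand_vec_cond_expr_p (data_vectype, cmp_vectype, LT_EXPR))
       ... the conditional select can be expanded directly ...  */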
/* Use the current target and options to initialize
   TREE_OPTIMIZATION_OPTABS (OPTNODE).  */

void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
  struct target_optabs *tmp_optabs = (struct target_optabs *)
    TREE_OPTIMIZATION_OPTABS (optnode);
  if (tmp_optabs)
    memset (tmp_optabs, 0, sizeof (struct target_optabs));
  else
    tmp_optabs = ggc_cleared_alloc<target_optabs> ();

  /* Generate a new set of optabs into tmp_optabs.  */
  init_all_optabs (tmp_optabs);

  /* If the optabs changed, record it.  */
  if (memcmp (tmp_optabs, this_target_optabs, sizeof (struct target_optabs)))
    TREE_OPTIMIZATION_OPTABS (optnode) = tmp_optabs;
  else
    {
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (tmp_optabs);
    }
}
/* Return TRUE if the target has support for the operation given by CODE
   on an operand of type TYPE.  OT_SUBTYPE selects which optab variant is
   queried; for vector shifts and rotates, optab_scalar asks for a shift
   by a scalar amount and optab_vector for a shift by a vector of amounts.  */

bool
target_supports_op_p (tree type, enum tree_code code,
                      enum optab_subtype ot_subtype)
{
  optab ot = optab_for_tree_code (code, type, ot_subtype);
  return (ot != unknown_optab
          && optab_handler (ot, TYPE_MODE (type)) != CODE_FOR_nothing);
}
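/* As a final usage sketch (VECTYPE is a placeholder), asking whether a
   whole-vector negation is available on the target could look like:

     if (target_supports_op_p (vectype, NEGATE_EXPR, optab_default))
       ... a vector negate instruction exists for TYPE_MODE (vectype) ...  */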