/* Tree-based target query functions relating to optabs
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "optabs.h"
#include "optabs-tree.h"
#include "stor-layout.h"

/* Return the optab used for computing the operation given by the tree code
   CODE and the type TYPE.  This function is not always usable (for example,
   it cannot give complete results for multiplication or division) but
   probably ought to be relied on more widely throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type,
                     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case MULT_HIGHPART_EXPR:
      return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;

    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      /* {s,u}mod_optab implements TRUNC_MOD_EXPR.  For scalar modes,
         expansion has code to adjust TRUNC_MOD_EXPR into the desired other
         modes, but for vector modes it does not.  The adjustment code
         should be instead emitted in tree-vect-patterns.cc.  */
      if (VECTOR_TYPE_P (type))
        return unknown_optab;
      /* FALLTHRU */
    case TRUNC_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
      /* {,u}{s,u}div_optab implements {TRUNC,EXACT}_DIV_EXPR or RDIV_EXPR.
         For scalar modes, expansion has code to adjust TRUNC_DIV_EXPR
         into the desired other modes, but for vector modes it does not.
         The adjustment code should be instead emitted in
         tree-vect-patterns.cc.  */
      if (VECTOR_TYPE_P (type))
        return unknown_optab;
      /* FALLTHRU */
    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return TYPE_SATURATING (type) ? unknown_optab : vashl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return vrotl_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotl_optab;

    case RROTATE_EXPR:
      if (VECTOR_TYPE_P (type))
        {
          if (subtype == optab_vector)
            return vrotr_optab;

          gcc_assert (subtype == optab_scalar);
        }
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case POINTER_PLUS_EXPR:
      return add_optab;

    case POINTER_DIFF_EXPR:
      return sub_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      {
        if (subtype == optab_vector_mixed_sign)
          return usdot_prod_optab;

        return (TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab);
      }

    case SAD_EXPR:
      return TYPE_UNSIGNED (type) ? usad_optab : ssad_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmadd_widen_optab : umadd_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
              ? (TYPE_SATURATING (type)
                 ? usmsub_widen_optab : umsub_widen_optab)
              : (TYPE_SATURATING (type)
                 ? ssmsub_widen_optab : smsub_widen_optab));

    case VEC_WIDEN_MULT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab);

    case VEC_WIDEN_MULT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab);

    case VEC_WIDEN_MULT_EVEN_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_even_optab : vec_widen_smult_even_optab);

    case VEC_WIDEN_MULT_ODD_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab);

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab);

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab);

    case VEC_UNPACK_HI_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_hi_optab : vec_unpacks_hi_optab);

    case VEC_UNPACK_LO_EXPR:
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_lo_optab : vec_unpacks_lo_optab);

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab);

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab);

    case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
      /* The signedness is determined from output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpack_ufix_trunc_hi_optab
              : vec_unpack_sfix_trunc_hi_optab);

    case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
      /* The signedness is determined from output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_unpack_ufix_trunc_lo_optab
              : vec_unpack_sfix_trunc_lo_optab);

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab);

    case VEC_PACK_FLOAT_EXPR:
      /* The signedness is determined from input operand.  */
      return (TYPE_UNSIGNED (type)
              ? vec_packu_float_optab : vec_packs_float_optab);

    case VEC_DUPLICATE_EXPR:
      return vec_duplicate_optab;

    case VEC_SERIES_EXPR:
      return vec_series_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
        return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case ABSU_EXPR:
      return abs_optab;

    default:
      return unknown_optab;
    }
}

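/* Usage sketch (illustrative only; "type" below stands for any suitable
   tree type): callers normally pair optab_for_tree_code with optab_handler
   to ask whether the current target can expand a given operation, e.g.

     optab ot = optab_for_tree_code (PLUS_EXPR, type, optab_default);
     if (ot != unknown_optab
         && optab_handler (ot, TYPE_MODE (type)) != CODE_FOR_nothing)
       ... the target has an add pattern for TYPE's mode ...

   target_supports_op_p below wraps exactly this pattern.  */
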
/* Check whether an operation represented by CODE is a 'half' widening
   operation in which the input vector type has half the number of bits of
   the output vector type e.g. V8QI->V8HI.

   This is handled by widening the inputs using NOP_EXPRs then using a
   non-widening stmt e.g. MINUS_EXPR.  RTL fusing converts these to the
   widening hardware instructions if supported.

   The more typical case (handled in supportable_widening_operation) is where
   the input vector type has the same number of bits as the output vector
   type.  In this case half the elements of the input vectors must be
   processed at a time into respective vector outputs with elements twice as
   wide i.e. a 'hi'/'lo' pair using codes such as VEC_WIDEN_MINUS_HI/LO.

   Supported widening operations:
     WIDEN_MULT_EXPR
     WIDEN_LSHIFT_EXPR

   Output:
   - CODE1 - The non-widened code, which will be used after the inputs are
     converted to the wide type.  */
bool
supportable_half_widening_operation (enum tree_code code, tree vectype_out,
                                     tree vectype_in, enum tree_code *code1)
{
  machine_mode m1, m2;
  enum tree_code dummy_code;
  optab op;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in),
                TYPE_VECTOR_SUBPARTS (vectype_out)))
    return false;

  switch (code)
    {
    case WIDEN_LSHIFT_EXPR:
      *code1 = LSHIFT_EXPR;
      break;
    case WIDEN_MULT_EXPR:
      *code1 = MULT_EXPR;
      break;
    default:
      return false;
    }

  if (!supportable_convert_operation (NOP_EXPR, vectype_out, vectype_in,
                                      &dummy_code))
    return false;

  op = optab_for_tree_code (*code1, vectype_out, optab_vector);
  return (optab_handler (op, TYPE_MODE (vectype_out)) != CODE_FOR_nothing);
}

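/* Illustrative example (the vector types are placeholders): with
   VECTYPE_IN = V8QI and VECTYPE_OUT = V8HI, a WIDEN_MULT_EXPR can be emitted
   as two NOP_EXPR conversions of the inputs to V8HI followed by an ordinary
   MULT_EXPR, provided the query below succeeds:

     tree_code new_code;
     if (supportable_half_widening_operation (WIDEN_MULT_EXPR, v8hi_vectype,
                                              v8qi_vectype, &new_code))
       ... new_code is now MULT_EXPR ...  */
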
/* Function supportable_convert_operation

   Check whether an operation represented by the code CODE is a
   convert operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
   This function checks if these operations are supported
   by the target platform directly (via vector tree-codes).

   Output:
   - CODE1 is the code of the vector operation to be used when
     vectorizing the operation, if available.  */

bool
supportable_convert_operation (enum tree_code code,
                               tree vectype_out, tree vectype_in,
                               enum tree_code *code1)
{
  machine_mode m1, m2;
  bool truncp;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  /* First check if we can do the conversion directly.  */
  if ((code == FIX_TRUNC_EXPR
       && can_fix_p (m1, m2, TYPE_UNSIGNED (vectype_out), &truncp)
          != CODE_FOR_nothing)
      || (code == FLOAT_EXPR
          && can_float_p (m1, m2, TYPE_UNSIGNED (vectype_in))
             != CODE_FOR_nothing))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) > GET_MODE_UNIT_PRECISION (m2)
      && can_extend_p (m1, m2, TYPE_UNSIGNED (vectype_in)))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) < GET_MODE_UNIT_PRECISION (m2)
      && convert_optab_handler (trunc_optab, m1, m2) != CODE_FOR_nothing)
    {
      *code1 = code;
      return true;
    }

  return false;
}

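/* Illustrative example (vector types are placeholders): asking whether an
   int->float conversion is supported as a single vector statement:

     tree_code vec_code;
     if (supportable_convert_operation (FLOAT_EXPR, v4sf_vectype,
                                        v4si_vectype, &vec_code))
       ... vec_code == FLOAT_EXPR and the conversion can be emitted
           directly ...  */
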
/* Return true iff vec_cmp_optab/vec_cmpu_optab can handle a vector comparison
   for code CODE, comparing operands of type VALUE_TYPE and producing a result
   of type MASK_TYPE.  */

static bool
vec_cmp_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (value_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vec_cmp_compare_p (rcode, TYPE_MODE (value_type),
                                TYPE_MODE (mask_type));
}

/* Return true iff vec_cmpeq_optab can handle a vector comparison for code
   CODE, comparing operands of type VALUE_TYPE and producing a result of type
   MASK_TYPE.  */

static bool
vec_cmp_eq_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vec_cmp_eq_icode (TYPE_MODE (value_type), TYPE_MODE (mask_type))
         != CODE_FOR_nothing;
}

/* Return TRUE if an appropriate vector insn is available
   for a vector comparison expr with vector type VALUE_TYPE
   and resulting mask of type MASK_TYPE.  */

bool
expand_vec_cmp_expr_p (tree value_type, tree mask_type, enum tree_code code)
{
  return vec_cmp_icode_p (value_type, mask_type, code)
         || vec_cmp_eq_icode_p (value_type, mask_type, code);
}

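/* Illustrative example (types are placeholders): before keeping a vector
   comparison such as "mask = a < b" in vector form, a caller can check

     expand_vec_cmp_expr_p (int_vectype, mask_vectype, LT_EXPR)

   which is true whenever the target provides a suitable vec_cmp{,u} (or,
   for EQ/NE, vec_cmpeq) pattern for this mode pair.  */
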
/* Return true iff vcond_optab/vcondu_optab can handle a vector
   comparison for code CODE, comparing operands of type CMP_OP_TYPE and
   producing a result of type VALUE_TYPE.  */

static bool
vcond_icode_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (cmp_op_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vcond_compare_p (rcode, TYPE_MODE (value_type),
                              TYPE_MODE (cmp_op_type));
}

/* Return true iff vcondeq_optab can handle a vector comparison for code CODE,
   comparing operands of type CMP_OP_TYPE and producing a result of type
   VALUE_TYPE.  */

static bool
vcond_eq_icode_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vcond_eq_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type))
         != CODE_FOR_nothing;
}

/* Return TRUE iff appropriate vector insns are available
   for a vector cond expr with vector type VALUE_TYPE and a comparison
   with operand vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  machine_mode value_mode = TYPE_MODE (value_type);
  machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (VECTOR_BOOLEAN_TYPE_P (cmp_op_type)
      && get_vcond_mask_icode (TYPE_MODE (value_type),
                               TYPE_MODE (cmp_op_type)) != CODE_FOR_nothing)
    return true;

  if (maybe_ne (GET_MODE_NUNITS (value_mode), GET_MODE_NUNITS (cmp_op_mode)))
    return false;

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    /* This may happen, for example, if code == SSA_NAME, in which case we
       cannot be certain whether a vector insn is available.  */
    return false;

  return vcond_icode_p (value_type, cmp_op_type, code)
         || vcond_eq_icode_p (value_type, cmp_op_type, code);
}

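/* Illustrative example (types are placeholders): a VEC_COND_EXPR whose
   condition compares int_vectype operands with LT_EXPR and whose value
   operands have data_vectype can only be kept in that combined form if

     expand_vec_cond_expr_p (data_vectype, int_vectype, LT_EXPR)

   returns true; otherwise the caller needs some other lowering for the
   comparison and the select.  */
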
/* Use the current target and options to initialize
   TREE_OPTIMIZATION_OPTABS (OPTNODE).  */

void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
  struct target_optabs *tmp_optabs = (struct target_optabs *)
    TREE_OPTIMIZATION_OPTABS (optnode);
  if (tmp_optabs)
    memset (tmp_optabs, 0, sizeof (struct target_optabs));
  else
    tmp_optabs = ggc_cleared_alloc<target_optabs> ();

  /* Generate a new set of optabs into tmp_optabs.  */
  init_all_optabs (tmp_optabs);

  /* If the optabs changed, record it.  */
  if (memcmp (tmp_optabs, this_target_optabs, sizeof (struct target_optabs)))
    TREE_OPTIMIZATION_OPTABS (optnode) = tmp_optabs;
  else
    {
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (tmp_optabs);
    }
}

/* Return TRUE if the target has support for vector right shift of an
   operand of type TYPE.  If OT_SUBTYPE is OPTAB_DEFAULT, check for existence
   of a shift by either a scalar or a vector.  Otherwise, check only
   for a shift that matches OT_SUBTYPE.  */

bool
target_supports_op_p (tree type, enum tree_code code,
                      enum optab_subtype ot_subtype)
{
  optab ot = optab_for_tree_code (code, type, ot_subtype);
  return (ot != unknown_optab
          && optab_handler (ot, TYPE_MODE (type)) != CODE_FOR_nothing);
}

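/* Illustrative example (VECTYPE is a placeholder): the OT_SUBTYPE argument
   matters mainly for vector shifts and rotates, where the shift amount can
   be a single scalar or a whole vector of per-element amounts:

     bool shift_by_scalar
       = target_supports_op_p (vectype, LSHIFT_EXPR, optab_scalar);
     bool shift_by_vector
       = target_supports_op_p (vectype, LSHIFT_EXPR, optab_vector);  */
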
/* Return true if the target has support for masked load/store.
   We can support masked load/store either by mask{load,store}
   or by mask_len_{load,store}.
   This helper function checks whether the target supports masked
   load/store and returns the corresponding IFN in the last argument
   (IFN_MASK_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}).  */

static bool
target_supports_mask_load_store_p (machine_mode mode, machine_mode mask_mode,
                                   bool is_load, internal_fn *ifn)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  optab len_op = is_load ? mask_len_load_optab : mask_len_store_optab;
  if (convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing)
    {
      if (ifn)
        *ifn = is_load ? IFN_MASK_LOAD : IFN_MASK_STORE;
      return true;
    }
  else if (convert_optab_handler (len_op, mode, mask_mode) != CODE_FOR_nothing)
    {
      if (ifn)
        *ifn = is_load ? IFN_MASK_LEN_LOAD : IFN_MASK_LEN_STORE;
      return true;
    }
  return false;
}

/* Return true if the target supports vector masked load/store for MODE.
   The last argument is an additional output: we set IFN to
   IFN_MASK_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE} according to
   which optab the target supports.  */

bool
can_vec_mask_load_store_p (machine_mode mode,
                           machine_mode mask_mode,
                           bool is_load,
                           internal_fn *ifn)
{
  machine_mode vmode;

  /* If mode is vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return target_supports_mask_load_store_p (mode, mask_mode, is_load, ifn);

  /* Otherwise, return true if there is some vector mode with
     the mask load/store supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  vmode = targetm.vectorize.preferred_simd_mode (smode);
  if (VECTOR_MODE_P (vmode)
      && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
      && target_supports_mask_load_store_p (vmode, mask_mode, is_load, ifn))
    return true;

  auto_vector_modes vector_modes;
  targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
  for (machine_mode base_mode : vector_modes)
    if (related_vector_mode (base_mode, smode).exists (&vmode)
        && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
        && target_supports_mask_load_store_p (vmode, mask_mode, is_load, ifn))
      return true;
  return false;
}

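/* Illustrative example (modes are placeholders): a caller may pass the
   scalar mode of the element being accessed and let the function pick a
   vector mode and its mask mode; the incoming mask_mode is not used on that
   path, so it can simply be VOIDmode:

     internal_fn ifn;
     if (can_vec_mask_load_store_p (SImode, VOIDmode, /*is_load=*/true, &ifn))
       ... ifn is IFN_MASK_LOAD or IFN_MASK_LEN_LOAD ...  */
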
/* Return true if the target has support for len load/store.
   We can support len load/store either by len_{load,store}
   or by mask_len_{load,store}.
   This helper function checks whether the target supports len
   load/store and returns the corresponding IFN in the last argument
   (IFN_LEN_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}).  */

static bool
target_supports_len_load_store_p (machine_mode mode, bool is_load,
                                  internal_fn *ifn)
{
  optab op = is_load ? len_load_optab : len_store_optab;
  optab masked_op = is_load ? mask_len_load_optab : mask_len_store_optab;

  if (direct_optab_handler (op, mode))
    {
      if (ifn)
        *ifn = is_load ? IFN_LEN_LOAD : IFN_LEN_STORE;
      return true;
    }
  machine_mode mask_mode;
  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
      && convert_optab_handler (masked_op, mode, mask_mode) != CODE_FOR_nothing)
    {
      if (ifn)
        *ifn = is_load ? IFN_MASK_LEN_LOAD : IFN_MASK_LEN_STORE;
      return true;
    }
  return false;
}

/* If the target supports a vector load/store with length for vector mode
   MODE, return the corresponding vector mode, otherwise return
   opt_machine_mode ().  There are two flavors of vector load/store with
   length: one measures the length in bytes, the other measures it in lanes.
   As the len_{load,store} optabs point out, for the flavor with bytes we
   use VnQI to wrap the other supportable vector modes of the same size.
   The last argument is an additional output: we set IFN to
   IFN_LEN_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE} according to which
   optab the target supports.  */

opt_machine_mode
get_len_load_store_mode (machine_mode mode, bool is_load, internal_fn *ifn)
{
  gcc_assert (VECTOR_MODE_P (mode));

  /* Check if length in lanes is supported for this mode directly.  */
  if (target_supports_len_load_store_p (mode, is_load, ifn))
    return mode;

  /* Check if length in bytes is supported for the same vector size VnQI.  */
  machine_mode vmode;
  poly_uint64 nunits = GET_MODE_SIZE (mode);
  if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
      && target_supports_len_load_store_p (vmode, is_load, ifn))
    return vmode;

  return opt_machine_mode ();
}

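/* Illustrative example (modes are placeholders): on a target that only
   provides byte-length accesses, querying a V4SI vector may come back with
   the same-sized VnQI mode instead:

     internal_fn ifn;
     machine_mode lmode;
     if (get_len_load_store_mode (V4SImode, /*is_load=*/true, &ifn)
           .exists (&lmode))
       ... lmode is either V4SImode (length counted in lanes) or e.g.
           V16QImode (length counted in bytes), and ifn says which internal
           function to emit ...  */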