1 /* General types and functions that are useful for processing of OpenMP,
2 OpenACC and similar directives at various stages of compilation.
4 Copyright (C) 2005-2023 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Find an OMP clause of type KIND within CLAUSES. */
26 #include "coretypes.h"
32 #include "diagnostic-core.h"
33 #include "fold-const.h"
34 #include "langhooks.h"
35 #include "omp-general.h"
36 #include "stringpool.h"
40 #include "alloc-pool.h"
41 #include "symbol-summary.h"
42 #include "tree-pass.h"
43 #include "omp-device-properties.h"
44 #include "tree-iterator.h"
45 #include "data-streamer.h"
46 #include "streamer-hooks.h"
48 #include "omp-general.h"
49 #include "tree-pretty-print.h"
51 enum omp_requires omp_requires_mask
;
54 omp_find_clause (tree clauses
, enum omp_clause_code kind
)
56 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
57 if (OMP_CLAUSE_CODE (clauses
) == kind
)
63 /* True if OpenMP should regard this DECL as being a scalar which has Fortran's
64 allocatable or pointer attribute. */
66 omp_is_allocatable_or_ptr (tree decl
)
68 return lang_hooks
.decls
.omp_is_allocatable_or_ptr (decl
);
71 /* Check whether this DECL belongs to a Fortran optional argument.
72 With 'for_present_check' set to false, decls which are optional parameters
73 themselve are returned as tree - or a NULL_TREE otherwise. Those decls are
74 always pointers. With 'for_present_check' set to true, the decl for checking
75 whether an argument is present is returned; for arguments with value
76 attribute this is the hidden argument and of BOOLEAN_TYPE. If the decl is
77 unrelated to optional arguments, NULL_TREE is returned. */
80 omp_check_optional_argument (tree decl
, bool for_present_check
)
82 return lang_hooks
.decls
.omp_check_optional_argument (decl
, for_present_check
);
85 /* Return true if TYPE is an OpenMP mappable type. */
88 omp_mappable_type (tree type
)
90 /* Mappable type has to be complete. */
91 if (type
== error_mark_node
|| !COMPLETE_TYPE_P (type
))
96 /* True if OpenMP should privatize what this DECL points to rather
97 than the DECL itself. */
100 omp_privatize_by_reference (tree decl
)
102 return lang_hooks
.decls
.omp_privatize_by_reference (decl
);
105 /* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or GT_EXPR,
106 given that V is the loop index variable and STEP is loop step. */
109 omp_adjust_for_condition (location_t loc
, enum tree_code
*cond_code
, tree
*n2
,
119 gcc_assert (TREE_CODE (step
) == INTEGER_CST
);
120 if (TREE_CODE (TREE_TYPE (v
)) == INTEGER_TYPE
)
122 if (integer_onep (step
))
123 *cond_code
= LT_EXPR
;
126 gcc_assert (integer_minus_onep (step
));
127 *cond_code
= GT_EXPR
;
132 tree unit
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (v
)));
133 gcc_assert (TREE_CODE (unit
) == INTEGER_CST
);
134 if (tree_int_cst_equal (unit
, step
))
135 *cond_code
= LT_EXPR
;
138 gcc_assert (wi::neg (wi::to_widest (unit
))
139 == wi::to_widest (step
));
140 *cond_code
= GT_EXPR
;
147 if (POINTER_TYPE_P (TREE_TYPE (*n2
)))
148 *n2
= fold_build_pointer_plus_hwi_loc (loc
, *n2
, 1);
150 *n2
= fold_build2_loc (loc
, PLUS_EXPR
, TREE_TYPE (*n2
), *n2
,
151 build_int_cst (TREE_TYPE (*n2
), 1));
152 *cond_code
= LT_EXPR
;
155 if (POINTER_TYPE_P (TREE_TYPE (*n2
)))
156 *n2
= fold_build_pointer_plus_hwi_loc (loc
, *n2
, -1);
158 *n2
= fold_build2_loc (loc
, MINUS_EXPR
, TREE_TYPE (*n2
), *n2
,
159 build_int_cst (TREE_TYPE (*n2
), 1));
160 *cond_code
= GT_EXPR
;
167 /* Return the looping step from INCR, extracted from the step of a gimple omp
171 omp_get_for_step_from_incr (location_t loc
, tree incr
)
174 switch (TREE_CODE (incr
))
177 step
= TREE_OPERAND (incr
, 1);
179 case POINTER_PLUS_EXPR
:
180 step
= fold_convert (ssizetype
, TREE_OPERAND (incr
, 1));
183 step
= TREE_OPERAND (incr
, 1);
184 step
= fold_build1_loc (loc
, NEGATE_EXPR
, TREE_TYPE (step
), step
);
192 /* Extract the header elements of parallel loop FOR_STMT and store
196 omp_extract_for_data (gomp_for
*for_stmt
, struct omp_for_data
*fd
,
197 struct omp_for_data_loop
*loops
)
199 tree t
, var
, *collapse_iter
, *collapse_count
;
200 tree count
= NULL_TREE
, iter_type
= long_integer_type_node
;
201 struct omp_for_data_loop
*loop
;
203 struct omp_for_data_loop dummy_loop
;
204 location_t loc
= gimple_location (for_stmt
);
205 bool simd
= gimple_omp_for_kind (for_stmt
) == GF_OMP_FOR_KIND_SIMD
;
206 bool distribute
= gimple_omp_for_kind (for_stmt
)
207 == GF_OMP_FOR_KIND_DISTRIBUTE
;
208 bool taskloop
= gimple_omp_for_kind (for_stmt
)
209 == GF_OMP_FOR_KIND_TASKLOOP
;
210 bool order_reproducible
= false;
213 fd
->for_stmt
= for_stmt
;
215 fd
->have_nowait
= distribute
|| simd
;
216 fd
->have_ordered
= false;
217 fd
->have_reductemp
= false;
218 fd
->have_pointer_condtemp
= false;
219 fd
->have_scantemp
= false;
220 fd
->have_nonctrl_scantemp
= false;
221 fd
->non_rect
= false;
222 fd
->lastprivate_conditional
= 0;
223 fd
->tiling
= NULL_TREE
;
226 fd
->first_nonrect
= -1;
227 fd
->last_nonrect
= -1;
228 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
229 fd
->sched_modifiers
= 0;
230 fd
->chunk_size
= NULL_TREE
;
231 fd
->simd_schedule
= false;
232 fd
->first_inner_iterations
= NULL_TREE
;
233 fd
->factor
= NULL_TREE
;
234 fd
->adjn1
= NULL_TREE
;
235 collapse_iter
= NULL
;
236 collapse_count
= NULL
;
238 for (t
= gimple_omp_for_clauses (for_stmt
); t
; t
= OMP_CLAUSE_CHAIN (t
))
239 switch (OMP_CLAUSE_CODE (t
))
241 case OMP_CLAUSE_NOWAIT
:
242 fd
->have_nowait
= true;
244 case OMP_CLAUSE_ORDERED
:
245 fd
->have_ordered
= true;
246 if (OMP_CLAUSE_ORDERED_DOACROSS (t
))
248 if (OMP_CLAUSE_ORDERED_EXPR (t
))
249 fd
->ordered
= tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t
));
254 case OMP_CLAUSE_SCHEDULE
:
255 gcc_assert (!distribute
&& !taskloop
);
257 = (enum omp_clause_schedule_kind
)
258 (OMP_CLAUSE_SCHEDULE_KIND (t
) & OMP_CLAUSE_SCHEDULE_MASK
);
259 fd
->sched_modifiers
= (OMP_CLAUSE_SCHEDULE_KIND (t
)
260 & ~OMP_CLAUSE_SCHEDULE_MASK
);
261 fd
->chunk_size
= OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t
);
262 fd
->simd_schedule
= OMP_CLAUSE_SCHEDULE_SIMD (t
);
264 case OMP_CLAUSE_DIST_SCHEDULE
:
265 gcc_assert (distribute
);
266 fd
->chunk_size
= OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t
);
268 case OMP_CLAUSE_COLLAPSE
:
269 fd
->collapse
= tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t
));
270 if (fd
->collapse
> 1)
272 collapse_iter
= &OMP_CLAUSE_COLLAPSE_ITERVAR (t
);
273 collapse_count
= &OMP_CLAUSE_COLLAPSE_COUNT (t
);
276 case OMP_CLAUSE_TILE
:
277 fd
->tiling
= OMP_CLAUSE_TILE_LIST (t
);
278 fd
->collapse
= list_length (fd
->tiling
);
279 gcc_assert (fd
->collapse
);
280 collapse_iter
= &OMP_CLAUSE_TILE_ITERVAR (t
);
281 collapse_count
= &OMP_CLAUSE_TILE_COUNT (t
);
283 case OMP_CLAUSE__REDUCTEMP_
:
284 fd
->have_reductemp
= true;
286 case OMP_CLAUSE_LASTPRIVATE
:
287 if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (t
))
288 fd
->lastprivate_conditional
++;
290 case OMP_CLAUSE__CONDTEMP_
:
291 if (POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (t
))))
292 fd
->have_pointer_condtemp
= true;
294 case OMP_CLAUSE__SCANTEMP_
:
295 fd
->have_scantemp
= true;
296 if (!OMP_CLAUSE__SCANTEMP__ALLOC (t
)
297 && !OMP_CLAUSE__SCANTEMP__CONTROL (t
))
298 fd
->have_nonctrl_scantemp
= true;
300 case OMP_CLAUSE_ORDER
:
301 /* FIXME: For OpenMP 5.2 this should change to
302 if (OMP_CLAUSE_ORDER_REPRODUCIBLE (t))
303 (with the exception of loop construct but that lowers to
304 no schedule/dist_schedule clauses currently). */
305 if (!OMP_CLAUSE_ORDER_UNCONSTRAINED (t
))
306 order_reproducible
= true;
311 if (fd
->ordered
== -1)
312 fd
->ordered
= fd
->collapse
;
314 /* For order(reproducible:concurrent) schedule ({dynamic,guided,runtime})
315 we have either the option to expensively remember at runtime how we've
316 distributed work from first loop and reuse that in following loops with
317 the same number of iterations and schedule, or just force static schedule.
318 OpenMP API calls etc. aren't allowed in order(concurrent) bodies so
319 users can't observe it easily anyway. */
320 if (order_reproducible
)
321 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
322 if (fd
->collapse
> 1 || fd
->tiling
)
325 fd
->loops
= &fd
->loop
;
327 if (fd
->ordered
&& fd
->collapse
== 1 && loops
!= NULL
)
332 collapse_iter
= &iterv
;
333 collapse_count
= &countv
;
336 /* FIXME: for now map schedule(auto) to schedule(static).
337 There should be analysis to determine whether all iterations
338 are approximately the same amount of work (then schedule(static)
339 is best) or if it varies (then schedule(dynamic,N) is better). */
340 if (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_AUTO
)
342 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
343 gcc_assert (fd
->chunk_size
== NULL
);
345 gcc_assert ((fd
->collapse
== 1 && !fd
->tiling
) || collapse_iter
!= NULL
);
347 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_RUNTIME
;
348 if (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
)
349 gcc_assert (fd
->chunk_size
== NULL
);
350 else if (fd
->chunk_size
== NULL
)
352 /* We only need to compute a default chunk size for ordered
353 static loops and dynamic loops. */
354 if (fd
->sched_kind
!= OMP_CLAUSE_SCHEDULE_STATIC
356 fd
->chunk_size
= (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
)
357 ? integer_zero_node
: integer_one_node
;
360 int cnt
= fd
->ordered
? fd
->ordered
: fd
->collapse
;
361 int single_nonrect
= -1;
362 tree single_nonrect_count
= NULL_TREE
;
363 enum tree_code single_nonrect_cond_code
= ERROR_MARK
;
364 for (i
= 1; i
< cnt
; i
++)
366 tree n1
= gimple_omp_for_initial (for_stmt
, i
);
367 tree n2
= gimple_omp_for_final (for_stmt
, i
);
368 if (TREE_CODE (n1
) == TREE_VEC
)
375 for (int j
= i
- 1; j
>= 0; j
--)
376 if (TREE_VEC_ELT (n1
, 0) == gimple_omp_for_index (for_stmt
, j
))
383 else if (TREE_CODE (n2
) == TREE_VEC
)
390 for (int j
= i
- 1; j
>= 0; j
--)
391 if (TREE_VEC_ELT (n2
, 0) == gimple_omp_for_index (for_stmt
, j
))
399 for (i
= 0; i
< cnt
; i
++)
404 && (fd
->ordered
== 0 || loops
== NULL
))
406 else if (loops
!= NULL
)
411 loop
->v
= gimple_omp_for_index (for_stmt
, i
);
412 gcc_assert (SSA_VAR_P (loop
->v
));
413 gcc_assert (TREE_CODE (TREE_TYPE (loop
->v
)) == INTEGER_TYPE
414 || TREE_CODE (TREE_TYPE (loop
->v
)) == POINTER_TYPE
);
415 var
= TREE_CODE (loop
->v
) == SSA_NAME
? SSA_NAME_VAR (loop
->v
) : loop
->v
;
416 loop
->n1
= gimple_omp_for_initial (for_stmt
, i
);
417 loop
->m1
= NULL_TREE
;
418 loop
->m2
= NULL_TREE
;
420 loop
->non_rect_referenced
= false;
421 if (TREE_CODE (loop
->n1
) == TREE_VEC
)
423 for (int j
= i
- 1; j
>= 0; j
--)
424 if (TREE_VEC_ELT (loop
->n1
, 0) == gimple_omp_for_index (for_stmt
, j
))
428 loops
[j
].non_rect_referenced
= true;
429 if (fd
->first_nonrect
== -1 || fd
->first_nonrect
> j
)
430 fd
->first_nonrect
= j
;
433 gcc_assert (loop
->outer
);
434 loop
->m1
= TREE_VEC_ELT (loop
->n1
, 1);
435 loop
->n1
= TREE_VEC_ELT (loop
->n1
, 2);
437 fd
->last_nonrect
= i
;
440 loop
->cond_code
= gimple_omp_for_cond (for_stmt
, i
);
441 loop
->n2
= gimple_omp_for_final (for_stmt
, i
);
442 gcc_assert (loop
->cond_code
!= NE_EXPR
443 || (gimple_omp_for_kind (for_stmt
)
444 != GF_OMP_FOR_KIND_OACC_LOOP
));
445 if (TREE_CODE (loop
->n2
) == TREE_VEC
)
448 gcc_assert (TREE_VEC_ELT (loop
->n2
, 0)
449 == gimple_omp_for_index (for_stmt
, i
- loop
->outer
));
451 for (int j
= i
- 1; j
>= 0; j
--)
452 if (TREE_VEC_ELT (loop
->n2
, 0) == gimple_omp_for_index (for_stmt
, j
))
456 loops
[j
].non_rect_referenced
= true;
457 if (fd
->first_nonrect
== -1 || fd
->first_nonrect
> j
)
458 fd
->first_nonrect
= j
;
461 gcc_assert (loop
->outer
);
462 loop
->m2
= TREE_VEC_ELT (loop
->n2
, 1);
463 loop
->n2
= TREE_VEC_ELT (loop
->n2
, 2);
465 fd
->last_nonrect
= i
;
468 t
= gimple_omp_for_incr (for_stmt
, i
);
469 gcc_assert (TREE_OPERAND (t
, 0) == var
);
470 loop
->step
= omp_get_for_step_from_incr (loc
, t
);
472 omp_adjust_for_condition (loc
, &loop
->cond_code
, &loop
->n2
, loop
->v
,
476 || (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
477 && !fd
->have_ordered
))
479 if (fd
->collapse
== 1 && !fd
->tiling
)
480 iter_type
= TREE_TYPE (loop
->v
);
482 || TYPE_PRECISION (iter_type
)
483 < TYPE_PRECISION (TREE_TYPE (loop
->v
)))
485 = build_nonstandard_integer_type
486 (TYPE_PRECISION (TREE_TYPE (loop
->v
)), 1);
488 else if (iter_type
!= long_long_unsigned_type_node
)
490 if (POINTER_TYPE_P (TREE_TYPE (loop
->v
)))
491 iter_type
= long_long_unsigned_type_node
;
492 else if (TYPE_UNSIGNED (TREE_TYPE (loop
->v
))
493 && TYPE_PRECISION (TREE_TYPE (loop
->v
))
494 >= TYPE_PRECISION (iter_type
))
498 if (loop
->cond_code
== LT_EXPR
)
499 n
= fold_build2_loc (loc
, PLUS_EXPR
, TREE_TYPE (loop
->v
),
500 loop
->n2
, loop
->step
);
505 || TREE_CODE (n
) != INTEGER_CST
506 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type
), n
))
507 iter_type
= long_long_unsigned_type_node
;
509 else if (TYPE_PRECISION (TREE_TYPE (loop
->v
))
510 > TYPE_PRECISION (iter_type
))
514 if (loop
->cond_code
== LT_EXPR
)
517 n2
= fold_build2_loc (loc
, PLUS_EXPR
, TREE_TYPE (loop
->v
),
518 loop
->n2
, loop
->step
);
522 n1
= fold_build2_loc (loc
, MINUS_EXPR
, TREE_TYPE (loop
->v
),
523 loop
->n2
, loop
->step
);
528 || TREE_CODE (n1
) != INTEGER_CST
529 || TREE_CODE (n2
) != INTEGER_CST
530 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type
), n1
)
531 || !tree_int_cst_lt (n2
, TYPE_MAX_VALUE (iter_type
)))
532 iter_type
= long_long_unsigned_type_node
;
536 if (i
>= fd
->collapse
)
539 if (collapse_count
&& *collapse_count
== NULL
)
541 if (count
&& integer_zerop (count
))
543 tree n1first
= NULL_TREE
, n2first
= NULL_TREE
;
544 tree n1last
= NULL_TREE
, n2last
= NULL_TREE
;
545 tree ostep
= NULL_TREE
;
546 if (loop
->m1
|| loop
->m2
)
548 if (count
== NULL_TREE
)
550 if (single_nonrect
== -1
551 || (loop
->m1
&& TREE_CODE (loop
->m1
) != INTEGER_CST
)
552 || (loop
->m2
&& TREE_CODE (loop
->m2
) != INTEGER_CST
)
553 || TREE_CODE (loop
->n1
) != INTEGER_CST
554 || TREE_CODE (loop
->n2
) != INTEGER_CST
555 || TREE_CODE (loop
->step
) != INTEGER_CST
)
560 tree var
= gimple_omp_for_initial (for_stmt
, single_nonrect
);
561 tree itype
= TREE_TYPE (var
);
562 tree first
= gimple_omp_for_initial (for_stmt
, single_nonrect
);
563 t
= gimple_omp_for_incr (for_stmt
, single_nonrect
);
564 ostep
= omp_get_for_step_from_incr (loc
, t
);
565 t
= fold_binary (MINUS_EXPR
, long_long_unsigned_type_node
,
566 single_nonrect_count
,
567 build_one_cst (long_long_unsigned_type_node
));
568 t
= fold_convert (itype
, t
);
569 first
= fold_convert (itype
, first
);
570 ostep
= fold_convert (itype
, ostep
);
571 tree last
= fold_binary (PLUS_EXPR
, itype
, first
,
572 fold_binary (MULT_EXPR
, itype
, t
,
574 if (TREE_CODE (first
) != INTEGER_CST
575 || TREE_CODE (last
) != INTEGER_CST
)
582 tree m1
= fold_convert (itype
, loop
->m1
);
583 tree n1
= fold_convert (itype
, loop
->n1
);
584 n1first
= fold_binary (PLUS_EXPR
, itype
,
585 fold_binary (MULT_EXPR
, itype
,
587 n1last
= fold_binary (PLUS_EXPR
, itype
,
588 fold_binary (MULT_EXPR
, itype
,
592 n1first
= n1last
= loop
->n1
;
595 tree n2
= fold_convert (itype
, loop
->n2
);
596 tree m2
= fold_convert (itype
, loop
->m2
);
597 n2first
= fold_binary (PLUS_EXPR
, itype
,
598 fold_binary (MULT_EXPR
, itype
,
600 n2last
= fold_binary (PLUS_EXPR
, itype
,
601 fold_binary (MULT_EXPR
, itype
,
605 n2first
= n2last
= loop
->n2
;
606 n1first
= fold_convert (TREE_TYPE (loop
->v
), n1first
);
607 n2first
= fold_convert (TREE_TYPE (loop
->v
), n2first
);
608 n1last
= fold_convert (TREE_TYPE (loop
->v
), n1last
);
609 n2last
= fold_convert (TREE_TYPE (loop
->v
), n2last
);
610 t
= fold_binary (loop
->cond_code
, boolean_type_node
,
612 tree t2
= fold_binary (loop
->cond_code
, boolean_type_node
,
614 if (t
&& t2
&& integer_nonzerop (t
) && integer_nonzerop (t2
))
615 /* All outer loop iterators have at least one inner loop
616 iteration. Try to compute the count at compile time. */
618 else if (t
&& t2
&& integer_zerop (t
) && integer_zerop (t2
))
619 /* No iterations of the inner loop. count will be set to
621 else if (TYPE_UNSIGNED (itype
)
624 || TREE_CODE (t
) != INTEGER_CST
625 || TREE_CODE (t2
) != INTEGER_CST
)
627 /* Punt (for now). */
633 /* Some iterations of the outer loop have zero iterations
634 of the inner loop, while others have at least one.
635 In this case, we need to adjust one of those outer
636 loop bounds. If ADJ_FIRST, we need to adjust outer n1
637 (first), otherwise outer n2 (last). */
638 bool adj_first
= integer_zerop (t
);
639 tree n1
= fold_convert (itype
, loop
->n1
);
640 tree n2
= fold_convert (itype
, loop
->n2
);
641 tree m1
= loop
->m1
? fold_convert (itype
, loop
->m1
)
642 : build_zero_cst (itype
);
643 tree m2
= loop
->m2
? fold_convert (itype
, loop
->m2
)
644 : build_zero_cst (itype
);
645 t
= fold_binary (MINUS_EXPR
, itype
, n1
, n2
);
646 t2
= fold_binary (MINUS_EXPR
, itype
, m2
, m1
);
647 t
= fold_binary (TRUNC_DIV_EXPR
, itype
, t
, t2
);
648 t2
= fold_binary (MINUS_EXPR
, itype
, t
, first
);
649 t2
= fold_binary (TRUNC_MOD_EXPR
, itype
, t2
, ostep
);
650 t
= fold_binary (MINUS_EXPR
, itype
, t
, t2
);
652 = fold_binary (PLUS_EXPR
, itype
, n1
,
653 fold_binary (MULT_EXPR
, itype
, m1
, t
));
655 = fold_binary (PLUS_EXPR
, itype
, n2
,
656 fold_binary (MULT_EXPR
, itype
, m2
, t
));
657 t2
= fold_binary (loop
->cond_code
, boolean_type_node
,
659 tree t3
= fold_binary (MULT_EXPR
, itype
, m1
, ostep
);
660 tree t4
= fold_binary (MULT_EXPR
, itype
, m2
, ostep
);
665 if (integer_nonzerop (t2
))
672 t3
= fold_binary (MINUS_EXPR
, itype
, n1cur
, t3
);
673 t4
= fold_binary (MINUS_EXPR
, itype
, n2cur
, t4
);
674 t3
= fold_binary (loop
->cond_code
,
675 boolean_type_node
, t3
, t4
);
676 gcc_assert (integer_zerop (t3
));
681 t3
= fold_binary (PLUS_EXPR
, itype
, n1cur
, t3
);
682 t4
= fold_binary (PLUS_EXPR
, itype
, n2cur
, t4
);
683 new_first
= fold_binary (PLUS_EXPR
, itype
, t
, ostep
);
688 t3
= fold_binary (loop
->cond_code
,
689 boolean_type_node
, t3
, t4
);
690 gcc_assert (integer_nonzerop (t3
));
693 diff
= fold_binary (MINUS_EXPR
, itype
, new_first
, first
);
700 if (integer_zerop (t2
))
702 t3
= fold_binary (MINUS_EXPR
, itype
, n1cur
, t3
);
703 t4
= fold_binary (MINUS_EXPR
, itype
, n2cur
, t4
);
704 new_last
= fold_binary (MINUS_EXPR
, itype
, t
, ostep
);
709 t3
= fold_binary (loop
->cond_code
,
710 boolean_type_node
, t3
, t4
);
711 gcc_assert (integer_nonzerop (t3
));
721 t3
= fold_binary (PLUS_EXPR
, itype
, n1cur
, t3
);
722 t4
= fold_binary (PLUS_EXPR
, itype
, n2cur
, t4
);
723 t3
= fold_binary (loop
->cond_code
,
724 boolean_type_node
, t3
, t4
);
725 gcc_assert (integer_zerop (t3
));
728 diff
= fold_binary (MINUS_EXPR
, itype
, last
, new_last
);
730 if (TYPE_UNSIGNED (itype
)
731 && single_nonrect_cond_code
== GT_EXPR
)
732 diff
= fold_binary (TRUNC_DIV_EXPR
, itype
,
733 fold_unary (NEGATE_EXPR
, itype
, diff
),
734 fold_unary (NEGATE_EXPR
, itype
,
737 diff
= fold_binary (TRUNC_DIV_EXPR
, itype
, diff
, ostep
);
738 diff
= fold_convert (long_long_unsigned_type_node
, diff
);
740 = fold_binary (MINUS_EXPR
, long_long_unsigned_type_node
,
741 single_nonrect_count
, diff
);
746 t
= fold_binary (loop
->cond_code
, boolean_type_node
,
747 fold_convert (TREE_TYPE (loop
->v
), loop
->n1
),
748 fold_convert (TREE_TYPE (loop
->v
), loop
->n2
));
749 if (t
&& integer_zerop (t
))
750 count
= build_zero_cst (long_long_unsigned_type_node
);
751 else if ((i
== 0 || count
!= NULL_TREE
)
752 && TREE_CODE (TREE_TYPE (loop
->v
)) == INTEGER_TYPE
753 && TREE_CONSTANT (loop
->n1
)
754 && TREE_CONSTANT (loop
->n2
)
755 && TREE_CODE (loop
->step
) == INTEGER_CST
)
757 tree itype
= TREE_TYPE (loop
->v
);
759 if (POINTER_TYPE_P (itype
))
760 itype
= signed_type_for (itype
);
761 t
= build_int_cst (itype
, (loop
->cond_code
== LT_EXPR
? -1 : 1));
762 t
= fold_build2 (PLUS_EXPR
, itype
,
763 fold_convert (itype
, loop
->step
), t
);
766 if (loop
->m1
|| loop
->m2
)
768 gcc_assert (single_nonrect
!= -1);
772 t
= fold_build2 (PLUS_EXPR
, itype
, t
, fold_convert (itype
, n2
));
773 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
774 tree step
= fold_convert_loc (loc
, itype
, loop
->step
);
775 if (TYPE_UNSIGNED (itype
) && loop
->cond_code
== GT_EXPR
)
776 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
777 fold_build1 (NEGATE_EXPR
, itype
, t
),
778 fold_build1 (NEGATE_EXPR
, itype
, step
));
780 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
781 tree llutype
= long_long_unsigned_type_node
;
782 t
= fold_convert (llutype
, t
);
783 if (loop
->m1
|| loop
->m2
)
785 /* t is number of iterations of inner loop at either first
786 or last value of the outer iterator (the one with fewer
788 Compute t2 = ((m2 - m1) * ostep) / step
789 and niters = outer_count * t
790 + t2 * ((outer_count - 1) * outer_count / 2)
792 tree m1
= loop
->m1
? loop
->m1
: integer_zero_node
;
793 tree m2
= loop
->m2
? loop
->m2
: integer_zero_node
;
794 m1
= fold_convert (itype
, m1
);
795 m2
= fold_convert (itype
, m2
);
796 tree t2
= fold_build2 (MINUS_EXPR
, itype
, m2
, m1
);
797 t2
= fold_build2 (MULT_EXPR
, itype
, t2
, ostep
);
798 if (TYPE_UNSIGNED (itype
) && loop
->cond_code
== GT_EXPR
)
799 t2
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
800 fold_build1 (NEGATE_EXPR
, itype
, t2
),
801 fold_build1 (NEGATE_EXPR
, itype
, step
));
803 t2
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t2
, step
);
804 t2
= fold_convert (llutype
, t2
);
805 fd
->first_inner_iterations
= t
;
807 t
= fold_build2 (MULT_EXPR
, llutype
, t
,
808 single_nonrect_count
);
809 tree t3
= fold_build2 (MINUS_EXPR
, llutype
,
810 single_nonrect_count
,
811 build_one_cst (llutype
));
812 t3
= fold_build2 (MULT_EXPR
, llutype
, t3
,
813 single_nonrect_count
);
814 t3
= fold_build2 (TRUNC_DIV_EXPR
, llutype
, t3
,
815 build_int_cst (llutype
, 2));
816 t2
= fold_build2 (MULT_EXPR
, llutype
, t2
, t3
);
817 t
= fold_build2 (PLUS_EXPR
, llutype
, t
, t2
);
819 if (i
== single_nonrect
)
821 if (integer_zerop (t
) || TREE_CODE (t
) != INTEGER_CST
)
825 single_nonrect_count
= t
;
826 single_nonrect_cond_code
= loop
->cond_code
;
827 if (count
== NULL_TREE
)
828 count
= build_one_cst (llutype
);
831 else if (count
!= NULL_TREE
)
832 count
= fold_build2 (MULT_EXPR
, llutype
, count
, t
);
835 if (TREE_CODE (count
) != INTEGER_CST
)
838 else if (count
&& !integer_zerop (count
))
845 && (fd
->sched_kind
!= OMP_CLAUSE_SCHEDULE_STATIC
846 || fd
->have_ordered
))
848 if (!tree_int_cst_lt (count
, TYPE_MAX_VALUE (long_integer_type_node
)))
849 iter_type
= long_long_unsigned_type_node
;
851 iter_type
= long_integer_type_node
;
853 else if (collapse_iter
&& *collapse_iter
!= NULL
)
854 iter_type
= TREE_TYPE (*collapse_iter
);
855 fd
->iter_type
= iter_type
;
856 if (collapse_iter
&& *collapse_iter
== NULL
)
857 *collapse_iter
= create_tmp_var (iter_type
, ".iter");
858 if (collapse_count
&& *collapse_count
== NULL
)
862 *collapse_count
= fold_convert_loc (loc
, iter_type
, count
);
863 if (fd
->first_inner_iterations
&& fd
->factor
)
865 t
= make_tree_vec (4);
866 TREE_VEC_ELT (t
, 0) = *collapse_count
;
867 TREE_VEC_ELT (t
, 1) = fd
->first_inner_iterations
;
868 TREE_VEC_ELT (t
, 2) = fd
->factor
;
869 TREE_VEC_ELT (t
, 3) = fd
->adjn1
;
874 *collapse_count
= create_tmp_var (iter_type
, ".count");
877 if (fd
->collapse
> 1 || fd
->tiling
|| (fd
->ordered
&& loops
))
879 fd
->loop
.v
= *collapse_iter
;
880 fd
->loop
.n1
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 0);
881 fd
->loop
.n2
= *collapse_count
;
882 if (TREE_CODE (fd
->loop
.n2
) == TREE_VEC
)
884 gcc_assert (fd
->non_rect
);
885 fd
->first_inner_iterations
= TREE_VEC_ELT (fd
->loop
.n2
, 1);
886 fd
->factor
= TREE_VEC_ELT (fd
->loop
.n2
, 2);
887 fd
->adjn1
= TREE_VEC_ELT (fd
->loop
.n2
, 3);
888 fd
->loop
.n2
= TREE_VEC_ELT (fd
->loop
.n2
, 0);
890 fd
->loop
.step
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 1);
891 fd
->loop
.m1
= NULL_TREE
;
892 fd
->loop
.m2
= NULL_TREE
;
894 fd
->loop
.cond_code
= LT_EXPR
;
900 /* Build a call to GOMP_barrier. */
903 omp_build_barrier (tree lhs
)
905 tree fndecl
= builtin_decl_explicit (lhs
? BUILT_IN_GOMP_BARRIER_CANCEL
906 : BUILT_IN_GOMP_BARRIER
);
907 gcall
*g
= gimple_build_call (fndecl
, 0);
909 gimple_call_set_lhs (g
, lhs
);
913 /* Find OMP_FOR resp. OMP_SIMD with non-NULL OMP_FOR_INIT. Also, fill in pdata
914 array, pdata[0] non-NULL if there is anything non-trivial in between,
915 pdata[1] is address of OMP_PARALLEL in between if any, pdata[2] is address
916 of OMP_FOR in between if any and pdata[3] is address of the inner
920 find_combined_omp_for (tree
*tp
, int *walk_subtrees
, void *data
)
922 tree
**pdata
= (tree
**) data
;
924 switch (TREE_CODE (*tp
))
927 if (OMP_FOR_INIT (*tp
) != NULL_TREE
)
936 if (OMP_FOR_INIT (*tp
) != NULL_TREE
)
943 if (BIND_EXPR_VARS (*tp
)
944 || (BIND_EXPR_BLOCK (*tp
)
945 && BLOCK_VARS (BIND_EXPR_BLOCK (*tp
))))
950 if (!tsi_one_before_end_p (tsi_start (*tp
)))
954 case TRY_FINALLY_EXPR
:
968 /* Return maximum possible vectorization factor for the target. */
975 || !flag_tree_loop_optimize
976 || (!flag_tree_loop_vectorize
977 && OPTION_SET_P (flag_tree_loop_vectorize
)))
980 auto_vector_modes modes
;
981 targetm
.vectorize
.autovectorize_vector_modes (&modes
, true);
982 if (!modes
.is_empty ())
985 for (unsigned int i
= 0; i
< modes
.length (); ++i
)
986 /* The returned modes use the smallest element size (and thus
987 the largest nunits) for the vectorization approach that they
989 vf
= ordered_max (vf
, GET_MODE_NUNITS (modes
[i
]));
993 machine_mode vqimode
= targetm
.vectorize
.preferred_simd_mode (QImode
);
994 if (GET_MODE_CLASS (vqimode
) == MODE_VECTOR_INT
)
995 return GET_MODE_NUNITS (vqimode
);
1000 /* Return maximum SIMT width if offloading may target SIMT hardware. */
1003 omp_max_simt_vf (void)
1007 if (ENABLE_OFFLOADING
)
1008 for (const char *c
= getenv ("OFFLOAD_TARGET_NAMES"); c
;)
1010 if (startswith (c
, "nvptx"))
1012 else if ((c
= strchr (c
, ':')))
1018 /* Store the construct selectors as tree codes from last to first.
1019 CTX is a list of trait selectors, nconstructs must be equal to its
1020 length, and the array CONSTRUCTS holds the output. */
1023 omp_construct_traits_to_codes (tree ctx
, int nconstructs
,
1024 enum tree_code
*constructs
)
1026 int i
= nconstructs
- 1;
1028 /* Order must match the OMP_TRAIT_CONSTRUCT_* enumerators in
1029 enum omp_ts_code. */
1030 static enum tree_code code_map
[]
1031 = { OMP_TARGET
, OMP_TEAMS
, OMP_PARALLEL
, OMP_FOR
, OMP_SIMD
};
1033 for (tree ts
= ctx
; ts
; ts
= TREE_CHAIN (ts
), i
--)
1035 enum omp_ts_code sel
= OMP_TS_CODE (ts
);
1036 int j
= (int)sel
- (int)OMP_TRAIT_CONSTRUCT_TARGET
;
1037 gcc_assert (j
>= 0 && (unsigned int) j
< ARRAY_SIZE (code_map
));
1038 constructs
[i
] = code_map
[j
];
1040 gcc_assert (i
== -1);
/* Return true if PROP is possibly present in one of the offloading target's
   OpenMP contexts.  The format of PROPS string is always offloading target's
   name terminated by '\0', followed by properties for that offloading
   target separated by '\0' and terminated by another '\0'.  The strings
   are created from omp-device-properties installed files of all configured
   offloading targets.  */

bool
omp_offload_device_kind_arch_isa (const char *props, const char *prop)
{
  const char *names = getenv ("OFFLOAD_TARGET_NAMES");
  if (names == NULL || *names == '\0')
    return false;
  while (*props != '\0')
    {
      size_t name_len = strlen (props);
      bool matches = false;
      /* Does this PROPS block belong to one of the enabled offload
	 targets?  NAMES is a ':'-separated list, each entry possibly
	 followed by '=path'.  */
      for (const char *c = names; c; )
	{
	  if (strncmp (props, c, name_len) == 0
	      && (c[name_len] == '\0'
		  || c[name_len] == ':'
		  || c[name_len] == '='))
	    {
	      matches = true;
	      break;
	    }
	  else if ((c = strchr (c, ':')))
	    c++;
	}
      props = props + name_len + 1;
      /* Scan this target's property list for PROP.  */
      while (*props != '\0')
	{
	  if (matches && strcmp (props, prop) == 0)
	    return true;
	  props = strchr (props, '\0') + 1;
	}
      props++;
    }
  return false;
}
1085 /* Return true if the current code location is or might be offloaded.
1086 Return true in declare target functions, or when nested in a target
1087 region or when unsure, return false otherwise. */
1090 omp_maybe_offloaded (void)
1092 if (!ENABLE_OFFLOADING
)
1094 const char *names
= getenv ("OFFLOAD_TARGET_NAMES");
1095 if (names
== NULL
|| *names
== '\0')
1098 if (symtab
->state
== PARSING
)
1101 if (cfun
&& cfun
->after_inlining
)
1103 if (current_function_decl
1104 && lookup_attribute ("omp declare target",
1105 DECL_ATTRIBUTES (current_function_decl
)))
1107 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) == 0)
1109 enum tree_code construct
= OMP_TARGET
;
1110 if (omp_construct_selector_matches (&construct
, 1, NULL
))
1116 /* Lookup tables for context selectors. */
1117 const char *omp_tss_map
[] =
1127 /* Arrays of property candidates must be null-terminated. */
1128 static const char *const kind_properties
[] =
1129 { "host", "nohost", "cpu", "gpu", "fpga", "any", NULL
};
1130 static const char *const vendor_properties
[] =
1131 { "amd", "arm", "bsc", "cray", "fujitsu", "gnu", "hpe", "ibm", "intel",
1132 "llvm", "nvidia", "pgi", "ti", "unknown", NULL
};
1133 static const char *const extension_properties
[] =
1135 static const char *const atomic_default_mem_order_properties
[] =
1136 { "seq_cst", "relaxed", "acq_rel", "acquire", "release", NULL
};
1138 struct omp_ts_info omp_ts_map
[] =
1141 (1 << OMP_TRAIT_SET_DEVICE
) | (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1142 OMP_TRAIT_PROPERTY_NAME_LIST
, false,
1146 (1 << OMP_TRAIT_SET_DEVICE
) | (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1147 OMP_TRAIT_PROPERTY_NAME_LIST
, false,
1151 (1 << OMP_TRAIT_SET_DEVICE
) | (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1152 OMP_TRAIT_PROPERTY_NAME_LIST
, false,
1156 (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1157 OMP_TRAIT_PROPERTY_EXPR
, false,
1161 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1162 OMP_TRAIT_PROPERTY_NAME_LIST
, true,
1166 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1167 OMP_TRAIT_PROPERTY_NAME_LIST
, true,
1168 extension_properties
,
1170 { "atomic_default_mem_order",
1171 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1172 OMP_TRAIT_PROPERTY_ID
, true,
1173 atomic_default_mem_order_properties
,
1176 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1177 OMP_TRAIT_PROPERTY_CLAUSE_LIST
, true,
1180 { "unified_address",
1181 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1182 OMP_TRAIT_PROPERTY_NONE
, true,
1185 { "unified_shared_memory",
1186 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1187 OMP_TRAIT_PROPERTY_NONE
, true,
1190 { "dynamic_allocators",
1191 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1192 OMP_TRAIT_PROPERTY_NONE
, true,
1195 { "reverse_offload",
1196 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1197 OMP_TRAIT_PROPERTY_NONE
, true,
1201 (1 << OMP_TRAIT_SET_USER
),
1202 OMP_TRAIT_PROPERTY_EXPR
, true,
1206 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1207 OMP_TRAIT_PROPERTY_NONE
, false,
1211 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1212 OMP_TRAIT_PROPERTY_NONE
, false,
1216 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1217 OMP_TRAIT_PROPERTY_NONE
, false,
1221 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1222 OMP_TRAIT_PROPERTY_NONE
, false,
1226 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1227 OMP_TRAIT_PROPERTY_CLAUSE_LIST
, false,
1230 { NULL
, 0, OMP_TRAIT_PROPERTY_NONE
, false, NULL
} /* OMP_TRAIT_LAST */
1234 /* Return a name from PROP, a property in selectors accepting
1238 omp_context_name_list_prop (tree prop
)
1240 gcc_assert (OMP_TP_NAME (prop
) == OMP_TP_NAMELIST_NODE
);
1241 tree val
= OMP_TP_VALUE (prop
);
1242 switch (TREE_CODE (val
))
1244 case IDENTIFIER_NODE
:
1245 return IDENTIFIER_POINTER (val
);
1248 const char *ret
= TREE_STRING_POINTER (val
);
1249 if ((size_t) TREE_STRING_LENGTH (val
)
1250 == strlen (ret
) + (lang_GNU_Fortran () ? 0 : 1))
1259 /* Diagnose errors in an OpenMP context selector, return CTX if
1260 it is correct or error_mark_node otherwise. */
1263 omp_check_context_selector (location_t loc
, tree ctx
)
1265 bool tss_seen
[OMP_TRAIT_SET_LAST
], ts_seen
[OMP_TRAIT_LAST
];
1267 memset (tss_seen
, 0, sizeof (tss_seen
));
1268 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
1270 enum omp_tss_code tss_code
= OMP_TSS_CODE (tss
);
1272 /* We can parse this, but not handle it yet. */
1273 if (tss_code
== OMP_TRAIT_SET_TARGET_DEVICE
)
1274 sorry_at (loc
, "%<target_device%> selector set is not supported yet");
1276 /* Each trait-set-selector-name can only be specified once. */
1277 if (tss_seen
[tss_code
])
1279 error_at (loc
, "selector set %qs specified more than once",
1280 OMP_TSS_NAME (tss
));
1281 return error_mark_node
;
1284 tss_seen
[tss_code
] = true;
1286 memset (ts_seen
, 0, sizeof (ts_seen
));
1287 for (tree ts
= OMP_TSS_TRAIT_SELECTORS (tss
); ts
; ts
= TREE_CHAIN (ts
))
1289 enum omp_ts_code ts_code
= OMP_TS_CODE (ts
);
1291 /* Ignore unknown traits. */
1292 if (ts_code
== OMP_TRAIT_INVALID
)
1295 /* Each trait-selector-name can only be specified once. */
1296 if (ts_seen
[ts_code
])
1299 "selector %qs specified more than once in set %qs",
1301 OMP_TSS_NAME (tss
));
1302 return error_mark_node
;
1305 ts_seen
[ts_code
] = true;
1307 if (omp_ts_map
[ts_code
].valid_properties
== NULL
)
1310 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1311 for (unsigned j
= 0; ; j
++)
1313 const char *candidate
1314 = omp_ts_map
[ts_code
].valid_properties
[j
];
1315 if (candidate
== NULL
)
1317 /* We've reached the end of the candidate array. */
1318 if (ts_code
== OMP_TRAIT_IMPLEMENTATION_ADMO
)
1319 /* FIXME: not sure why this is an error vs warnings
1320 for the others, + incorrect/unknown wording? */
1323 "incorrect property %qs of %qs selector",
1324 IDENTIFIER_POINTER (OMP_TP_NAME (p
)),
1325 "atomic_default_mem_order");
1326 return error_mark_node
;
1328 if (OMP_TP_NAME (p
) == OMP_TP_NAMELIST_NODE
1329 && (TREE_CODE (OMP_TP_VALUE (p
)) == STRING_CST
))
1330 warning_at (loc
, OPT_Wopenmp
,
1331 "unknown property %qE of %qs selector",
1334 else if (OMP_TP_NAME (p
) == OMP_TP_NAMELIST_NODE
)
1335 warning_at (loc
, OPT_Wopenmp
,
1336 "unknown property %qs of %qs selector",
1337 omp_context_name_list_prop (p
),
1339 else if (OMP_TP_NAME (p
))
1340 warning_at (loc
, OPT_Wopenmp
,
1341 "unknown property %qs of %qs selector",
1342 IDENTIFIER_POINTER (OMP_TP_NAME (p
)),
1346 else if (OMP_TP_NAME (p
) == OMP_TP_NAMELIST_NODE
)
1347 /* Property-list traits. */
1349 const char *str
= omp_context_name_list_prop (p
);
1350 if (str
&& !strcmp (str
, candidate
))
1353 else if (!strcmp (IDENTIFIER_POINTER (OMP_TP_NAME (p
)),
1355 /* Identifier traits. */
1364 /* Register VARIANT as variant of some base function marked with
1365 #pragma omp declare variant. CONSTRUCT is corresponding list of
1366 trait-selectors for the construct selector set. This is stashed as the
1367 value of the "omp declare variant variant" attribute on VARIANT. */
1369 omp_mark_declare_variant (location_t loc
, tree variant
, tree construct
)
1371 /* Ignore this variant if it contains unknown construct selectors.
1372 It will never match, and the front ends have already issued a warning
1374 for (tree c
= construct
; c
; c
= TREE_CHAIN (c
))
1375 if (OMP_TS_CODE (c
) == OMP_TRAIT_INVALID
)
1378 tree attr
= lookup_attribute ("omp declare variant variant",
1379 DECL_ATTRIBUTES (variant
));
1380 if (attr
== NULL_TREE
)
1382 attr
= tree_cons (get_identifier ("omp declare variant variant"),
1383 unshare_expr (construct
),
1384 DECL_ATTRIBUTES (variant
));
1385 DECL_ATTRIBUTES (variant
) = attr
;
1388 if ((TREE_VALUE (attr
) != NULL_TREE
) != (construct
!= NULL_TREE
)
1389 || (construct
!= NULL_TREE
1390 && omp_context_selector_set_compare (OMP_TRAIT_SET_CONSTRUCT
,
1393 error_at (loc
, "%qD used as a variant with incompatible %<construct%> "
1394 "selector sets", variant
);
1398 /* Constructors for context selectors. */
1401 make_trait_set_selector (enum omp_tss_code code
, tree selectors
, tree chain
)
1403 return tree_cons (build_int_cst (integer_type_node
, code
),
1408 make_trait_selector (enum omp_ts_code code
, tree score
, tree properties
,
1411 if (score
== NULL_TREE
)
1412 return tree_cons (build_int_cst (integer_type_node
, code
),
1415 return tree_cons (build_int_cst (integer_type_node
, code
),
1416 tree_cons (OMP_TS_SCORE_NODE
, score
, properties
),
1421 make_trait_property (tree name
, tree value
, tree chain
)
1423 return tree_cons (name
, value
, chain
);
1426 /* Return 1 if context selector matches the current OpenMP context, 0
1427 if it does not and -1 if it is unknown and need to be determined later.
1428 Some properties can be checked right away during parsing (this routine),
1429 others need to wait until the whole TU is parsed, others need to wait until
1430 IPA, others until vectorization. */
1433 omp_context_selector_matches (tree ctx
)
1436 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
1438 enum omp_tss_code set
= OMP_TSS_CODE (tss
);
1439 tree selectors
= OMP_TSS_TRAIT_SELECTORS (tss
);
1441 /* Immediately reject the match if there are any ignored
1442 selectors present. */
1443 for (tree ts
= selectors
; ts
; ts
= TREE_CHAIN (ts
))
1444 if (OMP_TS_CODE (ts
) == OMP_TRAIT_INVALID
)
1447 if (set
== OMP_TRAIT_SET_CONSTRUCT
)
1449 /* For now, ignore the construct set. While something can be
1450 determined already during parsing, we don't know until end of TU
1451 whether additional constructs aren't added through declare variant
1452 unless "omp declare variant variant" attribute exists already
1453 (so in most of the cases), and we'd need to maintain set of
1454 surrounding OpenMP constructs, which is better handled during
1456 if (symtab
->state
== PARSING
)
1462 int nconstructs
= list_length (selectors
);
1463 enum tree_code
*constructs
= NULL
;
1466 /* Even though this alloca appears in a loop over selector
1467 sets, it does not repeatedly grow the stack, because
1468 there can be only one construct selector set specified.
1469 This is enforced by omp_check_context_selector. */
1471 = (enum tree_code
*) alloca (nconstructs
1472 * sizeof (enum tree_code
));
1473 omp_construct_traits_to_codes (selectors
, nconstructs
,
1477 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1479 if (!cfun
->after_inlining
)
1485 for (i
= 0; i
< nconstructs
; ++i
)
1486 if (constructs
[i
] == OMP_SIMD
)
1488 if (i
< nconstructs
)
1493 /* If there is no simd, assume it is ok after IPA,
1494 constructs should have been checked before. */
1498 int r
= omp_construct_selector_matches (constructs
, nconstructs
,
1506 for (tree ts
= selectors
; ts
; ts
= TREE_CHAIN (ts
))
1508 enum omp_ts_code sel
= OMP_TS_CODE (ts
);
1511 case OMP_TRAIT_IMPLEMENTATION_VENDOR
:
1512 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1513 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1515 const char *prop
= omp_context_name_list_prop (p
);
1518 if (!strcmp (prop
, "gnu"))
1523 case OMP_TRAIT_IMPLEMENTATION_EXTENSION
:
1524 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1525 /* We don't support any extensions right now. */
1528 case OMP_TRAIT_IMPLEMENTATION_ADMO
:
1529 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1531 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1534 enum omp_memory_order omo
1535 = ((enum omp_memory_order
)
1537 & OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER
));
1538 if (omo
== OMP_MEMORY_ORDER_UNSPECIFIED
)
1540 /* We don't know yet, until end of TU. */
1541 if (symtab
->state
== PARSING
)
1547 omo
= OMP_MEMORY_ORDER_RELAXED
;
1549 tree p
= OMP_TS_PROPERTIES (ts
);
1550 const char *prop
= IDENTIFIER_POINTER (OMP_TP_NAME (p
));
1551 if (!strcmp (prop
, "relaxed")
1552 && omo
!= OMP_MEMORY_ORDER_RELAXED
)
1554 else if (!strcmp (prop
, "seq_cst")
1555 && omo
!= OMP_MEMORY_ORDER_SEQ_CST
)
1557 else if (!strcmp (prop
, "acq_rel")
1558 && omo
!= OMP_MEMORY_ORDER_ACQ_REL
)
1560 else if (!strcmp (prop
, "acquire")
1561 && omo
!= OMP_MEMORY_ORDER_ACQUIRE
)
1563 else if (!strcmp (prop
, "release")
1564 && omo
!= OMP_MEMORY_ORDER_RELEASE
)
1568 case OMP_TRAIT_DEVICE_ARCH
:
1569 if (set
== OMP_TRAIT_SET_DEVICE
)
1570 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1572 const char *arch
= omp_context_name_list_prop (p
);
1576 if (targetm
.omp
.device_kind_arch_isa
!= NULL
)
1577 r
= targetm
.omp
.device_kind_arch_isa (omp_device_arch
,
1579 if (r
== 0 || (r
== -1 && symtab
->state
!= PARSING
))
1581 /* If we are or might be in a target region or
1582 declare target function, need to take into account
1583 also offloading values. */
1584 if (!omp_maybe_offloaded ())
1586 if (ENABLE_OFFLOADING
)
1588 const char *arches
= omp_offload_device_arch
;
1589 if (omp_offload_device_kind_arch_isa (arches
,
1600 /* If arch matches on the host, it still might not match
1601 in the offloading region. */
1602 else if (omp_maybe_offloaded ())
1606 case OMP_TRAIT_IMPLEMENTATION_UNIFIED_ADDRESS
:
1607 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1609 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1612 if ((omp_requires_mask
& OMP_REQUIRES_UNIFIED_ADDRESS
) == 0)
1614 if (symtab
->state
== PARSING
)
1621 case OMP_TRAIT_IMPLEMENTATION_UNIFIED_SHARED_MEMORY
:
1622 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1624 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1627 if ((omp_requires_mask
1628 & OMP_REQUIRES_UNIFIED_SHARED_MEMORY
) == 0)
1630 if (symtab
->state
== PARSING
)
1637 case OMP_TRAIT_IMPLEMENTATION_DYNAMIC_ALLOCATORS
:
1638 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1640 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1643 if ((omp_requires_mask
1644 & OMP_REQUIRES_DYNAMIC_ALLOCATORS
) == 0)
1646 if (symtab
->state
== PARSING
)
1653 case OMP_TRAIT_IMPLEMENTATION_REVERSE_OFFLOAD
:
1654 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1656 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1659 if ((omp_requires_mask
& OMP_REQUIRES_REVERSE_OFFLOAD
) == 0)
1661 if (symtab
->state
== PARSING
)
1668 case OMP_TRAIT_DEVICE_KIND
:
1669 if (set
== OMP_TRAIT_SET_DEVICE
)
1670 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1672 const char *prop
= omp_context_name_list_prop (p
);
1675 if (!strcmp (prop
, "any"))
1677 if (!strcmp (prop
, "host"))
1679 #ifdef ACCEL_COMPILER
1682 if (omp_maybe_offloaded ())
1687 if (!strcmp (prop
, "nohost"))
1689 #ifndef ACCEL_COMPILER
1690 if (omp_maybe_offloaded ())
1698 if (targetm
.omp
.device_kind_arch_isa
!= NULL
)
1699 r
= targetm
.omp
.device_kind_arch_isa (omp_device_kind
,
1702 r
= strcmp (prop
, "cpu") == 0;
1703 if (r
== 0 || (r
== -1 && symtab
->state
!= PARSING
))
1705 /* If we are or might be in a target region or
1706 declare target function, need to take into account
1707 also offloading values. */
1708 if (!omp_maybe_offloaded ())
1710 if (ENABLE_OFFLOADING
)
1712 const char *kinds
= omp_offload_device_kind
;
1713 if (omp_offload_device_kind_arch_isa (kinds
, prop
))
1723 /* If kind matches on the host, it still might not match
1724 in the offloading region. */
1725 else if (omp_maybe_offloaded ())
1729 case OMP_TRAIT_DEVICE_ISA
:
1730 if (set
== OMP_TRAIT_SET_DEVICE
)
1731 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1733 const char *isa
= omp_context_name_list_prop (p
);
1737 if (targetm
.omp
.device_kind_arch_isa
!= NULL
)
1738 r
= targetm
.omp
.device_kind_arch_isa (omp_device_isa
,
1740 if (r
== 0 || (r
== -1 && symtab
->state
!= PARSING
))
1742 /* If isa is valid on the target, but not in the
1743 current function and current function has
1744 #pragma omp declare simd on it, some simd clones
1745 might have the isa added later on. */
1747 && targetm
.simd_clone
.compute_vecsize_and_simdlen
1748 && (cfun
== NULL
|| !cfun
->after_inlining
))
1751 = DECL_ATTRIBUTES (current_function_decl
);
1752 if (lookup_attribute ("omp declare simd", attrs
))
1758 /* If we are or might be in a target region or
1759 declare target function, need to take into account
1760 also offloading values. */
1761 if (!omp_maybe_offloaded ())
1763 if (ENABLE_OFFLOADING
)
1765 const char *isas
= omp_offload_device_isa
;
1766 if (omp_offload_device_kind_arch_isa (isas
, isa
))
1776 /* If isa matches on the host, it still might not match
1777 in the offloading region. */
1778 else if (omp_maybe_offloaded ())
1782 case OMP_TRAIT_USER_CONDITION
:
1783 if (set
== OMP_TRAIT_SET_USER
)
1784 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1785 if (OMP_TP_NAME (p
) == NULL_TREE
)
1787 if (integer_zerop (OMP_TP_VALUE (p
)))
1789 if (integer_nonzerop (OMP_TP_VALUE (p
)))
1802 /* Compare construct={simd} CLAUSES1 with CLAUSES2, return 0/-1/1/2 as
1803 in omp_context_selector_set_compare. */
1806 omp_construct_simd_compare (tree clauses1
, tree clauses2
)
1808 if (clauses1
== NULL_TREE
)
1809 return clauses2
== NULL_TREE
? 0 : -1;
1810 if (clauses2
== NULL_TREE
)
1814 struct declare_variant_simd_data
{
1815 bool inbranch
, notinbranch
;
1817 auto_vec
<tree
,16> data_sharing
;
1818 auto_vec
<tree
,16> aligned
;
1819 declare_variant_simd_data ()
1820 : inbranch(false), notinbranch(false), simdlen(NULL_TREE
) {}
1823 for (i
= 0; i
< 2; i
++)
1824 for (tree c
= i
? clauses2
: clauses1
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1827 switch (OMP_CLAUSE_CODE (c
))
1829 case OMP_CLAUSE_INBRANCH
:
1830 data
[i
].inbranch
= true;
1832 case OMP_CLAUSE_NOTINBRANCH
:
1833 data
[i
].notinbranch
= true;
1835 case OMP_CLAUSE_SIMDLEN
:
1836 data
[i
].simdlen
= OMP_CLAUSE_SIMDLEN_EXPR (c
);
1838 case OMP_CLAUSE_UNIFORM
:
1839 case OMP_CLAUSE_LINEAR
:
1840 v
= &data
[i
].data_sharing
;
1842 case OMP_CLAUSE_ALIGNED
:
1843 v
= &data
[i
].aligned
;
1848 unsigned HOST_WIDE_INT argno
= tree_to_uhwi (OMP_CLAUSE_DECL (c
));
1849 if (argno
>= v
->length ())
1850 v
->safe_grow_cleared (argno
+ 1, true);
1853 /* Here, r is used as a bitmask, 2 is set if CLAUSES1 has something
1854 CLAUSES2 doesn't, 1 is set if CLAUSES2 has something CLAUSES1
1855 doesn't. Thus, r == 3 implies return value 2, r == 1 implies
1856 -1, r == 2 implies 1 and r == 0 implies 0. */
1857 if (data
[0].inbranch
!= data
[1].inbranch
)
1858 r
|= data
[0].inbranch
? 2 : 1;
1859 if (data
[0].notinbranch
!= data
[1].notinbranch
)
1860 r
|= data
[0].notinbranch
? 2 : 1;
1861 if (!simple_cst_equal (data
[0].simdlen
, data
[1].simdlen
))
1863 if (data
[0].simdlen
&& data
[1].simdlen
)
1865 r
|= data
[0].simdlen
? 2 : 1;
1867 if (data
[0].data_sharing
.length () < data
[1].data_sharing
.length ()
1868 || data
[0].aligned
.length () < data
[1].aligned
.length ())
1871 FOR_EACH_VEC_ELT (data
[0].data_sharing
, i
, c1
)
1873 c2
= (i
< data
[1].data_sharing
.length ()
1874 ? data
[1].data_sharing
[i
] : NULL_TREE
);
1875 if ((c1
== NULL_TREE
) != (c2
== NULL_TREE
))
1877 r
|= c1
!= NULL_TREE
? 2 : 1;
1880 if (c1
== NULL_TREE
)
1882 if (OMP_CLAUSE_CODE (c1
) != OMP_CLAUSE_CODE (c2
))
1884 if (OMP_CLAUSE_CODE (c1
) != OMP_CLAUSE_LINEAR
)
1886 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c1
)
1887 != OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c2
))
1889 if (OMP_CLAUSE_LINEAR_KIND (c1
) != OMP_CLAUSE_LINEAR_KIND (c2
))
1891 if (!simple_cst_equal (OMP_CLAUSE_LINEAR_STEP (c1
),
1892 OMP_CLAUSE_LINEAR_STEP (c2
)))
1895 FOR_EACH_VEC_ELT (data
[0].aligned
, i
, c1
)
1897 c2
= i
< data
[1].aligned
.length () ? data
[1].aligned
[i
] : NULL_TREE
;
1898 if ((c1
== NULL_TREE
) != (c2
== NULL_TREE
))
1900 r
|= c1
!= NULL_TREE
? 2 : 1;
1903 if (c1
== NULL_TREE
)
1905 if (!simple_cst_equal (OMP_CLAUSE_ALIGNED_ALIGNMENT (c1
),
1906 OMP_CLAUSE_ALIGNED_ALIGNMENT (c2
)))
1915 default: gcc_unreachable ();
1919 /* Compare properties of selectors SEL from SET other than construct.
1920 CTX1 and CTX2 are the lists of properties to compare.
1921 Return 0/-1/1/2 as in omp_context_selector_set_compare.
1922 Unlike set names or selector names, properties can have duplicates. */
1925 omp_context_selector_props_compare (enum omp_tss_code set
,
1926 enum omp_ts_code sel
,
1927 tree ctx1
, tree ctx2
)
1930 for (int pass
= 0; pass
< 2; pass
++)
1931 for (tree p1
= pass
? ctx2
: ctx1
; p1
; p1
= TREE_CHAIN (p1
))
1934 for (p2
= pass
? ctx1
: ctx2
; p2
; p2
= TREE_CHAIN (p2
))
1935 if (OMP_TP_NAME (p1
) == OMP_TP_NAME (p2
))
1937 if (OMP_TP_NAME (p1
) == NULL_TREE
)
1939 if (set
== OMP_TRAIT_SET_USER
1940 && sel
== OMP_TRAIT_USER_CONDITION
)
1942 if (integer_zerop (OMP_TP_VALUE (p1
))
1943 != integer_zerop (OMP_TP_VALUE (p2
)))
1947 if (simple_cst_equal (OMP_TP_VALUE (p1
), OMP_TP_VALUE (p2
)))
1950 else if (OMP_TP_NAME (p1
) == OMP_TP_NAMELIST_NODE
)
1952 /* Handle string constant vs identifier comparison for
1953 name-list properties. */
1954 const char *n1
= omp_context_name_list_prop (p1
);
1955 const char *n2
= omp_context_name_list_prop (p2
);
1956 if (n1
&& n2
&& !strcmp (n1
, n2
))
1962 if (p2
== NULL_TREE
)
1964 int r
= pass
? -1 : 1;
1965 if (ret
&& ret
!= r
)
1979 /* Compare single context selector sets CTX1 and CTX2 with SET name.
1980 CTX1 and CTX2 are lists of trait-selectors.
1981 Return 0 if CTX1 is equal to CTX2,
1982 -1 if CTX1 is a strict subset of CTX2,
1983 1 if CTX2 is a strict subset of CTX1, or
1984 2 if neither context is a subset of another one. */
1987 omp_context_selector_set_compare (enum omp_tss_code set
, tree ctx1
, tree ctx2
)
1990 /* If either list includes an ignored selector trait, neither can
1991 be a subset of the other. */
1992 for (tree ts
= ctx1
; ts
; ts
= TREE_CHAIN (ts
))
1993 if (OMP_TS_CODE (ts
) == OMP_TRAIT_INVALID
)
1995 for (tree ts
= ctx2
; ts
; ts
= TREE_CHAIN (ts
))
1996 if (OMP_TS_CODE (ts
) == OMP_TRAIT_INVALID
)
1999 bool swapped
= false;
2001 int len1
= list_length (ctx1
);
2002 int len2
= list_length (ctx2
);
2007 std::swap (ctx1
, ctx2
);
2008 std::swap (len1
, len2
);
2011 if (set
== OMP_TRAIT_SET_CONSTRUCT
)
2015 /* Handle construct set specially. In this case the order
2016 of the selector matters too. */
2017 for (ts1
= ctx1
; ts1
; ts1
= TREE_CHAIN (ts1
))
2018 if (OMP_TS_CODE (ts1
) == OMP_TS_CODE (ts2
))
2021 if (OMP_TS_CODE (ts1
) == OMP_TRAIT_CONSTRUCT_SIMD
)
2022 r
= omp_construct_simd_compare (OMP_TS_PROPERTIES (ts1
),
2023 OMP_TS_PROPERTIES (ts2
));
2024 if (r
== 2 || (ret
&& r
&& (ret
< 0) != (r
< 0)))
2028 ts2
= TREE_CHAIN (ts2
);
2029 if (ts2
== NULL_TREE
)
2031 ts1
= TREE_CHAIN (ts1
);
2039 if (ts2
!= NULL_TREE
)
2041 if (ts1
!= NULL_TREE
)
2049 return swapped
? -ret
: ret
;
2051 for (tree ts1
= ctx1
; ts1
; ts1
= TREE_CHAIN (ts1
))
2053 enum omp_ts_code sel
= OMP_TS_CODE (ts1
);
2055 for (ts2
= ctx2
; ts2
; ts2
= TREE_CHAIN (ts2
))
2056 if (sel
== OMP_TS_CODE (ts2
))
2058 tree score1
= OMP_TS_SCORE (ts1
);
2059 tree score2
= OMP_TS_SCORE (ts2
);
2060 if (score1
&& score2
&& !simple_cst_equal (score1
, score2
))
2063 int r
= omp_context_selector_props_compare (set
, OMP_TS_CODE (ts1
),
2064 OMP_TS_PROPERTIES (ts1
),
2065 OMP_TS_PROPERTIES (ts2
));
2066 if (r
== 2 || (ret
&& r
&& (ret
< 0) != (r
< 0)))
2073 if (ts2
== NULL_TREE
)
2084 return swapped
? -ret
: ret
;
2087 /* Compare whole context selector specification CTX1 and CTX2.
2088 Return 0 if CTX1 is equal to CTX2,
2089 -1 if CTX1 is a strict subset of CTX2,
2090 1 if CTX2 is a strict subset of CTX1, or
2091 2 if neither context is a subset of another one. */
2094 omp_context_selector_compare (tree ctx1
, tree ctx2
)
2096 bool swapped
= false;
2098 int len1
= list_length (ctx1
);
2099 int len2
= list_length (ctx2
);
2104 std::swap (ctx1
, ctx2
);
2105 std::swap (len1
, len2
);
2107 for (tree tss1
= ctx1
; tss1
; tss1
= TREE_CHAIN (tss1
))
2109 enum omp_tss_code set
= OMP_TSS_CODE (tss1
);
2111 for (tss2
= ctx2
; tss2
; tss2
= TREE_CHAIN (tss2
))
2112 if (set
== OMP_TSS_CODE (tss2
))
2115 = omp_context_selector_set_compare
2116 (set
, OMP_TSS_TRAIT_SELECTORS (tss1
),
2117 OMP_TSS_TRAIT_SELECTORS (tss2
));
2118 if (r
== 2 || (ret
&& r
&& (ret
< 0) != (r
< 0)))
2125 if (tss2
== NULL_TREE
)
2136 return swapped
? -ret
: ret
;
2139 /* From context selector CTX, return trait-selector with name SEL in
2140 trait-selector-set with name SET if any, or NULL_TREE if not found. */
2142 omp_get_context_selector (tree ctx
, enum omp_tss_code set
,
2143 enum omp_ts_code sel
)
2145 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
2146 if (OMP_TSS_CODE (tss
) == set
)
2147 for (tree ts
= OMP_TSS_TRAIT_SELECTORS (tss
); ts
; ts
= TREE_CHAIN (ts
))
2148 if (OMP_TS_CODE (ts
) == sel
)
2153 /* Similar, but returns the whole trait-selector list for SET in CTX. */
2155 omp_get_context_selector_list (tree ctx
, enum omp_tss_code set
)
2157 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
2158 if (OMP_TSS_CODE (tss
) == set
)
2159 return OMP_TSS_TRAIT_SELECTORS (tss
);
2163 /* Map string S onto a trait selector set code. */
2165 omp_lookup_tss_code (const char * s
)
2167 for (int i
= 0; i
< OMP_TRAIT_SET_LAST
; i
++)
2168 if (strcmp (s
, omp_tss_map
[i
]) == 0)
2169 return (enum omp_tss_code
) i
;
2170 return OMP_TRAIT_SET_INVALID
;
2173 /* Map string S onto a trait selector code for set SET. */
2175 omp_lookup_ts_code (enum omp_tss_code set
, const char *s
)
2177 unsigned int mask
= 1 << set
;
2178 for (int i
= 0; i
< OMP_TRAIT_LAST
; i
++)
2179 if ((mask
& omp_ts_map
[i
].tss_mask
) != 0
2180 && strcmp (s
, omp_ts_map
[i
].name
) == 0)
2181 return (enum omp_ts_code
) i
;
2182 return OMP_TRAIT_INVALID
;
2185 /* Needs to be a GC-friendly widest_int variant, but precision is
2186 desirable to be the same on all targets. */
2187 typedef generic_wide_int
<fixed_wide_int_storage
<1024> > score_wide_int
;
2189 /* Compute *SCORE for context selector CTX. Return true if the score
2190 would be different depending on whether it is a declare simd clone or
2191 not. DECLARE_SIMD should be true for the case when it would be
2192 a declare simd clone. */
2195 omp_context_compute_score (tree ctx
, score_wide_int
*score
, bool declare_simd
)
2198 = omp_get_context_selector_list (ctx
, OMP_TRAIT_SET_CONSTRUCT
);
2199 bool has_kind
= omp_get_context_selector (ctx
, OMP_TRAIT_SET_DEVICE
,
2200 OMP_TRAIT_DEVICE_KIND
);
2201 bool has_arch
= omp_get_context_selector (ctx
, OMP_TRAIT_SET_DEVICE
,
2202 OMP_TRAIT_DEVICE_ARCH
);
2203 bool has_isa
= omp_get_context_selector (ctx
, OMP_TRAIT_SET_DEVICE
,
2204 OMP_TRAIT_DEVICE_ISA
);
2207 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
2208 if (OMP_TSS_TRAIT_SELECTORS (tss
) != selectors
)
2209 for (tree ts
= OMP_TSS_TRAIT_SELECTORS (tss
); ts
; ts
= TREE_CHAIN (ts
))
2211 tree s
= OMP_TS_SCORE (ts
);
2212 if (s
&& TREE_CODE (s
) == INTEGER_CST
)
2213 *score
+= score_wide_int::from (wi::to_wide (s
),
2214 TYPE_SIGN (TREE_TYPE (s
)));
2217 if (selectors
|| has_kind
|| has_arch
|| has_isa
)
2219 int nconstructs
= list_length (selectors
);
2220 enum tree_code
*constructs
= NULL
;
2224 = (enum tree_code
*) alloca (nconstructs
2225 * sizeof (enum tree_code
));
2226 omp_construct_traits_to_codes (selectors
, nconstructs
, constructs
);
2229 = (int *) alloca ((2 * nconstructs
+ 2) * sizeof (int));
2230 if (omp_construct_selector_matches (constructs
, nconstructs
, scores
)
2233 int b
= declare_simd
? nconstructs
+ 1 : 0;
2234 if (scores
[b
+ nconstructs
] + 4U < score
->get_precision ())
2236 for (int n
= 0; n
< nconstructs
; ++n
)
2238 if (scores
[b
+ n
] < 0)
2243 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ n
], 1, false);
2246 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ nconstructs
],
2249 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ nconstructs
] + 1,
2252 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ nconstructs
] + 2,
2255 else /* FIXME: Implement this. */
2261 /* Class describing a single variant. */
2262 struct GTY(()) omp_declare_variant_entry
{
2263 /* NODE of the variant. */
2264 cgraph_node
*variant
;
2265 /* Score if not in declare simd clone. */
2266 score_wide_int score
;
2267 /* Score if in declare simd clone. */
2268 score_wide_int score_in_declare_simd_clone
;
2269 /* Context selector for the variant. */
2271 /* True if the context selector is known to match already. */
2275 /* Class describing a function with variants. */
2276 struct GTY((for_user
)) omp_declare_variant_base_entry
{
2277 /* NODE of the base function. */
2279 /* NODE of the artificial function created for the deferred variant
2282 /* Vector of the variants. */
2283 vec
<omp_declare_variant_entry
, va_gc
> *variants
;
2286 struct omp_declare_variant_hasher
2287 : ggc_ptr_hash
<omp_declare_variant_base_entry
> {
2288 static hashval_t
hash (omp_declare_variant_base_entry
*);
2289 static bool equal (omp_declare_variant_base_entry
*,
2290 omp_declare_variant_base_entry
*);
2294 omp_declare_variant_hasher::hash (omp_declare_variant_base_entry
*x
)
2296 inchash::hash hstate
;
2297 hstate
.add_int (DECL_UID (x
->base
->decl
));
2298 hstate
.add_int (x
->variants
->length ());
2299 omp_declare_variant_entry
*variant
;
2301 FOR_EACH_VEC_SAFE_ELT (x
->variants
, i
, variant
)
2303 hstate
.add_int (DECL_UID (variant
->variant
->decl
));
2304 hstate
.add_wide_int (variant
->score
);
2305 hstate
.add_wide_int (variant
->score_in_declare_simd_clone
);
2306 hstate
.add_ptr (variant
->ctx
);
2307 hstate
.add_int (variant
->matches
);
2309 return hstate
.end ();
2313 omp_declare_variant_hasher::equal (omp_declare_variant_base_entry
*x
,
2314 omp_declare_variant_base_entry
*y
)
2316 if (x
->base
!= y
->base
2317 || x
->variants
->length () != y
->variants
->length ())
2319 omp_declare_variant_entry
*variant
;
2321 FOR_EACH_VEC_SAFE_ELT (x
->variants
, i
, variant
)
2322 if (variant
->variant
!= (*y
->variants
)[i
].variant
2323 || variant
->score
!= (*y
->variants
)[i
].score
2324 || (variant
->score_in_declare_simd_clone
2325 != (*y
->variants
)[i
].score_in_declare_simd_clone
)
2326 || variant
->ctx
!= (*y
->variants
)[i
].ctx
2327 || variant
->matches
!= (*y
->variants
)[i
].matches
)
2332 static GTY(()) hash_table
<omp_declare_variant_hasher
> *omp_declare_variants
;
2334 struct omp_declare_variant_alt_hasher
2335 : ggc_ptr_hash
<omp_declare_variant_base_entry
> {
2336 static hashval_t
hash (omp_declare_variant_base_entry
*);
2337 static bool equal (omp_declare_variant_base_entry
*,
2338 omp_declare_variant_base_entry
*);
2342 omp_declare_variant_alt_hasher::hash (omp_declare_variant_base_entry
*x
)
2344 return DECL_UID (x
->node
->decl
);
2348 omp_declare_variant_alt_hasher::equal (omp_declare_variant_base_entry
*x
,
2349 omp_declare_variant_base_entry
*y
)
2351 return x
->node
== y
->node
;
2354 static GTY(()) hash_table
<omp_declare_variant_alt_hasher
>
2355 *omp_declare_variant_alt
;
2357 /* Try to resolve declare variant after gimplification. */
2360 omp_resolve_late_declare_variant (tree alt
)
2362 cgraph_node
*node
= cgraph_node::get (alt
);
2363 cgraph_node
*cur_node
= cgraph_node::get (cfun
->decl
);
2365 || !node
->declare_variant_alt
2366 || !cfun
->after_inlining
)
2369 omp_declare_variant_base_entry entry
;
2372 entry
.variants
= NULL
;
2373 omp_declare_variant_base_entry
*entryp
2374 = omp_declare_variant_alt
->find_with_hash (&entry
, DECL_UID (alt
));
2377 omp_declare_variant_entry
*varentry1
, *varentry2
;
2378 auto_vec
<bool, 16> matches
;
2379 unsigned int nmatches
= 0;
2380 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry1
)
2382 if (varentry1
->matches
)
2384 /* This has been checked to be ok already. */
2385 matches
.safe_push (true);
2389 switch (omp_context_selector_matches (varentry1
->ctx
))
2392 matches
.safe_push (false);
2397 matches
.safe_push (true);
2404 return entryp
->base
->decl
;
2406 /* A context selector that is a strict subset of another context selector
2407 has a score of zero. */
2408 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry1
)
2412 vec_safe_iterate (entryp
->variants
, j
, &varentry2
); ++j
)
2415 int r
= omp_context_selector_compare (varentry1
->ctx
,
2419 /* ctx1 is a strict subset of ctx2, ignore ctx1. */
2424 /* ctx2 is a strict subset of ctx1, remove ctx2. */
2429 score_wide_int max_score
= -1;
2431 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry1
)
2434 score_wide_int score
2435 = (cur_node
->simdclone
? varentry1
->score_in_declare_simd_clone
2436 : varentry1
->score
);
2437 if (score
> max_score
)
2440 varentry2
= varentry1
;
2443 return varentry2
->variant
->decl
;
2446 /* Hook to adjust hash tables on cgraph_node removal. */
2449 omp_declare_variant_remove_hook (struct cgraph_node
*node
, void *)
2451 if (!node
->declare_variant_alt
)
2454 /* Drop this hash table completely. */
2455 omp_declare_variants
= NULL
;
2456 /* And remove node from the other hash table. */
2457 if (omp_declare_variant_alt
)
2459 omp_declare_variant_base_entry entry
;
2462 entry
.variants
= NULL
;
2463 omp_declare_variant_alt
->remove_elt_with_hash (&entry
,
2464 DECL_UID (node
->decl
));
2468 /* Try to resolve declare variant, return the variant decl if it should
2469 be used instead of base, or base otherwise. */
2472 omp_resolve_declare_variant (tree base
)
2474 tree variant1
= NULL_TREE
, variant2
= NULL_TREE
;
2475 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
2476 return omp_resolve_late_declare_variant (base
);
2478 auto_vec
<tree
, 16> variants
;
2479 auto_vec
<bool, 16> defer
;
2480 bool any_deferred
= false;
2481 for (tree attr
= DECL_ATTRIBUTES (base
); attr
; attr
= TREE_CHAIN (attr
))
2483 attr
= lookup_attribute ("omp declare variant base", attr
);
2484 if (attr
== NULL_TREE
)
2486 if (TREE_CODE (TREE_PURPOSE (TREE_VALUE (attr
))) != FUNCTION_DECL
)
2488 cgraph_node
*node
= cgraph_node::get (base
);
2489 /* If this is already a magic decl created by this function,
2490 don't process it again. */
2491 if (node
&& node
->declare_variant_alt
)
2493 switch (omp_context_selector_matches (TREE_VALUE (TREE_VALUE (attr
))))
2496 /* No match, ignore. */
2499 /* Needs to be deferred. */
2500 any_deferred
= true;
2501 variants
.safe_push (attr
);
2502 defer
.safe_push (true);
2505 variants
.safe_push (attr
);
2506 defer
.safe_push (false);
2510 if (variants
.length () == 0)
2515 score_wide_int max_score1
= 0;
2516 score_wide_int max_score2
= 0;
2520 omp_declare_variant_base_entry entry
;
2521 entry
.base
= cgraph_node::get_create (base
);
2523 vec_alloc (entry
.variants
, variants
.length ());
2524 FOR_EACH_VEC_ELT (variants
, i
, attr1
)
2526 score_wide_int score1
;
2527 score_wide_int score2
;
2529 tree ctx
= TREE_VALUE (TREE_VALUE (attr1
));
2530 need_two
= omp_context_compute_score (ctx
, &score1
, false);
2532 omp_context_compute_score (ctx
, &score2
, true);
2538 max_score1
= score1
;
2539 max_score2
= score2
;
2548 if (max_score1
== score1
)
2549 variant1
= NULL_TREE
;
2550 else if (score1
> max_score1
)
2552 max_score1
= score1
;
2553 variant1
= defer
[i
] ? NULL_TREE
: attr1
;
2555 if (max_score2
== score2
)
2556 variant2
= NULL_TREE
;
2557 else if (score2
> max_score2
)
2559 max_score2
= score2
;
2560 variant2
= defer
[i
] ? NULL_TREE
: attr1
;
2563 omp_declare_variant_entry varentry
;
2565 = cgraph_node::get_create (TREE_PURPOSE (TREE_VALUE (attr1
)));
2566 varentry
.score
= score1
;
2567 varentry
.score_in_declare_simd_clone
= score2
;
2569 varentry
.matches
= !defer
[i
];
2570 entry
.variants
->quick_push (varentry
);
2573 /* If there is a clear winner variant with the score which is not
2574 deferred, verify it is not a strict subset of any other context
2575 selector and if it is not, it is the best alternative no matter
2576 whether the others do or don't match. */
2577 if (variant1
&& variant1
== variant2
)
2579 tree ctx1
= TREE_VALUE (TREE_VALUE (variant1
));
2580 FOR_EACH_VEC_ELT (variants
, i
, attr2
)
2582 if (attr2
== variant1
)
2584 tree ctx2
= TREE_VALUE (TREE_VALUE (attr2
));
2585 int r
= omp_context_selector_compare (ctx1
, ctx2
);
2588 /* The winner is a strict subset of ctx2, can't
2590 variant1
= NULL_TREE
;
2596 vec_free (entry
.variants
);
2597 return TREE_PURPOSE (TREE_VALUE (variant1
));
2601 static struct cgraph_node_hook_list
*node_removal_hook_holder
;
2602 if (!node_removal_hook_holder
)
2603 node_removal_hook_holder
2604 = symtab
->add_cgraph_removal_hook (omp_declare_variant_remove_hook
,
2607 if (omp_declare_variants
== NULL
)
2608 omp_declare_variants
2609 = hash_table
<omp_declare_variant_hasher
>::create_ggc (64);
2610 omp_declare_variant_base_entry
**slot
2611 = omp_declare_variants
->find_slot (&entry
, INSERT
);
2614 vec_free (entry
.variants
);
2615 return (*slot
)->node
->decl
;
2618 *slot
= ggc_cleared_alloc
<omp_declare_variant_base_entry
> ();
2619 (*slot
)->base
= entry
.base
;
2620 (*slot
)->node
= entry
.base
;
2621 (*slot
)->variants
= entry
.variants
;
2622 tree alt
= build_decl (DECL_SOURCE_LOCATION (base
), FUNCTION_DECL
,
2623 DECL_NAME (base
), TREE_TYPE (base
));
2624 DECL_ARTIFICIAL (alt
) = 1;
2625 DECL_IGNORED_P (alt
) = 1;
2626 TREE_STATIC (alt
) = 1;
2627 tree attributes
= DECL_ATTRIBUTES (base
);
2628 if (lookup_attribute ("noipa", attributes
) == NULL
)
2630 attributes
= tree_cons (get_identifier ("noipa"), NULL
, attributes
);
2631 if (lookup_attribute ("noinline", attributes
) == NULL
)
2632 attributes
= tree_cons (get_identifier ("noinline"), NULL
,
2634 if (lookup_attribute ("noclone", attributes
) == NULL
)
2635 attributes
= tree_cons (get_identifier ("noclone"), NULL
,
2637 if (lookup_attribute ("no_icf", attributes
) == NULL
)
2638 attributes
= tree_cons (get_identifier ("no_icf"), NULL
,
2641 DECL_ATTRIBUTES (alt
) = attributes
;
2642 DECL_INITIAL (alt
) = error_mark_node
;
2643 (*slot
)->node
= cgraph_node::create (alt
);
2644 (*slot
)->node
->declare_variant_alt
= 1;
2645 (*slot
)->node
->create_reference (entry
.base
, IPA_REF_ADDR
);
2646 omp_declare_variant_entry
*varentry
;
2647 FOR_EACH_VEC_SAFE_ELT (entry
.variants
, i
, varentry
)
2648 (*slot
)->node
->create_reference (varentry
->variant
, IPA_REF_ADDR
);
2649 if (omp_declare_variant_alt
== NULL
)
2650 omp_declare_variant_alt
2651 = hash_table
<omp_declare_variant_alt_hasher
>::create_ggc (64);
2652 *omp_declare_variant_alt
->find_slot_with_hash (*slot
, DECL_UID (alt
),
2657 if (variants
.length () == 1)
2658 return TREE_PURPOSE (TREE_VALUE (variants
[0]));
2660 /* A context selector that is a strict subset of another context selector
2661 has a score of zero. */
2664 FOR_EACH_VEC_ELT (variants
, i
, attr1
)
2667 tree ctx1
= TREE_VALUE (TREE_VALUE (attr1
));
2668 FOR_EACH_VEC_ELT_FROM (variants
, j
, attr2
, i
+ 1)
2671 tree ctx2
= TREE_VALUE (TREE_VALUE (attr2
));
2672 int r
= omp_context_selector_compare (ctx1
, ctx2
);
2675 /* ctx1 is a strict subset of ctx2, remove
2676 attr1 from the vector. */
2677 variants
[i
] = NULL_TREE
;
2681 /* ctx2 is a strict subset of ctx1, remove attr2
2683 variants
[j
] = NULL_TREE
;
2686 score_wide_int max_score1
= 0;
2687 score_wide_int max_score2
= 0;
2689 FOR_EACH_VEC_ELT (variants
, i
, attr1
)
2694 score_wide_int score1
;
2695 score_wide_int score2
;
2701 ctx
= TREE_VALUE (TREE_VALUE (variant1
));
2702 need_two
= omp_context_compute_score (ctx
, &max_score1
, false);
2704 omp_context_compute_score (ctx
, &max_score2
, true);
2706 max_score2
= max_score1
;
2708 ctx
= TREE_VALUE (TREE_VALUE (attr1
));
2709 need_two
= omp_context_compute_score (ctx
, &score1
, false);
2711 omp_context_compute_score (ctx
, &score2
, true);
2714 if (score1
> max_score1
)
2716 max_score1
= score1
;
2719 if (score2
> max_score2
)
2721 max_score2
= score2
;
2731 /* If there is a disagreement on which variant has the highest score
2732 depending on whether it will be in a declare simd clone or not,
2733 punt for now and defer until after IPA where we will know that. */
2734 return ((variant1
&& variant1
== variant2
)
2735 ? TREE_PURPOSE (TREE_VALUE (variant1
)) : base
);
/* LTO-stream out the omp_declare_variant_base_entry recorded for NODE
   (an artificial "declare variant alt" function) into output block OB.
   ENCODER maps symtab nodes to stream indices.  Written layout: base
   decl's encoder index, number of variants, then per variant its encoder
   index, both scores as length-prefixed HOST_WIDE_INT arrays, and a
   combined word encoding the position of its context selector among the
   base decl's "omp declare variant base" attributes plus the 'matches'
   flag in bit 0.  Must stay in sync with
   omp_lto_input_declare_variant_alt below.  */
2739 omp_lto_output_declare_variant_alt (lto_simple_output_block
*ob
,
2741 lto_symtab_encoder_t encoder
)
2743 gcc_assert (node
->declare_variant_alt
);
/* Look up the recorded entry by the UID of NODE's decl.  */
2745 omp_declare_variant_base_entry entry
;
2748 entry
.variants
= NULL
;
2749 omp_declare_variant_base_entry
*entryp
2750 = omp_declare_variant_alt
->find_with_hash (&entry
, DECL_UID (node
->decl
));
2751 gcc_assert (entryp
);
2753 int nbase
= lto_symtab_encoder_lookup (encoder
, entryp
->base
);
2754 gcc_assert (nbase
!= LCC_NOT_FOUND
);
2755 streamer_write_hwi_stream (ob
->main_stream
, nbase
);
2757 streamer_write_hwi_stream (ob
->main_stream
, entryp
->variants
->length ());
2760 omp_declare_variant_entry
*varentry
;
2761 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry
)
2763 int nvar
= lto_symtab_encoder_lookup (encoder
, varentry
->variant
);
2764 gcc_assert (nvar
!= LCC_NOT_FOUND
);
2765 streamer_write_hwi_stream (ob
->main_stream
, nvar
);
/* Stream both scores: first ->score, then (second iteration)
   ->score_in_declare_simd_clone, each as length + HWI array.  */
2767 for (score_wide_int
*w
= &varentry
->score
; ;
2768 w
= &varentry
->score_in_declare_simd_clone
)
2770 unsigned len
= w
->get_len ();
2771 streamer_write_hwi_stream (ob
->main_stream
, len
);
2772 const HOST_WIDE_INT
*val
= w
->get_val ();
2773 for (unsigned j
= 0; j
< len
; j
++)
2774 streamer_write_hwi_stream (ob
->main_stream
, val
[j
]);
/* After streaming the second score, leave the two-iteration loop
   (the break on the elided line follows this test).  */
2775 if (w
== &varentry
->score_in_declare_simd_clone
)
/* Locate the variant's context selector among the base decl's
   "omp declare variant base" attributes; i counts 2 per attribute,
   with the 'matches' flag in bit 0.  NOTE(review): the assignment of
   cnt on a match is on an elided line — presumably cnt = i.  */
2779 HOST_WIDE_INT cnt
= -1;
2780 HOST_WIDE_INT i
= varentry
->matches
? 1 : 0;
2781 for (tree attr
= DECL_ATTRIBUTES (entryp
->base
->decl
);
2782 attr
; attr
= TREE_CHAIN (attr
), i
+= 2)
2784 attr
= lookup_attribute ("omp declare variant base", attr
);
2785 if (attr
== NULL_TREE
)
2788 if (varentry
->ctx
== TREE_VALUE (TREE_VALUE (attr
)))
2795 gcc_assert (cnt
!= -1);
2796 streamer_write_hwi_stream (ob
->main_stream
, cnt
);
/* LTO-stream in the omp_declare_variant_base_entry for NODE (a
   "declare variant alt" function) from input block IB, resolving
   streamed symtab indices through NODES.  Mirrors, field for field, the
   layout written by omp_lto_output_declare_variant_alt above, and
   registers the reconstructed entry in the omp_declare_variant_alt hash
   table keyed by DECL_UID (node->decl).  */
2801 omp_lto_input_declare_variant_alt (lto_input_block
*ib
, cgraph_node
*node
,
2802 vec
<symtab_node
*> nodes
)
2804 gcc_assert (node
->declare_variant_alt
);
2805 omp_declare_variant_base_entry
*entryp
2806 = ggc_cleared_alloc
<omp_declare_variant_base_entry
> ();
2807 entryp
->base
= dyn_cast
<cgraph_node
*> (nodes
[streamer_read_hwi (ib
)]);
2808 entryp
->node
= node
;
2809 unsigned int len
= streamer_read_hwi (ib
);
2810 vec_alloc (entryp
->variants
, len
);
2812 for (unsigned int i
= 0; i
< len
; i
++)
2814 omp_declare_variant_entry varentry
;
/* The variant's cgraph node, read back by stream index.  */
2816 = dyn_cast
<cgraph_node
*> (nodes
[streamer_read_hwi (ib
)]);
/* Read both scores: .score first, .score_in_declare_simd_clone on the
   second (final) iteration; each is a length-prefixed HWI array.  */
2817 for (score_wide_int
*w
= &varentry
.score
; ;
2818 w
= &varentry
.score_in_declare_simd_clone
)
2820 unsigned len2
= streamer_read_hwi (ib
);
2821 HOST_WIDE_INT arr
[WIDE_INT_MAX_HWIS (1024)];
2822 gcc_assert (len2
<= WIDE_INT_MAX_HWIS (1024));
2823 for (unsigned int j
= 0; j
< len2
; j
++)
2824 arr
[j
] = streamer_read_hwi (ib
);
2825 *w
= score_wide_int::from_array (arr
, len2
, true);
2826 if (w
== &varentry
.score_in_declare_simd_clone
)
/* cnt packs the attribute position (even part) and the 'matches' flag
   (bit 0); walk the base decl's "omp declare variant base" attributes
   counting by 2 in j until the streamed position is reached — the
   comparison of j against cnt is on an elided line.  */
2830 HOST_WIDE_INT cnt
= streamer_read_hwi (ib
);
2831 HOST_WIDE_INT j
= 0;
2832 varentry
.ctx
= NULL_TREE
;
2833 varentry
.matches
= (cnt
& 1) ? true : false;
2834 cnt
&= ~HOST_WIDE_INT_1
;
2835 for (tree attr
= DECL_ATTRIBUTES (entryp
->base
->decl
);
2836 attr
; attr
= TREE_CHAIN (attr
), j
+= 2)
2838 attr
= lookup_attribute ("omp declare variant base", attr
);
2839 if (attr
== NULL_TREE
)
2844 varentry
.ctx
= TREE_VALUE (TREE_VALUE (attr
));
2848 gcc_assert (varentry
.ctx
!= NULL_TREE
);
2849 entryp
->variants
->quick_push (varentry
);
/* Lazily create the alt hash table, then register the entry under the
   UID of NODE's decl.  */
2851 if (omp_declare_variant_alt
== NULL
)
2852 omp_declare_variant_alt
2853 = hash_table
<omp_declare_variant_alt_hasher
>::create_ggc (64);
2854 *omp_declare_variant_alt
->find_slot_with_hash (entryp
, DECL_UID (node
->decl
),
2858 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
2859 macro on gomp-constants.h. We do not check for overflow. */
/* Encode an OpenACC launch argument: pack OP together with CODE via
   GOMP_LAUNCH_PACK (with a zero device field), then, for a non-NULL
   DEVICE expression, shift it into the device field and OR it in.
   Overflow is deliberately not checked (see the comment above).  */
2862 oacc_launch_pack (unsigned code
, tree device
, unsigned op
)
2866 res
= build_int_cst (unsigned_type_node
, GOMP_LAUNCH_PACK (code
, 0, op
));
/* NOTE(review): the device-merging below is presumably guarded by an
   "if (device)" on an elided line — confirm against upstream.  */
2869 device
= fold_build2 (LSHIFT_EXPR
, unsigned_type_node
,
2870 device
, build_int_cst (unsigned_type_node
,
2871 GOMP_LAUNCH_DEVICE_SHIFT
));
2872 res
= fold_build2 (BIT_IOR_EXPR
, unsigned_type_node
, res
, device
);
2877 /* FIXME: What is the following comment for? */
2878 /* Look for compute grid dimension clauses and convert to an attribute
2879 attached to FN. This permits the target-side code to (a) massage
2880 the dimensions, (b) emit that data and (c) optimize. Non-constant
2881 dimensions are pushed onto ARGS.
2883 The attribute value is a TREE_LIST. A set of dimensions is
2884 represented as a list of INTEGER_CST. Those that are runtime
2885 exprs are represented as an INTEGER_CST of zero.
2887 TODO: Normally the attribute will just contain a single such list. If
2888 however it contains a list of lists, this will represent the use of
2889 device_type. Each member of the outer list is an assoc list of
2890 dimensions, keyed by the device type. The first entry will be the
2891 default. Well, that's the plan. */
2893 /* Replace any existing oacc fn attribute with updated dimensions. */
2895 /* Variant working on a list of attributes. */
2898 oacc_replace_fn_attrib_attr (tree attribs
, tree dims
)
2900 tree ident
= get_identifier (OACC_FN_ATTRIB
);
2902 /* If we happen to be present as the first attrib, drop it. */
2903 if (attribs
&& TREE_PURPOSE (attribs
) == ident
)
2904 attribs
= TREE_CHAIN (attribs
);
2905 return tree_cons (ident
, dims
, attribs
);
2908 /* Variant working on a function decl. */
2911 oacc_replace_fn_attrib (tree fn
, tree dims
)
2913 DECL_ATTRIBUTES (fn
)
2914 = oacc_replace_fn_attrib_attr (DECL_ATTRIBUTES (fn
), dims
);
2917 /* Scan CLAUSES for launch dimensions and attach them to the oacc
2918 function attribute. Push any that are non-constant onto the ARGS
2919 list, along with an appropriate GOMP_LAUNCH_DIM tag. */
/* Scan CLAUSES for the OpenACC launch-dimension clauses (num_gangs,
   num_workers, vector_length), attach them as the OACC_FN_ATTRIB
   attribute of FN, and push any non-constant dimension expressions onto
   ARGS packed with a GOMP_LAUNCH_DIM tag.  In the attribute, a
   non-constant dimension is represented by integer_zero_node.  */
2922 oacc_set_fn_attrib (tree fn
, tree clauses
, vec
<tree
> *args
)
2924 /* Must match GOMP_DIM ordering. */
2925 static const omp_clause_code ids
[]
2926 = { OMP_CLAUSE_NUM_GANGS
, OMP_CLAUSE_NUM_WORKERS
,
2927 OMP_CLAUSE_VECTOR_LENGTH
};
2929 tree dims
[GOMP_DIM_MAX
];
2931 tree attr
= NULL_TREE
;
2932 unsigned non_const
= 0;
/* Walk dimensions highest-index first so the attribute list built by
   consing below ends up in GOMP_DIM order.  */
2934 for (ix
= GOMP_DIM_MAX
; ix
--;)
2936 tree clause
= omp_find_clause (clauses
, ids
[ix
]);
2937 tree dim
= NULL_TREE
;
2940 dim
= OMP_CLAUSE_EXPR (clause
, ids
[ix
]);
/* A runtime expression: record zero in the attribute and remember this
   dimension in the NON_CONST mask.  NOTE(review): the stashing of the
   original expression into dims[ix] is on an elided line.  */
2942 if (dim
&& TREE_CODE (dim
) != INTEGER_CST
)
2944 dim
= integer_zero_node
;
2945 non_const
|= GOMP_DIM_MASK (ix
);
2947 attr
= tree_cons (NULL_TREE
, dim
, attr
);
2950 oacc_replace_fn_attrib (fn
, attr
);
2954 /* Push a dynamic argument set. */
2955 args
->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM
,
2956 NULL_TREE
, non_const
));
2957 for (unsigned ix
= 0; ix
!= GOMP_DIM_MAX
; ix
++)
2958 if (non_const
& GOMP_DIM_MASK (ix
))
2959 args
->safe_push (dims
[ix
]);
2963 /* Verify OpenACC routine clauses.
2965 Returns 0 if FNDECL should be marked with an OpenACC 'routine' directive, 1
2966 if it has already been marked in compatible way, and -1 if incompatible.
2967 Upon returning, the chain of clauses will contain exactly one clause
2968 specifying the level of parallelism. */
/* Verify OpenACC 'routine' clauses for FNDECL (directive spelled
   ROUTINE_STR, located at LOC).  Normalizes *CLAUSES so that exactly
   one level-of-parallelism clause (gang/worker/vector/seq) remains,
   defaulting to an implicit 'seq'; duplicates and conflicts are dropped
   (conflicts with a diagnostic).  Then checks compatibility against any
   previously applied 'omp declare target' / 'acc routine' attribute on
   FNDECL.  Per the comment above the function, returns 0 to mark, 1 if
   already compatibly marked, -1 if incompatible (the return statements
   themselves are on elided lines).  */
2971 oacc_verify_routine_clauses (tree fndecl
, tree
*clauses
, location_t loc
,
2972 const char *routine_str
)
2974 tree c_level
= NULL_TREE
;
2975 tree c_nohost
= NULL_TREE
;
2976 tree c_p
= NULL_TREE
;
/* First pass over the new directive's clauses: record the level of
   parallelism and 'nohost', dropping duplicates/conflicts in place
   (c_p trails c as the predecessor for unlinking).  */
2977 for (tree c
= *clauses
; c
; c_p
= c
, c
= OMP_CLAUSE_CHAIN (c
))
2978 switch (OMP_CLAUSE_CODE (c
))
2980 case OMP_CLAUSE_GANG
:
2981 case OMP_CLAUSE_WORKER
:
2982 case OMP_CLAUSE_VECTOR
:
2983 case OMP_CLAUSE_SEQ
:
2984 if (c_level
== NULL_TREE
)
2986 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_CODE (c_level
))
2988 /* This has already been diagnosed in the front ends. */
2989 /* Drop the duplicate clause. */
2990 gcc_checking_assert (c_p
!= NULL_TREE
);
2991 OMP_CLAUSE_CHAIN (c_p
) = OMP_CLAUSE_CHAIN (c
);
2996 error_at (OMP_CLAUSE_LOCATION (c
),
2997 "%qs specifies a conflicting level of parallelism",
2998 omp_clause_code_name
[OMP_CLAUSE_CODE (c
)]);
2999 inform (OMP_CLAUSE_LOCATION (c_level
),
3000 "... to the previous %qs clause here",
3001 omp_clause_code_name
[OMP_CLAUSE_CODE (c_level
)]);
3002 /* Drop the conflicting clause. */
3003 gcc_checking_assert (c_p
!= NULL_TREE
);
3004 OMP_CLAUSE_CHAIN (c_p
) = OMP_CLAUSE_CHAIN (c
);
3008 case OMP_CLAUSE_NOHOST
:
3009 /* Don't worry about duplicate clauses here. */
3015 if (c_level
== NULL_TREE
)
3017 /* Default to an implicit 'seq' clause. */
3018 c_level
= build_omp_clause (loc
, OMP_CLAUSE_SEQ
);
3019 OMP_CLAUSE_CHAIN (c_level
) = *clauses
;
3022 /* In *clauses, we now have exactly one clause specifying the level of
3026 = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl
));
3027 if (attr
!= NULL_TREE
)
3029 /* Diagnose if "#pragma omp declare target" has also been applied. */
3030 if (TREE_VALUE (attr
) == NULL_TREE
)
3032 /* See <https://gcc.gnu.org/PR93465>; the semantics of combining
3033 OpenACC and OpenMP 'target' are not clear. */
3035 "cannot apply %<%s%> to %qD, which has also been"
3036 " marked with an OpenMP 'declare target' directive",
3037 routine_str
, fndecl
);
3042 /* If a "#pragma acc routine" has already been applied, just verify
3043 this one for compatibility. */
3044 /* Collect previous directive's clauses. */
3045 tree c_level_p
= NULL_TREE
;
3046 tree c_nohost_p
= NULL_TREE
;
3047 for (tree c
= TREE_VALUE (attr
); c
; c
= OMP_CLAUSE_CHAIN (c
))
3048 switch (OMP_CLAUSE_CODE (c
))
3050 case OMP_CLAUSE_GANG
:
3051 case OMP_CLAUSE_WORKER
:
3052 case OMP_CLAUSE_VECTOR
:
3053 case OMP_CLAUSE_SEQ
:
3054 gcc_checking_assert (c_level_p
== NULL_TREE
);
3057 case OMP_CLAUSE_NOHOST
:
3058 gcc_checking_assert (c_nohost_p
== NULL_TREE
);
3064 gcc_checking_assert (c_level_p
!= NULL_TREE
);
3065 /* ..., and compare to current directive's, which we've already collected
3069 /* Matching level of parallelism? */
3070 if (OMP_CLAUSE_CODE (c_level
) != OMP_CLAUSE_CODE (c_level_p
))
3073 c_diag_p
= c_level_p
;
3076 /* Matching 'nohost' clauses? */
3077 if ((c_nohost
== NULL_TREE
) != (c_nohost_p
== NULL_TREE
))
3080 c_diag_p
= c_nohost_p
;
/* Emit the incompatibility diagnostics: c_diag points at the offending
   new clause, c_diag_p at the previous directive's counterpart.  */
3087 if (c_diag
!= NULL_TREE
)
3088 error_at (OMP_CLAUSE_LOCATION (c_diag
),
3089 "incompatible %qs clause when applying"
3090 " %<%s%> to %qD, which has already been"
3091 " marked with an OpenACC 'routine' directive",
3092 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag
)],
3093 routine_str
, fndecl
);
3094 else if (c_diag_p
!= NULL_TREE
)
3096 "missing %qs clause when applying"
3097 " %<%s%> to %qD, which has already been"
3098 " marked with an OpenACC 'routine' directive",
3099 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag_p
)],
3100 routine_str
, fndecl
);
3103 if (c_diag_p
!= NULL_TREE
)
3104 inform (OMP_CLAUSE_LOCATION (c_diag_p
),
3105 "... with %qs clause here",
3106 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag_p
)]);
3109 /* In the front ends, we don't preserve location information for the
3110 OpenACC routine directive itself. However, that of c_level_p
3112 location_t loc_routine
= OMP_CLAUSE_LOCATION (c_level_p
);
3113 inform (loc_routine
, "... without %qs clause near to here",
3114 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag
)]);
3123 /* Process the OpenACC 'routine' directive clauses to generate an attribute
3124 for the level of parallelism. All dimensions have a size of zero
3125 (dynamic). TREE_PURPOSE is set to indicate whether that dimension
3126 can have a loop partitioned on it. non-zero indicates
3127 yes, zero indicates no. By construction once a non-zero has been
3128 reached, further inner dimensions must also be non-zero. We set
3129 TREE_VALUE to zero for the dimensions that may be partitioned and
3130 1 for the other ones -- if a loop is (erroneously) spawned at
3131 an outer level, we don't want to try and partition it. */
/* Build the OpenACC routine dimension attribute value from CLAUSES: a
   TREE_LIST with one entry per GOMP_DIM.  TREE_PURPOSE is a boolean —
   nonzero for dimensions at or inside the routine's level of
   parallelism (partitionable); TREE_VALUE is 1 for outer dimensions and
   0 for partitionable ones (see the block comment above).  */
3134 oacc_build_routine_dims (tree clauses
)
3136 /* Must match GOMP_DIM ordering. */
3137 static const omp_clause_code ids
[]
3138 = {OMP_CLAUSE_GANG
, OMP_CLAUSE_WORKER
, OMP_CLAUSE_VECTOR
, OMP_CLAUSE_SEQ
};
/* Find the outermost level-of-parallelism clause present; the
   assignment to 'level' on a clause match is on an elided line.  */
3142 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
3143 for (ix
= GOMP_DIM_MAX
+ 1; ix
--;)
3144 if (OMP_CLAUSE_CODE (clauses
) == ids
[ix
])
3149 gcc_checking_assert (level
>= 0);
/* Cons the per-dimension entries highest-index first so the resulting
   list is in GOMP_DIM order.  */
3151 tree dims
= NULL_TREE
;
3153 for (ix
= GOMP_DIM_MAX
; ix
--;)
3154 dims
= tree_cons (build_int_cst (boolean_type_node
, ix
>= level
),
3155 build_int_cst (integer_type_node
, ix
< level
), dims
);
3160 /* Retrieve the oacc function attrib and return it. Non-oacc
3161 functions will return NULL. */
3164 oacc_get_fn_attrib (tree fn
)
3166 return lookup_attribute (OACC_FN_ATTRIB
, DECL_ATTRIBUTES (fn
));
3169 /* Return true if FN is an OpenMP or OpenACC offloading function. */
3172 offloading_function_p (tree fn
)
3174 tree attrs
= DECL_ATTRIBUTES (fn
);
3175 return (lookup_attribute ("omp declare target", attrs
)
3176 || lookup_attribute ("omp target entrypoint", attrs
));
3179 /* Extract an oacc execution dimension from FN. FN must be an
3180 offloaded function or routine that has already had its execution
3181 dimensions lowered to the target-specific values. */
3184 oacc_get_fn_dim_size (tree fn
, int axis
)
3186 tree attrs
= oacc_get_fn_attrib (fn
);
3188 gcc_assert (axis
< GOMP_DIM_MAX
);
3190 tree dims
= TREE_VALUE (attrs
);
3192 dims
= TREE_CHAIN (dims
);
3194 int size
= TREE_INT_CST_LOW (TREE_VALUE (dims
));
3199 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
3200 IFN_GOACC_DIM_SIZE call. */
3203 oacc_get_ifn_dim_arg (const gimple
*stmt
)
3205 gcc_checking_assert (gimple_call_internal_fn (stmt
) == IFN_GOACC_DIM_SIZE
3206 || gimple_call_internal_fn (stmt
) == IFN_GOACC_DIM_POS
);
3207 tree arg
= gimple_call_arg (stmt
, 0);
3208 HOST_WIDE_INT axis
= TREE_INT_CST_LOW (arg
);
3210 gcc_checking_assert (axis
>= 0 && axis
< GOMP_DIM_MAX
);
3214 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
3218 omp_build_component_ref (tree obj
, tree field
)
3220 tree ret
= build3 (COMPONENT_REF
, TREE_TYPE (field
), obj
, field
, NULL
);
3221 if (TREE_THIS_VOLATILE (field
))
3222 TREE_THIS_VOLATILE (ret
) |= 1;
3223 if (TREE_READONLY (field
))
3224 TREE_READONLY (ret
) |= 1;
3228 /* Return true if NAME is the name of an omp_* runtime API call. */
/* Return true if NAME is the name of an omp_* runtime API routine.
   NAME must start with "omp_"; the remainder is matched against the
   table below.  The table has three sections (plain names, names that
   also exist with a trailing underscore, and names that additionally
   exist with an "_8" suffix); 'mode' tracks which section we are in —
   NOTE(review): the NULL section separators and the advance of 'mode'
   are on elided lines.  */
3230 omp_runtime_api_procname (const char *name
)
3232 if (!startswith (name
, "omp_"))
3235 static const char *omp_runtime_apis
[] =
3237 /* This array has 3 sections. First omp_* calls that don't
3238 have any suffixes. */
3247 "target_associate_ptr",
3248 "target_disassociate_ptr",
3250 "target_is_accessible",
3251 "target_is_present",
3253 "target_memcpy_async",
3254 "target_memcpy_rect",
3255 "target_memcpy_rect_async",
3257 /* Now omp_* calls that are available as omp_* and omp_*_; however, the
3258 DECL_NAME is always omp_* without tailing underscore. */
3260 "destroy_allocator",
3262 "destroy_nest_lock",
3266 "get_affinity_format",
3268 "get_default_allocator",
3269 "get_default_device",
3272 "get_initial_device",
3274 "get_max_active_levels",
3275 "get_max_task_priority",
3284 "get_partition_num_places",
3287 "get_supported_active_levels",
3289 "get_teams_thread_limit",
3299 "is_initial_device",
3301 "pause_resource_all",
3302 "set_affinity_format",
3303 "set_default_allocator",
3311 /* And finally calls available as omp_*, omp_*_ and omp_*_8_; however,
3312 as DECL_NAME only omp_* and omp_*_8 appear. */
3314 "get_ancestor_thread_num",
3316 "get_partition_place_nums",
3317 "get_place_num_procs",
3318 "get_place_proc_ids",
3321 "set_default_device",
3323 "set_max_active_levels",
3328 "set_teams_thread_limit"
/* Compare NAME (past the "omp_" prefix) to each table entry; in
   sections with mode > 1, also accept an "_8" suffix.  */
3332 for (unsigned i
= 0; i
< ARRAY_SIZE (omp_runtime_apis
); i
++)
3334 if (omp_runtime_apis
[i
] == NULL
)
3339 size_t len
= strlen (omp_runtime_apis
[i
]);
3340 if (strncmp (name
+ 4, omp_runtime_apis
[i
], len
) == 0
3341 && (name
[4 + len
] == '\0'
3342 || (mode
> 1 && strcmp (name
+ 4 + len
, "_8") == 0)))
3348 /* Return true if FNDECL is an omp_* runtime API call. */
3351 omp_runtime_api_call (const_tree fndecl
)
3353 tree declname
= DECL_NAME (fndecl
);
3355 || (DECL_CONTEXT (fndecl
) != NULL_TREE
3356 && TREE_CODE (DECL_CONTEXT (fndecl
)) != TRANSLATION_UNIT_DECL
)
3357 || !TREE_PUBLIC (fndecl
))
3359 return omp_runtime_api_procname (IDENTIFIER_POINTER (declname
));
3362 namespace omp_addr_tokenizer
{
3364 /* We scan an expression by recursive descent, and build a vector of
3365 "omp_addr_token *" pointers representing a "parsed" version of the
3366 expression. The grammar we use is something like this:
3369 expr [section-access]
3372 structured-expr access-method
3373 | array-base access-method
3376 structure-base component-selector
3383 | structured-expr access-method
3384 | arbitrary-expr access-method
3396 | REF_TO_POINTER_OFFSET
3398 | INDEXED_REF_TO_ARRAY
3402 INDEX_EXPR access-method
3404 component-selector::
3405 component-selector COMPONENT_REF
3406 | component-selector ARRAY_REF
3409 This tokenized form is then used both in parsing, for OpenMP clause
3410 expansion (for C and C++) and in gimplify.cc for sibling-list handling
3411 (for C, C++ and Fortran). */
/* Construct a token of type T for expression E (variants of the union
   member left untouched — initialization details are on elided lines).  */
3413 omp_addr_token::omp_addr_token (token_type t
, tree e
)
/* Construct an ACCESS_METHOD token with access-method kind K for
   expression E.  */
3418 omp_addr_token::omp_addr_token (access_method_kinds k
, tree e
)
3419 : type(ACCESS_METHOD
), expr(e
)
/* Construct a token of type T (STRUCTURE_BASE or ARRAY_BASE) with
   structure-base kind K for expression E.  */
3424 omp_addr_token::omp_addr_token (token_type t
, structure_base_kinds k
, tree e
)
3427 u
.structure_base_kind
= k
;
/* Parse a component-selector: strip trailing COMPONENT_REF/ARRAY_REF
   nodes off *EXPR0, remembering the outermost COMPONENT_REF seen.  On
   success, *EXPR0 is set to that outermost COMPONENT_REF (whose operand
   is the structure base).  Stops early at a REFERENCE_TYPE base.
   Returns a success flag — the return statements are on elided lines.  */
3431 omp_parse_component_selector (tree
*expr0
)
3434 tree last_component
= NULL_TREE
;
3436 while (TREE_CODE (expr
) == COMPONENT_REF
3437 || TREE_CODE (expr
) == ARRAY_REF
)
3439 if (TREE_CODE (expr
) == COMPONENT_REF
)
3440 last_component
= expr
;
3442 expr
= TREE_OPERAND (expr
, 0);
/* A reference-typed base terminates the selector walk here.  */
3444 if (TREE_CODE (TREE_TYPE (expr
)) == REFERENCE_TYPE
)
/* No COMPONENT_REF at all means this was not a component selector.  */
3448 if (!last_component
)
3451 *expr0
= last_component
;
3455 /* This handles references that have had convert_from_reference called on
3456 them, and also those that haven't. */
/* Parse one level of reference indirection off *EXPR0.  Handles both
   references that have had convert_from_reference called on them
   (expression itself of REFERENCE_TYPE) and those that haven't (an
   INDIRECT_REF / zero-offset MEM_REF of a REFERENCE_TYPE operand,
   which is stripped).  Returns a success flag — the returns are on
   elided lines.  */
3459 omp_parse_ref (tree
*expr0
)
3463 if (TREE_CODE (TREE_TYPE (expr
)) == REFERENCE_TYPE
)
3465 else if ((TREE_CODE (expr
) == INDIRECT_REF
3466 || (TREE_CODE (expr
) == MEM_REF
3467 && integer_zerop (TREE_OPERAND (expr
, 1))))
3468 && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr
, 0))) == REFERENCE_TYPE
)
3470 *expr0
= TREE_OPERAND (expr
, 0);
/* Parse one level of pointer indirection off *EXPR0: an INDIRECT_REF or
   zero-offset MEM_REF whose operand has POINTER_TYPE.  Looks through
   no-op casts, COMPOUND_EXPRs and SAVE_EXPRs wrapping the pointer; a
   POINTER_PLUS_EXPR marks an offset access, reported via *HAS_OFFSET.
   Returns a success flag — the returns and parts of the stripping loop
   are on elided lines.  */
3478 omp_parse_pointer (tree
*expr0
, bool *has_offset
)
3482 *has_offset
= false;
3484 if ((TREE_CODE (expr
) == INDIRECT_REF
3485 || (TREE_CODE (expr
) == MEM_REF
3486 && integer_zerop (TREE_OPERAND (expr
, 1))))
3487 && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr
, 0))) == POINTER_TYPE
)
3489 expr
= TREE_OPERAND (expr
, 0);
3491 /* The Fortran FE sometimes emits a no-op cast here. */
/* Unwrap wrappers around the pointer value itself.  */
3496 if (TREE_CODE (expr
) == COMPOUND_EXPR
)
3498 expr
= TREE_OPERAND (expr
, 1);
3501 else if (TREE_CODE (expr
) == SAVE_EXPR
)
3502 expr
= TREE_OPERAND (expr
, 0);
/* Pointer arithmetic: note the offset (flag set on an elided line) and
   continue from the base pointer.  */
3503 else if (TREE_CODE (expr
) == POINTER_PLUS_EXPR
)
3506 expr
= TREE_OPERAND (expr
, 0);
/* Classify the access method at the head of *EXPR0, setting *KIND to
   one of the access_method_kinds: tries a reference first, then a
   pointer (possibly with offset, possibly behind a reference), then an
   indexed array (possibly behind a reference), falling back to
   ACCESS_DIRECT.  *EXPR0 is advanced past the consumed nodes.  */
3522 omp_parse_access_method (tree
*expr0
, enum access_method_kinds
*kind
)
3527 if (omp_parse_ref (&expr
))
3529 else if (omp_parse_pointer (&expr
, &has_offset
))
/* Pointer access; a further reference underneath distinguishes the
   REF_TO_POINTER variants.  */
3531 if (omp_parse_ref (&expr
))
3532 *kind
= has_offset
? ACCESS_REF_TO_POINTER_OFFSET
3533 : ACCESS_REF_TO_POINTER
;
3535 *kind
= has_offset
? ACCESS_POINTER_OFFSET
: ACCESS_POINTER
;
/* Indexed array access: strip all ARRAY_REFs, then check whether the
   array itself sits behind a reference.  */
3537 else if (TREE_CODE (expr
) == ARRAY_REF
)
3539 while (TREE_CODE (expr
) == ARRAY_REF
)
3540 expr
= TREE_OPERAND (expr
, 0);
3541 if (omp_parse_ref (&expr
))
3542 *kind
= ACCESS_INDEXED_REF_TO_ARRAY
;
3544 *kind
= ACCESS_INDEXED_ARRAY
;
3547 *kind
= ACCESS_DIRECT
;
/* Parse a chain of access methods off *EXPR0, pushing one ACCESS_METHOD
   token per level onto ADDR_TOKENS.  Recurses while further
   indirection/indexing nodes remain, so the outermost access ends up
   pushed last (innermost-first order).  */
3556 omp_parse_access_methods (vec
<omp_addr_token
*> &addr_tokens
, tree
*expr0
)
3559 enum omp_addr_tokenizer::access_method_kinds kind
;
3562 if (omp_parse_access_method (&expr
, &kind
))
/* More indirections below this one: tokenize them first.  */
3565 if (TREE_CODE (expr
) == INDIRECT_REF
3566 || TREE_CODE (expr
) == MEM_REF
3567 || TREE_CODE (expr
) == ARRAY_REF
)
3568 omp_parse_access_methods (addr_tokens
, &expr
);
3570 addr_tokens
.safe_push (new omp_addr_token (kind
, am_expr
));
3576 static bool omp_parse_structured_expr (vec
<omp_addr_token
*> &, tree
*);
/* Parse the base of a structured expression at *EXPR0, classifying it
   into *KIND (decl / component expr / arbitrary expr) and collecting
   the base's own access-method tokens into BASE_ACCESS_TOKENS.  With
   ALLOW_STRUCTURED false (used for array bases) nested structured
   expressions are not considered.  Returns a success flag — several
   cases (e.g. the DECL_P base) are on elided lines.  */
3579 omp_parse_structure_base (vec
<omp_addr_token
*> &addr_tokens
,
3580 tree
*expr0
, structure_base_kinds
*kind
,
3581 vec
<omp_addr_token
*> &base_access_tokens
,
3582 bool allow_structured
= true)
3586 if (allow_structured
)
3587 omp_parse_access_methods (base_access_tokens
, &expr
)
;
3595 if (allow_structured
&& omp_parse_structured_expr (addr_tokens
, &expr
))
3597 *kind
= BASE_COMPONENT_EXPR
;
3602 *kind
= BASE_ARBITRARY_EXPR
;
/* Parse a structured-expr at *EXPR0: a component selector applied to a
   structure base.  On success pushes, in order, a STRUCTURE_BASE token,
   the base's access-method tokens, and a COMPONENT_SELECTOR token onto
   ADDR_TOKENS.  Returns a success flag — the returns and the token
   payload expressions are partly on elided lines.  */
3608 omp_parse_structured_expr (vec
<omp_addr_token
*> &addr_tokens
, tree
*expr0
)
3611 tree base_component
= NULL_TREE
;
3612 structure_base_kinds struct_base_kind
;
3613 auto_vec
<omp_addr_token
*> base_access_tokens
;
3615 if (omp_parse_component_selector (&expr
))
3616 base_component
= expr
;
/* Step from the outermost COMPONENT_REF down to the structure base.  */
3620 gcc_assert (TREE_CODE (expr
) == COMPONENT_REF
);
3621 expr
= TREE_OPERAND (expr
, 0);
3623 tree structure_base
= expr
;
3625 if (!omp_parse_structure_base (addr_tokens
, &expr
, &struct_base_kind
,
3626 base_access_tokens
))
/* Emit tokens: base, then the base's access methods, then selector.  */
3629 addr_tokens
.safe_push (new omp_addr_token (STRUCTURE_BASE
, struct_base_kind
,
3631 addr_tokens
.safe_splice (base_access_tokens
);
3632 addr_tokens
.safe_push (new omp_addr_token (COMPONENT_SELECTOR
,
/* Parse an array-expr at *EXPR0: an array base without a component
   selector (omp_parse_structure_base is called with allow_structured
   false).  On success pushes an ARRAY_BASE token followed by the base's
   access-method tokens onto ADDR_TOKENS.  Returns a success flag — the
   returns are on elided lines.  */
3641 omp_parse_array_expr (vec
<omp_addr_token
*> &addr_tokens
, tree
*expr0
)
3644 structure_base_kinds s_kind
;
3645 auto_vec
<omp_addr_token
*> base_access_tokens
;
3647 if (!omp_parse_structure_base (addr_tokens
, &expr
, &s_kind
,
3648 base_access_tokens
, false))
3651 addr_tokens
.safe_push (new omp_addr_token (ARRAY_BASE
, s_kind
, expr
));
3652 addr_tokens
.safe_splice (base_access_tokens
);
3658 /* Return TRUE if the ACCESS_METHOD token at index 'i' has a further
3659 ACCESS_METHOD chained after it (e.g., if we're processing an expression
3660 containing multiple pointer indirections). */
3663 omp_access_chain_p (vec
<omp_addr_token
*> &addr_tokens
, unsigned i
)
3665 gcc_assert (addr_tokens
[i
]->type
== ACCESS_METHOD
);
3666 return (i
+ 1 < addr_tokens
.length ()
3667 && addr_tokens
[i
+ 1]->type
== ACCESS_METHOD
);
3670 /* Return the address of the object accessed by the ACCESS_METHOD token
3671 at 'i': either of the next access method's expr, or of EXPR if we're at
3672 the end of the list of tokens. */
3675 omp_accessed_addr (vec
<omp_addr_token
*> &addr_tokens
, unsigned i
, tree expr
)
3677 if (i
+ 1 < addr_tokens
.length ())
3678 return build_fold_addr_expr (addr_tokens
[i
+ 1]->expr
);
3680 return build_fold_addr_expr (expr
);
3683 } /* namespace omp_addr_tokenizer. */
/* Top-level entry point of the address tokenizer: parse EXPR into
   ADDR_TOKENS.  First collects the expression's own access-method
   tokens, then tries a structured-expr and falls back to an array-expr,
   finally appending the collected access methods.  Returns a success
   flag — the failure returns are on elided lines.  */
3686 omp_parse_expr (vec
<omp_addr_token
*> &addr_tokens
, tree expr
)
3688 using namespace omp_addr_tokenizer
;
3689 auto_vec
<omp_addr_token
*> expr_access_tokens
;
3691 if (!omp_parse_access_methods (expr_access_tokens
, &expr
))
3694 if (omp_parse_structured_expr (addr_tokens
, &expr
))
3696 else if (omp_parse_array_expr (addr_tokens
, &expr
))
3701 addr_tokens
.safe_splice (expr_access_tokens
);
/* Debug helper: print a human-readable rendering of ADDR_TOKENS to
   stderr, one name per token; when the (elided) WITH_EXPRS parameter is
   set, each token's expression is printed in brackets after its name.  */
3707 debug_omp_tokenized_addr (vec
<omp_addr_token
*> &addr_tokens
,
3710 using namespace omp_addr_tokenizer
;
3711 const char *sep
= with_exprs
? " " : "";
3713 for (auto e
: addr_tokens
)
3715 const char *pfx
= "";
3717 fputs (sep
, stderr
);
/* Dispatch on the token type (switch header on an elided line).  */
3721 case COMPONENT_SELECTOR
:
3722 fputs ("component_selector", stderr
);
3725 switch (e
->u
.access_kind
)
3728 fputs ("access_direct", stderr
);
3731 fputs ("access_ref", stderr
);
3733 case ACCESS_POINTER
:
3734 fputs ("access_pointer", stderr
);
3736 case ACCESS_POINTER_OFFSET
:
3737 fputs ("access_pointer_offset", stderr
);
3739 case ACCESS_REF_TO_POINTER
:
3740 fputs ("access_ref_to_pointer", stderr
);
3742 case ACCESS_REF_TO_POINTER_OFFSET
:
3743 fputs ("access_ref_to_pointer_offset", stderr
);
3745 case ACCESS_INDEXED_ARRAY
:
3746 fputs ("access_indexed_array", stderr
);
3748 case ACCESS_INDEXED_REF_TO_ARRAY
:
3749 fputs ("access_indexed_ref_to_array", stderr
);
/* STRUCTURE_BASE and ARRAY_BASE share printing, distinguished by a
   prefix.  */
3754 case STRUCTURE_BASE
:
3755 pfx
= e
->type
== ARRAY_BASE
? "array_" : "struct_";
3756 switch (e
->u
.structure_base_kind
)
3759 fprintf (stderr
, "%sbase_decl", pfx
);
3761 case BASE_COMPONENT_EXPR
:
3762 fputs ("base_component_expr", stderr
);
3764 case BASE_ARBITRARY_EXPR
:
3765 fprintf (stderr
, "%sbase_arbitrary_expr", pfx
);
/* Optionally dump the token's expression in brackets.  */
3772 fputs (" [", stderr
);
3773 print_generic_expr (stderr
, e
->expr
);
3774 fputc (']', stderr
);
3781 fputs ("\n", stderr
);
3785 #include "gt-omp-general.h"