1 /* General types and functions that are useful for processing of OpenMP,
2 OpenACC and similar directives at various stages of compilation.
4 Copyright (C) 2005-2024 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "langhooks.h"
33 #include "omp-general.h"
34 #include "stringpool.h"
38 #include "alloc-pool.h"
39 #include "symbol-summary.h"
40 #include "tree-pass.h"
41 #include "omp-device-properties.h"
42 #include "tree-iterator.h"
43 #include "data-streamer.h"
44 #include "streamer-hooks.h"
46 #include "tree-pretty-print.h"
48 enum omp_requires omp_requires_mask
;
50 /* Find an OMP clause of type KIND within CLAUSES. */
52 omp_find_clause (tree clauses
, enum omp_clause_code kind
)
54 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
55 if (OMP_CLAUSE_CODE (clauses
) == kind
)
61 /* True if OpenMP should regard this DECL as being a scalar which has Fortran's
62 allocatable or pointer attribute. */
64 omp_is_allocatable_or_ptr (tree decl
)
66 return lang_hooks
.decls
.omp_is_allocatable_or_ptr (decl
);
69 /* Check whether this DECL belongs to a Fortran optional argument.
70 With 'for_present_check' set to false, decls which are optional parameters
71 themselve are returned as tree - or a NULL_TREE otherwise. Those decls are
72 always pointers. With 'for_present_check' set to true, the decl for checking
73 whether an argument is present is returned; for arguments with value
74 attribute this is the hidden argument and of BOOLEAN_TYPE. If the decl is
75 unrelated to optional arguments, NULL_TREE is returned. */
78 omp_check_optional_argument (tree decl
, bool for_present_check
)
80 return lang_hooks
.decls
.omp_check_optional_argument (decl
, for_present_check
);
83 /* Return true if TYPE is an OpenMP mappable type. */
86 omp_mappable_type (tree type
)
88 /* Mappable type has to be complete. */
89 if (type
== error_mark_node
|| !COMPLETE_TYPE_P (type
))
94 /* True if OpenMP should privatize what this DECL points to rather
95 than the DECL itself. */
98 omp_privatize_by_reference (tree decl
)
100 return lang_hooks
.decls
.omp_privatize_by_reference (decl
);
103 /* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or GT_EXPR,
104 given that V is the loop index variable and STEP is loop step. */
107 omp_adjust_for_condition (location_t loc
, enum tree_code
*cond_code
, tree
*n2
,
117 gcc_assert (TREE_CODE (step
) == INTEGER_CST
);
118 if (TREE_CODE (TREE_TYPE (v
)) == INTEGER_TYPE
119 || TREE_CODE (TREE_TYPE (v
)) == BITINT_TYPE
)
121 if (integer_onep (step
))
122 *cond_code
= LT_EXPR
;
125 gcc_assert (integer_minus_onep (step
));
126 *cond_code
= GT_EXPR
;
131 tree unit
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (v
)));
132 gcc_assert (TREE_CODE (unit
) == INTEGER_CST
);
133 if (tree_int_cst_equal (unit
, step
))
134 *cond_code
= LT_EXPR
;
137 gcc_assert (wi::neg (wi::to_widest (unit
))
138 == wi::to_widest (step
));
139 *cond_code
= GT_EXPR
;
146 if (POINTER_TYPE_P (TREE_TYPE (*n2
)))
147 *n2
= fold_build_pointer_plus_hwi_loc (loc
, *n2
, 1);
149 *n2
= fold_build2_loc (loc
, PLUS_EXPR
, TREE_TYPE (*n2
), *n2
,
150 build_int_cst (TREE_TYPE (*n2
), 1));
151 *cond_code
= LT_EXPR
;
154 if (POINTER_TYPE_P (TREE_TYPE (*n2
)))
155 *n2
= fold_build_pointer_plus_hwi_loc (loc
, *n2
, -1);
157 *n2
= fold_build2_loc (loc
, MINUS_EXPR
, TREE_TYPE (*n2
), *n2
,
158 build_int_cst (TREE_TYPE (*n2
), 1));
159 *cond_code
= GT_EXPR
;
166 /* Return the looping step from INCR, extracted from the step of a gimple omp
170 omp_get_for_step_from_incr (location_t loc
, tree incr
)
173 switch (TREE_CODE (incr
))
176 step
= TREE_OPERAND (incr
, 1);
178 case POINTER_PLUS_EXPR
:
179 step
= fold_convert (ssizetype
, TREE_OPERAND (incr
, 1));
182 step
= TREE_OPERAND (incr
, 1);
183 step
= fold_build1_loc (loc
, NEGATE_EXPR
, TREE_TYPE (step
), step
);
191 /* Extract the header elements of parallel loop FOR_STMT and store
195 omp_extract_for_data (gomp_for
*for_stmt
, struct omp_for_data
*fd
,
196 struct omp_for_data_loop
*loops
)
198 tree t
, var
, *collapse_iter
, *collapse_count
;
199 tree count
= NULL_TREE
, iter_type
= long_integer_type_node
;
200 struct omp_for_data_loop
*loop
;
202 struct omp_for_data_loop dummy_loop
;
203 location_t loc
= gimple_location (for_stmt
);
204 bool simd
= gimple_omp_for_kind (for_stmt
) == GF_OMP_FOR_KIND_SIMD
;
205 bool distribute
= gimple_omp_for_kind (for_stmt
)
206 == GF_OMP_FOR_KIND_DISTRIBUTE
;
207 bool taskloop
= gimple_omp_for_kind (for_stmt
)
208 == GF_OMP_FOR_KIND_TASKLOOP
;
209 bool order_reproducible
= false;
212 fd
->for_stmt
= for_stmt
;
214 fd
->have_nowait
= distribute
|| simd
;
215 fd
->have_ordered
= false;
216 fd
->have_reductemp
= false;
217 fd
->have_pointer_condtemp
= false;
218 fd
->have_scantemp
= false;
219 fd
->have_nonctrl_scantemp
= false;
220 fd
->non_rect
= false;
221 fd
->lastprivate_conditional
= 0;
222 fd
->tiling
= NULL_TREE
;
225 fd
->first_nonrect
= -1;
226 fd
->last_nonrect
= -1;
227 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
228 fd
->sched_modifiers
= 0;
229 fd
->chunk_size
= NULL_TREE
;
230 fd
->simd_schedule
= false;
231 fd
->first_inner_iterations
= NULL_TREE
;
232 fd
->factor
= NULL_TREE
;
233 fd
->adjn1
= NULL_TREE
;
234 collapse_iter
= NULL
;
235 collapse_count
= NULL
;
237 for (t
= gimple_omp_for_clauses (for_stmt
); t
; t
= OMP_CLAUSE_CHAIN (t
))
238 switch (OMP_CLAUSE_CODE (t
))
240 case OMP_CLAUSE_NOWAIT
:
241 fd
->have_nowait
= true;
243 case OMP_CLAUSE_ORDERED
:
244 fd
->have_ordered
= true;
245 if (OMP_CLAUSE_ORDERED_DOACROSS (t
))
247 if (OMP_CLAUSE_ORDERED_EXPR (t
))
248 fd
->ordered
= tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t
));
253 case OMP_CLAUSE_SCHEDULE
:
254 gcc_assert (!distribute
&& !taskloop
);
256 = (enum omp_clause_schedule_kind
)
257 (OMP_CLAUSE_SCHEDULE_KIND (t
) & OMP_CLAUSE_SCHEDULE_MASK
);
258 fd
->sched_modifiers
= (OMP_CLAUSE_SCHEDULE_KIND (t
)
259 & ~OMP_CLAUSE_SCHEDULE_MASK
);
260 fd
->chunk_size
= OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t
);
261 fd
->simd_schedule
= OMP_CLAUSE_SCHEDULE_SIMD (t
);
263 case OMP_CLAUSE_DIST_SCHEDULE
:
264 gcc_assert (distribute
);
265 fd
->chunk_size
= OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t
);
267 case OMP_CLAUSE_COLLAPSE
:
268 fd
->collapse
= tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t
));
269 if (fd
->collapse
> 1)
271 collapse_iter
= &OMP_CLAUSE_COLLAPSE_ITERVAR (t
);
272 collapse_count
= &OMP_CLAUSE_COLLAPSE_COUNT (t
);
275 case OMP_CLAUSE_TILE
:
276 fd
->tiling
= OMP_CLAUSE_TILE_LIST (t
);
277 fd
->collapse
= list_length (fd
->tiling
);
278 gcc_assert (fd
->collapse
);
279 collapse_iter
= &OMP_CLAUSE_TILE_ITERVAR (t
);
280 collapse_count
= &OMP_CLAUSE_TILE_COUNT (t
);
282 case OMP_CLAUSE__REDUCTEMP_
:
283 fd
->have_reductemp
= true;
285 case OMP_CLAUSE_LASTPRIVATE
:
286 if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (t
))
287 fd
->lastprivate_conditional
++;
289 case OMP_CLAUSE__CONDTEMP_
:
290 if (POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (t
))))
291 fd
->have_pointer_condtemp
= true;
293 case OMP_CLAUSE__SCANTEMP_
:
294 fd
->have_scantemp
= true;
295 if (!OMP_CLAUSE__SCANTEMP__ALLOC (t
)
296 && !OMP_CLAUSE__SCANTEMP__CONTROL (t
))
297 fd
->have_nonctrl_scantemp
= true;
299 case OMP_CLAUSE_ORDER
:
300 /* FIXME: For OpenMP 5.2 this should change to
301 if (OMP_CLAUSE_ORDER_REPRODUCIBLE (t))
302 (with the exception of loop construct but that lowers to
303 no schedule/dist_schedule clauses currently). */
304 if (!OMP_CLAUSE_ORDER_UNCONSTRAINED (t
))
305 order_reproducible
= true;
310 if (fd
->ordered
== -1)
311 fd
->ordered
= fd
->collapse
;
313 /* For order(reproducible:concurrent) schedule ({dynamic,guided,runtime})
314 we have either the option to expensively remember at runtime how we've
315 distributed work from first loop and reuse that in following loops with
316 the same number of iterations and schedule, or just force static schedule.
317 OpenMP API calls etc. aren't allowed in order(concurrent) bodies so
318 users can't observe it easily anyway. */
319 if (order_reproducible
)
320 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
321 if (fd
->collapse
> 1 || fd
->tiling
)
324 fd
->loops
= &fd
->loop
;
326 if (fd
->ordered
&& fd
->collapse
== 1 && loops
!= NULL
)
331 collapse_iter
= &iterv
;
332 collapse_count
= &countv
;
335 /* FIXME: for now map schedule(auto) to schedule(static).
336 There should be analysis to determine whether all iterations
337 are approximately the same amount of work (then schedule(static)
338 is best) or if it varies (then schedule(dynamic,N) is better). */
339 if (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_AUTO
)
341 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_STATIC
;
342 gcc_assert (fd
->chunk_size
== NULL
);
344 gcc_assert ((fd
->collapse
== 1 && !fd
->tiling
) || collapse_iter
!= NULL
);
346 fd
->sched_kind
= OMP_CLAUSE_SCHEDULE_RUNTIME
;
347 if (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
)
348 gcc_assert (fd
->chunk_size
== NULL
);
349 else if (fd
->chunk_size
== NULL
)
351 /* We only need to compute a default chunk size for ordered
352 static loops and dynamic loops. */
353 if (fd
->sched_kind
!= OMP_CLAUSE_SCHEDULE_STATIC
355 fd
->chunk_size
= (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
)
356 ? integer_zero_node
: integer_one_node
;
359 int cnt
= fd
->ordered
? fd
->ordered
: fd
->collapse
;
360 int single_nonrect
= -1;
361 tree single_nonrect_count
= NULL_TREE
;
362 enum tree_code single_nonrect_cond_code
= ERROR_MARK
;
363 for (i
= 1; i
< cnt
; i
++)
365 tree n1
= gimple_omp_for_initial (for_stmt
, i
);
366 tree n2
= gimple_omp_for_final (for_stmt
, i
);
367 if (TREE_CODE (n1
) == TREE_VEC
)
374 for (int j
= i
- 1; j
>= 0; j
--)
375 if (TREE_VEC_ELT (n1
, 0) == gimple_omp_for_index (for_stmt
, j
))
382 else if (TREE_CODE (n2
) == TREE_VEC
)
389 for (int j
= i
- 1; j
>= 0; j
--)
390 if (TREE_VEC_ELT (n2
, 0) == gimple_omp_for_index (for_stmt
, j
))
398 for (i
= 0; i
< cnt
; i
++)
403 && (fd
->ordered
== 0 || loops
== NULL
))
405 else if (loops
!= NULL
)
410 loop
->v
= gimple_omp_for_index (for_stmt
, i
);
411 gcc_assert (SSA_VAR_P (loop
->v
));
412 gcc_assert (TREE_CODE (TREE_TYPE (loop
->v
)) == INTEGER_TYPE
413 || TREE_CODE (TREE_TYPE (loop
->v
)) == BITINT_TYPE
414 || TREE_CODE (TREE_TYPE (loop
->v
)) == POINTER_TYPE
);
415 var
= TREE_CODE (loop
->v
) == SSA_NAME
? SSA_NAME_VAR (loop
->v
) : loop
->v
;
416 loop
->n1
= gimple_omp_for_initial (for_stmt
, i
);
417 loop
->m1
= NULL_TREE
;
418 loop
->m2
= NULL_TREE
;
420 loop
->non_rect_referenced
= false;
421 if (TREE_CODE (loop
->n1
) == TREE_VEC
)
423 for (int j
= i
- 1; j
>= 0; j
--)
424 if (TREE_VEC_ELT (loop
->n1
, 0) == gimple_omp_for_index (for_stmt
, j
))
428 loops
[j
].non_rect_referenced
= true;
429 if (fd
->first_nonrect
== -1 || fd
->first_nonrect
> j
)
430 fd
->first_nonrect
= j
;
433 gcc_assert (loop
->outer
);
434 loop
->m1
= TREE_VEC_ELT (loop
->n1
, 1);
435 loop
->n1
= TREE_VEC_ELT (loop
->n1
, 2);
437 fd
->last_nonrect
= i
;
440 loop
->cond_code
= gimple_omp_for_cond (for_stmt
, i
);
441 loop
->n2
= gimple_omp_for_final (for_stmt
, i
);
442 gcc_assert (loop
->cond_code
!= NE_EXPR
443 || (gimple_omp_for_kind (for_stmt
)
444 != GF_OMP_FOR_KIND_OACC_LOOP
));
445 if (TREE_CODE (loop
->n2
) == TREE_VEC
)
448 gcc_assert (TREE_VEC_ELT (loop
->n2
, 0)
449 == gimple_omp_for_index (for_stmt
, i
- loop
->outer
));
451 for (int j
= i
- 1; j
>= 0; j
--)
452 if (TREE_VEC_ELT (loop
->n2
, 0) == gimple_omp_for_index (for_stmt
, j
))
456 loops
[j
].non_rect_referenced
= true;
457 if (fd
->first_nonrect
== -1 || fd
->first_nonrect
> j
)
458 fd
->first_nonrect
= j
;
461 gcc_assert (loop
->outer
);
462 loop
->m2
= TREE_VEC_ELT (loop
->n2
, 1);
463 loop
->n2
= TREE_VEC_ELT (loop
->n2
, 2);
465 fd
->last_nonrect
= i
;
468 t
= gimple_omp_for_incr (for_stmt
, i
);
469 gcc_assert (TREE_OPERAND (t
, 0) == var
);
470 loop
->step
= omp_get_for_step_from_incr (loc
, t
);
472 omp_adjust_for_condition (loc
, &loop
->cond_code
, &loop
->n2
, loop
->v
,
476 || (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
477 && !fd
->have_ordered
))
479 if (fd
->collapse
== 1 && !fd
->tiling
)
480 iter_type
= TREE_TYPE (loop
->v
);
482 || TYPE_PRECISION (iter_type
)
483 < TYPE_PRECISION (TREE_TYPE (loop
->v
)))
485 if (TREE_CODE (iter_type
) == BITINT_TYPE
486 || TREE_CODE (TREE_TYPE (loop
->v
)) == BITINT_TYPE
)
488 = build_bitint_type (TYPE_PRECISION (TREE_TYPE (loop
->v
)),
492 = build_nonstandard_integer_type
493 (TYPE_PRECISION (TREE_TYPE (loop
->v
)), 1);
496 else if (iter_type
!= long_long_unsigned_type_node
)
498 if (POINTER_TYPE_P (TREE_TYPE (loop
->v
)))
499 iter_type
= long_long_unsigned_type_node
;
500 else if (TYPE_UNSIGNED (TREE_TYPE (loop
->v
))
501 && TYPE_PRECISION (TREE_TYPE (loop
->v
))
502 >= TYPE_PRECISION (iter_type
))
506 if (loop
->cond_code
== LT_EXPR
)
507 n
= fold_build2_loc (loc
, PLUS_EXPR
, TREE_TYPE (loop
->v
),
508 loop
->n2
, loop
->step
);
513 || TREE_CODE (n
) != INTEGER_CST
514 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type
), n
))
515 iter_type
= long_long_unsigned_type_node
;
517 else if (TYPE_PRECISION (TREE_TYPE (loop
->v
))
518 > TYPE_PRECISION (iter_type
))
522 if (loop
->cond_code
== LT_EXPR
)
525 n2
= fold_build2_loc (loc
, PLUS_EXPR
, TREE_TYPE (loop
->v
),
526 loop
->n2
, loop
->step
);
530 n1
= fold_build2_loc (loc
, MINUS_EXPR
, TREE_TYPE (loop
->v
),
531 loop
->n2
, loop
->step
);
536 || TREE_CODE (n1
) != INTEGER_CST
537 || TREE_CODE (n2
) != INTEGER_CST
538 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type
), n1
)
539 || !tree_int_cst_lt (n2
, TYPE_MAX_VALUE (iter_type
)))
540 iter_type
= long_long_unsigned_type_node
;
544 if (i
>= fd
->collapse
)
547 if (collapse_count
&& *collapse_count
== NULL
)
549 if (count
&& integer_zerop (count
))
551 tree n1first
= NULL_TREE
, n2first
= NULL_TREE
;
552 tree n1last
= NULL_TREE
, n2last
= NULL_TREE
;
553 tree ostep
= NULL_TREE
;
554 if (loop
->m1
|| loop
->m2
)
556 if (count
== NULL_TREE
)
558 if (single_nonrect
== -1
559 || (loop
->m1
&& TREE_CODE (loop
->m1
) != INTEGER_CST
)
560 || (loop
->m2
&& TREE_CODE (loop
->m2
) != INTEGER_CST
)
561 || TREE_CODE (loop
->n1
) != INTEGER_CST
562 || TREE_CODE (loop
->n2
) != INTEGER_CST
563 || TREE_CODE (loop
->step
) != INTEGER_CST
)
568 tree var
= gimple_omp_for_initial (for_stmt
, single_nonrect
);
569 tree itype
= TREE_TYPE (var
);
570 tree first
= gimple_omp_for_initial (for_stmt
, single_nonrect
);
571 t
= gimple_omp_for_incr (for_stmt
, single_nonrect
);
572 ostep
= omp_get_for_step_from_incr (loc
, t
);
573 t
= fold_binary (MINUS_EXPR
, long_long_unsigned_type_node
,
574 single_nonrect_count
,
575 build_one_cst (long_long_unsigned_type_node
));
576 t
= fold_convert (itype
, t
);
577 first
= fold_convert (itype
, first
);
578 ostep
= fold_convert (itype
, ostep
);
579 tree last
= fold_binary (PLUS_EXPR
, itype
, first
,
580 fold_binary (MULT_EXPR
, itype
, t
,
582 if (TREE_CODE (first
) != INTEGER_CST
583 || TREE_CODE (last
) != INTEGER_CST
)
590 tree m1
= fold_convert (itype
, loop
->m1
);
591 tree n1
= fold_convert (itype
, loop
->n1
);
592 n1first
= fold_binary (PLUS_EXPR
, itype
,
593 fold_binary (MULT_EXPR
, itype
,
595 n1last
= fold_binary (PLUS_EXPR
, itype
,
596 fold_binary (MULT_EXPR
, itype
,
600 n1first
= n1last
= loop
->n1
;
603 tree n2
= fold_convert (itype
, loop
->n2
);
604 tree m2
= fold_convert (itype
, loop
->m2
);
605 n2first
= fold_binary (PLUS_EXPR
, itype
,
606 fold_binary (MULT_EXPR
, itype
,
608 n2last
= fold_binary (PLUS_EXPR
, itype
,
609 fold_binary (MULT_EXPR
, itype
,
613 n2first
= n2last
= loop
->n2
;
614 n1first
= fold_convert (TREE_TYPE (loop
->v
), n1first
);
615 n2first
= fold_convert (TREE_TYPE (loop
->v
), n2first
);
616 n1last
= fold_convert (TREE_TYPE (loop
->v
), n1last
);
617 n2last
= fold_convert (TREE_TYPE (loop
->v
), n2last
);
618 t
= fold_binary (loop
->cond_code
, boolean_type_node
,
620 tree t2
= fold_binary (loop
->cond_code
, boolean_type_node
,
622 if (t
&& t2
&& integer_nonzerop (t
) && integer_nonzerop (t2
))
623 /* All outer loop iterators have at least one inner loop
624 iteration. Try to compute the count at compile time. */
626 else if (t
&& t2
&& integer_zerop (t
) && integer_zerop (t2
))
627 /* No iterations of the inner loop. count will be set to
629 else if (TYPE_UNSIGNED (itype
)
632 || TREE_CODE (t
) != INTEGER_CST
633 || TREE_CODE (t2
) != INTEGER_CST
)
635 /* Punt (for now). */
641 /* Some iterations of the outer loop have zero iterations
642 of the inner loop, while others have at least one.
643 In this case, we need to adjust one of those outer
644 loop bounds. If ADJ_FIRST, we need to adjust outer n1
645 (first), otherwise outer n2 (last). */
646 bool adj_first
= integer_zerop (t
);
647 tree n1
= fold_convert (itype
, loop
->n1
);
648 tree n2
= fold_convert (itype
, loop
->n2
);
649 tree m1
= loop
->m1
? fold_convert (itype
, loop
->m1
)
650 : build_zero_cst (itype
);
651 tree m2
= loop
->m2
? fold_convert (itype
, loop
->m2
)
652 : build_zero_cst (itype
);
653 t
= fold_binary (MINUS_EXPR
, itype
, n1
, n2
);
654 t2
= fold_binary (MINUS_EXPR
, itype
, m2
, m1
);
655 t
= fold_binary (TRUNC_DIV_EXPR
, itype
, t
, t2
);
656 t2
= fold_binary (MINUS_EXPR
, itype
, t
, first
);
657 t2
= fold_binary (TRUNC_MOD_EXPR
, itype
, t2
, ostep
);
658 t
= fold_binary (MINUS_EXPR
, itype
, t
, t2
);
660 = fold_binary (PLUS_EXPR
, itype
, n1
,
661 fold_binary (MULT_EXPR
, itype
, m1
, t
));
663 = fold_binary (PLUS_EXPR
, itype
, n2
,
664 fold_binary (MULT_EXPR
, itype
, m2
, t
));
665 t2
= fold_binary (loop
->cond_code
, boolean_type_node
,
667 tree t3
= fold_binary (MULT_EXPR
, itype
, m1
, ostep
);
668 tree t4
= fold_binary (MULT_EXPR
, itype
, m2
, ostep
);
673 if (integer_nonzerop (t2
))
680 t3
= fold_binary (MINUS_EXPR
, itype
, n1cur
, t3
);
681 t4
= fold_binary (MINUS_EXPR
, itype
, n2cur
, t4
);
682 t3
= fold_binary (loop
->cond_code
,
683 boolean_type_node
, t3
, t4
);
684 gcc_assert (integer_zerop (t3
));
689 t3
= fold_binary (PLUS_EXPR
, itype
, n1cur
, t3
);
690 t4
= fold_binary (PLUS_EXPR
, itype
, n2cur
, t4
);
691 new_first
= fold_binary (PLUS_EXPR
, itype
, t
, ostep
);
696 t3
= fold_binary (loop
->cond_code
,
697 boolean_type_node
, t3
, t4
);
698 gcc_assert (integer_nonzerop (t3
));
701 diff
= fold_binary (MINUS_EXPR
, itype
, new_first
, first
);
708 if (integer_zerop (t2
))
710 t3
= fold_binary (MINUS_EXPR
, itype
, n1cur
, t3
);
711 t4
= fold_binary (MINUS_EXPR
, itype
, n2cur
, t4
);
712 new_last
= fold_binary (MINUS_EXPR
, itype
, t
, ostep
);
717 t3
= fold_binary (loop
->cond_code
,
718 boolean_type_node
, t3
, t4
);
719 gcc_assert (integer_nonzerop (t3
));
729 t3
= fold_binary (PLUS_EXPR
, itype
, n1cur
, t3
);
730 t4
= fold_binary (PLUS_EXPR
, itype
, n2cur
, t4
);
731 t3
= fold_binary (loop
->cond_code
,
732 boolean_type_node
, t3
, t4
);
733 gcc_assert (integer_zerop (t3
));
736 diff
= fold_binary (MINUS_EXPR
, itype
, last
, new_last
);
738 if (TYPE_UNSIGNED (itype
)
739 && single_nonrect_cond_code
== GT_EXPR
)
740 diff
= fold_binary (TRUNC_DIV_EXPR
, itype
,
741 fold_unary (NEGATE_EXPR
, itype
, diff
),
742 fold_unary (NEGATE_EXPR
, itype
,
745 diff
= fold_binary (TRUNC_DIV_EXPR
, itype
, diff
, ostep
);
746 diff
= fold_convert (long_long_unsigned_type_node
, diff
);
748 = fold_binary (MINUS_EXPR
, long_long_unsigned_type_node
,
749 single_nonrect_count
, diff
);
754 t
= fold_binary (loop
->cond_code
, boolean_type_node
,
755 fold_convert (TREE_TYPE (loop
->v
), loop
->n1
),
756 fold_convert (TREE_TYPE (loop
->v
), loop
->n2
));
757 if (t
&& integer_zerop (t
))
758 count
= build_zero_cst (long_long_unsigned_type_node
);
759 else if ((i
== 0 || count
!= NULL_TREE
)
760 && (TREE_CODE (TREE_TYPE (loop
->v
)) == INTEGER_TYPE
761 || TREE_CODE (TREE_TYPE (loop
->v
)) == BITINT_TYPE
)
762 && TREE_CONSTANT (loop
->n1
)
763 && TREE_CONSTANT (loop
->n2
)
764 && TREE_CODE (loop
->step
) == INTEGER_CST
)
766 tree itype
= TREE_TYPE (loop
->v
);
768 if (POINTER_TYPE_P (itype
))
769 itype
= signed_type_for (itype
);
770 t
= build_int_cst (itype
, (loop
->cond_code
== LT_EXPR
? -1 : 1));
771 t
= fold_build2 (PLUS_EXPR
, itype
,
772 fold_convert (itype
, loop
->step
), t
);
775 if (loop
->m1
|| loop
->m2
)
777 gcc_assert (single_nonrect
!= -1);
781 t
= fold_build2 (PLUS_EXPR
, itype
, t
, fold_convert (itype
, n2
));
782 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
783 tree step
= fold_convert_loc (loc
, itype
, loop
->step
);
784 if (TYPE_UNSIGNED (itype
) && loop
->cond_code
== GT_EXPR
)
785 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
786 fold_build1 (NEGATE_EXPR
, itype
, t
),
787 fold_build1 (NEGATE_EXPR
, itype
, step
));
789 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
790 tree llutype
= long_long_unsigned_type_node
;
791 t
= fold_convert (llutype
, t
);
792 if (loop
->m1
|| loop
->m2
)
794 /* t is number of iterations of inner loop at either first
795 or last value of the outer iterator (the one with fewer
797 Compute t2 = ((m2 - m1) * ostep) / step
798 and niters = outer_count * t
799 + t2 * ((outer_count - 1) * outer_count / 2)
801 tree m1
= loop
->m1
? loop
->m1
: integer_zero_node
;
802 tree m2
= loop
->m2
? loop
->m2
: integer_zero_node
;
803 m1
= fold_convert (itype
, m1
);
804 m2
= fold_convert (itype
, m2
);
805 tree t2
= fold_build2 (MINUS_EXPR
, itype
, m2
, m1
);
806 t2
= fold_build2 (MULT_EXPR
, itype
, t2
, ostep
);
807 if (TYPE_UNSIGNED (itype
) && loop
->cond_code
== GT_EXPR
)
808 t2
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
809 fold_build1 (NEGATE_EXPR
, itype
, t2
),
810 fold_build1 (NEGATE_EXPR
, itype
, step
));
812 t2
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t2
, step
);
813 t2
= fold_convert (llutype
, t2
);
814 fd
->first_inner_iterations
= t
;
816 t
= fold_build2 (MULT_EXPR
, llutype
, t
,
817 single_nonrect_count
);
818 tree t3
= fold_build2 (MINUS_EXPR
, llutype
,
819 single_nonrect_count
,
820 build_one_cst (llutype
));
821 t3
= fold_build2 (MULT_EXPR
, llutype
, t3
,
822 single_nonrect_count
);
823 t3
= fold_build2 (TRUNC_DIV_EXPR
, llutype
, t3
,
824 build_int_cst (llutype
, 2));
825 t2
= fold_build2 (MULT_EXPR
, llutype
, t2
, t3
);
826 t
= fold_build2 (PLUS_EXPR
, llutype
, t
, t2
);
828 if (i
== single_nonrect
)
830 if (integer_zerop (t
) || TREE_CODE (t
) != INTEGER_CST
)
834 single_nonrect_count
= t
;
835 single_nonrect_cond_code
= loop
->cond_code
;
836 if (count
== NULL_TREE
)
837 count
= build_one_cst (llutype
);
840 else if (count
!= NULL_TREE
)
841 count
= fold_build2 (MULT_EXPR
, llutype
, count
, t
);
844 if (TREE_CODE (count
) != INTEGER_CST
)
847 else if (count
&& !integer_zerop (count
))
854 && (fd
->sched_kind
!= OMP_CLAUSE_SCHEDULE_STATIC
855 || fd
->have_ordered
))
857 if (!tree_int_cst_lt (count
, TYPE_MAX_VALUE (long_integer_type_node
)))
858 iter_type
= long_long_unsigned_type_node
;
860 iter_type
= long_integer_type_node
;
862 else if (collapse_iter
&& *collapse_iter
!= NULL
)
863 iter_type
= TREE_TYPE (*collapse_iter
);
864 fd
->iter_type
= iter_type
;
865 if (collapse_iter
&& *collapse_iter
== NULL
)
866 *collapse_iter
= create_tmp_var (iter_type
, ".iter");
867 if (collapse_count
&& *collapse_count
== NULL
)
871 *collapse_count
= fold_convert_loc (loc
, iter_type
, count
);
872 if (fd
->first_inner_iterations
&& fd
->factor
)
874 t
= make_tree_vec (4);
875 TREE_VEC_ELT (t
, 0) = *collapse_count
;
876 TREE_VEC_ELT (t
, 1) = fd
->first_inner_iterations
;
877 TREE_VEC_ELT (t
, 2) = fd
->factor
;
878 TREE_VEC_ELT (t
, 3) = fd
->adjn1
;
883 *collapse_count
= create_tmp_var (iter_type
, ".count");
886 if (fd
->collapse
> 1 || fd
->tiling
|| (fd
->ordered
&& loops
))
888 fd
->loop
.v
= *collapse_iter
;
889 fd
->loop
.n1
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 0);
890 fd
->loop
.n2
= *collapse_count
;
891 if (TREE_CODE (fd
->loop
.n2
) == TREE_VEC
)
893 gcc_assert (fd
->non_rect
);
894 fd
->first_inner_iterations
= TREE_VEC_ELT (fd
->loop
.n2
, 1);
895 fd
->factor
= TREE_VEC_ELT (fd
->loop
.n2
, 2);
896 fd
->adjn1
= TREE_VEC_ELT (fd
->loop
.n2
, 3);
897 fd
->loop
.n2
= TREE_VEC_ELT (fd
->loop
.n2
, 0);
899 fd
->loop
.step
= build_int_cst (TREE_TYPE (fd
->loop
.v
), 1);
900 fd
->loop
.m1
= NULL_TREE
;
901 fd
->loop
.m2
= NULL_TREE
;
903 fd
->loop
.cond_code
= LT_EXPR
;
909 /* Build a call to GOMP_barrier. */
912 omp_build_barrier (tree lhs
)
914 tree fndecl
= builtin_decl_explicit (lhs
? BUILT_IN_GOMP_BARRIER_CANCEL
915 : BUILT_IN_GOMP_BARRIER
);
916 gcall
*g
= gimple_build_call (fndecl
, 0);
918 gimple_call_set_lhs (g
, lhs
);
922 /* Find OMP_FOR resp. OMP_SIMD with non-NULL OMP_FOR_INIT. Also, fill in pdata
923 array, pdata[0] non-NULL if there is anything non-trivial in between,
924 pdata[1] is address of OMP_PARALLEL in between if any, pdata[2] is address
925 of OMP_FOR in between if any and pdata[3] is address of the inner
929 find_combined_omp_for (tree
*tp
, int *walk_subtrees
, void *data
)
931 tree
**pdata
= (tree
**) data
;
933 switch (TREE_CODE (*tp
))
936 if (OMP_FOR_INIT (*tp
) != NULL_TREE
)
945 if (OMP_FOR_INIT (*tp
) != NULL_TREE
)
952 if (BIND_EXPR_VARS (*tp
)
953 || (BIND_EXPR_BLOCK (*tp
)
954 && BLOCK_VARS (BIND_EXPR_BLOCK (*tp
))))
959 if (!tsi_one_before_end_p (tsi_start (*tp
)))
963 case TRY_FINALLY_EXPR
:
977 /* Return maximum possible vectorization factor for the target. */
984 || !flag_tree_loop_optimize
985 || (!flag_tree_loop_vectorize
986 && OPTION_SET_P (flag_tree_loop_vectorize
)))
989 auto_vector_modes modes
;
990 targetm
.vectorize
.autovectorize_vector_modes (&modes
, true);
991 if (!modes
.is_empty ())
994 for (unsigned int i
= 0; i
< modes
.length (); ++i
)
995 /* The returned modes use the smallest element size (and thus
996 the largest nunits) for the vectorization approach that they
998 vf
= ordered_max (vf
, GET_MODE_NUNITS (modes
[i
]));
1002 machine_mode vqimode
= targetm
.vectorize
.preferred_simd_mode (QImode
);
1003 if (GET_MODE_CLASS (vqimode
) == MODE_VECTOR_INT
)
1004 return GET_MODE_NUNITS (vqimode
);
1009 /* Return maximum SIMT width if offloading may target SIMT hardware. */
1012 omp_max_simt_vf (void)
1016 if (ENABLE_OFFLOADING
)
1017 for (const char *c
= getenv ("OFFLOAD_TARGET_NAMES"); c
;)
1019 if (startswith (c
, "nvptx"))
1021 else if ((c
= strchr (c
, ':')))
1027 /* Store the construct selectors as tree codes from last to first.
1028 CTX is a list of trait selectors, nconstructs must be equal to its
1029 length, and the array CONSTRUCTS holds the output. */
1032 omp_construct_traits_to_codes (tree ctx
, int nconstructs
,
1033 enum tree_code
*constructs
)
1035 int i
= nconstructs
- 1;
1037 /* Order must match the OMP_TRAIT_CONSTRUCT_* enumerators in
1038 enum omp_ts_code. */
1039 static enum tree_code code_map
[]
1040 = { OMP_TARGET
, OMP_TEAMS
, OMP_PARALLEL
, OMP_FOR
, OMP_SIMD
};
1042 for (tree ts
= ctx
; ts
; ts
= TREE_CHAIN (ts
), i
--)
1044 enum omp_ts_code sel
= OMP_TS_CODE (ts
);
1045 int j
= (int)sel
- (int)OMP_TRAIT_CONSTRUCT_TARGET
;
1046 gcc_assert (j
>= 0 && (unsigned int) j
< ARRAY_SIZE (code_map
));
1047 constructs
[i
] = code_map
[j
];
1049 gcc_assert (i
== -1);
/* Return true if PROP is possibly present in one of the offloading target's
   OpenMP contexts.  The format of PROPS string is always offloading target's
   name terminated by '\0', followed by properties for that offloading
   target separated by '\0' and terminated by another '\0'.  The strings
   are created from omp-device-properties installed files of all configured
   offloading targets.  */

static bool
omp_offload_device_kind_arch_isa (const char *props, const char *prop)
{
  const char *names = getenv ("OFFLOAD_TARGET_NAMES");
  if (names == NULL || *names == '\0')
    return false;
  while (*props != '\0')
    {
      size_t name_len = strlen (props);
      bool matches = false;
      /* Is this offload target among the configured (colon-separated)
	 OFFLOAD_TARGET_NAMES entries?  Each entry may be followed by
	 ':' (separator) or '=' (per-target options).  */
      for (const char *c = names; c; )
	{
	  if (strncmp (props, c, name_len) == 0
	      && (c[name_len] == '\0'
		  || c[name_len] == ':'
		  || c[name_len] == '='))
	    {
	      matches = true;
	      break;
	    }
	  else if ((c = strchr (c, ':')))
	    c++;
	}
      /* Skip past the target name to its property list.  */
      props = props + name_len + 1;
      while (*props != '\0')
	{
	  if (matches && strcmp (props, prop) == 0)
	    return true;
	  props = strchr (props, '\0') + 1;
	}
      /* Skip the terminating '\0' of this target's property list.  */
      props++;
    }
  return false;
}
1094 /* Return true if the current code location is or might be offloaded.
1095 Return true in declare target functions, or when nested in a target
1096 region or when unsure, return false otherwise. */
1099 omp_maybe_offloaded (void)
1101 if (!ENABLE_OFFLOADING
)
1103 const char *names
= getenv ("OFFLOAD_TARGET_NAMES");
1104 if (names
== NULL
|| *names
== '\0')
1107 if (symtab
->state
== PARSING
)
1110 if (cfun
&& cfun
->after_inlining
)
1112 if (current_function_decl
1113 && lookup_attribute ("omp declare target",
1114 DECL_ATTRIBUTES (current_function_decl
)))
1116 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) == 0)
1118 enum tree_code construct
= OMP_TARGET
;
1119 if (omp_construct_selector_matches (&construct
, 1, NULL
))
1125 /* Lookup tables for context selectors. */
1126 const char *omp_tss_map
[] =
1136 /* Arrays of property candidates must be null-terminated. */
1137 static const char *const kind_properties
[] =
1138 { "host", "nohost", "cpu", "gpu", "fpga", "any", NULL
};
1139 static const char *const vendor_properties
[] =
1140 { "amd", "arm", "bsc", "cray", "fujitsu", "gnu", "hpe", "ibm", "intel",
1141 "llvm", "nvidia", "pgi", "ti", "unknown", NULL
};
1142 static const char *const extension_properties
[] =
1144 static const char *const atomic_default_mem_order_properties
[] =
1145 { "seq_cst", "relaxed", "acq_rel", "acquire", "release", NULL
};
1147 struct omp_ts_info omp_ts_map
[] =
1150 (1 << OMP_TRAIT_SET_DEVICE
) | (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1151 OMP_TRAIT_PROPERTY_NAME_LIST
, false,
1155 (1 << OMP_TRAIT_SET_DEVICE
) | (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1156 OMP_TRAIT_PROPERTY_NAME_LIST
, false,
1160 (1 << OMP_TRAIT_SET_DEVICE
) | (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1161 OMP_TRAIT_PROPERTY_NAME_LIST
, false,
1165 (1 << OMP_TRAIT_SET_TARGET_DEVICE
),
1166 OMP_TRAIT_PROPERTY_EXPR
, false,
1170 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1171 OMP_TRAIT_PROPERTY_NAME_LIST
, true,
1175 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1176 OMP_TRAIT_PROPERTY_NAME_LIST
, true,
1177 extension_properties
,
1179 { "atomic_default_mem_order",
1180 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1181 OMP_TRAIT_PROPERTY_ID
, true,
1182 atomic_default_mem_order_properties
,
1185 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1186 OMP_TRAIT_PROPERTY_CLAUSE_LIST
, true,
1189 { "unified_address",
1190 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1191 OMP_TRAIT_PROPERTY_NONE
, true,
1194 { "unified_shared_memory",
1195 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1196 OMP_TRAIT_PROPERTY_NONE
, true,
1199 { "dynamic_allocators",
1200 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1201 OMP_TRAIT_PROPERTY_NONE
, true,
1204 { "reverse_offload",
1205 (1 << OMP_TRAIT_SET_IMPLEMENTATION
),
1206 OMP_TRAIT_PROPERTY_NONE
, true,
1210 (1 << OMP_TRAIT_SET_USER
),
1211 OMP_TRAIT_PROPERTY_EXPR
, true,
1215 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1216 OMP_TRAIT_PROPERTY_NONE
, false,
1220 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1221 OMP_TRAIT_PROPERTY_NONE
, false,
1225 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1226 OMP_TRAIT_PROPERTY_NONE
, false,
1230 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1231 OMP_TRAIT_PROPERTY_NONE
, false,
1235 (1 << OMP_TRAIT_SET_CONSTRUCT
),
1236 OMP_TRAIT_PROPERTY_CLAUSE_LIST
, false,
1239 { NULL
, 0, OMP_TRAIT_PROPERTY_NONE
, false, NULL
} /* OMP_TRAIT_LAST */
1243 /* Return a name from PROP, a property in selectors accepting
1247 omp_context_name_list_prop (tree prop
)
1249 gcc_assert (OMP_TP_NAME (prop
) == OMP_TP_NAMELIST_NODE
);
1250 tree val
= OMP_TP_VALUE (prop
);
1251 switch (TREE_CODE (val
))
1253 case IDENTIFIER_NODE
:
1254 return IDENTIFIER_POINTER (val
);
1257 const char *ret
= TREE_STRING_POINTER (val
);
1258 if ((size_t) TREE_STRING_LENGTH (val
)
1259 == strlen (ret
) + (lang_GNU_Fortran () ? 0 : 1))
1268 /* Diagnose errors in an OpenMP context selector, return CTX if
1269 it is correct or error_mark_node otherwise. */
1272 omp_check_context_selector (location_t loc
, tree ctx
)
1274 bool tss_seen
[OMP_TRAIT_SET_LAST
], ts_seen
[OMP_TRAIT_LAST
];
1276 memset (tss_seen
, 0, sizeof (tss_seen
));
1277 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
1279 enum omp_tss_code tss_code
= OMP_TSS_CODE (tss
);
1281 /* We can parse this, but not handle it yet. */
1282 if (tss_code
== OMP_TRAIT_SET_TARGET_DEVICE
)
1283 sorry_at (loc
, "%<target_device%> selector set is not supported yet");
1285 /* Each trait-set-selector-name can only be specified once. */
1286 if (tss_seen
[tss_code
])
1288 error_at (loc
, "selector set %qs specified more than once",
1289 OMP_TSS_NAME (tss
));
1290 return error_mark_node
;
1293 tss_seen
[tss_code
] = true;
1295 memset (ts_seen
, 0, sizeof (ts_seen
));
1296 for (tree ts
= OMP_TSS_TRAIT_SELECTORS (tss
); ts
; ts
= TREE_CHAIN (ts
))
1298 enum omp_ts_code ts_code
= OMP_TS_CODE (ts
);
1300 /* Ignore unknown traits. */
1301 if (ts_code
== OMP_TRAIT_INVALID
)
1304 /* Each trait-selector-name can only be specified once. */
1305 if (ts_seen
[ts_code
])
1308 "selector %qs specified more than once in set %qs",
1310 OMP_TSS_NAME (tss
));
1311 return error_mark_node
;
1314 ts_seen
[ts_code
] = true;
1316 if (omp_ts_map
[ts_code
].valid_properties
== NULL
)
1319 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1320 for (unsigned j
= 0; ; j
++)
1322 const char *candidate
1323 = omp_ts_map
[ts_code
].valid_properties
[j
];
1324 if (candidate
== NULL
)
1326 /* We've reached the end of the candidate array. */
1327 if (ts_code
== OMP_TRAIT_IMPLEMENTATION_ADMO
)
1328 /* FIXME: not sure why this is an error vs warnings
1329 for the others, + incorrect/unknown wording? */
1332 "incorrect property %qs of %qs selector",
1333 IDENTIFIER_POINTER (OMP_TP_NAME (p
)),
1334 "atomic_default_mem_order");
1335 return error_mark_node
;
1337 if (OMP_TP_NAME (p
) == OMP_TP_NAMELIST_NODE
1338 && (TREE_CODE (OMP_TP_VALUE (p
)) == STRING_CST
))
1339 warning_at (loc
, OPT_Wopenmp
,
1340 "unknown property %qE of %qs selector",
1343 else if (OMP_TP_NAME (p
) == OMP_TP_NAMELIST_NODE
)
1344 warning_at (loc
, OPT_Wopenmp
,
1345 "unknown property %qs of %qs selector",
1346 omp_context_name_list_prop (p
),
1348 else if (OMP_TP_NAME (p
))
1349 warning_at (loc
, OPT_Wopenmp
,
1350 "unknown property %qs of %qs selector",
1351 IDENTIFIER_POINTER (OMP_TP_NAME (p
)),
1355 else if (OMP_TP_NAME (p
) == OMP_TP_NAMELIST_NODE
)
1356 /* Property-list traits. */
1358 const char *str
= omp_context_name_list_prop (p
);
1359 if (str
&& !strcmp (str
, candidate
))
1362 else if (!strcmp (IDENTIFIER_POINTER (OMP_TP_NAME (p
)),
1364 /* Identifier traits. */
1373 /* Register VARIANT as variant of some base function marked with
1374 #pragma omp declare variant. CONSTRUCT is corresponding list of
1375 trait-selectors for the construct selector set. This is stashed as the
1376 value of the "omp declare variant variant" attribute on VARIANT. */
1378 omp_mark_declare_variant (location_t loc
, tree variant
, tree construct
)
1380 /* Ignore this variant if it contains unknown construct selectors.
1381 It will never match, and the front ends have already issued a warning
1383 for (tree c
= construct
; c
; c
= TREE_CHAIN (c
))
1384 if (OMP_TS_CODE (c
) == OMP_TRAIT_INVALID
)
1387 tree attr
= lookup_attribute ("omp declare variant variant",
1388 DECL_ATTRIBUTES (variant
));
1389 if (attr
== NULL_TREE
)
1391 attr
= tree_cons (get_identifier ("omp declare variant variant"),
1392 unshare_expr (construct
),
1393 DECL_ATTRIBUTES (variant
));
1394 DECL_ATTRIBUTES (variant
) = attr
;
1397 if ((TREE_VALUE (attr
) != NULL_TREE
) != (construct
!= NULL_TREE
)
1398 || (construct
!= NULL_TREE
1399 && omp_context_selector_set_compare (OMP_TRAIT_SET_CONSTRUCT
,
1402 error_at (loc
, "%qD used as a variant with incompatible %<construct%> "
1403 "selector sets", variant
);
1407 /* Constructors for context selectors. */
1410 make_trait_set_selector (enum omp_tss_code code
, tree selectors
, tree chain
)
1412 return tree_cons (build_int_cst (integer_type_node
, code
),
1417 make_trait_selector (enum omp_ts_code code
, tree score
, tree properties
,
1420 if (score
== NULL_TREE
)
1421 return tree_cons (build_int_cst (integer_type_node
, code
),
1424 return tree_cons (build_int_cst (integer_type_node
, code
),
1425 tree_cons (OMP_TS_SCORE_NODE
, score
, properties
),
1430 make_trait_property (tree name
, tree value
, tree chain
)
1432 return tree_cons (name
, value
, chain
);
1435 /* Return 1 if context selector matches the current OpenMP context, 0
1436 if it does not and -1 if it is unknown and need to be determined later.
1437 Some properties can be checked right away during parsing (this routine),
1438 others need to wait until the whole TU is parsed, others need to wait until
1439 IPA, others until vectorization. */
1442 omp_context_selector_matches (tree ctx
)
1445 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
1447 enum omp_tss_code set
= OMP_TSS_CODE (tss
);
1448 tree selectors
= OMP_TSS_TRAIT_SELECTORS (tss
);
1450 /* Immediately reject the match if there are any ignored
1451 selectors present. */
1452 for (tree ts
= selectors
; ts
; ts
= TREE_CHAIN (ts
))
1453 if (OMP_TS_CODE (ts
) == OMP_TRAIT_INVALID
)
1456 if (set
== OMP_TRAIT_SET_CONSTRUCT
)
1458 /* For now, ignore the construct set. While something can be
1459 determined already during parsing, we don't know until end of TU
1460 whether additional constructs aren't added through declare variant
1461 unless "omp declare variant variant" attribute exists already
1462 (so in most of the cases), and we'd need to maintain set of
1463 surrounding OpenMP constructs, which is better handled during
1465 if (symtab
->state
== PARSING
)
1471 int nconstructs
= list_length (selectors
);
1472 enum tree_code
*constructs
= NULL
;
1475 /* Even though this alloca appears in a loop over selector
1476 sets, it does not repeatedly grow the stack, because
1477 there can be only one construct selector set specified.
1478 This is enforced by omp_check_context_selector. */
1480 = (enum tree_code
*) alloca (nconstructs
1481 * sizeof (enum tree_code
));
1482 omp_construct_traits_to_codes (selectors
, nconstructs
,
1486 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1488 if (!cfun
->after_inlining
)
1494 for (i
= 0; i
< nconstructs
; ++i
)
1495 if (constructs
[i
] == OMP_SIMD
)
1497 if (i
< nconstructs
)
1502 /* If there is no simd, assume it is ok after IPA,
1503 constructs should have been checked before. */
1507 int r
= omp_construct_selector_matches (constructs
, nconstructs
,
1515 for (tree ts
= selectors
; ts
; ts
= TREE_CHAIN (ts
))
1517 enum omp_ts_code sel
= OMP_TS_CODE (ts
);
1520 case OMP_TRAIT_IMPLEMENTATION_VENDOR
:
1521 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1522 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1524 const char *prop
= omp_context_name_list_prop (p
);
1527 if (!strcmp (prop
, "gnu"))
1532 case OMP_TRAIT_IMPLEMENTATION_EXTENSION
:
1533 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1534 /* We don't support any extensions right now. */
1537 case OMP_TRAIT_IMPLEMENTATION_ADMO
:
1538 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1540 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1543 enum omp_memory_order omo
1544 = ((enum omp_memory_order
)
1546 & OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER
));
1547 if (omo
== OMP_MEMORY_ORDER_UNSPECIFIED
)
1549 /* We don't know yet, until end of TU. */
1550 if (symtab
->state
== PARSING
)
1556 omo
= OMP_MEMORY_ORDER_RELAXED
;
1558 tree p
= OMP_TS_PROPERTIES (ts
);
1559 const char *prop
= IDENTIFIER_POINTER (OMP_TP_NAME (p
));
1560 if (!strcmp (prop
, "relaxed")
1561 && omo
!= OMP_MEMORY_ORDER_RELAXED
)
1563 else if (!strcmp (prop
, "seq_cst")
1564 && omo
!= OMP_MEMORY_ORDER_SEQ_CST
)
1566 else if (!strcmp (prop
, "acq_rel")
1567 && omo
!= OMP_MEMORY_ORDER_ACQ_REL
)
1569 else if (!strcmp (prop
, "acquire")
1570 && omo
!= OMP_MEMORY_ORDER_ACQUIRE
)
1572 else if (!strcmp (prop
, "release")
1573 && omo
!= OMP_MEMORY_ORDER_RELEASE
)
1577 case OMP_TRAIT_DEVICE_ARCH
:
1578 if (set
== OMP_TRAIT_SET_DEVICE
)
1579 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1581 const char *arch
= omp_context_name_list_prop (p
);
1585 if (targetm
.omp
.device_kind_arch_isa
!= NULL
)
1586 r
= targetm
.omp
.device_kind_arch_isa (omp_device_arch
,
1588 if (r
== 0 || (r
== -1 && symtab
->state
!= PARSING
))
1590 /* If we are or might be in a target region or
1591 declare target function, need to take into account
1592 also offloading values. */
1593 if (!omp_maybe_offloaded ())
1595 if (ENABLE_OFFLOADING
)
1597 const char *arches
= omp_offload_device_arch
;
1598 if (omp_offload_device_kind_arch_isa (arches
,
1609 /* If arch matches on the host, it still might not match
1610 in the offloading region. */
1611 else if (omp_maybe_offloaded ())
1615 case OMP_TRAIT_IMPLEMENTATION_UNIFIED_ADDRESS
:
1616 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1618 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1621 if ((omp_requires_mask
& OMP_REQUIRES_UNIFIED_ADDRESS
) == 0)
1623 if (symtab
->state
== PARSING
)
1630 case OMP_TRAIT_IMPLEMENTATION_UNIFIED_SHARED_MEMORY
:
1631 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1633 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1636 if ((omp_requires_mask
1637 & OMP_REQUIRES_UNIFIED_SHARED_MEMORY
) == 0)
1639 if (symtab
->state
== PARSING
)
1646 case OMP_TRAIT_IMPLEMENTATION_DYNAMIC_ALLOCATORS
:
1647 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1649 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1652 if ((omp_requires_mask
1653 & OMP_REQUIRES_DYNAMIC_ALLOCATORS
) == 0)
1655 if (symtab
->state
== PARSING
)
1662 case OMP_TRAIT_IMPLEMENTATION_REVERSE_OFFLOAD
:
1663 if (set
== OMP_TRAIT_SET_IMPLEMENTATION
)
1665 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
1668 if ((omp_requires_mask
& OMP_REQUIRES_REVERSE_OFFLOAD
) == 0)
1670 if (symtab
->state
== PARSING
)
1677 case OMP_TRAIT_DEVICE_KIND
:
1678 if (set
== OMP_TRAIT_SET_DEVICE
)
1679 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1681 const char *prop
= omp_context_name_list_prop (p
);
1684 if (!strcmp (prop
, "any"))
1686 if (!strcmp (prop
, "host"))
1688 #ifdef ACCEL_COMPILER
1691 if (omp_maybe_offloaded ())
1696 if (!strcmp (prop
, "nohost"))
1698 #ifndef ACCEL_COMPILER
1699 if (omp_maybe_offloaded ())
1707 if (targetm
.omp
.device_kind_arch_isa
!= NULL
)
1708 r
= targetm
.omp
.device_kind_arch_isa (omp_device_kind
,
1711 r
= strcmp (prop
, "cpu") == 0;
1712 if (r
== 0 || (r
== -1 && symtab
->state
!= PARSING
))
1714 /* If we are or might be in a target region or
1715 declare target function, need to take into account
1716 also offloading values. */
1717 if (!omp_maybe_offloaded ())
1719 if (ENABLE_OFFLOADING
)
1721 const char *kinds
= omp_offload_device_kind
;
1722 if (omp_offload_device_kind_arch_isa (kinds
, prop
))
1732 /* If kind matches on the host, it still might not match
1733 in the offloading region. */
1734 else if (omp_maybe_offloaded ())
1738 case OMP_TRAIT_DEVICE_ISA
:
1739 if (set
== OMP_TRAIT_SET_DEVICE
)
1740 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1742 const char *isa
= omp_context_name_list_prop (p
);
1746 if (targetm
.omp
.device_kind_arch_isa
!= NULL
)
1747 r
= targetm
.omp
.device_kind_arch_isa (omp_device_isa
,
1749 if (r
== 0 || (r
== -1 && symtab
->state
!= PARSING
))
1751 /* If isa is valid on the target, but not in the
1752 current function and current function has
1753 #pragma omp declare simd on it, some simd clones
1754 might have the isa added later on. */
1756 && targetm
.simd_clone
.compute_vecsize_and_simdlen
1757 && (cfun
== NULL
|| !cfun
->after_inlining
))
1760 = DECL_ATTRIBUTES (current_function_decl
);
1761 if (lookup_attribute ("omp declare simd", attrs
))
1767 /* If we are or might be in a target region or
1768 declare target function, need to take into account
1769 also offloading values. */
1770 if (!omp_maybe_offloaded ())
1772 if (ENABLE_OFFLOADING
)
1774 const char *isas
= omp_offload_device_isa
;
1775 if (omp_offload_device_kind_arch_isa (isas
, isa
))
1785 /* If isa matches on the host, it still might not match
1786 in the offloading region. */
1787 else if (omp_maybe_offloaded ())
1791 case OMP_TRAIT_USER_CONDITION
:
1792 if (set
== OMP_TRAIT_SET_USER
)
1793 for (tree p
= OMP_TS_PROPERTIES (ts
); p
; p
= TREE_CHAIN (p
))
1794 if (OMP_TP_NAME (p
) == NULL_TREE
)
1796 if (integer_zerop (OMP_TP_VALUE (p
)))
1798 if (integer_nonzerop (OMP_TP_VALUE (p
)))
1811 /* Compare construct={simd} CLAUSES1 with CLAUSES2, return 0/-1/1/2 as
1812 in omp_context_selector_set_compare. */
1815 omp_construct_simd_compare (tree clauses1
, tree clauses2
)
1817 if (clauses1
== NULL_TREE
)
1818 return clauses2
== NULL_TREE
? 0 : -1;
1819 if (clauses2
== NULL_TREE
)
1823 struct declare_variant_simd_data
{
1824 bool inbranch
, notinbranch
;
1826 auto_vec
<tree
,16> data_sharing
;
1827 auto_vec
<tree
,16> aligned
;
1828 declare_variant_simd_data ()
1829 : inbranch(false), notinbranch(false), simdlen(NULL_TREE
) {}
1832 for (i
= 0; i
< 2; i
++)
1833 for (tree c
= i
? clauses2
: clauses1
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1836 switch (OMP_CLAUSE_CODE (c
))
1838 case OMP_CLAUSE_INBRANCH
:
1839 data
[i
].inbranch
= true;
1841 case OMP_CLAUSE_NOTINBRANCH
:
1842 data
[i
].notinbranch
= true;
1844 case OMP_CLAUSE_SIMDLEN
:
1845 data
[i
].simdlen
= OMP_CLAUSE_SIMDLEN_EXPR (c
);
1847 case OMP_CLAUSE_UNIFORM
:
1848 case OMP_CLAUSE_LINEAR
:
1849 v
= &data
[i
].data_sharing
;
1851 case OMP_CLAUSE_ALIGNED
:
1852 v
= &data
[i
].aligned
;
1857 unsigned HOST_WIDE_INT argno
= tree_to_uhwi (OMP_CLAUSE_DECL (c
));
1858 if (argno
>= v
->length ())
1859 v
->safe_grow_cleared (argno
+ 1, true);
1862 /* Here, r is used as a bitmask, 2 is set if CLAUSES1 has something
1863 CLAUSES2 doesn't, 1 is set if CLAUSES2 has something CLAUSES1
1864 doesn't. Thus, r == 3 implies return value 2, r == 1 implies
1865 -1, r == 2 implies 1 and r == 0 implies 0. */
1866 if (data
[0].inbranch
!= data
[1].inbranch
)
1867 r
|= data
[0].inbranch
? 2 : 1;
1868 if (data
[0].notinbranch
!= data
[1].notinbranch
)
1869 r
|= data
[0].notinbranch
? 2 : 1;
1870 if (!simple_cst_equal (data
[0].simdlen
, data
[1].simdlen
))
1872 if (data
[0].simdlen
&& data
[1].simdlen
)
1874 r
|= data
[0].simdlen
? 2 : 1;
1876 if (data
[0].data_sharing
.length () < data
[1].data_sharing
.length ()
1877 || data
[0].aligned
.length () < data
[1].aligned
.length ())
1880 FOR_EACH_VEC_ELT (data
[0].data_sharing
, i
, c1
)
1882 c2
= (i
< data
[1].data_sharing
.length ()
1883 ? data
[1].data_sharing
[i
] : NULL_TREE
);
1884 if ((c1
== NULL_TREE
) != (c2
== NULL_TREE
))
1886 r
|= c1
!= NULL_TREE
? 2 : 1;
1889 if (c1
== NULL_TREE
)
1891 if (OMP_CLAUSE_CODE (c1
) != OMP_CLAUSE_CODE (c2
))
1893 if (OMP_CLAUSE_CODE (c1
) != OMP_CLAUSE_LINEAR
)
1895 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c1
)
1896 != OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c2
))
1898 if (OMP_CLAUSE_LINEAR_KIND (c1
) != OMP_CLAUSE_LINEAR_KIND (c2
))
1900 if (!simple_cst_equal (OMP_CLAUSE_LINEAR_STEP (c1
),
1901 OMP_CLAUSE_LINEAR_STEP (c2
)))
1904 FOR_EACH_VEC_ELT (data
[0].aligned
, i
, c1
)
1906 c2
= i
< data
[1].aligned
.length () ? data
[1].aligned
[i
] : NULL_TREE
;
1907 if ((c1
== NULL_TREE
) != (c2
== NULL_TREE
))
1909 r
|= c1
!= NULL_TREE
? 2 : 1;
1912 if (c1
== NULL_TREE
)
1914 if (!simple_cst_equal (OMP_CLAUSE_ALIGNED_ALIGNMENT (c1
),
1915 OMP_CLAUSE_ALIGNED_ALIGNMENT (c2
)))
1924 default: gcc_unreachable ();
1928 /* Compare properties of selectors SEL from SET other than construct.
1929 CTX1 and CTX2 are the lists of properties to compare.
1930 Return 0/-1/1/2 as in omp_context_selector_set_compare.
1931 Unlike set names or selector names, properties can have duplicates. */
1934 omp_context_selector_props_compare (enum omp_tss_code set
,
1935 enum omp_ts_code sel
,
1936 tree ctx1
, tree ctx2
)
1939 for (int pass
= 0; pass
< 2; pass
++)
1940 for (tree p1
= pass
? ctx2
: ctx1
; p1
; p1
= TREE_CHAIN (p1
))
1943 for (p2
= pass
? ctx1
: ctx2
; p2
; p2
= TREE_CHAIN (p2
))
1944 if (OMP_TP_NAME (p1
) == OMP_TP_NAME (p2
))
1946 if (OMP_TP_NAME (p1
) == NULL_TREE
)
1948 if (set
== OMP_TRAIT_SET_USER
1949 && sel
== OMP_TRAIT_USER_CONDITION
)
1951 if (integer_zerop (OMP_TP_VALUE (p1
))
1952 != integer_zerop (OMP_TP_VALUE (p2
)))
1956 if (simple_cst_equal (OMP_TP_VALUE (p1
), OMP_TP_VALUE (p2
)))
1959 else if (OMP_TP_NAME (p1
) == OMP_TP_NAMELIST_NODE
)
1961 /* Handle string constant vs identifier comparison for
1962 name-list properties. */
1963 const char *n1
= omp_context_name_list_prop (p1
);
1964 const char *n2
= omp_context_name_list_prop (p2
);
1965 if (n1
&& n2
&& !strcmp (n1
, n2
))
1971 if (p2
== NULL_TREE
)
1973 int r
= pass
? -1 : 1;
1974 if (ret
&& ret
!= r
)
1988 /* Compare single context selector sets CTX1 and CTX2 with SET name.
1989 CTX1 and CTX2 are lists of trait-selectors.
1990 Return 0 if CTX1 is equal to CTX2,
1991 -1 if CTX1 is a strict subset of CTX2,
1992 1 if CTX2 is a strict subset of CTX1, or
1993 2 if neither context is a subset of another one. */
1996 omp_context_selector_set_compare (enum omp_tss_code set
, tree ctx1
, tree ctx2
)
1999 /* If either list includes an ignored selector trait, neither can
2000 be a subset of the other. */
2001 for (tree ts
= ctx1
; ts
; ts
= TREE_CHAIN (ts
))
2002 if (OMP_TS_CODE (ts
) == OMP_TRAIT_INVALID
)
2004 for (tree ts
= ctx2
; ts
; ts
= TREE_CHAIN (ts
))
2005 if (OMP_TS_CODE (ts
) == OMP_TRAIT_INVALID
)
2008 bool swapped
= false;
2010 int len1
= list_length (ctx1
);
2011 int len2
= list_length (ctx2
);
2016 std::swap (ctx1
, ctx2
);
2017 std::swap (len1
, len2
);
2020 if (set
== OMP_TRAIT_SET_CONSTRUCT
)
2024 /* Handle construct set specially. In this case the order
2025 of the selector matters too. */
2026 for (ts1
= ctx1
; ts1
; ts1
= TREE_CHAIN (ts1
))
2027 if (OMP_TS_CODE (ts1
) == OMP_TS_CODE (ts2
))
2030 if (OMP_TS_CODE (ts1
) == OMP_TRAIT_CONSTRUCT_SIMD
)
2031 r
= omp_construct_simd_compare (OMP_TS_PROPERTIES (ts1
),
2032 OMP_TS_PROPERTIES (ts2
));
2033 if (r
== 2 || (ret
&& r
&& (ret
< 0) != (r
< 0)))
2037 ts2
= TREE_CHAIN (ts2
);
2038 if (ts2
== NULL_TREE
)
2040 ts1
= TREE_CHAIN (ts1
);
2048 if (ts2
!= NULL_TREE
)
2050 if (ts1
!= NULL_TREE
)
2058 return swapped
? -ret
: ret
;
2060 for (tree ts1
= ctx1
; ts1
; ts1
= TREE_CHAIN (ts1
))
2062 enum omp_ts_code sel
= OMP_TS_CODE (ts1
);
2064 for (ts2
= ctx2
; ts2
; ts2
= TREE_CHAIN (ts2
))
2065 if (sel
== OMP_TS_CODE (ts2
))
2067 tree score1
= OMP_TS_SCORE (ts1
);
2068 tree score2
= OMP_TS_SCORE (ts2
);
2069 if (score1
&& score2
&& !simple_cst_equal (score1
, score2
))
2072 int r
= omp_context_selector_props_compare (set
, OMP_TS_CODE (ts1
),
2073 OMP_TS_PROPERTIES (ts1
),
2074 OMP_TS_PROPERTIES (ts2
));
2075 if (r
== 2 || (ret
&& r
&& (ret
< 0) != (r
< 0)))
2082 if (ts2
== NULL_TREE
)
2093 return swapped
? -ret
: ret
;
2096 /* Compare whole context selector specification CTX1 and CTX2.
2097 Return 0 if CTX1 is equal to CTX2,
2098 -1 if CTX1 is a strict subset of CTX2,
2099 1 if CTX2 is a strict subset of CTX1, or
2100 2 if neither context is a subset of another one. */
2103 omp_context_selector_compare (tree ctx1
, tree ctx2
)
2105 bool swapped
= false;
2107 int len1
= list_length (ctx1
);
2108 int len2
= list_length (ctx2
);
2113 std::swap (ctx1
, ctx2
);
2114 std::swap (len1
, len2
);
2116 for (tree tss1
= ctx1
; tss1
; tss1
= TREE_CHAIN (tss1
))
2118 enum omp_tss_code set
= OMP_TSS_CODE (tss1
);
2120 for (tss2
= ctx2
; tss2
; tss2
= TREE_CHAIN (tss2
))
2121 if (set
== OMP_TSS_CODE (tss2
))
2124 = omp_context_selector_set_compare
2125 (set
, OMP_TSS_TRAIT_SELECTORS (tss1
),
2126 OMP_TSS_TRAIT_SELECTORS (tss2
));
2127 if (r
== 2 || (ret
&& r
&& (ret
< 0) != (r
< 0)))
2134 if (tss2
== NULL_TREE
)
2145 return swapped
? -ret
: ret
;
2148 /* From context selector CTX, return trait-selector with name SEL in
2149 trait-selector-set with name SET if any, or NULL_TREE if not found. */
2151 omp_get_context_selector (tree ctx
, enum omp_tss_code set
,
2152 enum omp_ts_code sel
)
2154 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
2155 if (OMP_TSS_CODE (tss
) == set
)
2156 for (tree ts
= OMP_TSS_TRAIT_SELECTORS (tss
); ts
; ts
= TREE_CHAIN (ts
))
2157 if (OMP_TS_CODE (ts
) == sel
)
2162 /* Similar, but returns the whole trait-selector list for SET in CTX. */
2164 omp_get_context_selector_list (tree ctx
, enum omp_tss_code set
)
2166 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
2167 if (OMP_TSS_CODE (tss
) == set
)
2168 return OMP_TSS_TRAIT_SELECTORS (tss
);
2172 /* Map string S onto a trait selector set code. */
2174 omp_lookup_tss_code (const char * s
)
2176 for (int i
= 0; i
< OMP_TRAIT_SET_LAST
; i
++)
2177 if (strcmp (s
, omp_tss_map
[i
]) == 0)
2178 return (enum omp_tss_code
) i
;
2179 return OMP_TRAIT_SET_INVALID
;
2182 /* Map string S onto a trait selector code for set SET. */
2184 omp_lookup_ts_code (enum omp_tss_code set
, const char *s
)
2186 unsigned int mask
= 1 << set
;
2187 for (int i
= 0; i
< OMP_TRAIT_LAST
; i
++)
2188 if ((mask
& omp_ts_map
[i
].tss_mask
) != 0
2189 && strcmp (s
, omp_ts_map
[i
].name
) == 0)
2190 return (enum omp_ts_code
) i
;
2191 return OMP_TRAIT_INVALID
;
2194 /* Needs to be a GC-friendly widest_int variant, but precision is
2195 desirable to be the same on all targets. */
2196 typedef generic_wide_int
<fixed_wide_int_storage
<1024> > score_wide_int
;
2198 /* Compute *SCORE for context selector CTX. Return true if the score
2199 would be different depending on whether it is a declare simd clone or
2200 not. DECLARE_SIMD should be true for the case when it would be
2201 a declare simd clone. */
2204 omp_context_compute_score (tree ctx
, score_wide_int
*score
, bool declare_simd
)
2207 = omp_get_context_selector_list (ctx
, OMP_TRAIT_SET_CONSTRUCT
);
2208 bool has_kind
= omp_get_context_selector (ctx
, OMP_TRAIT_SET_DEVICE
,
2209 OMP_TRAIT_DEVICE_KIND
);
2210 bool has_arch
= omp_get_context_selector (ctx
, OMP_TRAIT_SET_DEVICE
,
2211 OMP_TRAIT_DEVICE_ARCH
);
2212 bool has_isa
= omp_get_context_selector (ctx
, OMP_TRAIT_SET_DEVICE
,
2213 OMP_TRAIT_DEVICE_ISA
);
2216 for (tree tss
= ctx
; tss
; tss
= TREE_CHAIN (tss
))
2217 if (OMP_TSS_TRAIT_SELECTORS (tss
) != selectors
)
2218 for (tree ts
= OMP_TSS_TRAIT_SELECTORS (tss
); ts
; ts
= TREE_CHAIN (ts
))
2220 tree s
= OMP_TS_SCORE (ts
);
2221 if (s
&& TREE_CODE (s
) == INTEGER_CST
)
2222 *score
+= score_wide_int::from (wi::to_wide (s
),
2223 TYPE_SIGN (TREE_TYPE (s
)));
2226 if (selectors
|| has_kind
|| has_arch
|| has_isa
)
2228 int nconstructs
= list_length (selectors
);
2229 enum tree_code
*constructs
= NULL
;
2233 = (enum tree_code
*) alloca (nconstructs
2234 * sizeof (enum tree_code
));
2235 omp_construct_traits_to_codes (selectors
, nconstructs
, constructs
);
2238 = (int *) alloca ((2 * nconstructs
+ 2) * sizeof (int));
2239 if (omp_construct_selector_matches (constructs
, nconstructs
, scores
)
2242 int b
= declare_simd
? nconstructs
+ 1 : 0;
2243 if (scores
[b
+ nconstructs
] + 4U < score
->get_precision ())
2245 for (int n
= 0; n
< nconstructs
; ++n
)
2247 if (scores
[b
+ n
] < 0)
2252 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ n
], 1, false);
2255 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ nconstructs
],
2258 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ nconstructs
] + 1,
2261 *score
+= wi::shifted_mask
<score_wide_int
> (scores
[b
+ nconstructs
] + 2,
2264 else /* FIXME: Implement this. */
2270 /* Class describing a single variant. */
2271 struct GTY(()) omp_declare_variant_entry
{
2272 /* NODE of the variant. */
2273 cgraph_node
*variant
;
2274 /* Score if not in declare simd clone. */
2275 score_wide_int score
;
2276 /* Score if in declare simd clone. */
2277 score_wide_int score_in_declare_simd_clone
;
2278 /* Context selector for the variant. */
2280 /* True if the context selector is known to match already. */
2284 /* Class describing a function with variants. */
2285 struct GTY((for_user
)) omp_declare_variant_base_entry
{
2286 /* NODE of the base function. */
2288 /* NODE of the artificial function created for the deferred variant
2291 /* Vector of the variants. */
2292 vec
<omp_declare_variant_entry
, va_gc
> *variants
;
2295 struct omp_declare_variant_hasher
2296 : ggc_ptr_hash
<omp_declare_variant_base_entry
> {
2297 static hashval_t
hash (omp_declare_variant_base_entry
*);
2298 static bool equal (omp_declare_variant_base_entry
*,
2299 omp_declare_variant_base_entry
*);
2303 omp_declare_variant_hasher::hash (omp_declare_variant_base_entry
*x
)
2305 inchash::hash hstate
;
2306 hstate
.add_int (DECL_UID (x
->base
->decl
));
2307 hstate
.add_int (x
->variants
->length ());
2308 omp_declare_variant_entry
*variant
;
2310 FOR_EACH_VEC_SAFE_ELT (x
->variants
, i
, variant
)
2312 hstate
.add_int (DECL_UID (variant
->variant
->decl
));
2313 hstate
.add_wide_int (variant
->score
);
2314 hstate
.add_wide_int (variant
->score_in_declare_simd_clone
);
2315 hstate
.add_ptr (variant
->ctx
);
2316 hstate
.add_int (variant
->matches
);
2318 return hstate
.end ();
2322 omp_declare_variant_hasher::equal (omp_declare_variant_base_entry
*x
,
2323 omp_declare_variant_base_entry
*y
)
2325 if (x
->base
!= y
->base
2326 || x
->variants
->length () != y
->variants
->length ())
2328 omp_declare_variant_entry
*variant
;
2330 FOR_EACH_VEC_SAFE_ELT (x
->variants
, i
, variant
)
2331 if (variant
->variant
!= (*y
->variants
)[i
].variant
2332 || variant
->score
!= (*y
->variants
)[i
].score
2333 || (variant
->score_in_declare_simd_clone
2334 != (*y
->variants
)[i
].score_in_declare_simd_clone
)
2335 || variant
->ctx
!= (*y
->variants
)[i
].ctx
2336 || variant
->matches
!= (*y
->variants
)[i
].matches
)
2341 static GTY(()) hash_table
<omp_declare_variant_hasher
> *omp_declare_variants
;
2343 struct omp_declare_variant_alt_hasher
2344 : ggc_ptr_hash
<omp_declare_variant_base_entry
> {
2345 static hashval_t
hash (omp_declare_variant_base_entry
*);
2346 static bool equal (omp_declare_variant_base_entry
*,
2347 omp_declare_variant_base_entry
*);
2351 omp_declare_variant_alt_hasher::hash (omp_declare_variant_base_entry
*x
)
2353 return DECL_UID (x
->node
->decl
);
2357 omp_declare_variant_alt_hasher::equal (omp_declare_variant_base_entry
*x
,
2358 omp_declare_variant_base_entry
*y
)
2360 return x
->node
== y
->node
;
2363 static GTY(()) hash_table
<omp_declare_variant_alt_hasher
>
2364 *omp_declare_variant_alt
;
2366 /* Try to resolve declare variant after gimplification. */
2369 omp_resolve_late_declare_variant (tree alt
)
2371 cgraph_node
*node
= cgraph_node::get (alt
);
2372 cgraph_node
*cur_node
= cgraph_node::get (cfun
->decl
);
2374 || !node
->declare_variant_alt
2375 || !cfun
->after_inlining
)
2378 omp_declare_variant_base_entry entry
;
2381 entry
.variants
= NULL
;
2382 omp_declare_variant_base_entry
*entryp
2383 = omp_declare_variant_alt
->find_with_hash (&entry
, DECL_UID (alt
));
2386 omp_declare_variant_entry
*varentry1
, *varentry2
;
2387 auto_vec
<bool, 16> matches
;
2388 unsigned int nmatches
= 0;
2389 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry1
)
2391 if (varentry1
->matches
)
2393 /* This has been checked to be ok already. */
2394 matches
.safe_push (true);
2398 switch (omp_context_selector_matches (varentry1
->ctx
))
2401 matches
.safe_push (false);
2406 matches
.safe_push (true);
2413 return entryp
->base
->decl
;
2415 /* A context selector that is a strict subset of another context selector
2416 has a score of zero. */
2417 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry1
)
2421 vec_safe_iterate (entryp
->variants
, j
, &varentry2
); ++j
)
2424 int r
= omp_context_selector_compare (varentry1
->ctx
,
2428 /* ctx1 is a strict subset of ctx2, ignore ctx1. */
2433 /* ctx2 is a strict subset of ctx1, remove ctx2. */
2438 score_wide_int max_score
= -1;
2440 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry1
)
2443 score_wide_int score
2444 = (cur_node
->simdclone
? varentry1
->score_in_declare_simd_clone
2445 : varentry1
->score
);
2446 if (score
> max_score
)
2449 varentry2
= varentry1
;
2452 return varentry2
->variant
->decl
;
2455 /* Hook to adjust hash tables on cgraph_node removal. */
2458 omp_declare_variant_remove_hook (struct cgraph_node
*node
, void *)
2460 if (!node
->declare_variant_alt
)
2463 /* Drop this hash table completely. */
2464 omp_declare_variants
= NULL
;
2465 /* And remove node from the other hash table. */
2466 if (omp_declare_variant_alt
)
2468 omp_declare_variant_base_entry entry
;
2471 entry
.variants
= NULL
;
2472 omp_declare_variant_alt
->remove_elt_with_hash (&entry
,
2473 DECL_UID (node
->decl
));
2477 /* Try to resolve declare variant, return the variant decl if it should
2478 be used instead of base, or base otherwise. */
2481 omp_resolve_declare_variant (tree base
)
2483 tree variant1
= NULL_TREE
, variant2
= NULL_TREE
;
2484 if (cfun
&& (cfun
->curr_properties
& PROP_gimple_any
) != 0)
2485 return omp_resolve_late_declare_variant (base
);
2487 auto_vec
<tree
, 16> variants
;
2488 auto_vec
<bool, 16> defer
;
2489 bool any_deferred
= false;
2490 for (tree attr
= DECL_ATTRIBUTES (base
); attr
; attr
= TREE_CHAIN (attr
))
2492 attr
= lookup_attribute ("omp declare variant base", attr
);
2493 if (attr
== NULL_TREE
)
2495 if (TREE_CODE (TREE_PURPOSE (TREE_VALUE (attr
))) != FUNCTION_DECL
)
2497 cgraph_node
*node
= cgraph_node::get (base
);
2498 /* If this is already a magic decl created by this function,
2499 don't process it again. */
2500 if (node
&& node
->declare_variant_alt
)
2502 switch (omp_context_selector_matches (TREE_VALUE (TREE_VALUE (attr
))))
2505 /* No match, ignore. */
2508 /* Needs to be deferred. */
2509 any_deferred
= true;
2510 variants
.safe_push (attr
);
2511 defer
.safe_push (true);
2514 variants
.safe_push (attr
);
2515 defer
.safe_push (false);
2519 if (variants
.length () == 0)
2524 score_wide_int max_score1
= 0;
2525 score_wide_int max_score2
= 0;
2529 omp_declare_variant_base_entry entry
;
2530 entry
.base
= cgraph_node::get_create (base
);
2532 vec_alloc (entry
.variants
, variants
.length ());
2533 FOR_EACH_VEC_ELT (variants
, i
, attr1
)
2535 score_wide_int score1
;
2536 score_wide_int score2
;
2538 tree ctx
= TREE_VALUE (TREE_VALUE (attr1
));
2539 need_two
= omp_context_compute_score (ctx
, &score1
, false);
2541 omp_context_compute_score (ctx
, &score2
, true);
2547 max_score1
= score1
;
2548 max_score2
= score2
;
2557 if (max_score1
== score1
)
2558 variant1
= NULL_TREE
;
2559 else if (score1
> max_score1
)
2561 max_score1
= score1
;
2562 variant1
= defer
[i
] ? NULL_TREE
: attr1
;
2564 if (max_score2
== score2
)
2565 variant2
= NULL_TREE
;
2566 else if (score2
> max_score2
)
2568 max_score2
= score2
;
2569 variant2
= defer
[i
] ? NULL_TREE
: attr1
;
2572 omp_declare_variant_entry varentry
;
2574 = cgraph_node::get_create (TREE_PURPOSE (TREE_VALUE (attr1
)));
2575 varentry
.score
= score1
;
2576 varentry
.score_in_declare_simd_clone
= score2
;
2578 varentry
.matches
= !defer
[i
];
2579 entry
.variants
->quick_push (varentry
);
2582 /* If there is a clear winner variant with the score which is not
2583 deferred, verify it is not a strict subset of any other context
2584 selector and if it is not, it is the best alternative no matter
2585 whether the others do or don't match. */
2586 if (variant1
&& variant1
== variant2
)
2588 tree ctx1
= TREE_VALUE (TREE_VALUE (variant1
));
2589 FOR_EACH_VEC_ELT (variants
, i
, attr2
)
2591 if (attr2
== variant1
)
2593 tree ctx2
= TREE_VALUE (TREE_VALUE (attr2
));
2594 int r
= omp_context_selector_compare (ctx1
, ctx2
);
2597 /* The winner is a strict subset of ctx2, can't
2599 variant1
= NULL_TREE
;
2605 vec_free (entry
.variants
);
2606 return TREE_PURPOSE (TREE_VALUE (variant1
));
2610 static struct cgraph_node_hook_list
*node_removal_hook_holder
;
2611 if (!node_removal_hook_holder
)
2612 node_removal_hook_holder
2613 = symtab
->add_cgraph_removal_hook (omp_declare_variant_remove_hook
,
2616 if (omp_declare_variants
== NULL
)
2617 omp_declare_variants
2618 = hash_table
<omp_declare_variant_hasher
>::create_ggc (64);
2619 omp_declare_variant_base_entry
**slot
2620 = omp_declare_variants
->find_slot (&entry
, INSERT
);
2623 vec_free (entry
.variants
);
2624 return (*slot
)->node
->decl
;
2627 *slot
= ggc_cleared_alloc
<omp_declare_variant_base_entry
> ();
2628 (*slot
)->base
= entry
.base
;
2629 (*slot
)->node
= entry
.base
;
2630 (*slot
)->variants
= entry
.variants
;
2631 tree alt
= build_decl (DECL_SOURCE_LOCATION (base
), FUNCTION_DECL
,
2632 DECL_NAME (base
), TREE_TYPE (base
));
2633 DECL_ARTIFICIAL (alt
) = 1;
2634 DECL_IGNORED_P (alt
) = 1;
2635 TREE_STATIC (alt
) = 1;
2636 tree attributes
= DECL_ATTRIBUTES (base
);
2637 if (lookup_attribute ("noipa", attributes
) == NULL
)
2639 attributes
= tree_cons (get_identifier ("noipa"), NULL
, attributes
);
2640 if (lookup_attribute ("noinline", attributes
) == NULL
)
2641 attributes
= tree_cons (get_identifier ("noinline"), NULL
,
2643 if (lookup_attribute ("noclone", attributes
) == NULL
)
2644 attributes
= tree_cons (get_identifier ("noclone"), NULL
,
2646 if (lookup_attribute ("no_icf", attributes
) == NULL
)
2647 attributes
= tree_cons (get_identifier ("no_icf"), NULL
,
2650 DECL_ATTRIBUTES (alt
) = attributes
;
2651 DECL_INITIAL (alt
) = error_mark_node
;
2652 (*slot
)->node
= cgraph_node::create (alt
);
2653 (*slot
)->node
->declare_variant_alt
= 1;
2654 (*slot
)->node
->create_reference (entry
.base
, IPA_REF_ADDR
);
2655 omp_declare_variant_entry
*varentry
;
2656 FOR_EACH_VEC_SAFE_ELT (entry
.variants
, i
, varentry
)
2657 (*slot
)->node
->create_reference (varentry
->variant
, IPA_REF_ADDR
);
2658 if (omp_declare_variant_alt
== NULL
)
2659 omp_declare_variant_alt
2660 = hash_table
<omp_declare_variant_alt_hasher
>::create_ggc (64);
2661 *omp_declare_variant_alt
->find_slot_with_hash (*slot
, DECL_UID (alt
),
2666 if (variants
.length () == 1)
2667 return TREE_PURPOSE (TREE_VALUE (variants
[0]));
2669 /* A context selector that is a strict subset of another context selector
2670 has a score of zero. */
2673 FOR_EACH_VEC_ELT (variants
, i
, attr1
)
2676 tree ctx1
= TREE_VALUE (TREE_VALUE (attr1
));
2677 FOR_EACH_VEC_ELT_FROM (variants
, j
, attr2
, i
+ 1)
2680 tree ctx2
= TREE_VALUE (TREE_VALUE (attr2
));
2681 int r
= omp_context_selector_compare (ctx1
, ctx2
);
2684 /* ctx1 is a strict subset of ctx2, remove
2685 attr1 from the vector. */
2686 variants
[i
] = NULL_TREE
;
2690 /* ctx2 is a strict subset of ctx1, remove attr2
2692 variants
[j
] = NULL_TREE
;
2695 score_wide_int max_score1
= 0;
2696 score_wide_int max_score2
= 0;
2698 FOR_EACH_VEC_ELT (variants
, i
, attr1
)
2703 score_wide_int score1
;
2704 score_wide_int score2
;
2710 ctx
= TREE_VALUE (TREE_VALUE (variant1
));
2711 need_two
= omp_context_compute_score (ctx
, &max_score1
, false);
2713 omp_context_compute_score (ctx
, &max_score2
, true);
2715 max_score2
= max_score1
;
2717 ctx
= TREE_VALUE (TREE_VALUE (attr1
));
2718 need_two
= omp_context_compute_score (ctx
, &score1
, false);
2720 omp_context_compute_score (ctx
, &score2
, true);
2723 if (score1
> max_score1
)
2725 max_score1
= score1
;
2728 if (score2
> max_score2
)
2730 max_score2
= score2
;
2740 /* If there is a disagreement on which variant has the highest score
2741 depending on whether it will be in a declare simd clone or not,
2742 punt for now and defer until after IPA where we will know that. */
2743 return ((variant1
&& variant1
== variant2
)
2744 ? TREE_PURPOSE (TREE_VALUE (variant1
)) : base
);
2748 omp_lto_output_declare_variant_alt (lto_simple_output_block
*ob
,
2750 lto_symtab_encoder_t encoder
)
2752 gcc_assert (node
->declare_variant_alt
);
2754 omp_declare_variant_base_entry entry
;
2757 entry
.variants
= NULL
;
2758 omp_declare_variant_base_entry
*entryp
2759 = omp_declare_variant_alt
->find_with_hash (&entry
, DECL_UID (node
->decl
));
2760 gcc_assert (entryp
);
2762 int nbase
= lto_symtab_encoder_lookup (encoder
, entryp
->base
);
2763 gcc_assert (nbase
!= LCC_NOT_FOUND
);
2764 streamer_write_hwi_stream (ob
->main_stream
, nbase
);
2766 streamer_write_hwi_stream (ob
->main_stream
, entryp
->variants
->length ());
2769 omp_declare_variant_entry
*varentry
;
2770 FOR_EACH_VEC_SAFE_ELT (entryp
->variants
, i
, varentry
)
2772 int nvar
= lto_symtab_encoder_lookup (encoder
, varentry
->variant
);
2773 gcc_assert (nvar
!= LCC_NOT_FOUND
);
2774 streamer_write_hwi_stream (ob
->main_stream
, nvar
);
2776 for (score_wide_int
*w
= &varentry
->score
; ;
2777 w
= &varentry
->score_in_declare_simd_clone
)
2779 unsigned len
= w
->get_len ();
2780 streamer_write_hwi_stream (ob
->main_stream
, len
);
2781 const HOST_WIDE_INT
*val
= w
->get_val ();
2782 for (unsigned j
= 0; j
< len
; j
++)
2783 streamer_write_hwi_stream (ob
->main_stream
, val
[j
]);
2784 if (w
== &varentry
->score_in_declare_simd_clone
)
2788 HOST_WIDE_INT cnt
= -1;
2789 HOST_WIDE_INT i
= varentry
->matches
? 1 : 0;
2790 for (tree attr
= DECL_ATTRIBUTES (entryp
->base
->decl
);
2791 attr
; attr
= TREE_CHAIN (attr
), i
+= 2)
2793 attr
= lookup_attribute ("omp declare variant base", attr
);
2794 if (attr
== NULL_TREE
)
2797 if (varentry
->ctx
== TREE_VALUE (TREE_VALUE (attr
)))
2804 gcc_assert (cnt
!= -1);
2805 streamer_write_hwi_stream (ob
->main_stream
, cnt
);
2810 omp_lto_input_declare_variant_alt (lto_input_block
*ib
, cgraph_node
*node
,
2811 vec
<symtab_node
*> nodes
)
2813 gcc_assert (node
->declare_variant_alt
);
2814 omp_declare_variant_base_entry
*entryp
2815 = ggc_cleared_alloc
<omp_declare_variant_base_entry
> ();
2816 entryp
->base
= dyn_cast
<cgraph_node
*> (nodes
[streamer_read_hwi (ib
)]);
2817 entryp
->node
= node
;
2818 unsigned int len
= streamer_read_hwi (ib
);
2819 vec_alloc (entryp
->variants
, len
);
2821 for (unsigned int i
= 0; i
< len
; i
++)
2823 omp_declare_variant_entry varentry
;
2825 = dyn_cast
<cgraph_node
*> (nodes
[streamer_read_hwi (ib
)]);
2826 for (score_wide_int
*w
= &varentry
.score
; ;
2827 w
= &varentry
.score_in_declare_simd_clone
)
2829 unsigned len2
= streamer_read_hwi (ib
);
2830 HOST_WIDE_INT arr
[WIDE_INT_MAX_HWIS (1024)];
2831 gcc_assert (len2
<= WIDE_INT_MAX_HWIS (1024));
2832 for (unsigned int j
= 0; j
< len2
; j
++)
2833 arr
[j
] = streamer_read_hwi (ib
);
2834 *w
= score_wide_int::from_array (arr
, len2
, true);
2835 if (w
== &varentry
.score_in_declare_simd_clone
)
2839 HOST_WIDE_INT cnt
= streamer_read_hwi (ib
);
2840 HOST_WIDE_INT j
= 0;
2841 varentry
.ctx
= NULL_TREE
;
2842 varentry
.matches
= (cnt
& 1) ? true : false;
2843 cnt
&= ~HOST_WIDE_INT_1
;
2844 for (tree attr
= DECL_ATTRIBUTES (entryp
->base
->decl
);
2845 attr
; attr
= TREE_CHAIN (attr
), j
+= 2)
2847 attr
= lookup_attribute ("omp declare variant base", attr
);
2848 if (attr
== NULL_TREE
)
2853 varentry
.ctx
= TREE_VALUE (TREE_VALUE (attr
));
2857 gcc_assert (varentry
.ctx
!= NULL_TREE
);
2858 entryp
->variants
->quick_push (varentry
);
2860 if (omp_declare_variant_alt
== NULL
)
2861 omp_declare_variant_alt
2862 = hash_table
<omp_declare_variant_alt_hasher
>::create_ggc (64);
2863 *omp_declare_variant_alt
->find_slot_with_hash (entryp
, DECL_UID (node
->decl
),
2867 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
2868 macro on gomp-constants.h. We do not check for overflow. */
2871 oacc_launch_pack (unsigned code
, tree device
, unsigned op
)
2875 res
= build_int_cst (unsigned_type_node
, GOMP_LAUNCH_PACK (code
, 0, op
));
2878 device
= fold_build2 (LSHIFT_EXPR
, unsigned_type_node
,
2879 device
, build_int_cst (unsigned_type_node
,
2880 GOMP_LAUNCH_DEVICE_SHIFT
));
2881 res
= fold_build2 (BIT_IOR_EXPR
, unsigned_type_node
, res
, device
);
2886 /* Openacc compute grid dimension clauses are converted to an attribute
2887 attached to the function. This permits the target-side code to (a) massage
2888 the dimensions, (b) emit that data and (c) optimize. Non-constant
2889 dimensions are pushed onto ARGS.
2891 The attribute value is a TREE_LIST. A set of dimensions is
2892 represented as a list of INTEGER_CST. Those that are runtime
2893 exprs are represented as an INTEGER_CST of zero.
2895 TODO: Normally the attribute will just contain a single such list. If
2896 however it contains a list of lists, this will represent the use of
2897 device_type. Each member of the outer list is an assoc list of
2898 dimensions, keyed by the device type. The first entry will be the
2899 default. Well, that's the plan. */
2901 /* Replace any existing oacc fn attribute in ATTRIBS with updated
2905 oacc_replace_fn_attrib_attr (tree attribs
, tree dims
)
2907 tree ident
= get_identifier (OACC_FN_ATTRIB
);
2909 /* If we happen to be present as the first attrib, drop it. */
2910 if (attribs
&& TREE_PURPOSE (attribs
) == ident
)
2911 attribs
= TREE_CHAIN (attribs
);
2912 return tree_cons (ident
, dims
, attribs
);
2915 /* Replace any existing oacc fn attribute on FN with updated
2919 oacc_replace_fn_attrib (tree fn
, tree dims
)
2921 DECL_ATTRIBUTES (fn
)
2922 = oacc_replace_fn_attrib_attr (DECL_ATTRIBUTES (fn
), dims
);
2925 /* Scan CLAUSES for launch dimensions and attach them to the oacc
2926 function attribute. Push any that are non-constant onto the ARGS
2927 list, along with an appropriate GOMP_LAUNCH_DIM tag. */
2930 oacc_set_fn_attrib (tree fn
, tree clauses
, vec
<tree
> *args
)
2932 /* Must match GOMP_DIM ordering. */
2933 static const omp_clause_code ids
[]
2934 = { OMP_CLAUSE_NUM_GANGS
, OMP_CLAUSE_NUM_WORKERS
,
2935 OMP_CLAUSE_VECTOR_LENGTH
};
2937 tree dims
[GOMP_DIM_MAX
];
2939 tree attr
= NULL_TREE
;
2940 unsigned non_const
= 0;
2942 for (ix
= GOMP_DIM_MAX
; ix
--;)
2944 tree clause
= omp_find_clause (clauses
, ids
[ix
]);
2945 tree dim
= NULL_TREE
;
2948 dim
= OMP_CLAUSE_EXPR (clause
, ids
[ix
]);
2950 if (dim
&& TREE_CODE (dim
) != INTEGER_CST
)
2952 dim
= integer_zero_node
;
2953 non_const
|= GOMP_DIM_MASK (ix
);
2955 attr
= tree_cons (NULL_TREE
, dim
, attr
);
2958 oacc_replace_fn_attrib (fn
, attr
);
2962 /* Push a dynamic argument set. */
2963 args
->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM
,
2964 NULL_TREE
, non_const
));
2965 for (unsigned ix
= 0; ix
!= GOMP_DIM_MAX
; ix
++)
2966 if (non_const
& GOMP_DIM_MASK (ix
))
2967 args
->safe_push (dims
[ix
]);
2971 /* Verify OpenACC routine clauses.
2973 Returns 0 if FNDECL should be marked with an OpenACC 'routine' directive, 1
2974 if it has already been marked in compatible way, and -1 if incompatible.
2975 Upon returning, the chain of clauses will contain exactly one clause
2976 specifying the level of parallelism. */
2979 oacc_verify_routine_clauses (tree fndecl
, tree
*clauses
, location_t loc
,
2980 const char *routine_str
)
2982 tree c_level
= NULL_TREE
;
2983 tree c_nohost
= NULL_TREE
;
2984 tree c_p
= NULL_TREE
;
2985 for (tree c
= *clauses
; c
; c_p
= c
, c
= OMP_CLAUSE_CHAIN (c
))
2986 switch (OMP_CLAUSE_CODE (c
))
2988 case OMP_CLAUSE_GANG
:
2989 case OMP_CLAUSE_WORKER
:
2990 case OMP_CLAUSE_VECTOR
:
2991 case OMP_CLAUSE_SEQ
:
2992 if (c_level
== NULL_TREE
)
2994 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_CODE (c_level
))
2996 /* This has already been diagnosed in the front ends. */
2997 /* Drop the duplicate clause. */
2998 gcc_checking_assert (c_p
!= NULL_TREE
);
2999 OMP_CLAUSE_CHAIN (c_p
) = OMP_CLAUSE_CHAIN (c
);
3004 error_at (OMP_CLAUSE_LOCATION (c
),
3005 "%qs specifies a conflicting level of parallelism",
3006 omp_clause_code_name
[OMP_CLAUSE_CODE (c
)]);
3007 inform (OMP_CLAUSE_LOCATION (c_level
),
3008 "... to the previous %qs clause here",
3009 omp_clause_code_name
[OMP_CLAUSE_CODE (c_level
)]);
3010 /* Drop the conflicting clause. */
3011 gcc_checking_assert (c_p
!= NULL_TREE
);
3012 OMP_CLAUSE_CHAIN (c_p
) = OMP_CLAUSE_CHAIN (c
);
3016 case OMP_CLAUSE_NOHOST
:
3017 /* Don't worry about duplicate clauses here. */
3023 if (c_level
== NULL_TREE
)
3025 /* Default to an implicit 'seq' clause. */
3026 c_level
= build_omp_clause (loc
, OMP_CLAUSE_SEQ
);
3027 OMP_CLAUSE_CHAIN (c_level
) = *clauses
;
3030 /* In *clauses, we now have exactly one clause specifying the level of
3034 = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl
));
3035 if (attr
!= NULL_TREE
)
3037 /* Diagnose if "#pragma omp declare target" has also been applied. */
3038 if (TREE_VALUE (attr
) == NULL_TREE
)
3040 /* See <https://gcc.gnu.org/PR93465>; the semantics of combining
3041 OpenACC and OpenMP 'target' are not clear. */
3043 "cannot apply %<%s%> to %qD, which has also been"
3044 " marked with an OpenMP 'declare target' directive",
3045 routine_str
, fndecl
);
3050 /* If a "#pragma acc routine" has already been applied, just verify
3051 this one for compatibility. */
3052 /* Collect previous directive's clauses. */
3053 tree c_level_p
= NULL_TREE
;
3054 tree c_nohost_p
= NULL_TREE
;
3055 for (tree c
= TREE_VALUE (attr
); c
; c
= OMP_CLAUSE_CHAIN (c
))
3056 switch (OMP_CLAUSE_CODE (c
))
3058 case OMP_CLAUSE_GANG
:
3059 case OMP_CLAUSE_WORKER
:
3060 case OMP_CLAUSE_VECTOR
:
3061 case OMP_CLAUSE_SEQ
:
3062 gcc_checking_assert (c_level_p
== NULL_TREE
);
3065 case OMP_CLAUSE_NOHOST
:
3066 gcc_checking_assert (c_nohost_p
== NULL_TREE
);
3072 gcc_checking_assert (c_level_p
!= NULL_TREE
);
3073 /* ..., and compare to current directive's, which we've already collected
3077 /* Matching level of parallelism? */
3078 if (OMP_CLAUSE_CODE (c_level
) != OMP_CLAUSE_CODE (c_level_p
))
3081 c_diag_p
= c_level_p
;
3084 /* Matching 'nohost' clauses? */
3085 if ((c_nohost
== NULL_TREE
) != (c_nohost_p
== NULL_TREE
))
3088 c_diag_p
= c_nohost_p
;
3095 if (c_diag
!= NULL_TREE
)
3096 error_at (OMP_CLAUSE_LOCATION (c_diag
),
3097 "incompatible %qs clause when applying"
3098 " %<%s%> to %qD, which has already been"
3099 " marked with an OpenACC 'routine' directive",
3100 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag
)],
3101 routine_str
, fndecl
);
3102 else if (c_diag_p
!= NULL_TREE
)
3104 "missing %qs clause when applying"
3105 " %<%s%> to %qD, which has already been"
3106 " marked with an OpenACC 'routine' directive",
3107 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag_p
)],
3108 routine_str
, fndecl
);
3111 if (c_diag_p
!= NULL_TREE
)
3112 inform (OMP_CLAUSE_LOCATION (c_diag_p
),
3113 "... with %qs clause here",
3114 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag_p
)]);
3117 /* In the front ends, we don't preserve location information for the
3118 OpenACC routine directive itself. However, that of c_level_p
3120 location_t loc_routine
= OMP_CLAUSE_LOCATION (c_level_p
);
3121 inform (loc_routine
, "... without %qs clause near to here",
3122 omp_clause_code_name
[OMP_CLAUSE_CODE (c_diag
)]);
3131 /* Process the OpenACC 'routine' directive clauses to generate an attribute
3132 for the level of parallelism. All dimensions have a size of zero
3133 (dynamic). TREE_PURPOSE is set to indicate whether that dimension
3134 can have a loop partitioned on it. non-zero indicates
3135 yes, zero indicates no. By construction once a non-zero has been
3136 reached, further inner dimensions must also be non-zero. We set
3137 TREE_VALUE to zero for the dimensions that may be partitioned and
3138 1 for the other ones -- if a loop is (erroneously) spawned at
3139 an outer level, we don't want to try and partition it. */
3142 oacc_build_routine_dims (tree clauses
)
3144 /* Must match GOMP_DIM ordering. */
3145 static const omp_clause_code ids
[]
3146 = {OMP_CLAUSE_GANG
, OMP_CLAUSE_WORKER
, OMP_CLAUSE_VECTOR
, OMP_CLAUSE_SEQ
};
3150 for (; clauses
; clauses
= OMP_CLAUSE_CHAIN (clauses
))
3151 for (ix
= GOMP_DIM_MAX
+ 1; ix
--;)
3152 if (OMP_CLAUSE_CODE (clauses
) == ids
[ix
])
3157 gcc_checking_assert (level
>= 0);
3159 tree dims
= NULL_TREE
;
3161 for (ix
= GOMP_DIM_MAX
; ix
--;)
3162 dims
= tree_cons (build_int_cst (boolean_type_node
, ix
>= level
),
3163 build_int_cst (integer_type_node
, ix
< level
), dims
);
3168 /* Retrieve the oacc function attrib and return it. Non-oacc
3169 functions will return NULL. */
3172 oacc_get_fn_attrib (tree fn
)
3174 return lookup_attribute (OACC_FN_ATTRIB
, DECL_ATTRIBUTES (fn
));
3177 /* Return true if FN is an OpenMP or OpenACC offloading function. */
3180 offloading_function_p (tree fn
)
3182 tree attrs
= DECL_ATTRIBUTES (fn
);
3183 return (lookup_attribute ("omp declare target", attrs
)
3184 || lookup_attribute ("omp target entrypoint", attrs
));
3187 /* Extract an oacc execution dimension from FN. FN must be an
3188 offloaded function or routine that has already had its execution
3189 dimensions lowered to the target-specific values. */
3192 oacc_get_fn_dim_size (tree fn
, int axis
)
3194 tree attrs
= oacc_get_fn_attrib (fn
);
3196 gcc_assert (axis
< GOMP_DIM_MAX
);
3198 tree dims
= TREE_VALUE (attrs
);
3200 dims
= TREE_CHAIN (dims
);
3202 int size
= TREE_INT_CST_LOW (TREE_VALUE (dims
));
3207 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
3208 IFN_GOACC_DIM_SIZE call. */
3211 oacc_get_ifn_dim_arg (const gimple
*stmt
)
3213 gcc_checking_assert (gimple_call_internal_fn (stmt
) == IFN_GOACC_DIM_SIZE
3214 || gimple_call_internal_fn (stmt
) == IFN_GOACC_DIM_POS
);
3215 tree arg
= gimple_call_arg (stmt
, 0);
3216 HOST_WIDE_INT axis
= TREE_INT_CST_LOW (arg
);
3218 gcc_checking_assert (axis
>= 0 && axis
< GOMP_DIM_MAX
);
3222 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
3226 omp_build_component_ref (tree obj
, tree field
)
3228 tree ret
= build3 (COMPONENT_REF
, TREE_TYPE (field
), obj
, field
, NULL
);
3229 if (TREE_THIS_VOLATILE (field
))
3230 TREE_THIS_VOLATILE (ret
) |= 1;
3231 if (TREE_READONLY (field
))
3232 TREE_READONLY (ret
) |= 1;
3236 /* Return true if NAME is the name of an omp_* runtime API call. */
/* NOTE(review): this extracted listing is garbled -- the return type, the
   'mode' section counter, most entries of the name table, and the return
   statements are missing from the visible text.  Comments only are added
   here; every code token is left byte-identical.  Verify against the full
   omp-general.cc before relying on any detail.  */
3238 omp_runtime_api_procname (const char *name
)
/* Fast reject: presumably every runtime API entry point starts with
   "omp_" -- names without that prefix cannot match.  */
3240 if (!startswith (name
, "omp_"))
/* Table of known omp_* runtime API suffixes; the NULL separators between
   the three sections (not visible in this listing) presumably advance a
   'mode' counter that controls which name suffixes are accepted --
   TODO confirm.  */
3243 static const char *omp_runtime_apis
[] =
3245 /* This array has 3 sections. First omp_* calls that don't
3246 have any suffixes. */
3255 "target_associate_ptr",
3256 "target_disassociate_ptr",
3258 "target_is_accessible",
3259 "target_is_present",
3261 "target_memcpy_async",
3262 "target_memcpy_rect",
3263 "target_memcpy_rect_async",
3265 /* Now omp_* calls that are available as omp_* and omp_*_; however, the
3266 DECL_NAME is always omp_* without tailing underscore. */
3268 "destroy_allocator",
3270 "destroy_nest_lock",
3274 "get_affinity_format",
3276 "get_default_allocator",
3277 "get_default_device",
3280 "get_initial_device",
3282 "get_max_active_levels",
3283 "get_max_task_priority",
3292 "get_partition_num_places",
3295 "get_supported_active_levels",
3297 "get_teams_thread_limit",
3307 "is_initial_device",
3309 "pause_resource_all",
3310 "set_affinity_format",
3311 "set_default_allocator",
3319 /* And finally calls available as omp_*, omp_*_ and omp_*_8_; however,
3320 as DECL_NAME only omp_* and omp_*_8 appear. */
3322 "get_ancestor_thread_num",
3324 "get_partition_place_nums",
3325 "get_place_num_procs",
3326 "get_place_proc_ids",
3329 "set_default_device",
3331 "set_max_active_levels",
3336 "set_teams_thread_limit"
/* Scan the table; NAME + 4 skips the "omp_" prefix checked above.  */
3340 for (unsigned i
= 0; i
< ARRAY_SIZE (omp_runtime_apis
); i
++)
3342 if (omp_runtime_apis
[i
] == NULL
)
3347 size_t len
= strlen (omp_runtime_apis
[i
]);
/* Exact match, or -- in the third table section (mode > 1) -- the same
   name with an "_8" suffix.  */
3348 if (strncmp (name
+ 4, omp_runtime_apis
[i
], len
) == 0
3349 && (name
[4 + len
] == '\0'
3350 || (mode
> 1 && strcmp (name
+ 4 + len
, "_8") == 0)))
3356 /* Return true if FNDECL is an omp_* runtime API call. */
3359 omp_runtime_api_call (const_tree fndecl
)
3361 tree declname
= DECL_NAME (fndecl
);
3363 || (DECL_CONTEXT (fndecl
) != NULL_TREE
3364 && TREE_CODE (DECL_CONTEXT (fndecl
)) != TRANSLATION_UNIT_DECL
)
3365 || !TREE_PUBLIC (fndecl
))
3367 return omp_runtime_api_procname (IDENTIFIER_POINTER (declname
));
3370 namespace omp_addr_tokenizer
{
3372 /* We scan an expression by recursive descent, and build a vector of
3373 "omp_addr_token *" pointers representing a "parsed" version of the
3374 expression. The grammar we use is something like this:
3377 expr [section-access]
3380 structured-expr access-method
3381 | array-base access-method
3384 structure-base component-selector
3391 | structured-expr access-method
3392 | arbitrary-expr access-method
3404 | REF_TO_POINTER_OFFSET
3406 | INDEXED_REF_TO_ARRAY
3410 INDEX_EXPR access-method
3412 component-selector::
3413 component-selector COMPONENT_REF
3414 | component-selector ARRAY_REF
3417 This tokenized form is then used both in parsing, for OpenMP clause
3418 expansion (for C and C++) and in gimplify.cc for sibling-list handling
3419 (for C, C++ and Fortran). */
3421 omp_addr_token::omp_addr_token (token_type t
, tree e
)
3426 omp_addr_token::omp_addr_token (access_method_kinds k
, tree e
)
3427 : type(ACCESS_METHOD
), expr(e
)
3432 omp_addr_token::omp_addr_token (token_type t
, structure_base_kinds k
, tree e
)
3435 u
.structure_base_kind
= k
;
3439 omp_parse_component_selector (tree
*expr0
)
3442 tree last_component
= NULL_TREE
;
3444 while (TREE_CODE (expr
) == COMPONENT_REF
3445 || TREE_CODE (expr
) == ARRAY_REF
)
3447 if (TREE_CODE (expr
) == COMPONENT_REF
)
3448 last_component
= expr
;
3450 expr
= TREE_OPERAND (expr
, 0);
3452 if (TREE_CODE (TREE_TYPE (expr
)) == REFERENCE_TYPE
)
3456 if (!last_component
)
3459 *expr0
= last_component
;
3463 /* This handles references that have had convert_from_reference called on
3464 them, and also those that haven't. */
3467 omp_parse_ref (tree
*expr0
)
3471 if (TREE_CODE (TREE_TYPE (expr
)) == REFERENCE_TYPE
)
3473 else if ((TREE_CODE (expr
) == INDIRECT_REF
3474 || (TREE_CODE (expr
) == MEM_REF
3475 && integer_zerop (TREE_OPERAND (expr
, 1))))
3476 && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr
, 0))) == REFERENCE_TYPE
)
3478 *expr0
= TREE_OPERAND (expr
, 0);
3486 omp_parse_pointer (tree
*expr0
, bool *has_offset
)
3490 *has_offset
= false;
3492 if ((TREE_CODE (expr
) == INDIRECT_REF
3493 || (TREE_CODE (expr
) == MEM_REF
3494 && integer_zerop (TREE_OPERAND (expr
, 1))))
3495 && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr
, 0))) == POINTER_TYPE
)
3497 expr
= TREE_OPERAND (expr
, 0);
3499 /* The Fortran FE sometimes emits a no-op cast here. */
3504 if (TREE_CODE (expr
) == COMPOUND_EXPR
)
3506 expr
= TREE_OPERAND (expr
, 1);
3509 else if (TREE_CODE (expr
) == SAVE_EXPR
)
3510 expr
= TREE_OPERAND (expr
, 0);
3511 else if (TREE_CODE (expr
) == POINTER_PLUS_EXPR
)
3514 expr
= TREE_OPERAND (expr
, 0);
3530 omp_parse_access_method (tree
*expr0
, enum access_method_kinds
*kind
)
3535 if (omp_parse_ref (&expr
))
3537 else if (omp_parse_pointer (&expr
, &has_offset
))
3539 if (omp_parse_ref (&expr
))
3540 *kind
= has_offset
? ACCESS_REF_TO_POINTER_OFFSET
3541 : ACCESS_REF_TO_POINTER
;
3543 *kind
= has_offset
? ACCESS_POINTER_OFFSET
: ACCESS_POINTER
;
3545 else if (TREE_CODE (expr
) == ARRAY_REF
)
3547 while (TREE_CODE (expr
) == ARRAY_REF
)
3548 expr
= TREE_OPERAND (expr
, 0);
3549 if (omp_parse_ref (&expr
))
3550 *kind
= ACCESS_INDEXED_REF_TO_ARRAY
;
3552 *kind
= ACCESS_INDEXED_ARRAY
;
3555 *kind
= ACCESS_DIRECT
;
3564 omp_parse_access_methods (vec
<omp_addr_token
*> &addr_tokens
, tree
*expr0
)
3567 enum access_method_kinds kind
;
3570 if (omp_parse_access_method (&expr
, &kind
))
3573 if (TREE_CODE (expr
) == INDIRECT_REF
3574 || TREE_CODE (expr
) == MEM_REF
3575 || TREE_CODE (expr
) == ARRAY_REF
)
3576 omp_parse_access_methods (addr_tokens
, &expr
);
3578 addr_tokens
.safe_push (new omp_addr_token (kind
, am_expr
));
3584 static bool omp_parse_structured_expr (vec
<omp_addr_token
*> &, tree
*);
3587 omp_parse_structure_base (vec
<omp_addr_token
*> &addr_tokens
,
3588 tree
*expr0
, structure_base_kinds
*kind
,
3589 vec
<omp_addr_token
*> &base_access_tokens
,
3590 bool allow_structured
= true)
3594 if (allow_structured
)
3595 omp_parse_access_methods (base_access_tokens
, &expr
);
3603 if (allow_structured
&& omp_parse_structured_expr (addr_tokens
, &expr
))
3605 *kind
= BASE_COMPONENT_EXPR
;
3610 *kind
= BASE_ARBITRARY_EXPR
;
3616 omp_parse_structured_expr (vec
<omp_addr_token
*> &addr_tokens
, tree
*expr0
)
3619 tree base_component
= NULL_TREE
;
3620 structure_base_kinds struct_base_kind
;
3621 auto_vec
<omp_addr_token
*> base_access_tokens
;
3623 if (omp_parse_component_selector (&expr
))
3624 base_component
= expr
;
3628 gcc_assert (TREE_CODE (expr
) == COMPONENT_REF
);
3629 expr
= TREE_OPERAND (expr
, 0);
3631 tree structure_base
= expr
;
3633 if (!omp_parse_structure_base (addr_tokens
, &expr
, &struct_base_kind
,
3634 base_access_tokens
))
3637 addr_tokens
.safe_push (new omp_addr_token (STRUCTURE_BASE
, struct_base_kind
,
3639 addr_tokens
.safe_splice (base_access_tokens
);
3640 addr_tokens
.safe_push (new omp_addr_token (COMPONENT_SELECTOR
,
3649 omp_parse_array_expr (vec
<omp_addr_token
*> &addr_tokens
, tree
*expr0
)
3652 structure_base_kinds s_kind
;
3653 auto_vec
<omp_addr_token
*> base_access_tokens
;
3655 if (!omp_parse_structure_base (addr_tokens
, &expr
, &s_kind
,
3656 base_access_tokens
, false))
3659 addr_tokens
.safe_push (new omp_addr_token (ARRAY_BASE
, s_kind
, expr
));
3660 addr_tokens
.safe_splice (base_access_tokens
);
3666 /* Return TRUE if the ACCESS_METHOD token at index 'i' has a further
3667 ACCESS_METHOD chained after it (e.g., if we're processing an expression
3668 containing multiple pointer indirections). */
3671 omp_access_chain_p (vec
<omp_addr_token
*> &addr_tokens
, unsigned i
)
3673 gcc_assert (addr_tokens
[i
]->type
== ACCESS_METHOD
);
3674 return (i
+ 1 < addr_tokens
.length ()
3675 && addr_tokens
[i
+ 1]->type
== ACCESS_METHOD
);
3678 /* Return the address of the object accessed by the ACCESS_METHOD token
3679 at 'i': either of the next access method's expr, or of EXPR if we're at
3680 the end of the list of tokens. */
3683 omp_accessed_addr (vec
<omp_addr_token
*> &addr_tokens
, unsigned i
, tree expr
)
3685 if (i
+ 1 < addr_tokens
.length ())
3686 return build_fold_addr_expr (addr_tokens
[i
+ 1]->expr
);
3688 return build_fold_addr_expr (expr
);
3691 } /* namespace omp_addr_tokenizer. */
3694 omp_parse_expr (vec
<omp_addr_token
*> &addr_tokens
, tree expr
)
3696 using namespace omp_addr_tokenizer
;
3697 auto_vec
<omp_addr_token
*> expr_access_tokens
;
3699 if (!omp_parse_access_methods (expr_access_tokens
, &expr
))
3702 if (omp_parse_structured_expr (addr_tokens
, &expr
))
3704 else if (omp_parse_array_expr (addr_tokens
, &expr
))
3709 addr_tokens
.safe_splice (expr_access_tokens
);
/* Debug dump of a tokenized address expression to stderr; WITH_EXPRS also
   prints each token's wrapped expression.  NOTE(review): this extracted
   listing is garbled -- the return type, braces, 'break' statements, some
   case labels and the separator update at the loop tail are missing from
   the visible text.  Comments only are added; code tokens left
   byte-identical.  Verify formatting details against the full source.  */
3715 debug_omp_tokenized_addr (vec
<omp_addr_token
*> &addr_tokens
,
3718 using namespace omp_addr_tokenizer
;
/* Separator printed before each token; initial value depends on
   WITH_EXPRS -- exact strings garbled here, confirm upstream.  */
3719 const char *sep
= with_exprs
? " " : "";
3721 for (auto e
: addr_tokens
)
/* PFX distinguishes array_ from struct_ bases below.  */
3723 const char *pfx
= "";
3725 fputs (sep
, stderr
);
/* Dispatch on the token type.  */
3729 case COMPONENT_SELECTOR
:
3730 fputs ("component_selector", stderr
);
/* ACCESS_METHOD tokens: print the specific access kind.  */
3733 switch (e
->u
.access_kind
)
3736 fputs ("access_direct", stderr
);
3739 fputs ("access_ref", stderr
);
3741 case ACCESS_POINTER
:
3742 fputs ("access_pointer", stderr
);
3744 case ACCESS_POINTER_OFFSET
:
3745 fputs ("access_pointer_offset", stderr
);
3747 case ACCESS_REF_TO_POINTER
:
3748 fputs ("access_ref_to_pointer", stderr
);
3750 case ACCESS_REF_TO_POINTER_OFFSET
:
3751 fputs ("access_ref_to_pointer_offset", stderr
);
3753 case ACCESS_INDEXED_ARRAY
:
3754 fputs ("access_indexed_array", stderr
);
3756 case ACCESS_INDEXED_REF_TO_ARRAY
:
3757 fputs ("access_indexed_ref_to_array", stderr
);
/* Base tokens: ARRAY_BASE shares this path (its case label is not
   visible in this listing).  */
3762 case STRUCTURE_BASE
:
3763 pfx
= e
->type
== ARRAY_BASE
? "array_" : "struct_";
3764 switch (e
->u
.structure_base_kind
)
3767 fprintf (stderr
, "%sbase_decl", pfx
);
3769 case BASE_COMPONENT_EXPR
:
3770 fputs ("base_component_expr", stderr
);
3772 case BASE_ARBITRARY_EXPR
:
3773 fprintf (stderr
, "%sbase_arbitrary_expr", pfx
);
/* When WITH_EXPRS, append " [<expr>]" after the token name.  */
3780 fputs (" [", stderr
);
3781 print_generic_expr (stderr
, e
->expr
);
3782 fputc (']', stderr
);
3789 fputs ("\n", stderr
);
3793 #include "gt-omp-general.h"