gcc/omp-general.cc
1 /* General types and functions that are useful for processing of OpenMP,
2 OpenACC and similar directives at various stages of compilation.
4 Copyright (C) 2005-2023 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Find an OMP clause of type KIND within CLAUSES. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "tree.h"
30 #include "gimple.h"
31 #include "ssa.h"
32 #include "diagnostic-core.h"
33 #include "fold-const.h"
34 #include "langhooks.h"
35 #include "omp-general.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38 #include "gimplify.h"
39 #include "cgraph.h"
40 #include "alloc-pool.h"
41 #include "symbol-summary.h"
42 #include "tree-pass.h"
43 #include "omp-device-properties.h"
44 #include "tree-iterator.h"
45 #include "data-streamer.h"
46 #include "streamer-hooks.h"
47 #include "opts.h"
48 #include "omp-general.h"
49 #include "tree-pretty-print.h"
51 enum omp_requires omp_requires_mask;
53 tree
54 omp_find_clause (tree clauses, enum omp_clause_code kind)
56 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
57 if (OMP_CLAUSE_CODE (clauses) == kind)
58 return clauses;
60 return NULL_TREE;
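/* Illustrative usage sketch (not part of the original file): a caller
   scanning a directive's clause list might do

     tree c = omp_find_clause (gimple_omp_for_clauses (for_stmt),
                               OMP_CLAUSE_SCHEDULE);
     if (c)
       ... inspect OMP_CLAUSE_SCHEDULE_KIND (c) ...

   Only the first clause of the requested kind is returned; any further
   ones have to be found by continuing from its OMP_CLAUSE_CHAIN.  */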
63 /* True if OpenMP should regard this DECL as being a scalar which has Fortran's
64 allocatable or pointer attribute. */
65 bool
66 omp_is_allocatable_or_ptr (tree decl)
68 return lang_hooks.decls.omp_is_allocatable_or_ptr (decl);
71 /* Check whether this DECL belongs to a Fortran optional argument.
72 With 'for_present_check' set to false, decls which are optional parameters
73 themselves are returned as a tree, or NULL_TREE otherwise. Those decls are
74 always pointers. With 'for_present_check' set to true, the decl for checking
75 whether an argument is present is returned; for arguments with value
76 attribute this is the hidden argument, which is of BOOLEAN_TYPE. If the decl is
77 unrelated to optional arguments, NULL_TREE is returned. */
79 tree
80 omp_check_optional_argument (tree decl, bool for_present_check)
82 return lang_hooks.decls.omp_check_optional_argument (decl, for_present_check);
85 /* Return true if TYPE is an OpenMP mappable type. */
87 bool
88 omp_mappable_type (tree type)
90 /* Mappable type has to be complete. */
91 if (type == error_mark_node || !COMPLETE_TYPE_P (type))
92 return false;
93 return true;
96 /* True if OpenMP should privatize what this DECL points to rather
97 than the DECL itself. */
99 bool
100 omp_privatize_by_reference (tree decl)
102 return lang_hooks.decls.omp_privatize_by_reference (decl);
105 /* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or GT_EXPR,
106 given that V is the loop index variable and STEP is loop step. */
108 void
109 omp_adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2,
110 tree v, tree step)
112 switch (*cond_code)
114 case LT_EXPR:
115 case GT_EXPR:
116 break;
118 case NE_EXPR:
119 gcc_assert (TREE_CODE (step) == INTEGER_CST);
120 if (TREE_CODE (TREE_TYPE (v)) == INTEGER_TYPE)
122 if (integer_onep (step))
123 *cond_code = LT_EXPR;
124 else
126 gcc_assert (integer_minus_onep (step));
127 *cond_code = GT_EXPR;
130 else
132 tree unit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (v)));
133 gcc_assert (TREE_CODE (unit) == INTEGER_CST);
134 if (tree_int_cst_equal (unit, step))
135 *cond_code = LT_EXPR;
136 else
138 gcc_assert (wi::neg (wi::to_widest (unit))
139 == wi::to_widest (step));
140 *cond_code = GT_EXPR;
144 break;
146 case LE_EXPR:
147 if (POINTER_TYPE_P (TREE_TYPE (*n2)))
148 *n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
149 else
150 *n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
151 build_int_cst (TREE_TYPE (*n2), 1));
152 *cond_code = LT_EXPR;
153 break;
154 case GE_EXPR:
155 if (POINTER_TYPE_P (TREE_TYPE (*n2)))
156 *n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
157 else
158 *n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
159 build_int_cst (TREE_TYPE (*n2), 1));
160 *cond_code = GT_EXPR;
161 break;
162 default:
163 gcc_unreachable ();
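/* For example (illustrative): an 'i <= n2' loop (LE_EXPR) is rewritten
   above as 'i < n2 + 1' and 'i >= n2' (GE_EXPR) as 'i > n2 - 1'.  A loop
   using 'i != n2' is only accepted here with a constant step of +1 or -1
   (or, for pointer iterators, plus or minus the pointee size) and keeps
   N2 unchanged, merely becoming LT_EXPR or GT_EXPR respectively.  */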
167 /* Return the looping step from INCR, extracted from the step of a gimple omp
168 for statement. */
170 tree
171 omp_get_for_step_from_incr (location_t loc, tree incr)
173 tree step;
174 switch (TREE_CODE (incr))
176 case PLUS_EXPR:
177 step = TREE_OPERAND (incr, 1);
178 break;
179 case POINTER_PLUS_EXPR:
180 step = fold_convert (ssizetype, TREE_OPERAND (incr, 1));
181 break;
182 case MINUS_EXPR:
183 step = TREE_OPERAND (incr, 1);
184 step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
185 break;
186 default:
187 gcc_unreachable ();
189 return step;
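/* E.g. (illustrative) for an increment 'i = i + 4' (PLUS_EXPR) the returned
   step is 4, for 'i = i - 4' (MINUS_EXPR) it is folded to -4, and for
   pointer iterators (POINTER_PLUS_EXPR) the unsigned sizetype offset is
   converted to the signed ssizetype (so that negative steps remain
   representable).  */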
192 /* Extract the header elements of parallel loop FOR_STMT and store
193 them into *FD. */
195 void
196 omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
197 struct omp_for_data_loop *loops)
199 tree t, var, *collapse_iter, *collapse_count;
200 tree count = NULL_TREE, iter_type = long_integer_type_node;
201 struct omp_for_data_loop *loop;
202 int i;
203 struct omp_for_data_loop dummy_loop;
204 location_t loc = gimple_location (for_stmt);
205 bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
206 bool distribute = gimple_omp_for_kind (for_stmt)
207 == GF_OMP_FOR_KIND_DISTRIBUTE;
208 bool taskloop = gimple_omp_for_kind (for_stmt)
209 == GF_OMP_FOR_KIND_TASKLOOP;
210 bool order_reproducible = false;
211 tree iterv, countv;
213 fd->for_stmt = for_stmt;
214 fd->pre = NULL;
215 fd->have_nowait = distribute || simd;
216 fd->have_ordered = false;
217 fd->have_reductemp = false;
218 fd->have_pointer_condtemp = false;
219 fd->have_scantemp = false;
220 fd->have_nonctrl_scantemp = false;
221 fd->non_rect = false;
222 fd->lastprivate_conditional = 0;
223 fd->tiling = NULL_TREE;
224 fd->collapse = 1;
225 fd->ordered = 0;
226 fd->first_nonrect = -1;
227 fd->last_nonrect = -1;
228 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
229 fd->sched_modifiers = 0;
230 fd->chunk_size = NULL_TREE;
231 fd->simd_schedule = false;
232 fd->first_inner_iterations = NULL_TREE;
233 fd->factor = NULL_TREE;
234 fd->adjn1 = NULL_TREE;
235 collapse_iter = NULL;
236 collapse_count = NULL;
238 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
239 switch (OMP_CLAUSE_CODE (t))
241 case OMP_CLAUSE_NOWAIT:
242 fd->have_nowait = true;
243 break;
244 case OMP_CLAUSE_ORDERED:
245 fd->have_ordered = true;
246 if (OMP_CLAUSE_ORDERED_DOACROSS (t))
248 if (OMP_CLAUSE_ORDERED_EXPR (t))
249 fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
250 else
251 fd->ordered = -1;
253 break;
254 case OMP_CLAUSE_SCHEDULE:
255 gcc_assert (!distribute && !taskloop);
256 fd->sched_kind
257 = (enum omp_clause_schedule_kind)
258 (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
259 fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
260 & ~OMP_CLAUSE_SCHEDULE_MASK);
261 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
262 fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
263 break;
264 case OMP_CLAUSE_DIST_SCHEDULE:
265 gcc_assert (distribute);
266 fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
267 break;
268 case OMP_CLAUSE_COLLAPSE:
269 fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
270 if (fd->collapse > 1)
272 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
273 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
275 break;
276 case OMP_CLAUSE_TILE:
277 fd->tiling = OMP_CLAUSE_TILE_LIST (t);
278 fd->collapse = list_length (fd->tiling);
279 gcc_assert (fd->collapse);
280 collapse_iter = &OMP_CLAUSE_TILE_ITERVAR (t);
281 collapse_count = &OMP_CLAUSE_TILE_COUNT (t);
282 break;
283 case OMP_CLAUSE__REDUCTEMP_:
284 fd->have_reductemp = true;
285 break;
286 case OMP_CLAUSE_LASTPRIVATE:
287 if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (t))
288 fd->lastprivate_conditional++;
289 break;
290 case OMP_CLAUSE__CONDTEMP_:
291 if (POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (t))))
292 fd->have_pointer_condtemp = true;
293 break;
294 case OMP_CLAUSE__SCANTEMP_:
295 fd->have_scantemp = true;
296 if (!OMP_CLAUSE__SCANTEMP__ALLOC (t)
297 && !OMP_CLAUSE__SCANTEMP__CONTROL (t))
298 fd->have_nonctrl_scantemp = true;
299 break;
300 case OMP_CLAUSE_ORDER:
301 /* FIXME: For OpenMP 5.2 this should change to
302 if (OMP_CLAUSE_ORDER_REPRODUCIBLE (t))
303 (with the exception of loop construct but that lowers to
304 no schedule/dist_schedule clauses currently). */
305 if (!OMP_CLAUSE_ORDER_UNCONSTRAINED (t))
306 order_reproducible = true;
307 default:
308 break;
311 if (fd->ordered == -1)
312 fd->ordered = fd->collapse;
314 /* For order(reproducible:concurrent) schedule ({dynamic,guided,runtime})
315 we have either the option to expensively remember at runtime how we've
316 distributed work from the first loop and reuse that in following loops with
317 the same number of iterations and schedule, or to just force a static schedule.
318 OpenMP API calls etc. aren't allowed in order(concurrent) bodies so
319 users can't observe it easily anyway. */
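/* Illustrative example: with this code, '#pragma omp for schedule(guided)
   order(concurrent)' is compiled as if schedule(static) had been written,
   whereas 'order(unconstrained:concurrent)' keeps the requested schedule.  */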
320 if (order_reproducible)
321 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
322 if (fd->collapse > 1 || fd->tiling)
323 fd->loops = loops;
324 else
325 fd->loops = &fd->loop;
327 if (fd->ordered && fd->collapse == 1 && loops != NULL)
329 fd->loops = loops;
330 iterv = NULL_TREE;
331 countv = NULL_TREE;
332 collapse_iter = &iterv;
333 collapse_count = &countv;
336 /* FIXME: for now map schedule(auto) to schedule(static).
337 There should be analysis to determine whether all iterations
338 are approximately the same amount of work (then schedule(static)
339 is best) or if it varies (then schedule(dynamic,N) is better). */
340 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
342 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
343 gcc_assert (fd->chunk_size == NULL);
345 gcc_assert ((fd->collapse == 1 && !fd->tiling) || collapse_iter != NULL);
346 if (taskloop)
347 fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
348 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
349 gcc_assert (fd->chunk_size == NULL);
350 else if (fd->chunk_size == NULL)
352 /* We only need to compute a default chunk size for ordered
353 static loops and dynamic loops. */
354 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
355 || fd->have_ordered)
356 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
357 ? integer_zero_node : integer_one_node;
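/* I.e. (illustrative) 'schedule(static) ordered' defaults to a chunk size
   of 0 here, while dynamic/guided schedules without an explicit chunk size
   default to 1.  */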
360 int cnt = fd->ordered ? fd->ordered : fd->collapse;
361 int single_nonrect = -1;
362 tree single_nonrect_count = NULL_TREE;
363 enum tree_code single_nonrect_cond_code = ERROR_MARK;
364 for (i = 1; i < cnt; i++)
366 tree n1 = gimple_omp_for_initial (for_stmt, i);
367 tree n2 = gimple_omp_for_final (for_stmt, i);
368 if (TREE_CODE (n1) == TREE_VEC)
370 if (fd->non_rect)
372 single_nonrect = -1;
373 break;
375 for (int j = i - 1; j >= 0; j--)
376 if (TREE_VEC_ELT (n1, 0) == gimple_omp_for_index (for_stmt, j))
378 single_nonrect = j;
379 break;
381 fd->non_rect = true;
383 else if (TREE_CODE (n2) == TREE_VEC)
385 if (fd->non_rect)
387 single_nonrect = -1;
388 break;
390 for (int j = i - 1; j >= 0; j--)
391 if (TREE_VEC_ELT (n2, 0) == gimple_omp_for_index (for_stmt, j))
393 single_nonrect = j;
394 break;
396 fd->non_rect = true;
399 for (i = 0; i < cnt; i++)
401 if (i == 0
402 && fd->collapse == 1
403 && !fd->tiling
404 && (fd->ordered == 0 || loops == NULL))
405 loop = &fd->loop;
406 else if (loops != NULL)
407 loop = loops + i;
408 else
409 loop = &dummy_loop;
411 loop->v = gimple_omp_for_index (for_stmt, i);
412 gcc_assert (SSA_VAR_P (loop->v));
413 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
414 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
415 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
416 loop->n1 = gimple_omp_for_initial (for_stmt, i);
417 loop->m1 = NULL_TREE;
418 loop->m2 = NULL_TREE;
419 loop->outer = 0;
420 loop->non_rect_referenced = false;
421 if (TREE_CODE (loop->n1) == TREE_VEC)
423 for (int j = i - 1; j >= 0; j--)
424 if (TREE_VEC_ELT (loop->n1, 0) == gimple_omp_for_index (for_stmt, j))
426 loop->outer = i - j;
427 if (loops != NULL)
428 loops[j].non_rect_referenced = true;
429 if (fd->first_nonrect == -1 || fd->first_nonrect > j)
430 fd->first_nonrect = j;
431 break;
433 gcc_assert (loop->outer);
434 loop->m1 = TREE_VEC_ELT (loop->n1, 1);
435 loop->n1 = TREE_VEC_ELT (loop->n1, 2);
436 fd->non_rect = true;
437 fd->last_nonrect = i;
440 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
441 loop->n2 = gimple_omp_for_final (for_stmt, i);
442 gcc_assert (loop->cond_code != NE_EXPR
443 || (gimple_omp_for_kind (for_stmt)
444 != GF_OMP_FOR_KIND_OACC_LOOP));
445 if (TREE_CODE (loop->n2) == TREE_VEC)
447 if (loop->outer)
448 gcc_assert (TREE_VEC_ELT (loop->n2, 0)
449 == gimple_omp_for_index (for_stmt, i - loop->outer));
450 else
451 for (int j = i - 1; j >= 0; j--)
452 if (TREE_VEC_ELT (loop->n2, 0) == gimple_omp_for_index (for_stmt, j))
454 loop->outer = i - j;
455 if (loops != NULL)
456 loops[j].non_rect_referenced = true;
457 if (fd->first_nonrect == -1 || fd->first_nonrect > j)
458 fd->first_nonrect = j;
459 break;
461 gcc_assert (loop->outer);
462 loop->m2 = TREE_VEC_ELT (loop->n2, 1);
463 loop->n2 = TREE_VEC_ELT (loop->n2, 2);
464 fd->non_rect = true;
465 fd->last_nonrect = i;
468 t = gimple_omp_for_incr (for_stmt, i);
469 gcc_assert (TREE_OPERAND (t, 0) == var);
470 loop->step = omp_get_for_step_from_incr (loc, t);
472 omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2, loop->v,
473 loop->step);
475 if (simd
476 || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
477 && !fd->have_ordered))
479 if (fd->collapse == 1 && !fd->tiling)
480 iter_type = TREE_TYPE (loop->v);
481 else if (i == 0
482 || TYPE_PRECISION (iter_type)
483 < TYPE_PRECISION (TREE_TYPE (loop->v)))
484 iter_type
485 = build_nonstandard_integer_type
486 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
488 else if (iter_type != long_long_unsigned_type_node)
490 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
491 iter_type = long_long_unsigned_type_node;
492 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
493 && TYPE_PRECISION (TREE_TYPE (loop->v))
494 >= TYPE_PRECISION (iter_type))
496 tree n;
498 if (loop->cond_code == LT_EXPR)
499 n = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
500 loop->n2, loop->step);
501 else
502 n = loop->n1;
503 if (loop->m1
504 || loop->m2
505 || TREE_CODE (n) != INTEGER_CST
506 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
507 iter_type = long_long_unsigned_type_node;
509 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
510 > TYPE_PRECISION (iter_type))
512 tree n1, n2;
514 if (loop->cond_code == LT_EXPR)
516 n1 = loop->n1;
517 n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
518 loop->n2, loop->step);
520 else
522 n1 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->v),
523 loop->n2, loop->step);
524 n2 = loop->n1;
526 if (loop->m1
527 || loop->m2
528 || TREE_CODE (n1) != INTEGER_CST
529 || TREE_CODE (n2) != INTEGER_CST
530 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
531 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
532 iter_type = long_long_unsigned_type_node;
536 if (i >= fd->collapse)
537 continue;
539 if (collapse_count && *collapse_count == NULL)
541 if (count && integer_zerop (count))
542 continue;
543 tree n1first = NULL_TREE, n2first = NULL_TREE;
544 tree n1last = NULL_TREE, n2last = NULL_TREE;
545 tree ostep = NULL_TREE;
546 if (loop->m1 || loop->m2)
548 if (count == NULL_TREE)
549 continue;
550 if (single_nonrect == -1
551 || (loop->m1 && TREE_CODE (loop->m1) != INTEGER_CST)
552 || (loop->m2 && TREE_CODE (loop->m2) != INTEGER_CST)
553 || TREE_CODE (loop->n1) != INTEGER_CST
554 || TREE_CODE (loop->n2) != INTEGER_CST
555 || TREE_CODE (loop->step) != INTEGER_CST)
557 count = NULL_TREE;
558 continue;
560 tree var = gimple_omp_for_initial (for_stmt, single_nonrect);
561 tree itype = TREE_TYPE (var);
562 tree first = gimple_omp_for_initial (for_stmt, single_nonrect);
563 t = gimple_omp_for_incr (for_stmt, single_nonrect);
564 ostep = omp_get_for_step_from_incr (loc, t);
565 t = fold_binary (MINUS_EXPR, long_long_unsigned_type_node,
566 single_nonrect_count,
567 build_one_cst (long_long_unsigned_type_node));
568 t = fold_convert (itype, t);
569 first = fold_convert (itype, first);
570 ostep = fold_convert (itype, ostep);
571 tree last = fold_binary (PLUS_EXPR, itype, first,
572 fold_binary (MULT_EXPR, itype, t,
573 ostep));
574 if (TREE_CODE (first) != INTEGER_CST
575 || TREE_CODE (last) != INTEGER_CST)
577 count = NULL_TREE;
578 continue;
580 if (loop->m1)
582 tree m1 = fold_convert (itype, loop->m1);
583 tree n1 = fold_convert (itype, loop->n1);
584 n1first = fold_binary (PLUS_EXPR, itype,
585 fold_binary (MULT_EXPR, itype,
586 first, m1), n1);
587 n1last = fold_binary (PLUS_EXPR, itype,
588 fold_binary (MULT_EXPR, itype,
589 last, m1), n1);
591 else
592 n1first = n1last = loop->n1;
593 if (loop->m2)
595 tree n2 = fold_convert (itype, loop->n2);
596 tree m2 = fold_convert (itype, loop->m2);
597 n2first = fold_binary (PLUS_EXPR, itype,
598 fold_binary (MULT_EXPR, itype,
599 first, m2), n2);
600 n2last = fold_binary (PLUS_EXPR, itype,
601 fold_binary (MULT_EXPR, itype,
602 last, m2), n2);
604 else
605 n2first = n2last = loop->n2;
606 n1first = fold_convert (TREE_TYPE (loop->v), n1first);
607 n2first = fold_convert (TREE_TYPE (loop->v), n2first);
608 n1last = fold_convert (TREE_TYPE (loop->v), n1last);
609 n2last = fold_convert (TREE_TYPE (loop->v), n2last);
610 t = fold_binary (loop->cond_code, boolean_type_node,
611 n1first, n2first);
612 tree t2 = fold_binary (loop->cond_code, boolean_type_node,
613 n1last, n2last);
614 if (t && t2 && integer_nonzerop (t) && integer_nonzerop (t2))
615 /* All outer loop iterators have at least one inner loop
616 iteration. Try to compute the count at compile time. */
617 t = NULL_TREE;
618 else if (t && t2 && integer_zerop (t) && integer_zerop (t2))
619 /* No iterations of the inner loop. count will be set to
620 zero cst below. */;
621 else if (TYPE_UNSIGNED (itype)
622 || t == NULL_TREE
623 || t2 == NULL_TREE
624 || TREE_CODE (t) != INTEGER_CST
625 || TREE_CODE (t2) != INTEGER_CST)
627 /* Punt (for now). */
628 count = NULL_TREE;
629 continue;
631 else
633 /* Some iterations of the outer loop have zero iterations
634 of the inner loop, while others have at least one.
635 In this case, we need to adjust one of those outer
636 loop bounds. If ADJ_FIRST, we need to adjust outer n1
637 (first), otherwise outer n2 (last). */
638 bool adj_first = integer_zerop (t);
639 tree n1 = fold_convert (itype, loop->n1);
640 tree n2 = fold_convert (itype, loop->n2);
641 tree m1 = loop->m1 ? fold_convert (itype, loop->m1)
642 : build_zero_cst (itype);
643 tree m2 = loop->m2 ? fold_convert (itype, loop->m2)
644 : build_zero_cst (itype);
645 t = fold_binary (MINUS_EXPR, itype, n1, n2);
646 t2 = fold_binary (MINUS_EXPR, itype, m2, m1);
647 t = fold_binary (TRUNC_DIV_EXPR, itype, t, t2);
648 t2 = fold_binary (MINUS_EXPR, itype, t, first);
649 t2 = fold_binary (TRUNC_MOD_EXPR, itype, t2, ostep);
650 t = fold_binary (MINUS_EXPR, itype, t, t2);
651 tree n1cur
652 = fold_binary (PLUS_EXPR, itype, n1,
653 fold_binary (MULT_EXPR, itype, m1, t));
654 tree n2cur
655 = fold_binary (PLUS_EXPR, itype, n2,
656 fold_binary (MULT_EXPR, itype, m2, t));
657 t2 = fold_binary (loop->cond_code, boolean_type_node,
658 n1cur, n2cur);
659 tree t3 = fold_binary (MULT_EXPR, itype, m1, ostep);
660 tree t4 = fold_binary (MULT_EXPR, itype, m2, ostep);
661 tree diff;
662 if (adj_first)
664 tree new_first;
665 if (integer_nonzerop (t2))
667 new_first = t;
668 n1first = n1cur;
669 n2first = n2cur;
670 if (flag_checking)
672 t3 = fold_binary (MINUS_EXPR, itype, n1cur, t3);
673 t4 = fold_binary (MINUS_EXPR, itype, n2cur, t4);
674 t3 = fold_binary (loop->cond_code,
675 boolean_type_node, t3, t4);
676 gcc_assert (integer_zerop (t3));
679 else
681 t3 = fold_binary (PLUS_EXPR, itype, n1cur, t3);
682 t4 = fold_binary (PLUS_EXPR, itype, n2cur, t4);
683 new_first = fold_binary (PLUS_EXPR, itype, t, ostep);
684 n1first = t3;
685 n2first = t4;
686 if (flag_checking)
688 t3 = fold_binary (loop->cond_code,
689 boolean_type_node, t3, t4);
690 gcc_assert (integer_nonzerop (t3));
693 diff = fold_binary (MINUS_EXPR, itype, new_first, first);
694 first = new_first;
695 fd->adjn1 = first;
697 else
699 tree new_last;
700 if (integer_zerop (t2))
702 t3 = fold_binary (MINUS_EXPR, itype, n1cur, t3);
703 t4 = fold_binary (MINUS_EXPR, itype, n2cur, t4);
704 new_last = fold_binary (MINUS_EXPR, itype, t, ostep);
705 n1last = t3;
706 n2last = t4;
707 if (flag_checking)
709 t3 = fold_binary (loop->cond_code,
710 boolean_type_node, t3, t4);
711 gcc_assert (integer_nonzerop (t3));
714 else
716 new_last = t;
717 n1last = n1cur;
718 n2last = n2cur;
719 if (flag_checking)
721 t3 = fold_binary (PLUS_EXPR, itype, n1cur, t3);
722 t4 = fold_binary (PLUS_EXPR, itype, n2cur, t4);
723 t3 = fold_binary (loop->cond_code,
724 boolean_type_node, t3, t4);
725 gcc_assert (integer_zerop (t3));
728 diff = fold_binary (MINUS_EXPR, itype, last, new_last);
730 if (TYPE_UNSIGNED (itype)
731 && single_nonrect_cond_code == GT_EXPR)
732 diff = fold_binary (TRUNC_DIV_EXPR, itype,
733 fold_unary (NEGATE_EXPR, itype, diff),
734 fold_unary (NEGATE_EXPR, itype,
735 ostep));
736 else
737 diff = fold_binary (TRUNC_DIV_EXPR, itype, diff, ostep);
738 diff = fold_convert (long_long_unsigned_type_node, diff);
739 single_nonrect_count
740 = fold_binary (MINUS_EXPR, long_long_unsigned_type_node,
741 single_nonrect_count, diff);
742 t = NULL_TREE;
745 else
746 t = fold_binary (loop->cond_code, boolean_type_node,
747 fold_convert (TREE_TYPE (loop->v), loop->n1),
748 fold_convert (TREE_TYPE (loop->v), loop->n2));
749 if (t && integer_zerop (t))
750 count = build_zero_cst (long_long_unsigned_type_node);
751 else if ((i == 0 || count != NULL_TREE)
752 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
753 && TREE_CONSTANT (loop->n1)
754 && TREE_CONSTANT (loop->n2)
755 && TREE_CODE (loop->step) == INTEGER_CST)
757 tree itype = TREE_TYPE (loop->v);
759 if (POINTER_TYPE_P (itype))
760 itype = signed_type_for (itype);
761 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
762 t = fold_build2 (PLUS_EXPR, itype,
763 fold_convert (itype, loop->step), t);
764 tree n1 = loop->n1;
765 tree n2 = loop->n2;
766 if (loop->m1 || loop->m2)
768 gcc_assert (single_nonrect != -1);
769 n1 = n1first;
770 n2 = n2first;
772 t = fold_build2 (PLUS_EXPR, itype, t, fold_convert (itype, n2));
773 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
774 tree step = fold_convert_loc (loc, itype, loop->step);
775 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
776 t = fold_build2 (TRUNC_DIV_EXPR, itype,
777 fold_build1 (NEGATE_EXPR, itype, t),
778 fold_build1 (NEGATE_EXPR, itype, step));
779 else
780 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
781 tree llutype = long_long_unsigned_type_node;
782 t = fold_convert (llutype, t);
783 if (loop->m1 || loop->m2)
785 /* t is number of iterations of inner loop at either first
786 or last value of the outer iterator (the one with fewer
787 iterations).
788 Compute t2 = ((m2 - m1) * ostep) / step
789 and niters = outer_count * t
790 + t2 * ((outer_count - 1) * outer_count / 2)
791 */
792 tree m1 = loop->m1 ? loop->m1 : integer_zero_node;
793 tree m2 = loop->m2 ? loop->m2 : integer_zero_node;
794 m1 = fold_convert (itype, m1);
795 m2 = fold_convert (itype, m2);
796 tree t2 = fold_build2 (MINUS_EXPR, itype, m2, m1);
797 t2 = fold_build2 (MULT_EXPR, itype, t2, ostep);
798 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
799 t2 = fold_build2 (TRUNC_DIV_EXPR, itype,
800 fold_build1 (NEGATE_EXPR, itype, t2),
801 fold_build1 (NEGATE_EXPR, itype, step));
802 else
803 t2 = fold_build2 (TRUNC_DIV_EXPR, itype, t2, step);
804 t2 = fold_convert (llutype, t2);
805 fd->first_inner_iterations = t;
806 fd->factor = t2;
807 t = fold_build2 (MULT_EXPR, llutype, t,
808 single_nonrect_count);
809 tree t3 = fold_build2 (MINUS_EXPR, llutype,
810 single_nonrect_count,
811 build_one_cst (llutype));
812 t3 = fold_build2 (MULT_EXPR, llutype, t3,
813 single_nonrect_count);
814 t3 = fold_build2 (TRUNC_DIV_EXPR, llutype, t3,
815 build_int_cst (llutype, 2));
816 t2 = fold_build2 (MULT_EXPR, llutype, t2, t3);
817 t = fold_build2 (PLUS_EXPR, llutype, t, t2);
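/* Worked example (illustrative): for a collapse(2) nest

     for (i = 0; i < 4; i++)
       for (j = 0; j < i + 1; j++)

   the outer count is 4, t (inner iterations at i == 0) is 1 and
   t2 = ((1 - 0) * 1) / 1 = 1, giving 4 * 1 + 1 * ((4 - 1) * 4 / 2)
   = 4 + 6 = 10, which matches the 1 + 2 + 3 + 4 iterations executed.  */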
819 if (i == single_nonrect)
821 if (integer_zerop (t) || TREE_CODE (t) != INTEGER_CST)
822 count = t;
823 else
825 single_nonrect_count = t;
826 single_nonrect_cond_code = loop->cond_code;
827 if (count == NULL_TREE)
828 count = build_one_cst (llutype);
831 else if (count != NULL_TREE)
832 count = fold_build2 (MULT_EXPR, llutype, count, t);
833 else
834 count = t;
835 if (TREE_CODE (count) != INTEGER_CST)
836 count = NULL_TREE;
838 else if (count && !integer_zerop (count))
839 count = NULL_TREE;
843 if (count
844 && !simd
845 && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
846 || fd->have_ordered))
848 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
849 iter_type = long_long_unsigned_type_node;
850 else
851 iter_type = long_integer_type_node;
853 else if (collapse_iter && *collapse_iter != NULL)
854 iter_type = TREE_TYPE (*collapse_iter);
855 fd->iter_type = iter_type;
856 if (collapse_iter && *collapse_iter == NULL)
857 *collapse_iter = create_tmp_var (iter_type, ".iter");
858 if (collapse_count && *collapse_count == NULL)
860 if (count)
862 *collapse_count = fold_convert_loc (loc, iter_type, count);
863 if (fd->first_inner_iterations && fd->factor)
865 t = make_tree_vec (4);
866 TREE_VEC_ELT (t, 0) = *collapse_count;
867 TREE_VEC_ELT (t, 1) = fd->first_inner_iterations;
868 TREE_VEC_ELT (t, 2) = fd->factor;
869 TREE_VEC_ELT (t, 3) = fd->adjn1;
870 *collapse_count = t;
873 else
874 *collapse_count = create_tmp_var (iter_type, ".count");
877 if (fd->collapse > 1 || fd->tiling || (fd->ordered && loops))
879 fd->loop.v = *collapse_iter;
880 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
881 fd->loop.n2 = *collapse_count;
882 if (TREE_CODE (fd->loop.n2) == TREE_VEC)
884 gcc_assert (fd->non_rect);
885 fd->first_inner_iterations = TREE_VEC_ELT (fd->loop.n2, 1);
886 fd->factor = TREE_VEC_ELT (fd->loop.n2, 2);
887 fd->adjn1 = TREE_VEC_ELT (fd->loop.n2, 3);
888 fd->loop.n2 = TREE_VEC_ELT (fd->loop.n2, 0);
890 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
891 fd->loop.m1 = NULL_TREE;
892 fd->loop.m2 = NULL_TREE;
893 fd->loop.outer = 0;
894 fd->loop.cond_code = LT_EXPR;
896 else if (loops)
897 loops[0] = fd->loop;
900 /* Build a call to GOMP_barrier. */
902 gimple *
903 omp_build_barrier (tree lhs)
905 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
906 : BUILT_IN_GOMP_BARRIER);
907 gcall *g = gimple_build_call (fndecl, 0);
908 if (lhs)
909 gimple_call_set_lhs (g, lhs);
910 return g;
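/* That is, omp_build_barrier (NULL_TREE) produces a plain GOMP_barrier ()
   call, while a non-NULL LHS switches to GOMP_barrier_cancel () and stores
   its result (whether cancellation was observed) into LHS.  */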
913 /* Find OMP_FOR resp. OMP_SIMD with non-NULL OMP_FOR_INIT. Also, fill in pdata
914 array, pdata[0] non-NULL if there is anything non-trivial in between,
915 pdata[1] is address of OMP_PARALLEL in between if any, pdata[2] is address
916 of OMP_FOR in between if any and pdata[3] is address of the inner
917 OMP_FOR/OMP_SIMD. */
919 tree
920 find_combined_omp_for (tree *tp, int *walk_subtrees, void *data)
922 tree **pdata = (tree **) data;
923 *walk_subtrees = 0;
924 switch (TREE_CODE (*tp))
926 case OMP_FOR:
927 if (OMP_FOR_INIT (*tp) != NULL_TREE)
929 pdata[3] = tp;
930 return *tp;
932 pdata[2] = tp;
933 *walk_subtrees = 1;
934 break;
935 case OMP_SIMD:
936 if (OMP_FOR_INIT (*tp) != NULL_TREE)
938 pdata[3] = tp;
939 return *tp;
941 break;
942 case BIND_EXPR:
943 if (BIND_EXPR_VARS (*tp)
944 || (BIND_EXPR_BLOCK (*tp)
945 && BLOCK_VARS (BIND_EXPR_BLOCK (*tp))))
946 pdata[0] = tp;
947 *walk_subtrees = 1;
948 break;
949 case STATEMENT_LIST:
950 if (!tsi_one_before_end_p (tsi_start (*tp)))
951 pdata[0] = tp;
952 *walk_subtrees = 1;
953 break;
954 case TRY_FINALLY_EXPR:
955 pdata[0] = tp;
956 *walk_subtrees = 1;
957 break;
958 case OMP_PARALLEL:
959 pdata[1] = tp;
960 *walk_subtrees = 1;
961 break;
962 default:
963 break;
965 return NULL_TREE;
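/* Illustrative sketch of how a front end might invoke this walk_tree
   callback (details vary between callers):

     tree *data[4] = { NULL, NULL, NULL, NULL };
     walk_tree (&body, find_combined_omp_for, data, NULL);
     if (data[3] && !data[0])
       ... the OMP_FOR/OMP_SIMD in *data[3] has nothing non-trivial
           around it and can be treated as combined ...  */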
968 /* Return maximum possible vectorization factor for the target. */
970 poly_uint64
971 omp_max_vf (void)
973 if (!optimize
974 || optimize_debug
975 || !flag_tree_loop_optimize
976 || (!flag_tree_loop_vectorize
977 && OPTION_SET_P (flag_tree_loop_vectorize)))
978 return 1;
980 auto_vector_modes modes;
981 targetm.vectorize.autovectorize_vector_modes (&modes, true);
982 if (!modes.is_empty ())
984 poly_uint64 vf = 0;
985 for (unsigned int i = 0; i < modes.length (); ++i)
986 /* The returned modes use the smallest element size (and thus
987 the largest nunits) for the vectorization approach that they
988 represent. */
989 vf = ordered_max (vf, GET_MODE_NUNITS (modes[i]));
990 return vf;
993 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
994 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
995 return GET_MODE_NUNITS (vqimode);
997 return 1;
1000 /* Return maximum SIMT width if offloading may target SIMT hardware. */
1002 int
1003 omp_max_simt_vf (void)
1005 if (!optimize)
1006 return 0;
1007 if (ENABLE_OFFLOADING)
1008 for (const char *c = getenv ("OFFLOAD_TARGET_NAMES"); c;)
1010 if (startswith (c, "nvptx"))
1011 return 32;
1012 else if ((c = strchr (c, ':')))
1013 c++;
1015 return 0;
1018 /* Store the construct selectors as tree codes from last to first,
1019 return their number. */
1021 int
1022 omp_constructor_traits_to_codes (tree ctx, enum tree_code *constructs)
1024 int nconstructs = list_length (ctx);
1025 int i = nconstructs - 1;
1026 for (tree t2 = ctx; t2; t2 = TREE_CHAIN (t2), i--)
1028 const char *sel = IDENTIFIER_POINTER (TREE_PURPOSE (t2));
1029 if (!strcmp (sel, "target"))
1030 constructs[i] = OMP_TARGET;
1031 else if (!strcmp (sel, "teams"))
1032 constructs[i] = OMP_TEAMS;
1033 else if (!strcmp (sel, "parallel"))
1034 constructs[i] = OMP_PARALLEL;
1035 else if (!strcmp (sel, "for") || !strcmp (sel, "do"))
1036 constructs[i] = OMP_FOR;
1037 else if (!strcmp (sel, "simd"))
1038 constructs[i] = OMP_SIMD;
1039 else
1040 gcc_unreachable ();
1042 gcc_assert (i == -1);
1043 return nconstructs;
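/* Illustrative example: if CTX lists the selectors target, teams, parallel
   in that order, the array is filled as { OMP_PARALLEL, OMP_TEAMS,
   OMP_TARGET } and 3 is returned.  */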
1046 /* Return true if PROP is possibly present in one of the offloading target's
1047 OpenMP contexts. The format of PROPS string is always offloading target's
1048 name terminated by '\0', followed by properties for that offloading
1049 target separated by '\0' and terminated by another '\0'. The strings
1050 are created from omp-device-properties installed files of all configured
1051 offloading targets. */
1053 static bool
1054 omp_offload_device_kind_arch_isa (const char *props, const char *prop)
1056 const char *names = getenv ("OFFLOAD_TARGET_NAMES");
1057 if (names == NULL || *names == '\0')
1058 return false;
1059 while (*props != '\0')
1061 size_t name_len = strlen (props);
1062 bool matches = false;
1063 for (const char *c = names; c; )
1065 if (strncmp (props, c, name_len) == 0
1066 && (c[name_len] == '\0'
1067 || c[name_len] == ':'
1068 || c[name_len] == '='))
1070 matches = true;
1071 break;
1073 else if ((c = strchr (c, ':')))
1074 c++;
1076 props = props + name_len + 1;
1077 while (*props != '\0')
1079 if (matches && strcmp (props, prop) == 0)
1080 return true;
1081 props = strchr (props, '\0') + 1;
1083 props++;
1085 return false;
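/* As described in the comment above, a PROPS string (hypothetical ISA
   example) might look like "nvptx\0sm_35\0sm_70\0\0gcn\0gfx908\0\0",
   i.e. for each configured offload target its name followed by its
   properties, each terminated by '\0', with an extra '\0' ending that
   target's list.  */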
1088 /* Return true if the current code location is or might be offloaded:
1089 in declare target functions, when nested in a target region, or when
1090 unsure. Return false otherwise. */
1092 static bool
1093 omp_maybe_offloaded (void)
1095 if (!ENABLE_OFFLOADING)
1096 return false;
1097 const char *names = getenv ("OFFLOAD_TARGET_NAMES");
1098 if (names == NULL || *names == '\0')
1099 return false;
1101 if (symtab->state == PARSING)
1102 /* Maybe. */
1103 return true;
1104 if (cfun && cfun->after_inlining)
1105 return false;
1106 if (current_function_decl
1107 && lookup_attribute ("omp declare target",
1108 DECL_ATTRIBUTES (current_function_decl)))
1109 return true;
1110 if (cfun && (cfun->curr_properties & PROP_gimple_any) == 0)
1112 enum tree_code construct = OMP_TARGET;
1113 if (omp_construct_selector_matches (&construct, 1, NULL))
1114 return true;
1116 return false;
1120 /* Diagnose errors in an OpenMP context selector, return CTX if
1121 it is correct or error_mark_node otherwise. */
1123 tree
1124 omp_check_context_selector (location_t loc, tree ctx)
1126 /* Each trait-set-selector-name can only be specified once.
1127 There are just 4 set names. */
1128 for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
1129 for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
1130 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
1132 error_at (loc, "selector set %qs specified more than once",
1133 IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
1134 return error_mark_node;
1136 for (tree t = ctx; t; t = TREE_CHAIN (t))
1138 /* Each trait-selector-name can only be specified once. */
1139 if (list_length (TREE_VALUE (t)) < 5)
1141 for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
1142 for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
1143 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
1145 error_at (loc,
1146 "selector %qs specified more than once in set %qs",
1147 IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
1148 IDENTIFIER_POINTER (TREE_PURPOSE (t)));
1149 return error_mark_node;
1152 else
1154 hash_set<tree> pset;
1155 for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
1156 if (pset.add (TREE_PURPOSE (t1)))
1158 error_at (loc,
1159 "selector %qs specified more than once in set %qs",
1160 IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
1161 IDENTIFIER_POINTER (TREE_PURPOSE (t)));
1162 return error_mark_node;
1166 static const char *const kind[] = {
1167 "host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
1168 static const char *const vendor[] = {
1169 "amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
1170 "llvm", "nvidia", "pgi", "ti", "unknown", NULL };
1171 static const char *const extension[] = { NULL };
1172 static const char *const atomic_default_mem_order[] = {
1173 "seq_cst", "relaxed", "acq_rel", NULL };
1174 struct known_properties { const char *set; const char *selector;
1175 const char *const *props; };
1176 known_properties props[] = {
1177 { "device", "kind", kind },
1178 { "implementation", "vendor", vendor },
1179 { "implementation", "extension", extension },
1180 { "implementation", "atomic_default_mem_order",
1181 atomic_default_mem_order } };
1182 for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
1183 for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
1184 if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
1185 props[i].selector)
1186 && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
1187 props[i].set))
1188 for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
1189 for (unsigned j = 0; ; j++)
1191 if (props[i].props[j] == NULL)
1193 if (TREE_PURPOSE (t2)
1194 && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
1195 " score"))
1196 break;
1197 if (props[i].props == atomic_default_mem_order)
1199 error_at (loc,
1200 "incorrect property %qs of %qs selector",
1201 IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
1202 "atomic_default_mem_order");
1203 return error_mark_node;
1205 else if (TREE_PURPOSE (t2))
1206 warning_at (loc, OPT_Wopenmp,
1207 "unknown property %qs of %qs selector",
1208 IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
1209 props[i].selector);
1210 else
1211 warning_at (loc, OPT_Wopenmp,
1212 "unknown property %qE of %qs selector",
1213 TREE_VALUE (t2), props[i].selector);
1214 break;
1216 else if (TREE_PURPOSE (t2) == NULL_TREE)
1218 const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
1219 if (!strcmp (str, props[i].props[j])
1220 && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
1221 == strlen (str) + (lang_GNU_Fortran () ? 0 : 1)))
1222 break;
1224 else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
1225 props[i].props[j]))
1226 break;
1229 return ctx;
1233 /* Register VARIANT as variant of some base function marked with
1234 #pragma omp declare variant. CONSTRUCT is corresponding construct
1235 selector set. */
1237 void
1238 omp_mark_declare_variant (location_t loc, tree variant, tree construct)
1240 tree attr = lookup_attribute ("omp declare variant variant",
1241 DECL_ATTRIBUTES (variant));
1242 if (attr == NULL_TREE)
1244 attr = tree_cons (get_identifier ("omp declare variant variant"),
1245 unshare_expr (construct),
1246 DECL_ATTRIBUTES (variant));
1247 DECL_ATTRIBUTES (variant) = attr;
1248 return;
1250 if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE)
1251 || (construct != NULL_TREE
1252 && omp_context_selector_set_compare ("construct", TREE_VALUE (attr),
1253 construct)))
1254 error_at (loc, "%qD used as a variant with incompatible %<construct%> "
1255 "selector sets", variant);
1259 /* Return a name from PROP, a property in selectors accepting
1260 name lists. */
1262 static const char *
1263 omp_context_name_list_prop (tree prop)
1265 if (TREE_PURPOSE (prop))
1266 return IDENTIFIER_POINTER (TREE_PURPOSE (prop));
1267 else
1269 const char *ret = TREE_STRING_POINTER (TREE_VALUE (prop));
1270 if ((size_t) TREE_STRING_LENGTH (TREE_VALUE (prop))
1271 == strlen (ret) + (lang_GNU_Fortran () ? 0 : 1))
1272 return ret;
1273 return NULL;
1277 /* Return 1 if context selector matches the current OpenMP context, 0
1278 if it does not and -1 if it is unknown and need to be determined later.
1279 Some properties can be checked right away during parsing (this routine),
1280 others need to wait until the whole TU is parsed, others need to wait until
1281 IPA, others until vectorization. */
1283 int
1284 omp_context_selector_matches (tree ctx)
1286 int ret = 1;
1287 for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
1289 char set = IDENTIFIER_POINTER (TREE_PURPOSE (t1))[0];
1290 if (set == 'c')
1292 /* For now, ignore the construct set. While something can be
1293 determined already during parsing, we don't know until end of TU
1294 whether additional constructs might be added through declare variant
1295 unless "omp declare variant variant" attribute exists already
1296 (so in most of the cases), and we'd need to maintain set of
1297 surrounding OpenMP constructs, which is better handled during
1298 gimplification. */
1299 if (symtab->state == PARSING)
1301 ret = -1;
1302 continue;
1305 enum tree_code constructs[5];
1306 int nconstructs
1307 = omp_constructor_traits_to_codes (TREE_VALUE (t1), constructs);
1309 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
1311 if (!cfun->after_inlining)
1313 ret = -1;
1314 continue;
1316 int i;
1317 for (i = 0; i < nconstructs; ++i)
1318 if (constructs[i] == OMP_SIMD)
1319 break;
1320 if (i < nconstructs)
1322 ret = -1;
1323 continue;
1325 /* If there is no simd, assume it is ok after IPA,
1326 constructs should have been checked before. */
1327 continue;
1330 int r = omp_construct_selector_matches (constructs, nconstructs,
1331 NULL);
1332 if (r == 0)
1333 return 0;
1334 if (r == -1)
1335 ret = -1;
1336 continue;
1338 for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
1340 const char *sel = IDENTIFIER_POINTER (TREE_PURPOSE (t2));
1341 switch (*sel)
1343 case 'v':
1344 if (set == 'i' && !strcmp (sel, "vendor"))
1345 for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3))
1347 const char *prop = omp_context_name_list_prop (t3);
1348 if (prop == NULL)
1349 return 0;
1350 if ((!strcmp (prop, " score") && TREE_PURPOSE (t3))
1351 || !strcmp (prop, "gnu"))
1352 continue;
1353 return 0;
1355 break;
1356 case 'e':
1357 if (set == 'i' && !strcmp (sel, "extension"))
1358 /* We don't support any extensions right now. */
1359 return 0;
1360 break;
1361 case 'a':
1362 if (set == 'i' && !strcmp (sel, "atomic_default_mem_order"))
1364 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
1365 break;
1367 enum omp_memory_order omo
1368 = ((enum omp_memory_order)
1369 (omp_requires_mask
1370 & OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER));
1371 if (omo == OMP_MEMORY_ORDER_UNSPECIFIED)
1373 /* We don't know yet, until end of TU. */
1374 if (symtab->state == PARSING)
1376 ret = -1;
1377 break;
1379 else
1380 omo = OMP_MEMORY_ORDER_RELAXED;
1382 tree t3 = TREE_VALUE (t2);
1383 const char *prop = IDENTIFIER_POINTER (TREE_PURPOSE (t3));
1384 if (!strcmp (prop, " score"))
1386 t3 = TREE_CHAIN (t3);
1387 prop = IDENTIFIER_POINTER (TREE_PURPOSE (t3));
1389 if (!strcmp (prop, "relaxed")
1390 && omo != OMP_MEMORY_ORDER_RELAXED)
1391 return 0;
1392 else if (!strcmp (prop, "seq_cst")
1393 && omo != OMP_MEMORY_ORDER_SEQ_CST)
1394 return 0;
1395 else if (!strcmp (prop, "acq_rel")
1396 && omo != OMP_MEMORY_ORDER_ACQ_REL)
1397 return 0;
1399 if (set == 'd' && !strcmp (sel, "arch"))
1400 for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3))
1402 const char *arch = omp_context_name_list_prop (t3);
1403 if (arch == NULL)
1404 return 0;
1405 int r = 0;
1406 if (targetm.omp.device_kind_arch_isa != NULL)
1407 r = targetm.omp.device_kind_arch_isa (omp_device_arch,
1408 arch);
1409 if (r == 0 || (r == -1 && symtab->state != PARSING))
1411 /* If we are or might be in a target region or
1412 declare target function, need to take into account
1413 also offloading values. */
1414 if (!omp_maybe_offloaded ())
1415 return 0;
1416 if (ENABLE_OFFLOADING)
1418 const char *arches = omp_offload_device_arch;
1419 if (omp_offload_device_kind_arch_isa (arches,
1420 arch))
1422 ret = -1;
1423 continue;
1426 return 0;
1428 else if (r == -1)
1429 ret = -1;
1430 /* If arch matches on the host, it still might not match
1431 in the offloading region. */
1432 else if (omp_maybe_offloaded ())
1433 ret = -1;
1435 break;
1436 case 'u':
1437 if (set == 'i' && !strcmp (sel, "unified_address"))
1439 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
1440 break;
1442 if ((omp_requires_mask & OMP_REQUIRES_UNIFIED_ADDRESS) == 0)
1444 if (symtab->state == PARSING)
1445 ret = -1;
1446 else
1447 return 0;
1449 break;
1451 if (set == 'i' && !strcmp (sel, "unified_shared_memory"))
1453 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
1454 break;
1456 if ((omp_requires_mask
1457 & OMP_REQUIRES_UNIFIED_SHARED_MEMORY) == 0)
1459 if (symtab->state == PARSING)
1460 ret = -1;
1461 else
1462 return 0;
1464 break;
1466 break;
1467 case 'd':
1468 if (set == 'i' && !strcmp (sel, "dynamic_allocators"))
1470 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
1471 break;
1473 if ((omp_requires_mask
1474 & OMP_REQUIRES_DYNAMIC_ALLOCATORS) == 0)
1476 if (symtab->state == PARSING)
1477 ret = -1;
1478 else
1479 return 0;
1481 break;
1483 break;
1484 case 'r':
1485 if (set == 'i' && !strcmp (sel, "reverse_offload"))
1487 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
1488 break;
1490 if ((omp_requires_mask & OMP_REQUIRES_REVERSE_OFFLOAD) == 0)
1492 if (symtab->state == PARSING)
1493 ret = -1;
1494 else
1495 return 0;
1497 break;
1499 break;
1500 case 'k':
1501 if (set == 'd' && !strcmp (sel, "kind"))
1502 for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3))
1504 const char *prop = omp_context_name_list_prop (t3);
1505 if (prop == NULL)
1506 return 0;
1507 if (!strcmp (prop, "any"))
1508 continue;
1509 if (!strcmp (prop, "host"))
1511 #ifdef ACCEL_COMPILER
1512 return 0;
1513 #else
1514 if (omp_maybe_offloaded ())
1515 ret = -1;
1516 continue;
1517 #endif
1519 if (!strcmp (prop, "nohost"))
1521 #ifndef ACCEL_COMPILER
1522 if (omp_maybe_offloaded ())
1523 ret = -1;
1524 else
1525 return 0;
1526 #endif
1527 continue;
1529 int r = 0;
1530 if (targetm.omp.device_kind_arch_isa != NULL)
1531 r = targetm.omp.device_kind_arch_isa (omp_device_kind,
1532 prop);
1533 else
1534 r = strcmp (prop, "cpu") == 0;
1535 if (r == 0 || (r == -1 && symtab->state != PARSING))
1537 /* If we are or might be in a target region or
1538 declare target function, need to take into account
1539 also offloading values. */
1540 if (!omp_maybe_offloaded ())
1541 return 0;
1542 if (ENABLE_OFFLOADING)
1544 const char *kinds = omp_offload_device_kind;
1545 if (omp_offload_device_kind_arch_isa (kinds, prop))
1547 ret = -1;
1548 continue;
1551 return 0;
1553 else if (r == -1)
1554 ret = -1;
1555 /* If kind matches on the host, it still might not match
1556 in the offloading region. */
1557 else if (omp_maybe_offloaded ())
1558 ret = -1;
1560 break;
1561 case 'i':
1562 if (set == 'd' && !strcmp (sel, "isa"))
1563 for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3))
1565 const char *isa = omp_context_name_list_prop (t3);
1566 if (isa == NULL)
1567 return 0;
1568 int r = 0;
1569 if (targetm.omp.device_kind_arch_isa != NULL)
1570 r = targetm.omp.device_kind_arch_isa (omp_device_isa,
1571 isa);
1572 if (r == 0 || (r == -1 && symtab->state != PARSING))
1574 /* If isa is valid on the target, but not in the
1575 current function and current function has
1576 #pragma omp declare simd on it, some simd clones
1577 might have the isa added later on. */
1578 if (r == -1
1579 && targetm.simd_clone.compute_vecsize_and_simdlen
1580 && (cfun == NULL || !cfun->after_inlining))
1582 tree attrs
1583 = DECL_ATTRIBUTES (current_function_decl);
1584 if (lookup_attribute ("omp declare simd", attrs))
1586 ret = -1;
1587 continue;
1590 /* If we are or might be in a target region or
1591 declare target function, need to take into account
1592 also offloading values. */
1593 if (!omp_maybe_offloaded ())
1594 return 0;
1595 if (ENABLE_OFFLOADING)
1597 const char *isas = omp_offload_device_isa;
1598 if (omp_offload_device_kind_arch_isa (isas, isa))
1600 ret = -1;
1601 continue;
1604 return 0;
1606 else if (r == -1)
1607 ret = -1;
1608 /* If isa matches on the host, it still might not match
1609 in the offloading region. */
1610 else if (omp_maybe_offloaded ())
1611 ret = -1;
1613 break;
1614 case 'c':
1615 if (set == 'u' && !strcmp (sel, "condition"))
1616 for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3))
1617 if (TREE_PURPOSE (t3) == NULL_TREE)
1619 if (integer_zerop (TREE_VALUE (t3)))
1620 return 0;
1621 if (integer_nonzerop (TREE_VALUE (t3)))
1622 break;
1623 ret = -1;
1625 break;
1626 default:
1627 break;
1631 return ret;
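/* Illustrative outcomes of the above (assuming a host compiler without
   offloading configured): 'implementation={vendor(gnu)}' matches and
   yields 1, 'device={kind(fpga)}' yields 0, and a 'construct' set seen
   while still parsing yields -1, so the decision is deferred.  */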
1634 /* Compare construct={simd} CLAUSES1 with CLAUSES2, return 0/-1/1/2 as
1635 in omp_context_selector_set_compare. */
1637 static int
1638 omp_construct_simd_compare (tree clauses1, tree clauses2)
1640 if (clauses1 == NULL_TREE)
1641 return clauses2 == NULL_TREE ? 0 : -1;
1642 if (clauses2 == NULL_TREE)
1643 return 1;
1645 int r = 0;
1646 struct declare_variant_simd_data {
1647 bool inbranch, notinbranch;
1648 tree simdlen;
1649 auto_vec<tree,16> data_sharing;
1650 auto_vec<tree,16> aligned;
1651 declare_variant_simd_data ()
1652 : inbranch(false), notinbranch(false), simdlen(NULL_TREE) {}
1653 } data[2];
1654 unsigned int i;
1655 for (i = 0; i < 2; i++)
1656 for (tree c = i ? clauses2 : clauses1; c; c = OMP_CLAUSE_CHAIN (c))
1658 vec<tree> *v;
1659 switch (OMP_CLAUSE_CODE (c))
1661 case OMP_CLAUSE_INBRANCH:
1662 data[i].inbranch = true;
1663 continue;
1664 case OMP_CLAUSE_NOTINBRANCH:
1665 data[i].notinbranch = true;
1666 continue;
1667 case OMP_CLAUSE_SIMDLEN:
1668 data[i].simdlen = OMP_CLAUSE_SIMDLEN_EXPR (c);
1669 continue;
1670 case OMP_CLAUSE_UNIFORM:
1671 case OMP_CLAUSE_LINEAR:
1672 v = &data[i].data_sharing;
1673 break;
1674 case OMP_CLAUSE_ALIGNED:
1675 v = &data[i].aligned;
1676 break;
1677 default:
1678 gcc_unreachable ();
1680 unsigned HOST_WIDE_INT argno = tree_to_uhwi (OMP_CLAUSE_DECL (c));
1681 if (argno >= v->length ())
1682 v->safe_grow_cleared (argno + 1, true);
1683 (*v)[argno] = c;
1685 /* Here, r is used as a bitmask, 2 is set if CLAUSES1 has something
1686 CLAUSES2 doesn't, 1 is set if CLAUSES2 has something CLAUSES1
1687 doesn't. Thus, r == 3 implies return value 2, r == 1 implies
1688 -1, r == 2 implies 1 and r == 0 implies 0. */
1689 if (data[0].inbranch != data[1].inbranch)
1690 r |= data[0].inbranch ? 2 : 1;
1691 if (data[0].notinbranch != data[1].notinbranch)
1692 r |= data[0].notinbranch ? 2 : 1;
1693 if (!simple_cst_equal (data[0].simdlen, data[1].simdlen))
1695 if (data[0].simdlen && data[1].simdlen)
1696 return 2;
1697 r |= data[0].simdlen ? 2 : 1;
1699 if (data[0].data_sharing.length () < data[1].data_sharing.length ()
1700 || data[0].aligned.length () < data[1].aligned.length ())
1701 r |= 1;
1702 tree c1, c2;
1703 FOR_EACH_VEC_ELT (data[0].data_sharing, i, c1)
1705 c2 = (i < data[1].data_sharing.length ()
1706 ? data[1].data_sharing[i] : NULL_TREE);
1707 if ((c1 == NULL_TREE) != (c2 == NULL_TREE))
1709 r |= c1 != NULL_TREE ? 2 : 1;
1710 continue;
1712 if (c1 == NULL_TREE)
1713 continue;
1714 if (OMP_CLAUSE_CODE (c1) != OMP_CLAUSE_CODE (c2))
1715 return 2;
1716 if (OMP_CLAUSE_CODE (c1) != OMP_CLAUSE_LINEAR)
1717 continue;
1718 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c1)
1719 != OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c2))
1720 return 2;
1721 if (OMP_CLAUSE_LINEAR_KIND (c1) != OMP_CLAUSE_LINEAR_KIND (c2))
1722 return 2;
1723 if (!simple_cst_equal (OMP_CLAUSE_LINEAR_STEP (c1),
1724 OMP_CLAUSE_LINEAR_STEP (c2)))
1725 return 2;
1727 FOR_EACH_VEC_ELT (data[0].aligned, i, c1)
1729 c2 = i < data[1].aligned.length () ? data[1].aligned[i] : NULL_TREE;
1730 if ((c1 == NULL_TREE) != (c2 == NULL_TREE))
1732 r |= c1 != NULL_TREE ? 2 : 1;
1733 continue;
1735 if (c1 == NULL_TREE)
1736 continue;
1737 if (!simple_cst_equal (OMP_CLAUSE_ALIGNED_ALIGNMENT (c1),
1738 OMP_CLAUSE_ALIGNED_ALIGNMENT (c2)))
1739 return 2;
1741 switch (r)
1743 case 0: return 0;
1744 case 1: return -1;
1745 case 2: return 1;
1746 case 3: return 2;
1747 default: gcc_unreachable ();
1751 /* Compare properties of selectors SEL from SET other than construct.
1752 Return 0/-1/1/2 as in omp_context_selector_set_compare.
1753 Unlike set names or selector names, properties can have duplicates. */
1755 static int
1756 omp_context_selector_props_compare (const char *set, const char *sel,
1757 tree ctx1, tree ctx2)
1759 int ret = 0;
1760 for (int pass = 0; pass < 2; pass++)
1761 for (tree t1 = pass ? ctx2 : ctx1; t1; t1 = TREE_CHAIN (t1))
1763 tree t2;
1764 for (t2 = pass ? ctx1 : ctx2; t2; t2 = TREE_CHAIN (t2))
1765 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
1767 if (TREE_PURPOSE (t1) == NULL_TREE)
1769 if (set[0] == 'u' && strcmp (sel, "condition") == 0)
1771 if (integer_zerop (TREE_VALUE (t1))
1772 != integer_zerop (TREE_VALUE (t2)))
1773 return 2;
1774 break;
1776 if (simple_cst_equal (TREE_VALUE (t1), TREE_VALUE (t2)))
1777 break;
1779 else if (strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
1780 " score") == 0)
1782 if (!simple_cst_equal (TREE_VALUE (t1), TREE_VALUE (t2)))
1783 return 2;
1784 break;
1786 else
1787 break;
1789 else if (TREE_PURPOSE (t1)
1790 && TREE_PURPOSE (t2) == NULL_TREE
1791 && TREE_CODE (TREE_VALUE (t2)) == STRING_CST)
1793 const char *p1 = omp_context_name_list_prop (t1);
1794 const char *p2 = omp_context_name_list_prop (t2);
1795 if (p2
1796 && strcmp (p1, p2) == 0
1797 && strcmp (p1, " score"))
1798 break;
1800 else if (TREE_PURPOSE (t1) == NULL_TREE
1801 && TREE_PURPOSE (t2)
1802 && TREE_CODE (TREE_VALUE (t1)) == STRING_CST)
1804 const char *p1 = omp_context_name_list_prop (t1);
1805 const char *p2 = omp_context_name_list_prop (t2);
1806 if (p1
1807 && strcmp (p1, p2) == 0
1808 && strcmp (p1, " score"))
1809 break;
1811 if (t2 == NULL_TREE)
1813 int r = pass ? -1 : 1;
1814 if (ret && ret != r)
1815 return 2;
1816 else if (pass)
1817 return r;
1818 else
1820 ret = r;
1821 break;
1825 return ret;
1828 /* Compare single context selector sets CTX1 and CTX2 with SET name.
1829 Return 0 if CTX1 is equal to CTX2,
1830 -1 if CTX1 is a strict subset of CTX2,
1831 1 if CTX2 is a strict subset of CTX1, or
1832 2 if neither context is a subset of another one. */
1834 int
1835 omp_context_selector_set_compare (const char *set, tree ctx1, tree ctx2)
1837 bool swapped = false;
1838 int ret = 0;
1839 int len1 = list_length (ctx1);
1840 int len2 = list_length (ctx2);
1841 int cnt = 0;
1842 if (len1 < len2)
1844 swapped = true;
1845 std::swap (ctx1, ctx2);
1846 std::swap (len1, len2);
1848 if (set[0] == 'c')
1850 tree t1;
1851 tree t2 = ctx2;
1852 tree simd = get_identifier ("simd");
1853 /* Handle construct set specially. In this case the order
1854 of the selectors matters too. */
1855 for (t1 = ctx1; t1; t1 = TREE_CHAIN (t1))
1856 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
1858 int r = 0;
1859 if (TREE_PURPOSE (t1) == simd)
1860 r = omp_construct_simd_compare (TREE_VALUE (t1),
1861 TREE_VALUE (t2));
1862 if (r == 2 || (ret && r && (ret < 0) != (r < 0)))
1863 return 2;
1864 if (ret == 0)
1865 ret = r;
1866 t2 = TREE_CHAIN (t2);
1867 if (t2 == NULL_TREE)
1869 t1 = TREE_CHAIN (t1);
1870 break;
1873 else if (ret < 0)
1874 return 2;
1875 else
1876 ret = 1;
1877 if (t2 != NULL_TREE)
1878 return 2;
1879 if (t1 != NULL_TREE)
1881 if (ret < 0)
1882 return 2;
1883 ret = 1;
1885 if (ret == 0)
1886 return 0;
1887 return swapped ? -ret : ret;
1889 for (tree t1 = ctx1; t1; t1 = TREE_CHAIN (t1))
1891 tree t2;
1892 for (t2 = ctx2; t2; t2 = TREE_CHAIN (t2))
1893 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
1895 const char *sel = IDENTIFIER_POINTER (TREE_PURPOSE (t1));
1896 int r = omp_context_selector_props_compare (set, sel,
1897 TREE_VALUE (t1),
1898 TREE_VALUE (t2));
1899 if (r == 2 || (ret && r && (ret < 0) != (r < 0)))
1900 return 2;
1901 if (ret == 0)
1902 ret = r;
1903 cnt++;
1904 break;
1906 if (t2 == NULL_TREE)
1908 if (ret == -1)
1909 return 2;
1910 ret = 1;
1913 if (cnt < len2)
1914 return 2;
1915 if (ret == 0)
1916 return 0;
1917 return swapped ? -ret : ret;
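/* Example (illustrative) for omp_context_selector_set_compare: comparing
   the device sets of 'device={kind(gpu)}' and 'device={kind(gpu),arch(nvptx)}'
   returns -1, since the former is a strict subset of the latter, while two
   sets with conflicting properties return 2.  */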
1920 /* Compare whole context selector specification CTX1 and CTX2.
1921 Return 0 if CTX1 is equal to CTX2,
1922 -1 if CTX1 is a strict subset of CTX2,
1923 1 if CTX2 is a strict subset of CTX1, or
1924 2 if neither context is a subset of another one. */
1926 static int
1927 omp_context_selector_compare (tree ctx1, tree ctx2)
1929 bool swapped = false;
1930 int ret = 0;
1931 int len1 = list_length (ctx1);
1932 int len2 = list_length (ctx2);
1933 int cnt = 0;
1934 if (len1 < len2)
1936 swapped = true;
1937 std::swap (ctx1, ctx2);
1938 std::swap (len1, len2);
1940 for (tree t1 = ctx1; t1; t1 = TREE_CHAIN (t1))
1942 tree t2;
1943 for (t2 = ctx2; t2; t2 = TREE_CHAIN (t2))
1944 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
1946 const char *set = IDENTIFIER_POINTER (TREE_PURPOSE (t1));
1947 int r = omp_context_selector_set_compare (set, TREE_VALUE (t1),
1948 TREE_VALUE (t2));
1949 if (r == 2 || (ret && r && (ret < 0) != (r < 0)))
1950 return 2;
1951 if (ret == 0)
1952 ret = r;
1953 cnt++;
1954 break;
1956 if (t2 == NULL_TREE)
1958 if (ret == -1)
1959 return 2;
1960 ret = 1;
1963 if (cnt < len2)
1964 return 2;
1965 if (ret == 0)
1966 return 0;
1967 return swapped ? -ret : ret;
1970 /* From context selector CTX, return trait-selector with name SEL in
1971 trait-selector-set with name SET if any, or NULL_TREE if not found.
1972 If SEL is NULL, return the list of trait-selectors in SET. */
1974 tree
1975 omp_get_context_selector (tree ctx, const char *set, const char *sel)
1977 tree setid = get_identifier (set);
1978 tree selid = sel ? get_identifier (sel) : NULL_TREE;
1979 for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
1980 if (TREE_PURPOSE (t1) == setid)
1982 if (sel == NULL)
1983 return TREE_VALUE (t1);
1984 for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
1985 if (TREE_PURPOSE (t2) == selid)
1986 return t2;
1988 return NULL_TREE;
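/* E.g. omp_get_context_selector (ctx, "device", "kind") returns the kind
   trait-selector from the device set (or NULL_TREE if absent), while
   omp_get_context_selector (ctx, "construct", NULL) returns the whole list
   of construct trait-selectors, as used by omp_context_compute_score
   below.  */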
1991 /* Needs to be a GC-friendly widest_int variant, but its precision
1992 should be the same on all targets. */
1993 typedef generic_wide_int <fixed_wide_int_storage <1024> > score_wide_int;
1995 /* Compute *SCORE for context selector CTX. Return true if the score
1996 would be different depending on whether it is a declare simd clone or
1997 not. DECLARE_SIMD should be true for the case when it would be
1998 a declare simd clone. */
2000 static bool
2001 omp_context_compute_score (tree ctx, score_wide_int *score, bool declare_simd)
2003 tree construct = omp_get_context_selector (ctx, "construct", NULL);
2004 bool has_kind = omp_get_context_selector (ctx, "device", "kind");
2005 bool has_arch = omp_get_context_selector (ctx, "device", "arch");
2006 bool has_isa = omp_get_context_selector (ctx, "device", "isa");
2007 bool ret = false;
2008 *score = 1;
2009 for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
2010 if (TREE_VALUE (t1) != construct)
2011 for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
2012 if (tree t3 = TREE_VALUE (t2))
2013 if (TREE_PURPOSE (t3)
2014 && strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t3)), " score") == 0
2015 && TREE_CODE (TREE_VALUE (t3)) == INTEGER_CST)
2017 tree t4 = TREE_VALUE (t3);
2018 *score += score_wide_int::from (wi::to_wide (t4),
2019 TYPE_SIGN (TREE_TYPE (t4)));
2021 if (construct || has_kind || has_arch || has_isa)
2023 int scores[12];
2024 enum tree_code constructs[5];
2025 int nconstructs = 0;
2026 if (construct)
2027 nconstructs = omp_constructor_traits_to_codes (construct, constructs);
2028 if (omp_construct_selector_matches (constructs, nconstructs, scores)
2029 == 2)
2030 ret = true;
2031 int b = declare_simd ? nconstructs + 1 : 0;
2032 if (scores[b + nconstructs] + 4U < score->get_precision ())
2034 for (int n = 0; n < nconstructs; ++n)
2036 if (scores[b + n] < 0)
2038 *score = -1;
2039 return ret;
2041 *score += wi::shifted_mask <score_wide_int> (scores[b + n], 1, false);
2043 if (has_kind)
2044 *score += wi::shifted_mask <score_wide_int> (scores[b + nconstructs],
2045 1, false);
2046 if (has_arch)
2047 *score += wi::shifted_mask <score_wide_int> (scores[b + nconstructs] + 1,
2048 1, false);
2049 if (has_isa)
2050 *score += wi::shifted_mask <score_wide_int> (scores[b + nconstructs] + 2,
2051 1, false);
2053 else /* FIXME: Implement this. */
2054 gcc_unreachable ();
2056 return ret;
2059 /* Class describing a single variant. */
2060 struct GTY(()) omp_declare_variant_entry {
2061 /* NODE of the variant. */
2062 cgraph_node *variant;
2063 /* Score if not in declare simd clone. */
2064 score_wide_int score;
2065 /* Score if in declare simd clone. */
2066 score_wide_int score_in_declare_simd_clone;
2067 /* Context selector for the variant. */
2068 tree ctx;
2069 /* True if the context selector is known to match already. */
2070 bool matches;
2073 /* Class describing a function with variants. */
2074 struct GTY((for_user)) omp_declare_variant_base_entry {
2075 /* NODE of the base function. */
2076 cgraph_node *base;
2077 /* NODE of the artificial function created for the deferred variant
2078 resolution. */
2079 cgraph_node *node;
2080 /* Vector of the variants. */
2081 vec<omp_declare_variant_entry, va_gc> *variants;
2084 struct omp_declare_variant_hasher
2085 : ggc_ptr_hash<omp_declare_variant_base_entry> {
2086 static hashval_t hash (omp_declare_variant_base_entry *);
2087 static bool equal (omp_declare_variant_base_entry *,
2088 omp_declare_variant_base_entry *);
2091 hashval_t
2092 omp_declare_variant_hasher::hash (omp_declare_variant_base_entry *x)
2094 inchash::hash hstate;
2095 hstate.add_int (DECL_UID (x->base->decl));
2096 hstate.add_int (x->variants->length ());
2097 omp_declare_variant_entry *variant;
2098 unsigned int i;
2099 FOR_EACH_VEC_SAFE_ELT (x->variants, i, variant)
2101 hstate.add_int (DECL_UID (variant->variant->decl));
2102 hstate.add_wide_int (variant->score);
2103 hstate.add_wide_int (variant->score_in_declare_simd_clone);
2104 hstate.add_ptr (variant->ctx);
2105 hstate.add_int (variant->matches);
2107 return hstate.end ();
2110 bool
2111 omp_declare_variant_hasher::equal (omp_declare_variant_base_entry *x,
2112 omp_declare_variant_base_entry *y)
2114 if (x->base != y->base
2115 || x->variants->length () != y->variants->length ())
2116 return false;
2117 omp_declare_variant_entry *variant;
2118 unsigned int i;
2119 FOR_EACH_VEC_SAFE_ELT (x->variants, i, variant)
2120 if (variant->variant != (*y->variants)[i].variant
2121 || variant->score != (*y->variants)[i].score
2122 || (variant->score_in_declare_simd_clone
2123 != (*y->variants)[i].score_in_declare_simd_clone)
2124 || variant->ctx != (*y->variants)[i].ctx
2125 || variant->matches != (*y->variants)[i].matches)
2126 return false;
2127 return true;
2130 static GTY(()) hash_table<omp_declare_variant_hasher> *omp_declare_variants;
2132 struct omp_declare_variant_alt_hasher
2133 : ggc_ptr_hash<omp_declare_variant_base_entry> {
2134 static hashval_t hash (omp_declare_variant_base_entry *);
2135 static bool equal (omp_declare_variant_base_entry *,
2136 omp_declare_variant_base_entry *);
2139 hashval_t
2140 omp_declare_variant_alt_hasher::hash (omp_declare_variant_base_entry *x)
2142 return DECL_UID (x->node->decl);
2145 bool
2146 omp_declare_variant_alt_hasher::equal (omp_declare_variant_base_entry *x,
2147 omp_declare_variant_base_entry *y)
2149 return x->node == y->node;
2152 static GTY(()) hash_table<omp_declare_variant_alt_hasher>
2153 *omp_declare_variant_alt;
2155 /* Try to resolve declare variant after gimplification. */
2157 static tree
2158 omp_resolve_late_declare_variant (tree alt)
2160 cgraph_node *node = cgraph_node::get (alt);
2161 cgraph_node *cur_node = cgraph_node::get (cfun->decl);
2162 if (node == NULL
2163 || !node->declare_variant_alt
2164 || !cfun->after_inlining)
2165 return alt;
2167 omp_declare_variant_base_entry entry;
2168 entry.base = NULL;
2169 entry.node = node;
2170 entry.variants = NULL;
2171 omp_declare_variant_base_entry *entryp
2172 = omp_declare_variant_alt->find_with_hash (&entry, DECL_UID (alt));
2174 unsigned int i, j;
2175 omp_declare_variant_entry *varentry1, *varentry2;
2176 auto_vec <bool, 16> matches;
2177 unsigned int nmatches = 0;
2178 FOR_EACH_VEC_SAFE_ELT (entryp->variants, i, varentry1)
2180 if (varentry1->matches)
2182 /* This has been checked to be ok already. */
2183 matches.safe_push (true);
2184 nmatches++;
2185 continue;
2187 switch (omp_context_selector_matches (varentry1->ctx))
2189 case 0:
2190 matches.safe_push (false);
2191 break;
2192 case -1:
2193 return alt;
2194 default:
2195 matches.safe_push (true);
2196 nmatches++;
2197 break;
2201 if (nmatches == 0)
2202 return entryp->base->decl;
2204 /* A context selector that is a strict subset of another context selector
2205 has a score of zero. */
2206 FOR_EACH_VEC_SAFE_ELT (entryp->variants, i, varentry1)
2207 if (matches[i])
2209 for (j = i + 1;
2210 vec_safe_iterate (entryp->variants, j, &varentry2); ++j)
2211 if (matches[j])
2213 int r = omp_context_selector_compare (varentry1->ctx,
2214 varentry2->ctx);
2215 if (r == -1)
2217 /* ctx1 is a strict subset of ctx2, ignore ctx1. */
2218 matches[i] = false;
2219 break;
2221 else if (r == 1)
2222 /* ctx2 is a strict subset of ctx1, remove ctx2. */
2223 matches[j] = false;
2227 score_wide_int max_score = -1;
2228 varentry2 = NULL;
2229 FOR_EACH_VEC_SAFE_ELT (entryp->variants, i, varentry1)
2230 if (matches[i])
2232 score_wide_int score
2233 = (cur_node->simdclone ? varentry1->score_in_declare_simd_clone
2234 : varentry1->score);
2235 if (score > max_score)
2237 max_score = score;
2238 varentry2 = varentry1;
2241 return varentry2->variant->decl;
2244 /* Hook to adjust hash tables on cgraph_node removal. */
2246 static void
2247 omp_declare_variant_remove_hook (struct cgraph_node *node, void *)
2249 if (!node->declare_variant_alt)
2250 return;
2252 /* Drop this hash table completely. */
2253 omp_declare_variants = NULL;
2254 /* And remove node from the other hash table. */
2255 if (omp_declare_variant_alt)
2257 omp_declare_variant_base_entry entry;
2258 entry.base = NULL;
2259 entry.node = node;
2260 entry.variants = NULL;
2261 omp_declare_variant_alt->remove_elt_with_hash (&entry,
2262 DECL_UID (node->decl));
2266 /* Try to resolve declare variant, return the variant decl if it should
2267 be used instead of base, or base otherwise. */
2269 tree
2270 omp_resolve_declare_variant (tree base)
2272 tree variant1 = NULL_TREE, variant2 = NULL_TREE;
2273 if (cfun && (cfun->curr_properties & PROP_gimple_any) != 0)
2274 return omp_resolve_late_declare_variant (base);
2276 auto_vec <tree, 16> variants;
2277 auto_vec <bool, 16> defer;
2278 bool any_deferred = false;
2279 for (tree attr = DECL_ATTRIBUTES (base); attr; attr = TREE_CHAIN (attr))
2281 attr = lookup_attribute ("omp declare variant base", attr);
2282 if (attr == NULL_TREE)
2283 break;
2284 if (TREE_CODE (TREE_PURPOSE (TREE_VALUE (attr))) != FUNCTION_DECL)
2285 continue;
2286 cgraph_node *node = cgraph_node::get (base);
2287 /* If this is already a magic decl created by this function,
2288 don't process it again. */
2289 if (node && node->declare_variant_alt)
2290 return base;
2291 switch (omp_context_selector_matches (TREE_VALUE (TREE_VALUE (attr))))
2293 case 0:
2294 /* No match, ignore. */
2295 break;
2296 case -1:
2297 /* Needs to be deferred. */
2298 any_deferred = true;
2299 variants.safe_push (attr);
2300 defer.safe_push (true);
2301 break;
2302 default:
2303 variants.safe_push (attr);
2304 defer.safe_push (false);
2305 break;
2308 if (variants.length () == 0)
2309 return base;
2311 if (any_deferred)
2313 score_wide_int max_score1 = 0;
2314 score_wide_int max_score2 = 0;
2315 bool first = true;
2316 unsigned int i;
2317 tree attr1, attr2;
2318 omp_declare_variant_base_entry entry;
2319 entry.base = cgraph_node::get_create (base);
2320 entry.node = NULL;
2321 vec_alloc (entry.variants, variants.length ());
2322 FOR_EACH_VEC_ELT (variants, i, attr1)
2324 score_wide_int score1;
2325 score_wide_int score2;
2326 bool need_two;
2327 tree ctx = TREE_VALUE (TREE_VALUE (attr1));
2328 need_two = omp_context_compute_score (ctx, &score1, false);
2329 if (need_two)
2330 omp_context_compute_score (ctx, &score2, true);
2331 else
2332 score2 = score1;
2333 if (first)
2335 first = false;
2336 max_score1 = score1;
2337 max_score2 = score2;
2338 if (!defer[i])
2340 variant1 = attr1;
2341 variant2 = attr1;
2344 else
2346 if (max_score1 == score1)
2347 variant1 = NULL_TREE;
2348 else if (score1 > max_score1)
2350 max_score1 = score1;
2351 variant1 = defer[i] ? NULL_TREE : attr1;
2353 if (max_score2 == score2)
2354 variant2 = NULL_TREE;
2355 else if (score2 > max_score2)
2357 max_score2 = score2;
2358 variant2 = defer[i] ? NULL_TREE : attr1;
2361 omp_declare_variant_entry varentry;
2362 varentry.variant
2363 = cgraph_node::get_create (TREE_PURPOSE (TREE_VALUE (attr1)));
2364 varentry.score = score1;
2365 varentry.score_in_declare_simd_clone = score2;
2366 varentry.ctx = ctx;
2367 varentry.matches = !defer[i];
2368 entry.variants->quick_push (varentry);
2371 /* If there is a clear winner variant whose score is not deferred,
2372 verify it is not a strict subset of any other context selector;
2373 if it is not, it is the best alternative no matter whether the
2374 others do or don't match. */
2375 if (variant1 && variant1 == variant2)
2377 tree ctx1 = TREE_VALUE (TREE_VALUE (variant1));
2378 FOR_EACH_VEC_ELT (variants, i, attr2)
2380 if (attr2 == variant1)
2381 continue;
2382 tree ctx2 = TREE_VALUE (TREE_VALUE (attr2));
2383 int r = omp_context_selector_compare (ctx1, ctx2);
2384 if (r == -1)
2386 /* The winner is a strict subset of ctx2, can't
2387 decide now. */
2388 variant1 = NULL_TREE;
2389 break;
2392 if (variant1)
2394 vec_free (entry.variants);
2395 return TREE_PURPOSE (TREE_VALUE (variant1));
2399 static struct cgraph_node_hook_list *node_removal_hook_holder;
2400 if (!node_removal_hook_holder)
2401 node_removal_hook_holder
2402 = symtab->add_cgraph_removal_hook (omp_declare_variant_remove_hook,
2403 NULL);
2405 if (omp_declare_variants == NULL)
2406 omp_declare_variants
2407 = hash_table<omp_declare_variant_hasher>::create_ggc (64);
2408 omp_declare_variant_base_entry **slot
2409 = omp_declare_variants->find_slot (&entry, INSERT);
2410 if (*slot != NULL)
2412 vec_free (entry.variants);
2413 return (*slot)->node->decl;
2416 *slot = ggc_cleared_alloc<omp_declare_variant_base_entry> ();
2417 (*slot)->base = entry.base;
2418 (*slot)->node = entry.base;
2419 (*slot)->variants = entry.variants;
2420 tree alt = build_decl (DECL_SOURCE_LOCATION (base), FUNCTION_DECL,
2421 DECL_NAME (base), TREE_TYPE (base));
2422 DECL_ARTIFICIAL (alt) = 1;
2423 DECL_IGNORED_P (alt) = 1;
2424 TREE_STATIC (alt) = 1;
2425 tree attributes = DECL_ATTRIBUTES (base);
2426 if (lookup_attribute ("noipa", attributes) == NULL)
2428 attributes = tree_cons (get_identifier ("noipa"), NULL, attributes);
2429 if (lookup_attribute ("noinline", attributes) == NULL)
2430 attributes = tree_cons (get_identifier ("noinline"), NULL,
2431 attributes);
2432 if (lookup_attribute ("noclone", attributes) == NULL)
2433 attributes = tree_cons (get_identifier ("noclone"), NULL,
2434 attributes);
2435 if (lookup_attribute ("no_icf", attributes) == NULL)
2436 attributes = tree_cons (get_identifier ("no_icf"), NULL,
2437 attributes);
2439 DECL_ATTRIBUTES (alt) = attributes;
2440 DECL_INITIAL (alt) = error_mark_node;
2441 (*slot)->node = cgraph_node::create (alt);
2442 (*slot)->node->declare_variant_alt = 1;
2443 (*slot)->node->create_reference (entry.base, IPA_REF_ADDR);
2444 omp_declare_variant_entry *varentry;
2445 FOR_EACH_VEC_SAFE_ELT (entry.variants, i, varentry)
2446 (*slot)->node->create_reference (varentry->variant, IPA_REF_ADDR);
2447 if (omp_declare_variant_alt == NULL)
2448 omp_declare_variant_alt
2449 = hash_table<omp_declare_variant_alt_hasher>::create_ggc (64);
2450 *omp_declare_variant_alt->find_slot_with_hash (*slot, DECL_UID (alt),
2451 INSERT) = *slot;
2452 return alt;
2455 if (variants.length () == 1)
2456 return TREE_PURPOSE (TREE_VALUE (variants[0]));
2458 /* A context selector that is a strict subset of another context selector
2459 has a score of zero. */
2460 tree attr1, attr2;
2461 unsigned int i, j;
2462 FOR_EACH_VEC_ELT (variants, i, attr1)
2463 if (attr1)
2465 tree ctx1 = TREE_VALUE (TREE_VALUE (attr1));
2466 FOR_EACH_VEC_ELT_FROM (variants, j, attr2, i + 1)
2467 if (attr2)
2469 tree ctx2 = TREE_VALUE (TREE_VALUE (attr2));
2470 int r = omp_context_selector_compare (ctx1, ctx2);
2471 if (r == -1)
2473 /* ctx1 is a strict subset of ctx2, remove
2474 attr1 from the vector. */
2475 variants[i] = NULL_TREE;
2476 break;
2478 else if (r == 1)
2479 /* ctx2 is a strict subset of ctx1, remove attr2
2480 from the vector. */
2481 variants[j] = NULL_TREE;
2484 score_wide_int max_score1 = 0;
2485 score_wide_int max_score2 = 0;
2486 bool first = true;
2487 FOR_EACH_VEC_ELT (variants, i, attr1)
2488 if (attr1)
2490 if (variant1)
2492 score_wide_int score1;
2493 score_wide_int score2;
2494 bool need_two;
2495 tree ctx;
2496 if (first)
2498 first = false;
2499 ctx = TREE_VALUE (TREE_VALUE (variant1));
2500 need_two = omp_context_compute_score (ctx, &max_score1, false);
2501 if (need_two)
2502 omp_context_compute_score (ctx, &max_score2, true);
2503 else
2504 max_score2 = max_score1;
2506 ctx = TREE_VALUE (TREE_VALUE (attr1));
2507 need_two = omp_context_compute_score (ctx, &score1, false);
2508 if (need_two)
2509 omp_context_compute_score (ctx, &score2, true);
2510 else
2511 score2 = score1;
2512 if (score1 > max_score1)
2514 max_score1 = score1;
2515 variant1 = attr1;
2517 if (score2 > max_score2)
2519 max_score2 = score2;
2520 variant2 = attr1;
2523 else
2525 variant1 = attr1;
2526 variant2 = attr1;
2529 /* If there is a disagreement on which variant has the highest score
2530 depending on whether it will be in a declare simd clone or not,
2531 punt for now and defer until after IPA where we will know that. */
2532 return ((variant1 && variant1 == variant2)
2533 ? TREE_PURPOSE (TREE_VALUE (variant1)) : base);
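/* Illustrative sketch, not part of the original file: the typical calling
   pattern for omp_resolve_declare_variant is to redirect a direct call to
   the resolved decl when it differs from the original callee.  The
   example_* name and the use of a bare gcall argument are assumptions made
   to keep the snippet self-contained.  */

DEBUG_FUNCTION void
example_redirect_call_to_variant (gcall *stmt)
{
  tree fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return;
  tree variant = omp_resolve_declare_variant (fndecl);
  if (variant != fndecl)
    gimple_call_set_fndecl (stmt, variant);
}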
2536 void
2537 omp_lto_output_declare_variant_alt (lto_simple_output_block *ob,
2538 cgraph_node *node,
2539 lto_symtab_encoder_t encoder)
2541 gcc_assert (node->declare_variant_alt);
2543 omp_declare_variant_base_entry entry;
2544 entry.base = NULL;
2545 entry.node = node;
2546 entry.variants = NULL;
2547 omp_declare_variant_base_entry *entryp
2548 = omp_declare_variant_alt->find_with_hash (&entry, DECL_UID (node->decl));
2549 gcc_assert (entryp);
2551 int nbase = lto_symtab_encoder_lookup (encoder, entryp->base);
2552 gcc_assert (nbase != LCC_NOT_FOUND);
2553 streamer_write_hwi_stream (ob->main_stream, nbase);
2555 streamer_write_hwi_stream (ob->main_stream, entryp->variants->length ());
2557 unsigned int i;
2558 omp_declare_variant_entry *varentry;
2559 FOR_EACH_VEC_SAFE_ELT (entryp->variants, i, varentry)
2561 int nvar = lto_symtab_encoder_lookup (encoder, varentry->variant);
2562 gcc_assert (nvar != LCC_NOT_FOUND);
2563 streamer_write_hwi_stream (ob->main_stream, nvar);
2565 for (score_wide_int *w = &varentry->score; ;
2566 w = &varentry->score_in_declare_simd_clone)
2568 unsigned len = w->get_len ();
2569 streamer_write_hwi_stream (ob->main_stream, len);
2570 const HOST_WIDE_INT *val = w->get_val ();
2571 for (unsigned j = 0; j < len; j++)
2572 streamer_write_hwi_stream (ob->main_stream, val[j]);
2573 if (w == &varentry->score_in_declare_simd_clone)
2574 break;
2577 HOST_WIDE_INT cnt = -1;
2578 HOST_WIDE_INT i = varentry->matches ? 1 : 0;
2579 for (tree attr = DECL_ATTRIBUTES (entryp->base->decl);
2580 attr; attr = TREE_CHAIN (attr), i += 2)
2582 attr = lookup_attribute ("omp declare variant base", attr);
2583 if (attr == NULL_TREE)
2584 break;
2586 if (varentry->ctx == TREE_VALUE (TREE_VALUE (attr)))
2588 cnt = i;
2589 break;
2593 gcc_assert (cnt != -1);
2594 streamer_write_hwi_stream (ob->main_stream, cnt);
2598 void
2599 omp_lto_input_declare_variant_alt (lto_input_block *ib, cgraph_node *node,
2600 vec<symtab_node *> nodes)
2602 gcc_assert (node->declare_variant_alt);
2603 omp_declare_variant_base_entry *entryp
2604 = ggc_cleared_alloc<omp_declare_variant_base_entry> ();
2605 entryp->base = dyn_cast<cgraph_node *> (nodes[streamer_read_hwi (ib)]);
2606 entryp->node = node;
2607 unsigned int len = streamer_read_hwi (ib);
2608 vec_alloc (entryp->variants, len);
2610 for (unsigned int i = 0; i < len; i++)
2612 omp_declare_variant_entry varentry;
2613 varentry.variant
2614 = dyn_cast<cgraph_node *> (nodes[streamer_read_hwi (ib)]);
2615 for (score_wide_int *w = &varentry.score; ;
2616 w = &varentry.score_in_declare_simd_clone)
2618 unsigned len2 = streamer_read_hwi (ib);
2619 HOST_WIDE_INT arr[WIDE_INT_MAX_HWIS (1024)];
2620 gcc_assert (len2 <= WIDE_INT_MAX_HWIS (1024));
2621 for (unsigned int j = 0; j < len2; j++)
2622 arr[j] = streamer_read_hwi (ib);
2623 *w = score_wide_int::from_array (arr, len2, true);
2624 if (w == &varentry.score_in_declare_simd_clone)
2625 break;
2628 HOST_WIDE_INT cnt = streamer_read_hwi (ib);
2629 HOST_WIDE_INT j = 0;
2630 varentry.ctx = NULL_TREE;
2631 varentry.matches = (cnt & 1) ? true : false;
2632 cnt &= ~HOST_WIDE_INT_1;
2633 for (tree attr = DECL_ATTRIBUTES (entryp->base->decl);
2634 attr; attr = TREE_CHAIN (attr), j += 2)
2636 attr = lookup_attribute ("omp declare variant base", attr);
2637 if (attr == NULL_TREE)
2638 break;
2640 if (cnt == j)
2642 varentry.ctx = TREE_VALUE (TREE_VALUE (attr));
2643 break;
2646 gcc_assert (varentry.ctx != NULL_TREE);
2647 entryp->variants->quick_push (varentry);
2649 if (omp_declare_variant_alt == NULL)
2650 omp_declare_variant_alt
2651 = hash_table<omp_declare_variant_alt_hasher>::create_ggc (64);
2652 *omp_declare_variant_alt->find_slot_with_hash (entryp, DECL_UID (node->decl),
2653 INSERT) = entryp;
2656 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
2657 macro in gomp-constants.h. We do not check for overflow. */
2659 tree
2660 oacc_launch_pack (unsigned code, tree device, unsigned op)
2662 tree res;
2664 res = build_int_cst (unsigned_type_node, GOMP_LAUNCH_PACK (code, 0, op));
2665 if (device)
2667 device = fold_build2 (LSHIFT_EXPR, unsigned_type_node,
2668 device, build_int_cst (unsigned_type_node,
2669 GOMP_LAUNCH_DEVICE_SHIFT));
2670 res = fold_build2 (BIT_IOR_EXPR, unsigned_type_node, res, device);
2672 return res;
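/* Illustrative sketch, not part of the original file: request a dynamic
   value for the gang dimension only, mirroring the way oacc_set_fn_attrib
   below packs its GOMP_LAUNCH_DIM argument.  The example_* name is
   invented.  */

DEBUG_FUNCTION tree
example_pack_gang_dim_arg (void)
{
  /* No device-specific encoding, hence a NULL_TREE device operand.  */
  return oacc_launch_pack (GOMP_LAUNCH_DIM, NULL_TREE,
                           GOMP_DIM_MASK (GOMP_DIM_GANG));
}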
2675 /* FIXME: What is the following comment for? */
2676 /* Look for compute grid dimension clauses and convert to an attribute
2677 attached to FN. This permits the target-side code to (a) massage
2678 the dimensions, (b) emit that data and (c) optimize. Non-constant
2679 dimensions are pushed onto ARGS.
2681 The attribute value is a TREE_LIST. A set of dimensions is
2682 represented as a list of INTEGER_CST. Those that are runtime
2683 exprs are represented as an INTEGER_CST of zero.
2685 TODO: Normally the attribute will just contain a single such list. If
2686 however it contains a list of lists, this will represent the use of
2687 device_type. Each member of the outer list is an assoc list of
2688 dimensions, keyed by the device type. The first entry will be the
2689 default. Well, that's the plan. */
2691 /* Replace any existing oacc fn attribute with updated dimensions. */
2693 /* Variant working on a list of attributes. */
2695 tree
2696 oacc_replace_fn_attrib_attr (tree attribs, tree dims)
2698 tree ident = get_identifier (OACC_FN_ATTRIB);
2700 /* If we happen to be present as the first attrib, drop it. */
2701 if (attribs && TREE_PURPOSE (attribs) == ident)
2702 attribs = TREE_CHAIN (attribs);
2703 return tree_cons (ident, dims, attribs);
2706 /* Variant working on a function decl. */
2708 void
2709 oacc_replace_fn_attrib (tree fn, tree dims)
2711 DECL_ATTRIBUTES (fn)
2712 = oacc_replace_fn_attrib_attr (DECL_ATTRIBUTES (fn), dims);
2715 /* Scan CLAUSES for launch dimensions and attach them to the oacc
2716 function attribute. Push any that are non-constant onto the ARGS
2717 list, along with an appropriate GOMP_LAUNCH_DIM tag. */
2719 void
2720 oacc_set_fn_attrib (tree fn, tree clauses, vec<tree> *args)
2722 /* Must match GOMP_DIM ordering. */
2723 static const omp_clause_code ids[]
2724 = { OMP_CLAUSE_NUM_GANGS, OMP_CLAUSE_NUM_WORKERS,
2725 OMP_CLAUSE_VECTOR_LENGTH };
2726 unsigned ix;
2727 tree dims[GOMP_DIM_MAX];
2729 tree attr = NULL_TREE;
2730 unsigned non_const = 0;
2732 for (ix = GOMP_DIM_MAX; ix--;)
2734 tree clause = omp_find_clause (clauses, ids[ix]);
2735 tree dim = NULL_TREE;
2737 if (clause)
2738 dim = OMP_CLAUSE_EXPR (clause, ids[ix]);
2739 dims[ix] = dim;
2740 if (dim && TREE_CODE (dim) != INTEGER_CST)
2742 dim = integer_zero_node;
2743 non_const |= GOMP_DIM_MASK (ix);
2745 attr = tree_cons (NULL_TREE, dim, attr);
2748 oacc_replace_fn_attrib (fn, attr);
2750 if (non_const)
2752 /* Push a dynamic argument set. */
2753 args->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM,
2754 NULL_TREE, non_const));
2755 for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
2756 if (non_const & GOMP_DIM_MASK (ix))
2757 args->safe_push (dims[ix]);
2761 /* Verify OpenACC routine clauses.
2763 Returns 0 if FNDECL should be marked with an OpenACC 'routine' directive, 1
2764 if it has already been marked in compatible way, and -1 if incompatible.
2765 Upon returning, the chain of clauses will contain exactly one clause
2766 specifying the level of parallelism. */
2768 int
2769 oacc_verify_routine_clauses (tree fndecl, tree *clauses, location_t loc,
2770 const char *routine_str)
2772 tree c_level = NULL_TREE;
2773 tree c_nohost = NULL_TREE;
2774 tree c_p = NULL_TREE;
2775 for (tree c = *clauses; c; c_p = c, c = OMP_CLAUSE_CHAIN (c))
2776 switch (OMP_CLAUSE_CODE (c))
2778 case OMP_CLAUSE_GANG:
2779 case OMP_CLAUSE_WORKER:
2780 case OMP_CLAUSE_VECTOR:
2781 case OMP_CLAUSE_SEQ:
2782 if (c_level == NULL_TREE)
2783 c_level = c;
2784 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_CODE (c_level))
2786 /* This has already been diagnosed in the front ends. */
2787 /* Drop the duplicate clause. */
2788 gcc_checking_assert (c_p != NULL_TREE);
2789 OMP_CLAUSE_CHAIN (c_p) = OMP_CLAUSE_CHAIN (c);
2790 c = c_p;
2792 else
2794 error_at (OMP_CLAUSE_LOCATION (c),
2795 "%qs specifies a conflicting level of parallelism",
2796 omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
2797 inform (OMP_CLAUSE_LOCATION (c_level),
2798 "... to the previous %qs clause here",
2799 omp_clause_code_name[OMP_CLAUSE_CODE (c_level)]);
2800 /* Drop the conflicting clause. */
2801 gcc_checking_assert (c_p != NULL_TREE);
2802 OMP_CLAUSE_CHAIN (c_p) = OMP_CLAUSE_CHAIN (c);
2803 c = c_p;
2805 break;
2806 case OMP_CLAUSE_NOHOST:
2807 /* Don't worry about duplicate clauses here. */
2808 c_nohost = c;
2809 break;
2810 default:
2811 gcc_unreachable ();
2813 if (c_level == NULL_TREE)
2815 /* Default to an implicit 'seq' clause. */
2816 c_level = build_omp_clause (loc, OMP_CLAUSE_SEQ);
2817 OMP_CLAUSE_CHAIN (c_level) = *clauses;
2818 *clauses = c_level;
2820 /* In *clauses, we now have exactly one clause specifying the level of
2821 parallelism. */
2823 tree attr
2824 = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl));
2825 if (attr != NULL_TREE)
2827 /* Diagnose if "#pragma omp declare target" has also been applied. */
2828 if (TREE_VALUE (attr) == NULL_TREE)
2830 /* See <https://gcc.gnu.org/PR93465>; the semantics of combining
2831 OpenACC and OpenMP 'target' are not clear. */
2832 error_at (loc,
2833 "cannot apply %<%s%> to %qD, which has also been"
2834 " marked with an OpenMP 'declare target' directive",
2835 routine_str, fndecl);
2836 /* Incompatible. */
2837 return -1;
2840 /* If a "#pragma acc routine" has already been applied, just verify
2841 this one for compatibility. */
2842 /* Collect previous directive's clauses. */
2843 tree c_level_p = NULL_TREE;
2844 tree c_nohost_p = NULL_TREE;
2845 for (tree c = TREE_VALUE (attr); c; c = OMP_CLAUSE_CHAIN (c))
2846 switch (OMP_CLAUSE_CODE (c))
2848 case OMP_CLAUSE_GANG:
2849 case OMP_CLAUSE_WORKER:
2850 case OMP_CLAUSE_VECTOR:
2851 case OMP_CLAUSE_SEQ:
2852 gcc_checking_assert (c_level_p == NULL_TREE);
2853 c_level_p = c;
2854 break;
2855 case OMP_CLAUSE_NOHOST:
2856 gcc_checking_assert (c_nohost_p == NULL_TREE);
2857 c_nohost_p = c;
2858 break;
2859 default:
2860 gcc_unreachable ();
2862 gcc_checking_assert (c_level_p != NULL_TREE);
2863 /* ..., and compare to current directive's, which we've already collected
2864 above. */
2865 tree c_diag;
2866 tree c_diag_p;
2867 /* Matching level of parallelism? */
2868 if (OMP_CLAUSE_CODE (c_level) != OMP_CLAUSE_CODE (c_level_p))
2870 c_diag = c_level;
2871 c_diag_p = c_level_p;
2872 goto incompatible;
2874 /* Matching 'nohost' clauses? */
2875 if ((c_nohost == NULL_TREE) != (c_nohost_p == NULL_TREE))
2877 c_diag = c_nohost;
2878 c_diag_p = c_nohost_p;
2879 goto incompatible;
2881 /* Compatible. */
2882 return 1;
2884 incompatible:
2885 if (c_diag != NULL_TREE)
2886 error_at (OMP_CLAUSE_LOCATION (c_diag),
2887 "incompatible %qs clause when applying"
2888 " %<%s%> to %qD, which has already been"
2889 " marked with an OpenACC 'routine' directive",
2890 omp_clause_code_name[OMP_CLAUSE_CODE (c_diag)],
2891 routine_str, fndecl);
2892 else if (c_diag_p != NULL_TREE)
2893 error_at (loc,
2894 "missing %qs clause when applying"
2895 " %<%s%> to %qD, which has already been"
2896 " marked with an OpenACC 'routine' directive",
2897 omp_clause_code_name[OMP_CLAUSE_CODE (c_diag_p)],
2898 routine_str, fndecl);
2899 else
2900 gcc_unreachable ();
2901 if (c_diag_p != NULL_TREE)
2902 inform (OMP_CLAUSE_LOCATION (c_diag_p),
2903 "... with %qs clause here",
2904 omp_clause_code_name[OMP_CLAUSE_CODE (c_diag_p)]);
2905 else
2907 /* In the front ends, we don't preserve location information for the
2908 OpenACC routine directive itself. However, that of c_level_p
2909 should be close. */
2910 location_t loc_routine = OMP_CLAUSE_LOCATION (c_level_p);
2911 inform (loc_routine, "... without %qs clause near to here",
2912 omp_clause_code_name[OMP_CLAUSE_CODE (c_diag)]);
2914 /* Incompatible. */
2915 return -1;
2918 return 0;
2921 /* Process the OpenACC 'routine' directive clauses to generate an attribute
2922 for the level of parallelism. All dimensions have a size of zero
2923 (dynamic). TREE_PURPOSE is set to indicate whether that dimension
2924 can have a loop partitioned on it. Non-zero indicates
2925 yes, zero indicates no. By construction once a non-zero has been
2926 reached, further inner dimensions must also be non-zero. We set
2927 TREE_VALUE to zero for the dimensions that may be partitioned and
2928 1 for the other ones -- if a loop is (erroneously) spawned at
2929 an outer level, we don't want to try and partition it. */
2931 tree
2932 oacc_build_routine_dims (tree clauses)
2934 /* Must match GOMP_DIM ordering. */
2935 static const omp_clause_code ids[]
2936 = {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
2937 int ix;
2938 int level = -1;
2940 for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
2941 for (ix = GOMP_DIM_MAX + 1; ix--;)
2942 if (OMP_CLAUSE_CODE (clauses) == ids[ix])
2944 level = ix;
2945 break;
2947 gcc_checking_assert (level >= 0);
2949 tree dims = NULL_TREE;
2951 for (ix = GOMP_DIM_MAX; ix--;)
2952 dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
2953 build_int_cst (integer_type_node, ix < level), dims);
2955 return dims;
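/* Illustrative sketch, not part of the original file: one plausible way a
   front end could use the helpers above, building the dimension list for a
   routine carrying a single 'worker' clause and attaching it to FN.  The
   example_* name and the use of UNKNOWN_LOCATION are assumptions.  */

DEBUG_FUNCTION void
example_mark_worker_routine (tree fn)
{
  tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_WORKER);
  tree dims = oacc_build_routine_dims (c);
  oacc_replace_fn_attrib (fn, dims);
}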
2958 /* Retrieve the oacc function attrib and return it. Non-oacc
2959 functions will return NULL. */
2961 tree
2962 oacc_get_fn_attrib (tree fn)
2964 return lookup_attribute (OACC_FN_ATTRIB, DECL_ATTRIBUTES (fn));
2967 /* Return true if FN is an OpenMP or OpenACC offloading function. */
2969 bool
2970 offloading_function_p (tree fn)
2972 tree attrs = DECL_ATTRIBUTES (fn);
2973 return (lookup_attribute ("omp declare target", attrs)
2974 || lookup_attribute ("omp target entrypoint", attrs));
2977 /* Extract an oacc execution dimension from FN. FN must be an
2978 offloaded function or routine that has already had its execution
2979 dimensions lowered to the target-specific values. */
2981 int
2982 oacc_get_fn_dim_size (tree fn, int axis)
2984 tree attrs = oacc_get_fn_attrib (fn);
2986 gcc_assert (axis < GOMP_DIM_MAX);
2988 tree dims = TREE_VALUE (attrs);
2989 while (axis--)
2990 dims = TREE_CHAIN (dims);
2992 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
2994 return size;
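/* Illustrative sketch, not part of the original file: dump all lowered
   launch dimensions of an offloaded function FN using the accessor above.
   The example_* name is invented.  */

DEBUG_FUNCTION void
example_dump_oacc_dims (tree fn)
{
  if (!oacc_get_fn_attrib (fn))
    return;
  for (int axis = 0; axis < GOMP_DIM_MAX; axis++)
    fprintf (stderr, "dim[%d] = %d\n", axis, oacc_get_fn_dim_size (fn, axis));
}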
2997 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
2998 IFN_GOACC_DIM_SIZE call. */
3000 int
3001 oacc_get_ifn_dim_arg (const gimple *stmt)
3003 gcc_checking_assert (gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_SIZE
3004 || gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_POS);
3005 tree arg = gimple_call_arg (stmt, 0);
3006 HOST_WIDE_INT axis = TREE_INT_CST_LOW (arg);
3008 gcc_checking_assert (axis >= 0 && axis < GOMP_DIM_MAX);
3009 return (int) axis;
3012 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
3013 as appropriate. */
3015 tree
3016 omp_build_component_ref (tree obj, tree field)
3018 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
3019 if (TREE_THIS_VOLATILE (field))
3020 TREE_THIS_VOLATILE (ret) |= 1;
3021 if (TREE_READONLY (field))
3022 TREE_READONLY (ret) |= 1;
3023 return ret;
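/* Illustrative sketch, not part of the original file: look up a field of
   OBJ's record type by name and build the corresponding COMPONENT_REF with
   the helper above; the volatile/readonly flags are propagated by
   omp_build_component_ref itself.  The example_* name is invented.  */

DEBUG_FUNCTION tree
example_build_member_access (tree obj, const char *fieldname)
{
  tree id = get_identifier (fieldname);
  for (tree f = TYPE_FIELDS (TREE_TYPE (obj)); f; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL && DECL_NAME (f) == id)
      return omp_build_component_ref (obj, f);
  return NULL_TREE;
}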
3026 /* Return true if NAME is the name of an omp_* runtime API call. */
3027 bool
3028 omp_runtime_api_procname (const char *name)
3030 if (!startswith (name, "omp_"))
3031 return false;
3033 static const char *omp_runtime_apis[] =
3035 /* This array has 3 sections. First omp_* calls that don't
3036 have any suffixes. */
3037 "aligned_alloc",
3038 "aligned_calloc",
3039 "alloc",
3040 "calloc",
3041 "free",
3042 "get_mapped_ptr",
3043 "realloc",
3044 "target_alloc",
3045 "target_associate_ptr",
3046 "target_disassociate_ptr",
3047 "target_free",
3048 "target_is_accessible",
3049 "target_is_present",
3050 "target_memcpy",
3051 "target_memcpy_async",
3052 "target_memcpy_rect",
3053 "target_memcpy_rect_async",
3054 NULL,
3055 /* Now omp_* calls that are available as omp_* and omp_*_; however, the
3056 DECL_NAME is always omp_* without trailing underscore. */
3057 "capture_affinity",
3058 "destroy_allocator",
3059 "destroy_lock",
3060 "destroy_nest_lock",
3061 "display_affinity",
3062 "fulfill_event",
3063 "get_active_level",
3064 "get_affinity_format",
3065 "get_cancellation",
3066 "get_default_allocator",
3067 "get_default_device",
3068 "get_device_num",
3069 "get_dynamic",
3070 "get_initial_device",
3071 "get_level",
3072 "get_max_active_levels",
3073 "get_max_task_priority",
3074 "get_max_teams",
3075 "get_max_threads",
3076 "get_nested",
3077 "get_num_devices",
3078 "get_num_places",
3079 "get_num_procs",
3080 "get_num_teams",
3081 "get_num_threads",
3082 "get_partition_num_places",
3083 "get_place_num",
3084 "get_proc_bind",
3085 "get_supported_active_levels",
3086 "get_team_num",
3087 "get_teams_thread_limit",
3088 "get_thread_limit",
3089 "get_thread_num",
3090 "get_wtick",
3091 "get_wtime",
3092 "in_explicit_task",
3093 "in_final",
3094 "in_parallel",
3095 "init_lock",
3096 "init_nest_lock",
3097 "is_initial_device",
3098 "pause_resource",
3099 "pause_resource_all",
3100 "set_affinity_format",
3101 "set_default_allocator",
3102 "set_lock",
3103 "set_nest_lock",
3104 "test_lock",
3105 "test_nest_lock",
3106 "unset_lock",
3107 "unset_nest_lock",
3108 NULL,
3109 /* And finally calls available as omp_*, omp_*_ and omp_*_8_; however,
3110 as DECL_NAME only omp_* and omp_*_8 appear. */
3111 "display_env",
3112 "get_ancestor_thread_num",
3113 "init_allocator",
3114 "get_partition_place_nums",
3115 "get_place_num_procs",
3116 "get_place_proc_ids",
3117 "get_schedule",
3118 "get_team_size",
3119 "set_default_device",
3120 "set_dynamic",
3121 "set_max_active_levels",
3122 "set_nested",
3123 "set_num_teams",
3124 "set_num_threads",
3125 "set_schedule",
3126 "set_teams_thread_limit"
3129 int mode = 0;
3130 for (unsigned i = 0; i < ARRAY_SIZE (omp_runtime_apis); i++)
3132 if (omp_runtime_apis[i] == NULL)
3134 mode++;
3135 continue;
3137 size_t len = strlen (omp_runtime_apis[i]);
3138 if (strncmp (name + 4, omp_runtime_apis[i], len) == 0
3139 && (name[4 + len] == '\0'
3140 || (mode > 1 && strcmp (name + 4 + len, "_8") == 0)))
3141 return true;
3143 return false;
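/* Illustrative sketch, not part of the original file: a few sample inputs
   and the results omp_runtime_api_procname is expected to give, reflecting
   that the "_8" suffix is only accepted for names in the third section of
   the table above.  The example_* name is invented.  */

DEBUG_FUNCTION void
example_check_runtime_api_names (void)
{
  gcc_checking_assert (omp_runtime_api_procname ("omp_get_num_threads"));
  gcc_checking_assert (omp_runtime_api_procname ("omp_set_num_threads_8"));
  gcc_checking_assert (!omp_runtime_api_procname ("omp_get_num_threads_8"));
  gcc_checking_assert (!omp_runtime_api_procname ("my_omp_helper"));
}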
3146 /* Return true if FNDECL is an omp_* runtime API call. */
3148 bool
3149 omp_runtime_api_call (const_tree fndecl)
3151 tree declname = DECL_NAME (fndecl);
3152 if (!declname
3153 || (DECL_CONTEXT (fndecl) != NULL_TREE
3154 && TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL)
3155 || !TREE_PUBLIC (fndecl))
3156 return false;
3157 return omp_runtime_api_procname (IDENTIFIER_POINTER (declname));
3160 namespace omp_addr_tokenizer {
3162 /* We scan an expression by recursive descent, and build a vector of
3163 "omp_addr_token *" pointers representing a "parsed" version of the
3164 expression. The grammar we use is something like this:
3166 expr0::
3167 expr [section-access]
3169 expr::
3170 structured-expr access-method
3171 | array-base access-method
3173 structured-expr::
3174 structure-base component-selector
3176 arbitrary-expr::
3177 (anything else)
3179 structure-base::
3180 DECL access-method
3181 | structured-expr access-method
3182 | arbitrary-expr access-method
3184 array-base::
3185 DECL
3186 | arbitrary-expr
3188 access-method::
3189 DIRECT
3190 | REF
3191 | POINTER
3192 | REF_TO_POINTER
3193 | POINTER_OFFSET
3194 | REF_TO_POINTER_OFFSET
3195 | INDEXED_ARRAY
3196 | INDEXED_REF_TO_ARRAY
3197 | index-expr
3199 index-expr::
3200 INDEX_EXPR access-method
3202 component-selector::
3203 component-selector COMPONENT_REF
3204 | component-selector ARRAY_REF
3205 | COMPONENT_REF
3207 This tokenized form is then used both in parsing, for OpenMP clause
3208 expansion (for C and C++) and in gimplify.cc for sibling-list handling
3209 (for C, C++ and Fortran). */
3211 omp_addr_token::omp_addr_token (token_type t, tree e)
3212 : type(t), expr(e)
3216 omp_addr_token::omp_addr_token (access_method_kinds k, tree e)
3217 : type(ACCESS_METHOD), expr(e)
3219 u.access_kind = k;
3222 omp_addr_token::omp_addr_token (token_type t, structure_base_kinds k, tree e)
3223 : type(t), expr(e)
3225 u.structure_base_kind = k;
3228 static bool
3229 omp_parse_component_selector (tree *expr0)
3231 tree expr = *expr0;
3232 tree last_component = NULL_TREE;
3234 while (TREE_CODE (expr) == COMPONENT_REF
3235 || TREE_CODE (expr) == ARRAY_REF)
3237 if (TREE_CODE (expr) == COMPONENT_REF)
3238 last_component = expr;
3240 expr = TREE_OPERAND (expr, 0);
3242 if (TREE_CODE (TREE_TYPE (expr)) == REFERENCE_TYPE)
3243 break;
3246 if (!last_component)
3247 return false;
3249 *expr0 = last_component;
3250 return true;
3253 /* This handles references that have had convert_from_reference called on
3254 them, and also those that haven't. */
3256 static bool
3257 omp_parse_ref (tree *expr0)
3259 tree expr = *expr0;
3261 if (TREE_CODE (TREE_TYPE (expr)) == REFERENCE_TYPE)
3262 return true;
3263 else if ((TREE_CODE (expr) == INDIRECT_REF
3264 || (TREE_CODE (expr) == MEM_REF
3265 && integer_zerop (TREE_OPERAND (expr, 1))))
3266 && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == REFERENCE_TYPE)
3268 *expr0 = TREE_OPERAND (expr, 0);
3269 return true;
3272 return false;
3275 static bool
3276 omp_parse_pointer (tree *expr0, bool *has_offset)
3278 tree expr = *expr0;
3280 *has_offset = false;
3282 if ((TREE_CODE (expr) == INDIRECT_REF
3283 || (TREE_CODE (expr) == MEM_REF
3284 && integer_zerop (TREE_OPERAND (expr, 1))))
3285 && TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == POINTER_TYPE)
3287 expr = TREE_OPERAND (expr, 0);
3289 /* The Fortran FE sometimes emits a no-op cast here. */
3290 STRIP_NOPS (expr);
3292 while (1)
3294 if (TREE_CODE (expr) == COMPOUND_EXPR)
3296 expr = TREE_OPERAND (expr, 1);
3297 STRIP_NOPS (expr);
3299 else if (TREE_CODE (expr) == SAVE_EXPR)
3300 expr = TREE_OPERAND (expr, 0);
3301 else if (TREE_CODE (expr) == POINTER_PLUS_EXPR)
3303 *has_offset = true;
3304 expr = TREE_OPERAND (expr, 0);
3306 else
3307 break;
3310 STRIP_NOPS (expr);
3312 *expr0 = expr;
3313 return true;
3316 return false;
3319 static bool
3320 omp_parse_access_method (tree *expr0, enum access_method_kinds *kind)
3322 tree expr = *expr0;
3323 bool has_offset;
3325 if (omp_parse_ref (&expr))
3326 *kind = ACCESS_REF;
3327 else if (omp_parse_pointer (&expr, &has_offset))
3329 if (omp_parse_ref (&expr))
3330 *kind = has_offset ? ACCESS_REF_TO_POINTER_OFFSET
3331 : ACCESS_REF_TO_POINTER;
3332 else
3333 *kind = has_offset ? ACCESS_POINTER_OFFSET : ACCESS_POINTER;
3335 else if (TREE_CODE (expr) == ARRAY_REF)
3337 while (TREE_CODE (expr) == ARRAY_REF)
3338 expr = TREE_OPERAND (expr, 0);
3339 if (omp_parse_ref (&expr))
3340 *kind = ACCESS_INDEXED_REF_TO_ARRAY;
3341 else
3342 *kind = ACCESS_INDEXED_ARRAY;
3344 else
3345 *kind = ACCESS_DIRECT;
3347 STRIP_NOPS (expr);
3349 *expr0 = expr;
3350 return true;
3353 static bool
3354 omp_parse_access_methods (vec<omp_addr_token *> &addr_tokens, tree *expr0)
3356 tree expr = *expr0;
3357 enum access_method_kinds kind;
3358 tree am_expr;
3360 if (omp_parse_access_method (&expr, &kind))
3361 am_expr = expr;
3363 if (TREE_CODE (expr) == INDIRECT_REF
3364 || TREE_CODE (expr) == MEM_REF
3365 || TREE_CODE (expr) == ARRAY_REF)
3366 omp_parse_access_methods (addr_tokens, &expr);
3368 addr_tokens.safe_push (new omp_addr_token (kind, am_expr));
3370 *expr0 = expr;
3371 return true;
3374 static bool omp_parse_structured_expr (vec<omp_addr_token *> &, tree *);
3376 static bool
3377 omp_parse_structure_base (vec<omp_addr_token *> &addr_tokens,
3378 tree *expr0, structure_base_kinds *kind,
3379 vec<omp_addr_token *> &base_access_tokens,
3380 bool allow_structured = true)
3382 tree expr = *expr0;
3384 if (allow_structured)
3385 omp_parse_access_methods (base_access_tokens, &expr);
3387 if (DECL_P (expr))
3389 *kind = BASE_DECL;
3390 return true;
3393 if (allow_structured && omp_parse_structured_expr (addr_tokens, &expr))
3395 *kind = BASE_COMPONENT_EXPR;
3396 *expr0 = expr;
3397 return true;
3400 *kind = BASE_ARBITRARY_EXPR;
3401 *expr0 = expr;
3402 return true;
3405 static bool
3406 omp_parse_structured_expr (vec<omp_addr_token *> &addr_tokens, tree *expr0)
3408 tree expr = *expr0;
3409 tree base_component = NULL_TREE;
3410 structure_base_kinds struct_base_kind;
3411 auto_vec<omp_addr_token *> base_access_tokens;
3413 if (omp_parse_component_selector (&expr))
3414 base_component = expr;
3415 else
3416 return false;
3418 gcc_assert (TREE_CODE (expr) == COMPONENT_REF);
3419 expr = TREE_OPERAND (expr, 0);
3421 tree structure_base = expr;
3423 if (!omp_parse_structure_base (addr_tokens, &expr, &struct_base_kind,
3424 base_access_tokens))
3425 return false;
3427 addr_tokens.safe_push (new omp_addr_token (STRUCTURE_BASE, struct_base_kind,
3428 structure_base));
3429 addr_tokens.safe_splice (base_access_tokens);
3430 addr_tokens.safe_push (new omp_addr_token (COMPONENT_SELECTOR,
3431 base_component));
3433 *expr0 = expr;
3435 return true;
3438 static bool
3439 omp_parse_array_expr (vec<omp_addr_token *> &addr_tokens, tree *expr0)
3441 tree expr = *expr0;
3442 structure_base_kinds s_kind;
3443 auto_vec<omp_addr_token *> base_access_tokens;
3445 if (!omp_parse_structure_base (addr_tokens, &expr, &s_kind,
3446 base_access_tokens, false))
3447 return false;
3449 addr_tokens.safe_push (new omp_addr_token (ARRAY_BASE, s_kind, expr));
3450 addr_tokens.safe_splice (base_access_tokens);
3452 *expr0 = expr;
3453 return true;
3456 /* Return TRUE if the ACCESS_METHOD token at index 'i' has a further
3457 ACCESS_METHOD chained after it (e.g., if we're processing an expression
3458 containing multiple pointer indirections). */
3460 bool
3461 omp_access_chain_p (vec<omp_addr_token *> &addr_tokens, unsigned i)
3463 gcc_assert (addr_tokens[i]->type == ACCESS_METHOD);
3464 return (i + 1 < addr_tokens.length ()
3465 && addr_tokens[i + 1]->type == ACCESS_METHOD);
3468 /* Return the address of the object accessed by the ACCESS_METHOD token
3469 at 'i': either of the next access method's expr, or of EXPR if we're at
3470 the end of the list of tokens. */
3472 tree
3473 omp_accessed_addr (vec<omp_addr_token *> &addr_tokens, unsigned i, tree expr)
3475 if (i + 1 < addr_tokens.length ())
3476 return build_fold_addr_expr (addr_tokens[i + 1]->expr);
3477 else
3478 return build_fold_addr_expr (expr);
3481 } /* namespace omp_addr_tokenizer. */
3483 bool
3484 omp_parse_expr (vec<omp_addr_token *> &addr_tokens, tree expr)
3486 using namespace omp_addr_tokenizer;
3487 auto_vec<omp_addr_token *> expr_access_tokens;
3489 if (!omp_parse_access_methods (expr_access_tokens, &expr))
3490 return false;
3492 if (omp_parse_structured_expr (addr_tokens, &expr))
3494 else if (omp_parse_array_expr (addr_tokens, &expr))
3496 else
3497 return false;
3499 addr_tokens.safe_splice (expr_access_tokens);
3501 return true;
3504 DEBUG_FUNCTION void
3505 debug_omp_tokenized_addr (vec<omp_addr_token *> &addr_tokens,
3506 bool with_exprs)
3508 using namespace omp_addr_tokenizer;
3509 const char *sep = with_exprs ? " " : "";
3511 for (auto e : addr_tokens)
3513 const char *pfx = "";
3515 fputs (sep, stderr);
3517 switch (e->type)
3519 case COMPONENT_SELECTOR:
3520 fputs ("component_selector", stderr);
3521 break;
3522 case ACCESS_METHOD:
3523 switch (e->u.access_kind)
3525 case ACCESS_DIRECT:
3526 fputs ("access_direct", stderr);
3527 break;
3528 case ACCESS_REF:
3529 fputs ("access_ref", stderr);
3530 break;
3531 case ACCESS_POINTER:
3532 fputs ("access_pointer", stderr);
3533 break;
3534 case ACCESS_POINTER_OFFSET:
3535 fputs ("access_pointer_offset", stderr);
3536 break;
3537 case ACCESS_REF_TO_POINTER:
3538 fputs ("access_ref_to_pointer", stderr);
3539 break;
3540 case ACCESS_REF_TO_POINTER_OFFSET:
3541 fputs ("access_ref_to_pointer_offset", stderr);
3542 break;
3543 case ACCESS_INDEXED_ARRAY:
3544 fputs ("access_indexed_array", stderr);
3545 break;
3546 case ACCESS_INDEXED_REF_TO_ARRAY:
3547 fputs ("access_indexed_ref_to_array", stderr);
3548 break;
3550 break;
3551 case ARRAY_BASE:
3552 case STRUCTURE_BASE:
3553 pfx = e->type == ARRAY_BASE ? "array_" : "struct_";
3554 switch (e->u.structure_base_kind)
3556 case BASE_DECL:
3557 fprintf (stderr, "%sbase_decl", pfx);
3558 break;
3559 case BASE_COMPONENT_EXPR:
3560 fputs ("base_component_expr", stderr);
3561 break;
3562 case BASE_ARBITRARY_EXPR:
3563 fprintf (stderr, "%sbase_arbitrary_expr", pfx);
3564 break;
3566 break;
3568 if (with_exprs)
3570 fputs (" [", stderr);
3571 print_generic_expr (stderr, e->expr);
3572 fputc (']', stderr);
3573 sep = ",\n ";
3575 else
3576 sep = " ";
3579 fputs ("\n", stderr);
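/* Illustrative sketch, not part of the original file: run the address
   tokenizer on an arbitrary map-clause address expression and dump the
   resulting token stream with the debug helper above.  The example_* name
   is invented.  */

DEBUG_FUNCTION void
example_tokenize_and_dump (tree expr)
{
  auto_vec<omp_addr_token *> addr_tokens;
  if (omp_parse_expr (addr_tokens, expr))
    debug_omp_tokenized_addr (addr_tokens, true);
  else
    fputs ("expression did not tokenize\n", stderr);
}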
3583 #include "gt-omp-general.h"