/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
                  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "options.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "c-pragma.h"
#include "omp-general.h"
#include "gomp-constants.h"

/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);

  args->quick_push (t);
  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
        args->quick_push (build_int_cst (integer_type_node,
                          TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
        args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}
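
/* Illustrative sketch (editorial addition, not from the original sources):
   for a directive such as

     #pragma acc wait (1, 2) async (3)

   the call built above is roughly

     __builtin_GOACC_wait (3, 2, 1, 2);

   i.e. the async queue (or GOMP_ASYNC_SYNC when no async clause is
   present), the number of wait arguments, then the wait arguments
   themselves.  */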

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  OMP_CRITICAL_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */
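
/* Illustrative sketch (editorial addition, not from the original sources):
   the main shapes handled below are roughly

     #pragma omp atomic read
       v = x;            // code == OMP_ATOMIC_READ
     #pragma omp atomic update
       x += expr;        // code == OMP_ATOMIC, opcode == PLUS_EXPR
     #pragma omp atomic capture
       v = x += expr;    // code == OMP_ATOMIC_CAPTURE_NEW

   Each form is rewritten in terms of the address of X so that the
   gimplifier can later emit the actual atomic built-ins.  */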

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
                     enum tree_code opcode, tree lhs, tree rhs,
                     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst,
                     bool test)
{
  tree x, type, addr, pre = NULL_TREE;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex types are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And let's not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
          || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
         it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
                                loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (lhs)
      && rhs1 != lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
        error_at (loc, "%<#pragma omp atomic update%> uses two different "
                       "variables for memory");
      else
        error_at (loc, "%<#pragma omp atomic capture%> uses two different "
                       "variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
         location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (lhs))
        {
          if (lhs1 != lhs && !test)
            {
              error_at (loc, "%<#pragma omp atomic capture%> uses two "
                             "different variables for memory");
              return error_mark_node;
            }
        }
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
                             loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
        {
          tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
          if (rhs1addr == error_mark_node)
            return error_mark_node;
          x = omit_one_operand_loc (loc, type, x, rhs1addr);
        }
      if (lhs1 && lhs1 != lhs)
        {
          tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
          if (lhs1addr == error_mark_node)
            return error_mark_node;
          if (code == OMP_ATOMIC_CAPTURE_OLD)
            x = omit_one_operand_loc (loc, type, x, lhs1addr);
          else
            {
              if (!test)
                x = save_expr (x);
              x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
            }
        }
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
        return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}

/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
        /* cp_build_modify_expr forces preevaluation of the RHS to make
           sure that it is evaluated before the lvalue-rvalue conversion
           is applied to the LHS.  Reconstruct the original expression.  */
        tree op0 = TREE_OPERAND (exp, 0);
        if (TREE_CODE (op0) == TARGET_EXPR
            && !VOID_TYPE_P (TREE_TYPE (op0)))
          {
            tree op1 = TREE_OPERAND (exp, 1);
            tree temp = TARGET_EXPR_SLOT (op0);
            if (BINARY_CLASS_P (op1)
                && TREE_OPERAND (op1, 1) == temp)
              {
                op1 = copy_node (op1);
                TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
                return check_omp_for_incr_expr (loc, op1, decl);
              }
          }
        break;
      }
    default:
      break;
    }

  return error_mark_node;
}

/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
                                 sizetype, TREE_OPERAND (incr, 1));

      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
          || TREE_CODE (incr) == PREDECREMENT_EXPR)
        t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}

/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.

   ORIG_DECLV, if non-NULL, is a vector with the original iteration
   variables (prior to any transformations, by say, C++ iterators).

   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  */
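
/* Illustrative sketch (editorial addition, not from the original sources):
   for the canonical loop

     #pragma omp for
     for (int i = 0; i < n; i++)
       body;

   DECLV holds i, INITV the initialization i = 0, CONDV the controlling
   predicate i < n, and INCRV the increment i++.  Increments written in
   other forms (e.g. i = 2 + i - 1) are canonicalized below into an
   i = i + step assignment that gimplify_omp_for can handle.  */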

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
                  tree orig_declv, tree initv, tree condv, tree incrv,
                  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
        elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
          && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
        {
          error_at (elocus, "invalid type for iteration variable %qE", decl);
          fail = true;
        }
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
        {
          error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
          fail = true;
          /* _Atomic iterator confuses stuff too much, so we risk ICE
             trying to diagnose it further.  */
          continue;
        }

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
         have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
        {
          elocus = DECL_SOURCE_LOCATION (decl);

          init = DECL_INITIAL (decl);
          if (init == NULL)
            {
              error_at (elocus, "%qE is not initialized", decl);
              init = integer_zero_node;
              fail = true;
            }
          DECL_INITIAL (decl) = NULL_TREE;

          init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
                                    /* FIXME diagnostics: This should
                                       be the location of the INIT.  */
                                    elocus,
                                    init,
                                    NULL_TREE);
        }
      if (init != error_mark_node)
        {
          gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
          gcc_assert (TREE_OPERAND (init, 0) == decl);
        }

      if (cond == NULL_TREE)
        {
          error_at (elocus, "missing controlling predicate");
          fail = true;
        }
      else
        {
          bool cond_ok = false;

          if (EXPR_HAS_LOCATION (cond))
            elocus = EXPR_LOCATION (cond);

          if (TREE_CODE (cond) == LT_EXPR
              || TREE_CODE (cond) == LE_EXPR
              || TREE_CODE (cond) == GT_EXPR
              || TREE_CODE (cond) == GE_EXPR
              || TREE_CODE (cond) == NE_EXPR
              || TREE_CODE (cond) == EQ_EXPR)
            {
              tree op0 = TREE_OPERAND (cond, 0);
              tree op1 = TREE_OPERAND (cond, 1);

              /* 2.5.1.  The comparison in the condition is computed in
                 the type of DECL, otherwise the behavior is undefined.

                 For example:
                 long n; int i;
                 i < n;

                 according to ISO will be evaluated as:
                 (long)i < n;

                 We want to force:
                 i < (int)n;  */
              if (TREE_CODE (op0) == NOP_EXPR
                  && decl == TREE_OPERAND (op0, 0))
                {
                  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
                  TREE_OPERAND (cond, 1)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 1));
                }
              else if (TREE_CODE (op1) == NOP_EXPR
                       && decl == TREE_OPERAND (op1, 0))
                {
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
                  TREE_OPERAND (cond, 0)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 0));
                }

              if (decl == TREE_OPERAND (cond, 0))
                cond_ok = true;
              else if (decl == TREE_OPERAND (cond, 1))
                {
                  TREE_SET_CODE (cond,
                                 swap_tree_comparison (TREE_CODE (cond)));
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
                  TREE_OPERAND (cond, 0) = decl;
                  cond_ok = true;
                }

              if (TREE_CODE (cond) == NE_EXPR
                  || TREE_CODE (cond) == EQ_EXPR)
                {
                  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
                    {
                      if (code != CILK_SIMD && code != CILK_FOR)
                        cond_ok = false;
                    }
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MIN_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? GT_EXPR : LE_EXPR);
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MAX_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? LT_EXPR : GE_EXPR);
                  else if (code != CILK_SIMD && code != CILK_FOR)
                    cond_ok = false;
                }
            }

          if (!cond_ok)
            {
              error_at (elocus, "invalid controlling predicate");
              fail = true;
            }
        }

      if (incr == NULL_TREE)
        {
          error_at (elocus, "missing increment expression");
          fail = true;
        }
      else
        {
          bool incr_ok = false;

          if (EXPR_HAS_LOCATION (incr))
            elocus = EXPR_LOCATION (incr);

          /* Check all the valid increment expressions: v++, v--, ++v, --v,
             v = v + incr, v = incr + v and v = v - incr.  */
          switch (TREE_CODE (incr))
            {
            case POSTINCREMENT_EXPR:
            case PREINCREMENT_EXPR:
            case POSTDECREMENT_EXPR:
            case PREDECREMENT_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;

              incr_ok = true;
              incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
              break;

            case COMPOUND_EXPR:
              if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
                  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
                break;
              incr = TREE_OPERAND (incr, 1);
              /* FALLTHRU */
            case MODIFY_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;
              if (TREE_OPERAND (incr, 1) == decl)
                break;
              if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
                  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
                      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
                incr_ok = true;
              else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
                        || (TREE_CODE (TREE_OPERAND (incr, 1))
                            == POINTER_PLUS_EXPR))
                       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
                incr_ok = true;
              else
                {
                  tree t = check_omp_for_incr_expr (elocus,
                                                    TREE_OPERAND (incr, 1),
                                                    decl);
                  if (t != error_mark_node)
                    {
                      incr_ok = true;
                      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
                      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                    }
                }
              break;

            default:
              break;
            }
          if (!incr_ok)
            {
              error_at (elocus, "invalid increment expression");
              fail = true;
            }
        }

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}

/* Type for passing data in between c_omp_check_loop_iv and
   c_omp_check_loop_iv_r.  */

struct c_omp_check_loop_iv_data
{
  tree declv;
  bool fail;
  location_t stmt_loc;
  location_t expr_loc;
  int kind;
  walk_tree_lh lh;
  hash_set<tree> *ppset;
};

/* Helper function called via walk_tree, to diagnose uses
   of associated loop IVs inside of lb, b and incr expressions
   of OpenMP loops.  */

static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      int i;
      for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
        if (*tp == TREE_VEC_ELT (d->declv, i))
          {
            location_t loc = d->expr_loc;
            if (loc == UNKNOWN_LOCATION)
              loc = d->stmt_loc;
            switch (d->kind)
              {
              case 0:
                error_at (loc, "initializer expression refers to "
                               "iteration variable %qD", *tp);
                break;
              case 1:
                error_at (loc, "condition expression refers to "
                               "iteration variable %qD", *tp);
                break;
              case 2:
                error_at (loc, "increment expression refers to "
                               "iteration variable %qD", *tp);
                break;
              }
            d->fail = true;
          }
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
           && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
                          d->ppset, d->lh);
    }

  return NULL_TREE;
}

/* Diagnose invalid references to loop iterators in lb, b and incr
   expressions.  */
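
/* Illustrative sketch (editorial addition, not from the original sources):
   the check below rejects collapsed loops whose bounds or steps refer to
   another associated iteration variable, e.g.

     #pragma omp for collapse(2)
     for (i = 0; i < n; i++)
       for (j = 0; j < i; j++)
         body;

   is diagnosed with "condition expression refers to iteration
   variable 'i'".  */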

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      data.kind = 0;
      walk_tree_1 (&TREE_OPERAND (init, 1),
                   c_omp_check_loop_iv_r, &data, &pset, lh);
      /* Don't warn for C++ random access iterators here, the
         expression then involves the subtraction and always refers
         to the original value.  The C++ FE needs to warn on those
         earlier.  */
      if (decl == TREE_VEC_ELT (declv, i))
        {
          data.expr_loc = EXPR_LOCATION (cond);
          data.kind = 1;
          walk_tree_1 (&TREE_OPERAND (cond, 1),
                       c_omp_check_loop_iv_r, &data, &pset, lh);
        }
      if (TREE_CODE (incr) == MODIFY_EXPR)
        {
          gcc_assert (TREE_OPERAND (incr, 0) == decl);
          incr = TREE_OPERAND (incr, 1);
          data.kind = 2;
          if (TREE_CODE (incr) == PLUS_EXPR
              && TREE_OPERAND (incr, 1) == decl)
            {
              data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
              walk_tree_1 (&TREE_OPERAND (incr, 0),
                           c_omp_check_loop_iv_r, &data, &pset, lh);
            }
          else
            {
              data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
              walk_tree_1 (&TREE_OPERAND (incr, 1),
                           c_omp_check_loop_iv_r, &data, &pset, lh);
            }
        }
    }
  return !data.fail;
}

/* Similar, but allows checking the init or cond expressions individually.  */

bool
c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
                           tree init, tree cond, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = stmt_loc;
  data.lh = lh;
  data.ppset = &pset;
  if (init)
    {
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 0;
      walk_tree_1 (&init,
                   c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  if (cond)
    {
      gcc_assert (COMPARISON_CLASS_P (cond));
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 1;
      if (TREE_OPERAND (cond, 0) == decl)
        walk_tree_1 (&TREE_OPERAND (cond, 1),
                     c_omp_check_loop_iv_r, &data, &pset, lh);
      else
        walk_tree_1 (&TREE_OPERAND (cond, 0),
                     c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  return !data.fail;
}

/* This function splits clauses for OpenACC combined loop
   constructs.  OpenACC combined loop constructs are:
   #pragma acc kernels loop
   #pragma acc parallel loop  */
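
/* Illustrative sketch (editorial addition, not from the original sources):
   for

     #pragma acc parallel loop collapse(2) num_gangs(32) reduction(+:sum)

   collapse(2) goes to the loop construct, num_gangs(32) goes to the
   enclosing parallel construct, and the reduction stays on the loop but
   is also duplicated onto the parallel construct (is_parallel is true
   for "parallel loop").  */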

tree
c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
                           bool is_parallel)
{
  tree next, loop_clauses, nc;

  loop_clauses = *not_loop_clauses = NULL_TREE;
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
        {
          /* Loop clauses.  */
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_TILE:
        case OMP_CLAUSE_GANG:
        case OMP_CLAUSE_WORKER:
        case OMP_CLAUSE_VECTOR:
        case OMP_CLAUSE_AUTO:
        case OMP_CLAUSE_SEQ:
        case OMP_CLAUSE_INDEPENDENT:
        case OMP_CLAUSE_PRIVATE:
          OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
          loop_clauses = clauses;
          break;

          /* Reductions must be duplicated on both constructs.  */
        case OMP_CLAUSE_REDUCTION:
          if (is_parallel)
            {
              nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                     OMP_CLAUSE_REDUCTION);
              OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
              OMP_CLAUSE_REDUCTION_CODE (nc)
                = OMP_CLAUSE_REDUCTION_CODE (clauses);
              OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
              *not_loop_clauses = nc;
            }

          OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
          loop_clauses = clauses;
          break;

          /* Parallel/kernels clauses.  */
        default:
          OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
          *not_loop_clauses = clauses;
          break;
        }
    }

  return loop_clauses;
}

/* This function attempts to split or duplicate clauses for OpenMP
   combined/composite constructs.  Right now there are 21 different
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows determining which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can only be combined with parallel).
   OpenMP combined/composite constructs are:
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp distribute simd
   #pragma omp for simd
   #pragma omp parallel for
   #pragma omp parallel for simd
   #pragma omp parallel sections
   #pragma omp target parallel
   #pragma omp target parallel for
   #pragma omp target parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd
   #pragma omp target teams distribute simd
   #pragma omp target simd
   #pragma omp taskloop simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp teams distribute simd  */
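
/* Illustrative sketch (editorial addition, not from the original sources):
   for

     #pragma omp target teams distribute parallel for simd \
             firstprivate (x) collapse (2) schedule (static)

   the loop below routes schedule to the for construct, duplicates
   collapse onto the simd and for constructs with the original landing
   on distribute, and duplicates firstprivate onto target and parallel
   with the original landing on teams, so each constituent construct
   ends up with only the clauses it is allowed to carry.  */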

void
c_omp_split_clauses (location_t loc, enum tree_code code,
                     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
        cclauses[C_OMP_CLAUSE_SPLIT_FOR]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      case OMP_SECTIONS:
        cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
          = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
        break;
      default:
        break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
        {
        /* First the clauses that are unique to some constructs.  */
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_MAP:
        case OMP_CLAUSE_IS_DEVICE_PTR:
        case OMP_CLAUSE_DEFAULTMAP:
        case OMP_CLAUSE_DEPEND:
          s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
          s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_DIST_SCHEDULE:
          s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_PROC_BIND:
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        case OMP_CLAUSE_ORDERED:
          s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        case OMP_CLAUSE_SCHEDULE:
          s = C_OMP_CLAUSE_SPLIT_FOR;
          if (code != OMP_SIMD)
            OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
          break;
        case OMP_CLAUSE_SAFELEN:
        case OMP_CLAUSE_SIMDLEN:
        case OMP_CLAUSE_ALIGNED:
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        case OMP_CLAUSE_GRAINSIZE:
        case OMP_CLAUSE_NUM_TASKS:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
        case OMP_CLAUSE_NOGROUP:
        case OMP_CLAUSE_PRIORITY:
          s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
          break;
        /* Duplicate this to all of taskloop, distribute, for and simd.  */
        case OMP_CLAUSE_COLLAPSE:
          if (code == OMP_SIMD)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_COLLAPSE);
                  OMP_CLAUSE_COLLAPSE_EXPR (c)
                    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
                  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
                }
              else
                {
                  /* This must be #pragma omp target simd */
                  s = C_OMP_CLAUSE_SPLIT_SIMD;
                  break;
                }
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_COLLAPSE);
                  OMP_CLAUSE_COLLAPSE_EXPR (c)
                    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
                  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
                  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
                   != 0)
            s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
          else
            s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
          break;
        /* Private clause is supported on all constructs,
           it is enough to put it on the innermost one.  For
           #pragma omp {for,sections} put it on parallel though,
           as that's what we did for OpenMP 3.1.  */
        case OMP_CLAUSE_PRIVATE:
          switch (code)
            {
            case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
            case OMP_FOR: case OMP_SECTIONS:
            case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
            case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
            case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
            default: gcc_unreachable ();
            }
          break;
        /* Firstprivate clause is supported on all constructs but
           simd.  Put it on the outermost of those and duplicate on teams
           and parallel.  */
        case OMP_CLAUSE_FIRSTPRIVATE:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
              != 0)
            {
              if (code == OMP_SIMD
                  && (mask & ((OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_THREADS)
                              | (OMP_CLAUSE_MASK_1
                                 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
                {
                  /* This must be #pragma omp target simd.  */
                  s = C_OMP_CLAUSE_SPLIT_TARGET;
                  break;
                }
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_FIRSTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
              cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
              != 0)
            {
              if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
                           | (OMP_CLAUSE_MASK_1
                              << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_FIRSTPRIVATE);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  if ((mask & (OMP_CLAUSE_MASK_1
                               << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
                    s = C_OMP_CLAUSE_SPLIT_TEAMS;
                  else
                    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
                }
              else
                /* This must be
                   #pragma omp parallel{, for{, simd}, sections}
                   or
                   #pragma omp target parallel.  */
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                   != 0)
            {
              /* This must be one of
                 #pragma omp {,target }teams distribute
                 #pragma omp target teams
                 #pragma omp {,target }teams distribute simd.  */
              gcc_assert (code == OMP_DISTRIBUTE
                          || code == OMP_TEAMS
                          || code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
            {
              /* This must be #pragma omp distribute simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
            {
              /* This must be #pragma omp taskloop simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
            }
          else
            {
              /* This must be #pragma omp for simd.  */
              gcc_assert (code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          break;
        /* Lastprivate is allowed on distribute, for, sections and simd.  In
           parallel {for{, simd},sections} we actually want to put it on
           parallel rather than for or sections.  */
        case OMP_CLAUSE_LASTPRIVATE:
          if (code == OMP_DISTRIBUTE)
            {
              s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
              break;
            }
          if ((mask & (OMP_CLAUSE_MASK_1
                       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_LASTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
              cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
            }
          if (code == OMP_FOR || code == OMP_SECTIONS)
            {
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
                  != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              break;
            }
          gcc_assert (code == OMP_SIMD);
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_LASTPRIVATE);
              OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
                  != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
              OMP_CLAUSE_CHAIN (c) = cclauses[s];
              cclauses[s] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_SIMD;
          break;
        /* Shared and default clauses are allowed on parallel, teams and
           taskloop.  */
        case OMP_CLAUSE_SHARED:
        case OMP_CLAUSE_DEFAULT:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
              != 0)
            {
              s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
              break;
            }
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
              != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
                  == 0)
                {
                  s = C_OMP_CLAUSE_SPLIT_TEAMS;
                  break;
                }
              c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                    OMP_CLAUSE_CODE (clauses));
              if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
                OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
              else
                OMP_CLAUSE_DEFAULT_KIND (c)
                  = OMP_CLAUSE_DEFAULT_KIND (clauses);
              OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
              cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
            }
          s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          break;
        /* Reduction is allowed on simd, for, parallel, sections and teams.
           Duplicate it on all of them, but omit on for or sections if
           parallel is present.  */
        case OMP_CLAUSE_REDUCTION:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
            {
              if (code == OMP_SIMD)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_REDUCTION);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_REDUCTION_CODE (c)
                    = OMP_CLAUSE_REDUCTION_CODE (clauses);
                  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
                  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
                  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
                }
              if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                  != 0)
                {
                  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                        OMP_CLAUSE_REDUCTION);
                  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
                  OMP_CLAUSE_REDUCTION_CODE (c)
                    = OMP_CLAUSE_REDUCTION_CODE (clauses);
                  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
                  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
                    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
                  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
                  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
                  s = C_OMP_CLAUSE_SPLIT_TEAMS;
                }
              else if ((mask & (OMP_CLAUSE_MASK_1
                                << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
              else
                s = C_OMP_CLAUSE_SPLIT_FOR;
            }
          else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
            s = C_OMP_CLAUSE_SPLIT_PARALLEL;
          else if (code == OMP_SIMD)
            s = C_OMP_CLAUSE_SPLIT_SIMD;
          else
            s = C_OMP_CLAUSE_SPLIT_TEAMS;
          break;
        case OMP_CLAUSE_IF:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
              != 0)
            s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
          else if ((mask & (OMP_CLAUSE_MASK_1
                            << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
            {
              if ((mask & (OMP_CLAUSE_MASK_1
                           << PRAGMA_OMP_CLAUSE_MAP)) != 0)
                {
                  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
                    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
                    s = C_OMP_CLAUSE_SPLIT_TARGET;
                  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
                    {
                      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
                                            OMP_CLAUSE_IF);
                      OMP_CLAUSE_IF_MODIFIER (c)
                        = OMP_CLAUSE_IF_MODIFIER (clauses);
                      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
                      OMP_CLAUSE_CHAIN (c)
                        = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
                      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
                      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
                    }
                  else
                    {
                      error_at (OMP_CLAUSE_LOCATION (clauses),
                                "expected %<parallel%> or %<target%> %<if%> "
                                "clause modifier");
                      continue;
                    }
                }
              else
                s = C_OMP_CLAUSE_SPLIT_PARALLEL;
            }
          else
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          break;
        case OMP_CLAUSE_LINEAR:
          /* Linear clause is allowed on simd and for.  Put it on the
             innermost construct.  */
          if (code == OMP_SIMD)
            s = C_OMP_CLAUSE_SPLIT_SIMD;
          else
            s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        case OMP_CLAUSE_NOWAIT:
          /* Nowait clause is allowed on target, for and sections, but
             is not allowed on parallel for or parallel sections.  Therefore,
             put it on target construct if present, because that can only
             be combined with parallel for{, simd} and not with for{, simd},
             otherwise to the worksharing construct.  */
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
              != 0)
            s = C_OMP_CLAUSE_SPLIT_TARGET;
          else
            s = C_OMP_CLAUSE_SPLIT_FOR;
          break;
        default:
          gcc_unreachable ();
        }
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
               | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}

/* qsort callback to compare #pragma omp declare simd clauses.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
        return -1;
      return 1;
    }
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
      int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
      if (c < d)
        return 1;
      if (c > d)
        return -1;
    }
  return 0;
}

/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */
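
/* Illustrative sketch (editorial addition, not from the original sources):
   given

     #pragma omp declare simd linear(p:1) uniform(n)
     void foo (int *p, int n);

   the linear and uniform clauses initially point at the PARM_DECLs for
   p and n; the function below replaces those with the indexes 0 and 1
   and sorts the clause list.  c_omp_declare_simd_clauses_to_decls later
   maps the indexes back to PARM_DECLs.  */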

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
          && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
        {
          tree decl = OMP_CLAUSE_DECL (c);
          tree arg;
          int idx;
          for (arg = parms, idx = 0; arg;
               arg = TREE_CHAIN (arg), idx++)
            if (arg == decl)
              break;
          if (arg == NULL_TREE)
            {
              error_at (OMP_CLAUSE_LOCATION (c),
                        "%qD is not a function argument", decl);
              continue;
            }
          OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
              && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
            {
              decl = OMP_CLAUSE_LINEAR_STEP (c);
              for (arg = parms, idx = 0; arg;
                   arg = TREE_CHAIN (arg), idx++)
                if (arg == decl)
                  break;
              if (arg == NULL_TREE)
                {
                  error_at (OMP_CLAUSE_LOCATION (c),
                            "%qD is not a function argument", decl);
                  continue;
                }
              OMP_CLAUSE_LINEAR_STEP (c)
                = build_int_cst (integer_type_node, idx);
            }
        }
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
        OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  else
    clauses = NULL_TREE;
  clvec.release ();
  return clauses;
}

/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
        && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
        && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
        int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
        tree arg;
        for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
             arg = TREE_CHAIN (arg), i++)
          if (i == idx)
            break;
        gcc_assert (arg);
        OMP_CLAUSE_DECL (c) = arg;
        if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
            && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
          {
            idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
            for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
                 arg = TREE_CHAIN (arg), i++)
              if (i == idx)
                break;
            gcc_assert (arg);
            OMP_CLAUSE_LINEAR_STEP (c) = arg;
          }
      }
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
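
/* Illustrative sketch (editorial addition, not from the original sources):
   under the rule above, a const-qualified object such as

     const int limit = 100;
     #pragma omp parallel default(none)
     body;   // 'limit' may be referenced without an explicit clause

   is treated as predetermined shared, so default(none) does not demand
   a data-sharing clause for it.  */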