/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
   Diego Novillo <dnovillo@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
29 #include "gimple-expr.h"
32 #include "gomp-constants.h"
35 /* Complete a #pragma oacc wait construct. LOC is the location of
39 c_finish_oacc_wait (location_t loc
, tree parms
, tree clauses
)
41 const int nparms
= list_length (parms
);
43 vec
<tree
, va_gc
> *args
;
45 vec_alloc (args
, nparms
+ 2);
46 stmt
= builtin_decl_explicit (BUILT_IN_GOACC_WAIT
);
48 if (find_omp_clause (clauses
, OMP_CLAUSE_ASYNC
))
49 t
= OMP_CLAUSE_ASYNC_EXPR (clauses
);
51 t
= build_int_cst (integer_type_node
, GOMP_ASYNC_SYNC
);
54 args
->quick_push (build_int_cst (integer_type_node
, nparms
));
56 for (t
= parms
; t
; t
= TREE_CHAIN (t
))
58 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t
)) == INTEGER_CST
)
59 args
->quick_push (build_int_cst (integer_type_node
,
60 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t
))));
62 args
->quick_push (OMP_CLAUSE_WAIT_EXPR (t
));
65 stmt
= build_call_expr_loc_vec (loc
, stmt
, args
);
72 /* Complete a #pragma omp master construct. STMT is the structured-block
73 that follows the pragma. LOC is the l*/
76 c_finish_omp_master (location_t loc
, tree stmt
)
78 tree t
= add_stmt (build1 (OMP_MASTER
, void_type_node
, stmt
));
79 SET_EXPR_LOCATION (t
, loc
);
83 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
84 that follows the pragma. LOC is the l*/
87 c_finish_omp_taskgroup (location_t loc
, tree stmt
)
89 tree t
= add_stmt (build1 (OMP_TASKGROUP
, void_type_node
, stmt
));
90 SET_EXPR_LOCATION (t
, loc
);
94 /* Complete a #pragma omp critical construct. STMT is the structured-block
95 that follows the pragma, NAME is the identifier in the pragma, or null
96 if it was omitted. LOC is the location of the #pragma. */
99 c_finish_omp_critical (location_t loc
, tree body
, tree name
, tree clauses
)
101 tree stmt
= make_node (OMP_CRITICAL
);
102 TREE_TYPE (stmt
) = void_type_node
;
103 OMP_CRITICAL_BODY (stmt
) = body
;
104 OMP_CRITICAL_NAME (stmt
) = name
;
105 OMP_CRITICAL_CLAUSES (stmt
) = clauses
;
106 SET_EXPR_LOCATION (stmt
, loc
);
107 return add_stmt (stmt
);
110 /* Complete a #pragma omp ordered construct. STMT is the structured-block
111 that follows the pragma. LOC is the location of the #pragma. */
114 c_finish_omp_ordered (location_t loc
, tree clauses
, tree stmt
)
116 tree t
= make_node (OMP_ORDERED
);
117 TREE_TYPE (t
) = void_type_node
;
118 OMP_ORDERED_BODY (t
) = stmt
;
119 OMP_ORDERED_CLAUSES (t
) = clauses
;
120 SET_EXPR_LOCATION (t
, loc
);
125 /* Complete a #pragma omp barrier construct. LOC is the location of
129 c_finish_omp_barrier (location_t loc
)
133 x
= builtin_decl_explicit (BUILT_IN_GOMP_BARRIER
);
134 x
= build_call_expr_loc (loc
, x
, 0);
139 /* Complete a #pragma omp taskwait construct. LOC is the location of the
143 c_finish_omp_taskwait (location_t loc
)
147 x
= builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT
);
148 x
= build_call_expr_loc (loc
, x
, 0);
153 /* Complete a #pragma omp taskyield construct. LOC is the location of the
157 c_finish_omp_taskyield (location_t loc
)
161 x
= builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD
);
162 x
= build_call_expr_loc (loc
, x
, 0);
167 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
168 the expression to be implemented atomically is LHS opcode= RHS.
169 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
170 opcode= RHS with the new or old content of LHS returned.
171 LOC is the location of the atomic statement. The value returned
172 is either error_mark_node (if the construct was erroneous) or an
173 OMP_ATOMIC* node which should be added to the current statement
174 tree with add_stmt. If TEST is set, avoid calling save_expr
175 or create_tmp_var*. */
178 c_finish_omp_atomic (location_t loc
, enum tree_code code
,
179 enum tree_code opcode
, tree lhs
, tree rhs
,
180 tree v
, tree lhs1
, tree rhs1
, bool swapped
, bool seq_cst
,
183 tree x
, type
, addr
, pre
= NULL_TREE
;
185 if (lhs
== error_mark_node
|| rhs
== error_mark_node
186 || v
== error_mark_node
|| lhs1
== error_mark_node
187 || rhs1
== error_mark_node
)
188 return error_mark_node
;
190 /* ??? According to one reading of the OpenMP spec, complex type are
191 supported, but there are no atomic stores for any architecture.
192 But at least icc 9.0 doesn't support complex types here either.
193 And lets not even talk about vector types... */
194 type
= TREE_TYPE (lhs
);
195 if (!INTEGRAL_TYPE_P (type
)
196 && !POINTER_TYPE_P (type
)
197 && !SCALAR_FLOAT_TYPE_P (type
))
199 error_at (loc
, "invalid expression type for %<#pragma omp atomic%>");
200 return error_mark_node
;
202 if (TYPE_ATOMIC (type
))
204 error_at (loc
, "%<_Atomic%> expression in %<#pragma omp atomic%>");
205 return error_mark_node
;
208 if (opcode
== RDIV_EXPR
)
209 opcode
= TRUNC_DIV_EXPR
;
211 /* ??? Validate that rhs does not overlap lhs. */
213 /* Take and save the address of the lhs. From then on we'll reference it
215 addr
= build_unary_op (loc
, ADDR_EXPR
, lhs
, false);
216 if (addr
== error_mark_node
)
217 return error_mark_node
;
219 addr
= save_expr (addr
);
221 && TREE_CODE (addr
) != SAVE_EXPR
222 && (TREE_CODE (addr
) != ADDR_EXPR
223 || !VAR_P (TREE_OPERAND (addr
, 0))))
225 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
226 it even after unsharing function body. */
227 tree var
= create_tmp_var_raw (TREE_TYPE (addr
));
228 DECL_CONTEXT (var
) = current_function_decl
;
229 addr
= build4 (TARGET_EXPR
, TREE_TYPE (addr
), var
, addr
, NULL
, NULL
);
231 lhs
= build_indirect_ref (loc
, addr
, RO_NULL
);
233 if (code
== OMP_ATOMIC_READ
)
235 x
= build1 (OMP_ATOMIC_READ
, type
, addr
);
236 SET_EXPR_LOCATION (x
, loc
);
237 OMP_ATOMIC_SEQ_CST (x
) = seq_cst
;
238 return build_modify_expr (loc
, v
, NULL_TREE
, NOP_EXPR
,
242 /* There are lots of warnings, errors, and conversions that need to happen
243 in the course of interpreting a statement. Use the normal mechanisms
244 to do this, and then take it apart again. */
247 rhs
= build_binary_op (loc
, opcode
, rhs
, lhs
, 1);
250 bool save
= in_late_binary_op
;
251 in_late_binary_op
= true;
252 x
= build_modify_expr (loc
, lhs
, NULL_TREE
, opcode
, loc
, rhs
, NULL_TREE
);
253 in_late_binary_op
= save
;
254 if (x
== error_mark_node
)
255 return error_mark_node
;
256 if (TREE_CODE (x
) == COMPOUND_EXPR
)
258 pre
= TREE_OPERAND (x
, 0);
259 gcc_assert (TREE_CODE (pre
) == SAVE_EXPR
);
260 x
= TREE_OPERAND (x
, 1);
262 gcc_assert (TREE_CODE (x
) == MODIFY_EXPR
);
263 rhs
= TREE_OPERAND (x
, 1);
265 /* Punt the actual generation of atomic operations to common code. */
266 if (code
== OMP_ATOMIC
)
267 type
= void_type_node
;
268 x
= build2 (code
, type
, addr
, rhs
);
269 SET_EXPR_LOCATION (x
, loc
);
270 OMP_ATOMIC_SEQ_CST (x
) = seq_cst
;
272 /* Generally it is hard to prove lhs1 and lhs are the same memory
273 location, just diagnose different variables. */
280 if (code
== OMP_ATOMIC
)
281 error_at (loc
, "%<#pragma omp atomic update%> uses two different "
282 "variables for memory");
284 error_at (loc
, "%<#pragma omp atomic capture%> uses two different "
285 "variables for memory");
286 return error_mark_node
;
289 if (code
!= OMP_ATOMIC
)
291 /* Generally it is hard to prove lhs1 and lhs are the same memory
292 location, just diagnose different variables. */
293 if (lhs1
&& VAR_P (lhs1
) && VAR_P (lhs
))
295 if (lhs1
!= lhs
&& !test
)
297 error_at (loc
, "%<#pragma omp atomic capture%> uses two "
298 "different variables for memory");
299 return error_mark_node
;
302 x
= build_modify_expr (loc
, v
, NULL_TREE
, NOP_EXPR
,
304 if (rhs1
&& rhs1
!= lhs
)
306 tree rhs1addr
= build_unary_op (loc
, ADDR_EXPR
, rhs1
, false);
307 if (rhs1addr
== error_mark_node
)
308 return error_mark_node
;
309 x
= omit_one_operand_loc (loc
, type
, x
, rhs1addr
);
311 if (lhs1
&& lhs1
!= lhs
)
313 tree lhs1addr
= build_unary_op (loc
, ADDR_EXPR
, lhs1
, false);
314 if (lhs1addr
== error_mark_node
)
315 return error_mark_node
;
316 if (code
== OMP_ATOMIC_CAPTURE_OLD
)
317 x
= omit_one_operand_loc (loc
, type
, x
, lhs1addr
);
322 x
= omit_two_operands_loc (loc
, type
, x
, x
, lhs1addr
);
326 else if (rhs1
&& rhs1
!= lhs
)
328 tree rhs1addr
= build_unary_op (loc
, ADDR_EXPR
, rhs1
, false);
329 if (rhs1addr
== error_mark_node
)
330 return error_mark_node
;
331 x
= omit_one_operand_loc (loc
, type
, x
, rhs1addr
);
335 x
= omit_one_operand_loc (loc
, type
, x
, pre
);
340 /* Complete a #pragma omp flush construct. We don't do anything with
341 the variable list that the syntax allows. LOC is the location of
345 c_finish_omp_flush (location_t loc
)
349 x
= builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE
);
350 x
= build_call_expr_loc (loc
, x
, 0);
355 /* Check and canonicalize OMP_FOR increment expression.
356 Helper function for c_finish_omp_for. */
359 check_omp_for_incr_expr (location_t loc
, tree exp
, tree decl
)
363 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp
))
364 || TYPE_PRECISION (TREE_TYPE (exp
)) < TYPE_PRECISION (TREE_TYPE (decl
)))
365 return error_mark_node
;
368 return build_int_cst (TREE_TYPE (exp
), 0);
370 switch (TREE_CODE (exp
))
373 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
374 if (t
!= error_mark_node
)
375 return fold_convert_loc (loc
, TREE_TYPE (exp
), t
);
378 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
379 if (t
!= error_mark_node
)
380 return fold_build2_loc (loc
, MINUS_EXPR
,
381 TREE_TYPE (exp
), t
, TREE_OPERAND (exp
, 1));
384 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
385 if (t
!= error_mark_node
)
386 return fold_build2_loc (loc
, PLUS_EXPR
,
387 TREE_TYPE (exp
), t
, TREE_OPERAND (exp
, 1));
388 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 1), decl
);
389 if (t
!= error_mark_node
)
390 return fold_build2_loc (loc
, PLUS_EXPR
,
391 TREE_TYPE (exp
), TREE_OPERAND (exp
, 0), t
);
395 /* cp_build_modify_expr forces preevaluation of the RHS to make
396 sure that it is evaluated before the lvalue-rvalue conversion
397 is applied to the LHS. Reconstruct the original expression. */
398 tree op0
= TREE_OPERAND (exp
, 0);
399 if (TREE_CODE (op0
) == TARGET_EXPR
400 && !VOID_TYPE_P (TREE_TYPE (op0
)))
402 tree op1
= TREE_OPERAND (exp
, 1);
403 tree temp
= TARGET_EXPR_SLOT (op0
);
404 if (BINARY_CLASS_P (op1
)
405 && TREE_OPERAND (op1
, 1) == temp
)
407 op1
= copy_node (op1
);
408 TREE_OPERAND (op1
, 1) = TARGET_EXPR_INITIAL (op0
);
409 return check_omp_for_incr_expr (loc
, op1
, decl
);
418 return error_mark_node
;
421 /* If the OMP_FOR increment expression in INCR is of pointer type,
422 canonicalize it into an expression handled by gimplify_omp_for()
423 and return it. DECL is the iteration variable. */
426 c_omp_for_incr_canonicalize_ptr (location_t loc
, tree decl
, tree incr
)
428 if (POINTER_TYPE_P (TREE_TYPE (decl
))
429 && TREE_OPERAND (incr
, 1))
431 tree t
= fold_convert_loc (loc
,
432 sizetype
, TREE_OPERAND (incr
, 1));
434 if (TREE_CODE (incr
) == POSTDECREMENT_EXPR
435 || TREE_CODE (incr
) == PREDECREMENT_EXPR
)
436 t
= fold_build1_loc (loc
, NEGATE_EXPR
, sizetype
, t
);
437 t
= fold_build_pointer_plus (decl
, t
);
438 incr
= build2 (MODIFY_EXPR
, void_type_node
, decl
, t
);
443 /* Validate and generate OMP_FOR.
444 DECLV is a vector of iteration variables, for each collapsed loop.
446 ORIG_DECLV, if non-NULL, is a vector with the original iteration
447 variables (prior to any transformations, by say, C++ iterators).
449 INITV, CONDV and INCRV are vectors containing initialization
450 expressions, controlling predicates and increment expressions.
451 BODY is the body of the loop and PRE_BODY statements that go before
455 c_finish_omp_for (location_t locus
, enum tree_code code
, tree declv
,
456 tree orig_declv
, tree initv
, tree condv
, tree incrv
,
457 tree body
, tree pre_body
)
463 if ((code
== CILK_SIMD
|| code
== CILK_FOR
)
464 && !c_check_cilk_loop (locus
, TREE_VEC_ELT (declv
, 0)))
467 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (initv
));
468 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (condv
));
469 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (incrv
));
470 for (i
= 0; i
< TREE_VEC_LENGTH (declv
); i
++)
472 tree decl
= TREE_VEC_ELT (declv
, i
);
473 tree init
= TREE_VEC_ELT (initv
, i
);
474 tree cond
= TREE_VEC_ELT (condv
, i
);
475 tree incr
= TREE_VEC_ELT (incrv
, i
);
478 if (EXPR_HAS_LOCATION (init
))
479 elocus
= EXPR_LOCATION (init
);
481 /* Validate the iteration variable. */
482 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl
))
483 && TREE_CODE (TREE_TYPE (decl
)) != POINTER_TYPE
)
485 error_at (elocus
, "invalid type for iteration variable %qE", decl
);
488 else if (TYPE_ATOMIC (TREE_TYPE (decl
)))
490 error_at (elocus
, "%<_Atomic%> iteration variable %qE", decl
);
492 /* _Atomic iterator confuses stuff too much, so we risk ICE
493 trying to diagnose it further. */
497 /* In the case of "for (int i = 0...)", init will be a decl. It should
498 have a DECL_INITIAL that we can turn into an assignment. */
501 elocus
= DECL_SOURCE_LOCATION (decl
);
503 init
= DECL_INITIAL (decl
);
506 error_at (elocus
, "%qE is not initialized", decl
);
507 init
= integer_zero_node
;
510 DECL_INITIAL (decl
) = NULL_TREE
;
512 init
= build_modify_expr (elocus
, decl
, NULL_TREE
, NOP_EXPR
,
513 /* FIXME diagnostics: This should
514 be the location of the INIT. */
519 if (init
!= error_mark_node
)
521 gcc_assert (TREE_CODE (init
) == MODIFY_EXPR
);
522 gcc_assert (TREE_OPERAND (init
, 0) == decl
);
525 if (cond
== NULL_TREE
)
527 error_at (elocus
, "missing controlling predicate");
532 bool cond_ok
= false;
534 if (EXPR_HAS_LOCATION (cond
))
535 elocus
= EXPR_LOCATION (cond
);
537 if (TREE_CODE (cond
) == LT_EXPR
538 || TREE_CODE (cond
) == LE_EXPR
539 || TREE_CODE (cond
) == GT_EXPR
540 || TREE_CODE (cond
) == GE_EXPR
541 || TREE_CODE (cond
) == NE_EXPR
542 || TREE_CODE (cond
) == EQ_EXPR
)
544 tree op0
= TREE_OPERAND (cond
, 0);
545 tree op1
= TREE_OPERAND (cond
, 1);
547 /* 2.5.1. The comparison in the condition is computed in
548 the type of DECL, otherwise the behavior is undefined.
554 according to ISO will be evaluated as:
559 if (TREE_CODE (op0
) == NOP_EXPR
560 && decl
== TREE_OPERAND (op0
, 0))
562 TREE_OPERAND (cond
, 0) = TREE_OPERAND (op0
, 0);
563 TREE_OPERAND (cond
, 1)
564 = fold_build1_loc (elocus
, NOP_EXPR
, TREE_TYPE (decl
),
565 TREE_OPERAND (cond
, 1));
567 else if (TREE_CODE (op1
) == NOP_EXPR
568 && decl
== TREE_OPERAND (op1
, 0))
570 TREE_OPERAND (cond
, 1) = TREE_OPERAND (op1
, 0);
571 TREE_OPERAND (cond
, 0)
572 = fold_build1_loc (elocus
, NOP_EXPR
, TREE_TYPE (decl
),
573 TREE_OPERAND (cond
, 0));
576 if (decl
== TREE_OPERAND (cond
, 0))
578 else if (decl
== TREE_OPERAND (cond
, 1))
581 swap_tree_comparison (TREE_CODE (cond
)));
582 TREE_OPERAND (cond
, 1) = TREE_OPERAND (cond
, 0);
583 TREE_OPERAND (cond
, 0) = decl
;
587 if (TREE_CODE (cond
) == NE_EXPR
588 || TREE_CODE (cond
) == EQ_EXPR
)
590 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl
)))
592 if (code
!= CILK_SIMD
&& code
!= CILK_FOR
)
595 else if (operand_equal_p (TREE_OPERAND (cond
, 1),
596 TYPE_MIN_VALUE (TREE_TYPE (decl
)),
598 TREE_SET_CODE (cond
, TREE_CODE (cond
) == NE_EXPR
599 ? GT_EXPR
: LE_EXPR
);
600 else if (operand_equal_p (TREE_OPERAND (cond
, 1),
601 TYPE_MAX_VALUE (TREE_TYPE (decl
)),
603 TREE_SET_CODE (cond
, TREE_CODE (cond
) == NE_EXPR
604 ? LT_EXPR
: GE_EXPR
);
605 else if (code
!= CILK_SIMD
&& code
!= CILK_FOR
)
612 error_at (elocus
, "invalid controlling predicate");
617 if (incr
== NULL_TREE
)
619 error_at (elocus
, "missing increment expression");
624 bool incr_ok
= false;
626 if (EXPR_HAS_LOCATION (incr
))
627 elocus
= EXPR_LOCATION (incr
);
629 /* Check all the valid increment expressions: v++, v--, ++v, --v,
630 v = v + incr, v = incr + v and v = v - incr. */
631 switch (TREE_CODE (incr
))
633 case POSTINCREMENT_EXPR
:
634 case PREINCREMENT_EXPR
:
635 case POSTDECREMENT_EXPR
:
636 case PREDECREMENT_EXPR
:
637 if (TREE_OPERAND (incr
, 0) != decl
)
641 incr
= c_omp_for_incr_canonicalize_ptr (elocus
, decl
, incr
);
645 if (TREE_CODE (TREE_OPERAND (incr
, 0)) != SAVE_EXPR
646 || TREE_CODE (TREE_OPERAND (incr
, 1)) != MODIFY_EXPR
)
648 incr
= TREE_OPERAND (incr
, 1);
651 if (TREE_OPERAND (incr
, 0) != decl
)
653 if (TREE_OPERAND (incr
, 1) == decl
)
655 if (TREE_CODE (TREE_OPERAND (incr
, 1)) == PLUS_EXPR
656 && (TREE_OPERAND (TREE_OPERAND (incr
, 1), 0) == decl
657 || TREE_OPERAND (TREE_OPERAND (incr
, 1), 1) == decl
))
659 else if ((TREE_CODE (TREE_OPERAND (incr
, 1)) == MINUS_EXPR
660 || (TREE_CODE (TREE_OPERAND (incr
, 1))
661 == POINTER_PLUS_EXPR
))
662 && TREE_OPERAND (TREE_OPERAND (incr
, 1), 0) == decl
)
666 tree t
= check_omp_for_incr_expr (elocus
,
667 TREE_OPERAND (incr
, 1),
669 if (t
!= error_mark_node
)
672 t
= build2 (PLUS_EXPR
, TREE_TYPE (decl
), decl
, t
);
673 incr
= build2 (MODIFY_EXPR
, void_type_node
, decl
, t
);
683 error_at (elocus
, "invalid increment expression");
688 TREE_VEC_ELT (initv
, i
) = init
;
689 TREE_VEC_ELT (incrv
, i
) = incr
;
696 tree t
= make_node (code
);
698 TREE_TYPE (t
) = void_type_node
;
699 OMP_FOR_INIT (t
) = initv
;
700 OMP_FOR_COND (t
) = condv
;
701 OMP_FOR_INCR (t
) = incrv
;
702 OMP_FOR_BODY (t
) = body
;
703 OMP_FOR_PRE_BODY (t
) = pre_body
;
704 OMP_FOR_ORIG_DECLS (t
) = orig_declv
;
706 SET_EXPR_LOCATION (t
, locus
);
711 /* Type for passing data in between c_omp_check_loop_iv and
712 c_omp_check_loop_iv_r. */
714 struct c_omp_check_loop_iv_data
722 hash_set
<tree
> *ppset
;
725 /* Helper function called via walk_tree, to diagnose uses
726 of associated loop IVs inside of lb, b and incr expressions
730 c_omp_check_loop_iv_r (tree
*tp
, int *walk_subtrees
, void *data
)
732 struct c_omp_check_loop_iv_data
*d
733 = (struct c_omp_check_loop_iv_data
*) data
;
737 for (i
= 0; i
< TREE_VEC_LENGTH (d
->declv
); i
++)
738 if (*tp
== TREE_VEC_ELT (d
->declv
, i
))
740 location_t loc
= d
->expr_loc
;
741 if (loc
== UNKNOWN_LOCATION
)
746 error_at (loc
, "initializer expression refers to "
747 "iteration variable %qD", *tp
);
750 error_at (loc
, "condition expression refers to "
751 "iteration variable %qD", *tp
);
754 error_at (loc
, "increment expression refers to "
755 "iteration variable %qD", *tp
);
761 /* Don't walk dtors added by C++ wrap_cleanups_r. */
762 else if (TREE_CODE (*tp
) == TRY_CATCH_EXPR
763 && TRY_CATCH_IS_CLEANUP (*tp
))
766 return walk_tree_1 (&TREE_OPERAND (*tp
, 0), c_omp_check_loop_iv_r
, data
,
773 /* Diagnose invalid references to loop iterators in lb, b and incr
777 c_omp_check_loop_iv (tree stmt
, tree declv
, walk_tree_lh lh
)
780 struct c_omp_check_loop_iv_data data
;
785 data
.stmt_loc
= EXPR_LOCATION (stmt
);
788 for (i
= 0; i
< TREE_VEC_LENGTH (OMP_FOR_INIT (stmt
)); i
++)
790 tree init
= TREE_VEC_ELT (OMP_FOR_INIT (stmt
), i
);
791 gcc_assert (TREE_CODE (init
) == MODIFY_EXPR
);
792 tree decl
= TREE_OPERAND (init
, 0);
793 tree cond
= TREE_VEC_ELT (OMP_FOR_COND (stmt
), i
);
794 gcc_assert (COMPARISON_CLASS_P (cond
));
795 gcc_assert (TREE_OPERAND (cond
, 0) == decl
);
796 tree incr
= TREE_VEC_ELT (OMP_FOR_INCR (stmt
), i
);
797 data
.expr_loc
= EXPR_LOCATION (TREE_OPERAND (init
, 1));
799 walk_tree_1 (&TREE_OPERAND (init
, 1),
800 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
801 /* Don't warn for C++ random access iterators here, the
802 expression then involves the subtraction and always refers
803 to the original value. The C++ FE needs to warn on those
805 if (decl
== TREE_VEC_ELT (declv
, i
))
807 data
.expr_loc
= EXPR_LOCATION (cond
);
809 walk_tree_1 (&TREE_OPERAND (cond
, 1),
810 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
812 if (TREE_CODE (incr
) == MODIFY_EXPR
)
814 gcc_assert (TREE_OPERAND (incr
, 0) == decl
);
815 incr
= TREE_OPERAND (incr
, 1);
817 if (TREE_CODE (incr
) == PLUS_EXPR
818 && TREE_OPERAND (incr
, 1) == decl
)
820 data
.expr_loc
= EXPR_LOCATION (TREE_OPERAND (incr
, 0));
821 walk_tree_1 (&TREE_OPERAND (incr
, 0),
822 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
826 data
.expr_loc
= EXPR_LOCATION (TREE_OPERAND (incr
, 1));
827 walk_tree_1 (&TREE_OPERAND (incr
, 1),
828 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
835 /* Similar, but allows to check the init or cond expressions individually. */
838 c_omp_check_loop_iv_exprs (location_t stmt_loc
, tree declv
, tree decl
,
839 tree init
, tree cond
, walk_tree_lh lh
)
842 struct c_omp_check_loop_iv_data data
;
846 data
.stmt_loc
= stmt_loc
;
851 data
.expr_loc
= EXPR_LOCATION (init
);
854 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
858 gcc_assert (COMPARISON_CLASS_P (cond
));
859 data
.expr_loc
= EXPR_LOCATION (init
);
861 if (TREE_OPERAND (cond
, 0) == decl
)
862 walk_tree_1 (&TREE_OPERAND (cond
, 1),
863 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
865 walk_tree_1 (&TREE_OPERAND (cond
, 0),
866 c_omp_check_loop_iv_r
, &data
, &pset
, lh
);
871 /* This function splits clauses for OpenACC combined loop
872 constructs. OpenACC combined loop constructs are:
873 #pragma acc kernels loop
874 #pragma acc parallel loop */
877 c_oacc_split_loop_clauses (tree clauses
, tree
*not_loop_clauses
,
880 tree next
, loop_clauses
, nc
;
882 loop_clauses
= *not_loop_clauses
= NULL_TREE
;
883 for (; clauses
; clauses
= next
)
885 next
= OMP_CLAUSE_CHAIN (clauses
);
887 switch (OMP_CLAUSE_CODE (clauses
))
890 case OMP_CLAUSE_COLLAPSE
:
891 case OMP_CLAUSE_TILE
:
892 case OMP_CLAUSE_GANG
:
893 case OMP_CLAUSE_WORKER
:
894 case OMP_CLAUSE_VECTOR
:
895 case OMP_CLAUSE_AUTO
:
897 case OMP_CLAUSE_INDEPENDENT
:
898 case OMP_CLAUSE_PRIVATE
:
899 OMP_CLAUSE_CHAIN (clauses
) = loop_clauses
;
900 loop_clauses
= clauses
;
903 /* Reductions must be duplicated on both constructs. */
904 case OMP_CLAUSE_REDUCTION
:
907 nc
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
908 OMP_CLAUSE_REDUCTION
);
909 OMP_CLAUSE_DECL (nc
) = OMP_CLAUSE_DECL (clauses
);
910 OMP_CLAUSE_REDUCTION_CODE (nc
)
911 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
912 OMP_CLAUSE_CHAIN (nc
) = *not_loop_clauses
;
913 *not_loop_clauses
= nc
;
916 OMP_CLAUSE_CHAIN (clauses
) = loop_clauses
;
917 loop_clauses
= clauses
;
920 /* Parallel/kernels clauses. */
922 OMP_CLAUSE_CHAIN (clauses
) = *not_loop_clauses
;
923 *not_loop_clauses
= clauses
;
931 /* This function attempts to split or duplicate clauses for OpenMP
932 combined/composite constructs. Right now there are 21 different
933 constructs. CODE is the innermost construct in the combined construct,
934 and MASK allows to determine which constructs are combined together,
935 as every construct has at least one clause that no other construct
936 has (except for OMP_SECTIONS, but that can be only combined with parallel).
937 OpenMP combined/composite constructs are:
938 #pragma omp distribute parallel for
939 #pragma omp distribute parallel for simd
940 #pragma omp distribute simd
942 #pragma omp parallel for
943 #pragma omp parallel for simd
944 #pragma omp parallel sections
945 #pragma omp target parallel
946 #pragma omp target parallel for
947 #pragma omp target parallel for simd
948 #pragma omp target teams
949 #pragma omp target teams distribute
950 #pragma omp target teams distribute parallel for
951 #pragma omp target teams distribute parallel for simd
952 #pragma omp target teams distribute simd
953 #pragma omp target simd
954 #pragma omp taskloop simd
955 #pragma omp teams distribute
956 #pragma omp teams distribute parallel for
957 #pragma omp teams distribute parallel for simd
958 #pragma omp teams distribute simd */
961 c_omp_split_clauses (location_t loc
, enum tree_code code
,
962 omp_clause_mask mask
, tree clauses
, tree
*cclauses
)
965 enum c_omp_clause_split s
;
968 for (i
= 0; i
< C_OMP_CLAUSE_SPLIT_COUNT
; i
++)
970 /* Add implicit nowait clause on
971 #pragma omp parallel {for,for simd,sections}. */
972 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
977 cclauses
[C_OMP_CLAUSE_SPLIT_FOR
]
978 = build_omp_clause (loc
, OMP_CLAUSE_NOWAIT
);
981 cclauses
[C_OMP_CLAUSE_SPLIT_SECTIONS
]
982 = build_omp_clause (loc
, OMP_CLAUSE_NOWAIT
);
988 for (; clauses
; clauses
= next
)
990 next
= OMP_CLAUSE_CHAIN (clauses
);
992 switch (OMP_CLAUSE_CODE (clauses
))
994 /* First the clauses that are unique to some constructs. */
995 case OMP_CLAUSE_DEVICE
:
997 case OMP_CLAUSE_IS_DEVICE_PTR
:
998 case OMP_CLAUSE_DEFAULTMAP
:
999 case OMP_CLAUSE_DEPEND
:
1000 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
1002 case OMP_CLAUSE_NUM_TEAMS
:
1003 case OMP_CLAUSE_THREAD_LIMIT
:
1004 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
1006 case OMP_CLAUSE_DIST_SCHEDULE
:
1007 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
1009 case OMP_CLAUSE_COPYIN
:
1010 case OMP_CLAUSE_NUM_THREADS
:
1011 case OMP_CLAUSE_PROC_BIND
:
1012 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1014 case OMP_CLAUSE_ORDERED
:
1015 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1017 case OMP_CLAUSE_SCHEDULE
:
1018 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1019 if (code
!= OMP_SIMD
)
1020 OMP_CLAUSE_SCHEDULE_SIMD (clauses
) = 0;
1022 case OMP_CLAUSE_SAFELEN
:
1023 case OMP_CLAUSE_SIMDLEN
:
1024 case OMP_CLAUSE_ALIGNED
:
1025 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
1027 case OMP_CLAUSE_GRAINSIZE
:
1028 case OMP_CLAUSE_NUM_TASKS
:
1029 case OMP_CLAUSE_FINAL
:
1030 case OMP_CLAUSE_UNTIED
:
1031 case OMP_CLAUSE_MERGEABLE
:
1032 case OMP_CLAUSE_NOGROUP
:
1033 case OMP_CLAUSE_PRIORITY
:
1034 s
= C_OMP_CLAUSE_SPLIT_TASKLOOP
;
1036 /* Duplicate this to all of taskloop, distribute, for and simd. */
1037 case OMP_CLAUSE_COLLAPSE
:
1038 if (code
== OMP_SIMD
)
1040 if ((mask
& ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)
1041 | (OMP_CLAUSE_MASK_1
1042 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)
1043 | (OMP_CLAUSE_MASK_1
1044 << PRAGMA_OMP_CLAUSE_NOGROUP
))) != 0)
1046 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1047 OMP_CLAUSE_COLLAPSE
);
1048 OMP_CLAUSE_COLLAPSE_EXPR (c
)
1049 = OMP_CLAUSE_COLLAPSE_EXPR (clauses
);
1050 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
];
1051 cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] = c
;
1055 /* This must be #pragma omp target simd */
1056 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
1060 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
1062 if ((mask
& (OMP_CLAUSE_MASK_1
1063 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
1065 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1066 OMP_CLAUSE_COLLAPSE
);
1067 OMP_CLAUSE_COLLAPSE_EXPR (c
)
1068 = OMP_CLAUSE_COLLAPSE_EXPR (clauses
);
1069 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_FOR
];
1070 cclauses
[C_OMP_CLAUSE_SPLIT_FOR
] = c
;
1071 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
1074 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1076 else if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP
))
1078 s
= C_OMP_CLAUSE_SPLIT_TASKLOOP
;
1080 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
1082 /* Private clause is supported on all constructs,
1083 it is enough to put it on the innermost one. For
1084 #pragma omp {for,sections} put it on parallel though,
1085 as that's what we did for OpenMP 3.1. */
1086 case OMP_CLAUSE_PRIVATE
:
1089 case OMP_SIMD
: s
= C_OMP_CLAUSE_SPLIT_SIMD
; break;
1090 case OMP_FOR
: case OMP_SECTIONS
:
1091 case OMP_PARALLEL
: s
= C_OMP_CLAUSE_SPLIT_PARALLEL
; break;
1092 case OMP_DISTRIBUTE
: s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
; break;
1093 case OMP_TEAMS
: s
= C_OMP_CLAUSE_SPLIT_TEAMS
; break;
1094 default: gcc_unreachable ();
1097 /* Firstprivate clause is supported on all constructs but
1098 simd. Put it on the outermost of those and duplicate on teams
1100 case OMP_CLAUSE_FIRSTPRIVATE
:
1101 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_MAP
))
1104 if (code
== OMP_SIMD
1105 && (mask
& ((OMP_CLAUSE_MASK_1
1106 << PRAGMA_OMP_CLAUSE_NUM_THREADS
)
1107 | (OMP_CLAUSE_MASK_1
1108 << PRAGMA_OMP_CLAUSE_NUM_TEAMS
))) == 0)
1110 /* This must be #pragma omp target simd. */
1111 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
1114 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1115 OMP_CLAUSE_FIRSTPRIVATE
);
1116 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1117 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_TARGET
];
1118 cclauses
[C_OMP_CLAUSE_SPLIT_TARGET
] = c
;
1120 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
1123 if ((mask
& ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
)
1124 | (OMP_CLAUSE_MASK_1
1125 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
))) != 0)
1127 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1128 OMP_CLAUSE_FIRSTPRIVATE
);
1129 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1130 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
];
1131 cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] = c
;
1132 if ((mask
& (OMP_CLAUSE_MASK_1
1133 << PRAGMA_OMP_CLAUSE_NUM_TEAMS
)) != 0)
1134 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
1136 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
1140 #pragma omp parallel{, for{, simd}, sections}
1142 #pragma omp target parallel. */
1143 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1145 else if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
1148 /* This must be one of
1149 #pragma omp {,target }teams distribute
1150 #pragma omp target teams
1151 #pragma omp {,target }teams distribute simd. */
1152 gcc_assert (code
== OMP_DISTRIBUTE
1153 || code
== OMP_TEAMS
1154 || code
== OMP_SIMD
);
1155 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
1157 else if ((mask
& (OMP_CLAUSE_MASK_1
1158 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
1160 /* This must be #pragma omp distribute simd. */
1161 gcc_assert (code
== OMP_SIMD
);
1162 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
1164 else if ((mask
& (OMP_CLAUSE_MASK_1
1165 << PRAGMA_OMP_CLAUSE_NOGROUP
)) != 0)
1167 /* This must be #pragma omp taskloop simd. */
1168 gcc_assert (code
== OMP_SIMD
);
1169 s
= C_OMP_CLAUSE_SPLIT_TASKLOOP
;
1173 /* This must be #pragma omp for simd. */
1174 gcc_assert (code
== OMP_SIMD
);
1175 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1178 /* Lastprivate is allowed on distribute, for, sections and simd. In
1179 parallel {for{, simd},sections} we actually want to put it on
1180 parallel rather than for or sections. */
1181 case OMP_CLAUSE_LASTPRIVATE
:
1182 if (code
== OMP_DISTRIBUTE
)
1184 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
1187 if ((mask
& (OMP_CLAUSE_MASK_1
1188 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
1190 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1191 OMP_CLAUSE_LASTPRIVATE
);
1192 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1193 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_DISTRIBUTE
];
1194 cclauses
[C_OMP_CLAUSE_SPLIT_DISTRIBUTE
] = c
;
1196 if (code
== OMP_FOR
|| code
== OMP_SECTIONS
)
1198 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
1200 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1202 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1205 gcc_assert (code
== OMP_SIMD
);
1206 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
1208 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1209 OMP_CLAUSE_LASTPRIVATE
);
1210 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1211 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
1213 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1215 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1216 OMP_CLAUSE_CHAIN (c
) = cclauses
[s
];
1219 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
1221 /* Shared and default clauses are allowed on parallel, teams and
1223 case OMP_CLAUSE_SHARED
:
1224 case OMP_CLAUSE_DEFAULT
:
1225 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP
))
1228 s
= C_OMP_CLAUSE_SPLIT_TASKLOOP
;
1231 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
1234 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
1237 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
1240 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1241 OMP_CLAUSE_CODE (clauses
));
1242 if (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_SHARED
)
1243 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1245 OMP_CLAUSE_DEFAULT_KIND (c
)
1246 = OMP_CLAUSE_DEFAULT_KIND (clauses
);
1247 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
];
1248 cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
] = c
;
1250 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1252 /* Reduction is allowed on simd, for, parallel, sections and teams.
1253 Duplicate it on all of them, but omit on for or sections if
1254 parallel is present. */
1255 case OMP_CLAUSE_REDUCTION
:
1256 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
1258 if (code
== OMP_SIMD
)
1260 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1261 OMP_CLAUSE_REDUCTION
);
1262 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1263 OMP_CLAUSE_REDUCTION_CODE (c
)
1264 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
1265 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
)
1266 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses
);
1267 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c
)
1268 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses
);
1269 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
];
1270 cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] = c
;
1272 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
1275 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1276 OMP_CLAUSE_REDUCTION
);
1277 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
1278 OMP_CLAUSE_REDUCTION_CODE (c
)
1279 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
1280 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
)
1281 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses
);
1282 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c
)
1283 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses
);
1284 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
];
1285 cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] = c
;
1286 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
1288 else if ((mask
& (OMP_CLAUSE_MASK_1
1289 << PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
1290 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1292 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1294 else if (code
== OMP_SECTIONS
|| code
== OMP_PARALLEL
)
1295 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1296 else if (code
== OMP_SIMD
)
1297 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
1299 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
1302 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP
))
1304 s
= C_OMP_CLAUSE_SPLIT_TASKLOOP
;
1305 else if ((mask
& (OMP_CLAUSE_MASK_1
1306 << PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
1308 if ((mask
& (OMP_CLAUSE_MASK_1
1309 << PRAGMA_OMP_CLAUSE_MAP
)) != 0)
1311 if (OMP_CLAUSE_IF_MODIFIER (clauses
) == OMP_PARALLEL
)
1312 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1313 else if (OMP_CLAUSE_IF_MODIFIER (clauses
) == OMP_TARGET
)
1314 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
1315 else if (OMP_CLAUSE_IF_MODIFIER (clauses
) == ERROR_MARK
)
1317 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
1319 OMP_CLAUSE_IF_MODIFIER (c
)
1320 = OMP_CLAUSE_IF_MODIFIER (clauses
);
1321 OMP_CLAUSE_IF_EXPR (c
) = OMP_CLAUSE_IF_EXPR (clauses
);
1322 OMP_CLAUSE_CHAIN (c
)
1323 = cclauses
[C_OMP_CLAUSE_SPLIT_TARGET
];
1324 cclauses
[C_OMP_CLAUSE_SPLIT_TARGET
] = c
;
1325 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1329 error_at (OMP_CLAUSE_LOCATION (clauses
),
1330 "expected %<parallel%> or %<target%> %<if%> "
1336 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
1339 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
1341 case OMP_CLAUSE_LINEAR
:
1342 /* Linear clause is allowed on simd and for. Put it on the
1343 innermost construct. */
1344 if (code
== OMP_SIMD
)
1345 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
1347 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1349 case OMP_CLAUSE_NOWAIT
:
1350 /* Nowait clause is allowed on target, for and sections, but
1351 is not allowed on parallel for or parallel sections. Therefore,
1352 put it on target construct if present, because that can only
1353 be combined with parallel for{, simd} and not with for{, simd},
1354 otherwise to the worksharing construct. */
1355 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_MAP
))
1357 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
1359 s
= C_OMP_CLAUSE_SPLIT_FOR
;
1364 OMP_CLAUSE_CHAIN (clauses
) = cclauses
[s
];
1365 cclauses
[s
] = clauses
;
1371 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_MAP
)) == 0)
1372 gcc_assert (cclauses
[C_OMP_CLAUSE_SPLIT_TARGET
] == NULL_TREE
);
1373 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
)) == 0)
1374 gcc_assert (cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
] == NULL_TREE
);
1375 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) == 0)
1376 gcc_assert (cclauses
[C_OMP_CLAUSE_SPLIT_DISTRIBUTE
] == NULL_TREE
);
1377 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
)) == 0)
1378 gcc_assert (cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] == NULL_TREE
);
1379 if ((mask
& ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)
1380 | (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP
))) == 0
1381 && code
!= OMP_SECTIONS
)
1382 gcc_assert (cclauses
[C_OMP_CLAUSE_SPLIT_FOR
] == NULL_TREE
);
1383 if (code
!= OMP_SIMD
)
1384 gcc_assert (cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] == NULL_TREE
);
1388 /* qsort callback to compare #pragma omp declare simd clauses. */
1391 c_omp_declare_simd_clause_cmp (const void *p
, const void *q
)
1393 tree a
= *(const tree
*) p
;
1394 tree b
= *(const tree
*) q
;
1395 if (OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_CODE (b
))
1397 if (OMP_CLAUSE_CODE (a
) > OMP_CLAUSE_CODE (b
))
1401 if (OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_SIMDLEN
1402 && OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_INBRANCH
1403 && OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_NOTINBRANCH
)
1405 int c
= tree_to_shwi (OMP_CLAUSE_DECL (a
));
1406 int d
= tree_to_shwi (OMP_CLAUSE_DECL (b
));
1415 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1416 CLAUSES on FNDECL into argument indexes and sort them. */
1419 c_omp_declare_simd_clauses_to_numbers (tree parms
, tree clauses
)
1422 vec
<tree
> clvec
= vNULL
;
1424 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1426 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_SIMDLEN
1427 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_INBRANCH
1428 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_NOTINBRANCH
)
1430 tree decl
= OMP_CLAUSE_DECL (c
);
1433 for (arg
= parms
, idx
= 0; arg
;
1434 arg
= TREE_CHAIN (arg
), idx
++)
1437 if (arg
== NULL_TREE
)
1439 error_at (OMP_CLAUSE_LOCATION (c
),
1440 "%qD is not an function argument", decl
);
1443 OMP_CLAUSE_DECL (c
) = build_int_cst (integer_type_node
, idx
);
1444 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
1445 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c
))
1447 decl
= OMP_CLAUSE_LINEAR_STEP (c
);
1448 for (arg
= parms
, idx
= 0; arg
;
1449 arg
= TREE_CHAIN (arg
), idx
++)
1452 if (arg
== NULL_TREE
)
1454 error_at (OMP_CLAUSE_LOCATION (c
),
1455 "%qD is not an function argument", decl
);
1458 OMP_CLAUSE_LINEAR_STEP (c
)
1459 = build_int_cst (integer_type_node
, idx
);
1462 clvec
.safe_push (c
);
1464 if (!clvec
.is_empty ())
1466 unsigned int len
= clvec
.length (), i
;
1467 clvec
.qsort (c_omp_declare_simd_clause_cmp
);
1469 for (i
= 0; i
< len
; i
++)
1470 OMP_CLAUSE_CHAIN (clvec
[i
]) = (i
< len
- 1) ? clvec
[i
+ 1] : NULL_TREE
;
1473 clauses
= NULL_TREE
;
1478 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1481 c_omp_declare_simd_clauses_to_decls (tree fndecl
, tree clauses
)
1485 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1486 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_SIMDLEN
1487 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_INBRANCH
1488 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_NOTINBRANCH
)
1490 int idx
= tree_to_shwi (OMP_CLAUSE_DECL (c
)), i
;
1492 for (arg
= DECL_ARGUMENTS (fndecl
), i
= 0; arg
;
1493 arg
= TREE_CHAIN (arg
), i
++)
1497 OMP_CLAUSE_DECL (c
) = arg
;
1498 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
1499 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c
))
1501 idx
= tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c
));
1502 for (arg
= DECL_ARGUMENTS (fndecl
), i
= 0; arg
;
1503 arg
= TREE_CHAIN (arg
), i
++)
1507 OMP_CLAUSE_LINEAR_STEP (c
) = arg
;
1512 /* True if OpenMP sharing attribute of DECL is predetermined. */
1514 enum omp_clause_default_kind
1515 c_omp_predetermined_sharing (tree decl
)
1517 /* Variables with const-qualified type having no mutable member
1518 are predetermined shared. */
1519 if (TREE_READONLY (decl
))
1520 return OMP_CLAUSE_DEFAULT_SHARED
;
1522 return OMP_CLAUSE_DEFAULT_UNSPECIFIED
;