1 /* This file contains routines to construct GNU OpenMP constructs,
2 called from parsing in the C and C++ front ends.
4 Copyright (C) 2005-2014 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
8 This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
30 #include "gimple-expr.h"
31 #include "langhooks.h"
34 /* Complete a #pragma omp master construct. STMT is the structured-block
35 that follows the pragma. LOC is the l*/
38 c_finish_omp_master (location_t loc
, tree stmt
)
40 tree t
= add_stmt (build1 (OMP_MASTER
, void_type_node
, stmt
));
41 SET_EXPR_LOCATION (t
, loc
);
45 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
46 that follows the pragma. LOC is the l*/
49 c_finish_omp_taskgroup (location_t loc
, tree stmt
)
51 tree t
= add_stmt (build1 (OMP_TASKGROUP
, void_type_node
, stmt
));
52 SET_EXPR_LOCATION (t
, loc
);
56 /* Complete a #pragma omp critical construct. STMT is the structured-block
57 that follows the pragma, NAME is the identifier in the pragma, or null
58 if it was omitted. LOC is the location of the #pragma. */
61 c_finish_omp_critical (location_t loc
, tree body
, tree name
)
63 tree stmt
= make_node (OMP_CRITICAL
);
64 TREE_TYPE (stmt
) = void_type_node
;
65 OMP_CRITICAL_BODY (stmt
) = body
;
66 OMP_CRITICAL_NAME (stmt
) = name
;
67 SET_EXPR_LOCATION (stmt
, loc
);
68 return add_stmt (stmt
);
71 /* Complete a #pragma omp ordered construct. STMT is the structured-block
72 that follows the pragma. LOC is the location of the #pragma. */
75 c_finish_omp_ordered (location_t loc
, tree stmt
)
77 tree t
= build1 (OMP_ORDERED
, void_type_node
, stmt
);
78 SET_EXPR_LOCATION (t
, loc
);
83 /* Complete a #pragma omp barrier construct. LOC is the location of
87 c_finish_omp_barrier (location_t loc
)
91 x
= builtin_decl_explicit (BUILT_IN_GOMP_BARRIER
);
92 x
= build_call_expr_loc (loc
, x
, 0);
97 /* Complete a #pragma omp taskwait construct. LOC is the location of the
101 c_finish_omp_taskwait (location_t loc
)
105 x
= builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT
);
106 x
= build_call_expr_loc (loc
, x
, 0);
111 /* Complete a #pragma omp taskyield construct. LOC is the location of the
115 c_finish_omp_taskyield (location_t loc
)
119 x
= builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD
);
120 x
= build_call_expr_loc (loc
, x
, 0);
125 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
126 the expression to be implemented atomically is LHS opcode= RHS.
127 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
128 opcode= RHS with the new or old content of LHS returned.
129 LOC is the location of the atomic statement. The value returned
130 is either error_mark_node (if the construct was erroneous) or an
131 OMP_ATOMIC* node which should be added to the current statement
132 tree with add_stmt. */
135 c_finish_omp_atomic (location_t loc
, enum tree_code code
,
136 enum tree_code opcode
, tree lhs
, tree rhs
,
137 tree v
, tree lhs1
, tree rhs1
, bool swapped
, bool seq_cst
)
139 tree x
, type
, addr
, pre
= NULL_TREE
;
141 if (lhs
== error_mark_node
|| rhs
== error_mark_node
142 || v
== error_mark_node
|| lhs1
== error_mark_node
143 || rhs1
== error_mark_node
)
144 return error_mark_node
;
146 /* ??? According to one reading of the OpenMP spec, complex type are
147 supported, but there are no atomic stores for any architecture.
148 But at least icc 9.0 doesn't support complex types here either.
149 And lets not even talk about vector types... */
150 type
= TREE_TYPE (lhs
);
151 if (!INTEGRAL_TYPE_P (type
)
152 && !POINTER_TYPE_P (type
)
153 && !SCALAR_FLOAT_TYPE_P (type
))
155 error_at (loc
, "invalid expression type for %<#pragma omp atomic%>");
156 return error_mark_node
;
159 /* ??? Validate that rhs does not overlap lhs. */
161 /* Take and save the address of the lhs. From then on we'll reference it
163 addr
= build_unary_op (loc
, ADDR_EXPR
, lhs
, 0);
164 if (addr
== error_mark_node
)
165 return error_mark_node
;
166 addr
= save_expr (addr
);
167 if (TREE_CODE (addr
) != SAVE_EXPR
168 && (TREE_CODE (addr
) != ADDR_EXPR
169 || TREE_CODE (TREE_OPERAND (addr
, 0)) != VAR_DECL
))
171 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
172 it even after unsharing function body. */
173 tree var
= create_tmp_var_raw (TREE_TYPE (addr
), NULL
);
174 DECL_CONTEXT (var
) = current_function_decl
;
175 addr
= build4 (TARGET_EXPR
, TREE_TYPE (addr
), var
, addr
, NULL
, NULL
);
177 lhs
= build_indirect_ref (loc
, addr
, RO_NULL
);
179 if (code
== OMP_ATOMIC_READ
)
181 x
= build1 (OMP_ATOMIC_READ
, type
, addr
);
182 SET_EXPR_LOCATION (x
, loc
);
183 OMP_ATOMIC_SEQ_CST (x
) = seq_cst
;
184 return build_modify_expr (loc
, v
, NULL_TREE
, NOP_EXPR
,
188 /* There are lots of warnings, errors, and conversions that need to happen
189 in the course of interpreting a statement. Use the normal mechanisms
190 to do this, and then take it apart again. */
193 rhs
= build2_loc (loc
, opcode
, TREE_TYPE (lhs
), rhs
, lhs
);
196 bool save
= in_late_binary_op
;
197 in_late_binary_op
= true;
198 x
= build_modify_expr (loc
, lhs
, NULL_TREE
, opcode
, loc
, rhs
, NULL_TREE
);
199 in_late_binary_op
= save
;
200 if (x
== error_mark_node
)
201 return error_mark_node
;
202 if (TREE_CODE (x
) == COMPOUND_EXPR
)
204 pre
= TREE_OPERAND (x
, 0);
205 gcc_assert (TREE_CODE (pre
) == SAVE_EXPR
);
206 x
= TREE_OPERAND (x
, 1);
208 gcc_assert (TREE_CODE (x
) == MODIFY_EXPR
);
209 rhs
= TREE_OPERAND (x
, 1);
211 /* Punt the actual generation of atomic operations to common code. */
212 if (code
== OMP_ATOMIC
)
213 type
= void_type_node
;
214 x
= build2 (code
, type
, addr
, rhs
);
215 SET_EXPR_LOCATION (x
, loc
);
216 OMP_ATOMIC_SEQ_CST (x
) = seq_cst
;
218 /* Generally it is hard to prove lhs1 and lhs are the same memory
219 location, just diagnose different variables. */
221 && TREE_CODE (rhs1
) == VAR_DECL
222 && TREE_CODE (lhs
) == VAR_DECL
225 if (code
== OMP_ATOMIC
)
226 error_at (loc
, "%<#pragma omp atomic update%> uses two different variables for memory");
228 error_at (loc
, "%<#pragma omp atomic capture%> uses two different variables for memory");
229 return error_mark_node
;
232 if (code
!= OMP_ATOMIC
)
234 /* Generally it is hard to prove lhs1 and lhs are the same memory
235 location, just diagnose different variables. */
236 if (lhs1
&& TREE_CODE (lhs1
) == VAR_DECL
&& TREE_CODE (lhs
) == VAR_DECL
)
240 error_at (loc
, "%<#pragma omp atomic capture%> uses two different variables for memory");
241 return error_mark_node
;
244 x
= build_modify_expr (loc
, v
, NULL_TREE
, NOP_EXPR
,
246 if (rhs1
&& rhs1
!= lhs
)
248 tree rhs1addr
= build_unary_op (loc
, ADDR_EXPR
, rhs1
, 0);
249 if (rhs1addr
== error_mark_node
)
250 return error_mark_node
;
251 x
= omit_one_operand_loc (loc
, type
, x
, rhs1addr
);
253 if (lhs1
&& lhs1
!= lhs
)
255 tree lhs1addr
= build_unary_op (loc
, ADDR_EXPR
, lhs1
, 0);
256 if (lhs1addr
== error_mark_node
)
257 return error_mark_node
;
258 if (code
== OMP_ATOMIC_CAPTURE_OLD
)
259 x
= omit_one_operand_loc (loc
, type
, x
, lhs1addr
);
263 x
= omit_two_operands_loc (loc
, type
, x
, x
, lhs1addr
);
267 else if (rhs1
&& rhs1
!= lhs
)
269 tree rhs1addr
= build_unary_op (loc
, ADDR_EXPR
, rhs1
, 0);
270 if (rhs1addr
== error_mark_node
)
271 return error_mark_node
;
272 x
= omit_one_operand_loc (loc
, type
, x
, rhs1addr
);
276 x
= omit_one_operand_loc (loc
, type
, x
, pre
);
281 /* Complete a #pragma omp flush construct. We don't do anything with
282 the variable list that the syntax allows. LOC is the location of
286 c_finish_omp_flush (location_t loc
)
290 x
= builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE
);
291 x
= build_call_expr_loc (loc
, x
, 0);
296 /* Check and canonicalize #pragma omp for increment expression.
297 Helper function for c_finish_omp_for. */
300 check_omp_for_incr_expr (location_t loc
, tree exp
, tree decl
)
304 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp
))
305 || TYPE_PRECISION (TREE_TYPE (exp
)) < TYPE_PRECISION (TREE_TYPE (decl
)))
306 return error_mark_node
;
309 return build_int_cst (TREE_TYPE (exp
), 0);
311 switch (TREE_CODE (exp
))
314 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
315 if (t
!= error_mark_node
)
316 return fold_convert_loc (loc
, TREE_TYPE (exp
), t
);
319 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
320 if (t
!= error_mark_node
)
321 return fold_build2_loc (loc
, MINUS_EXPR
,
322 TREE_TYPE (exp
), t
, TREE_OPERAND (exp
, 1));
325 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
326 if (t
!= error_mark_node
)
327 return fold_build2_loc (loc
, PLUS_EXPR
,
328 TREE_TYPE (exp
), t
, TREE_OPERAND (exp
, 1));
329 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 1), decl
);
330 if (t
!= error_mark_node
)
331 return fold_build2_loc (loc
, PLUS_EXPR
,
332 TREE_TYPE (exp
), TREE_OPERAND (exp
, 0), t
);
336 /* cp_build_modify_expr forces preevaluation of the RHS to make
337 sure that it is evaluated before the lvalue-rvalue conversion
338 is applied to the LHS. Reconstruct the original expression. */
339 tree op0
= TREE_OPERAND (exp
, 0);
340 if (TREE_CODE (op0
) == TARGET_EXPR
341 && !VOID_TYPE_P (TREE_TYPE (op0
)))
343 tree op1
= TREE_OPERAND (exp
, 1);
344 tree temp
= TARGET_EXPR_SLOT (op0
);
345 if (TREE_CODE_CLASS (TREE_CODE (op1
)) == tcc_binary
346 && TREE_OPERAND (op1
, 1) == temp
)
348 op1
= copy_node (op1
);
349 TREE_OPERAND (op1
, 1) = TARGET_EXPR_INITIAL (op0
);
350 return check_omp_for_incr_expr (loc
, op1
, decl
);
359 return error_mark_node
;
362 /* If the OMP_FOR increment expression in INCR is of pointer type,
363 canonicalize it into an expression handled by gimplify_omp_for()
364 and return it. DECL is the iteration variable. */
367 c_omp_for_incr_canonicalize_ptr (location_t loc
, tree decl
, tree incr
)
369 if (POINTER_TYPE_P (TREE_TYPE (decl
))
370 && TREE_OPERAND (incr
, 1))
372 tree t
= fold_convert_loc (loc
,
373 sizetype
, TREE_OPERAND (incr
, 1));
375 if (TREE_CODE (incr
) == POSTDECREMENT_EXPR
376 || TREE_CODE (incr
) == PREDECREMENT_EXPR
)
377 t
= fold_build1_loc (loc
, NEGATE_EXPR
, sizetype
, t
);
378 t
= fold_build_pointer_plus (decl
, t
);
379 incr
= build2 (MODIFY_EXPR
, void_type_node
, decl
, t
);
384 /* Validate and emit code for the OpenMP directive #pragma omp for.
385 DECLV is a vector of iteration variables, for each collapsed loop.
386 INITV, CONDV and INCRV are vectors containing initialization
387 expressions, controlling predicates and increment expressions.
388 BODY is the body of the loop and PRE_BODY statements that go before
392 c_finish_omp_for (location_t locus
, enum tree_code code
, tree declv
,
393 tree initv
, tree condv
, tree incrv
, tree body
, tree pre_body
)
399 if (code
== CILK_SIMD
400 && !c_check_cilk_loop (locus
, TREE_VEC_ELT (declv
, 0)))
403 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (initv
));
404 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (condv
));
405 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (incrv
));
406 for (i
= 0; i
< TREE_VEC_LENGTH (declv
); i
++)
408 tree decl
= TREE_VEC_ELT (declv
, i
);
409 tree init
= TREE_VEC_ELT (initv
, i
);
410 tree cond
= TREE_VEC_ELT (condv
, i
);
411 tree incr
= TREE_VEC_ELT (incrv
, i
);
414 if (EXPR_HAS_LOCATION (init
))
415 elocus
= EXPR_LOCATION (init
);
417 /* Validate the iteration variable. */
418 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl
))
419 && TREE_CODE (TREE_TYPE (decl
)) != POINTER_TYPE
)
421 error_at (elocus
, "invalid type for iteration variable %qE", decl
);
425 /* In the case of "for (int i = 0...)", init will be a decl. It should
426 have a DECL_INITIAL that we can turn into an assignment. */
429 elocus
= DECL_SOURCE_LOCATION (decl
);
431 init
= DECL_INITIAL (decl
);
434 error_at (elocus
, "%qE is not initialized", decl
);
435 init
= integer_zero_node
;
439 init
= build_modify_expr (elocus
, decl
, NULL_TREE
, NOP_EXPR
,
440 /* FIXME diagnostics: This should
441 be the location of the INIT. */
446 if (init
!= error_mark_node
)
448 gcc_assert (TREE_CODE (init
) == MODIFY_EXPR
);
449 gcc_assert (TREE_OPERAND (init
, 0) == decl
);
452 if (cond
== NULL_TREE
)
454 error_at (elocus
, "missing controlling predicate");
459 bool cond_ok
= false;
461 if (EXPR_HAS_LOCATION (cond
))
462 elocus
= EXPR_LOCATION (cond
);
464 if (TREE_CODE (cond
) == LT_EXPR
465 || TREE_CODE (cond
) == LE_EXPR
466 || TREE_CODE (cond
) == GT_EXPR
467 || TREE_CODE (cond
) == GE_EXPR
468 || TREE_CODE (cond
) == NE_EXPR
469 || TREE_CODE (cond
) == EQ_EXPR
)
471 tree op0
= TREE_OPERAND (cond
, 0);
472 tree op1
= TREE_OPERAND (cond
, 1);
474 /* 2.5.1. The comparison in the condition is computed in
475 the type of DECL, otherwise the behavior is undefined.
481 according to ISO will be evaluated as:
486 if (TREE_CODE (op0
) == NOP_EXPR
487 && decl
== TREE_OPERAND (op0
, 0))
489 TREE_OPERAND (cond
, 0) = TREE_OPERAND (op0
, 0);
490 TREE_OPERAND (cond
, 1)
491 = fold_build1_loc (elocus
, NOP_EXPR
, TREE_TYPE (decl
),
492 TREE_OPERAND (cond
, 1));
494 else if (TREE_CODE (op1
) == NOP_EXPR
495 && decl
== TREE_OPERAND (op1
, 0))
497 TREE_OPERAND (cond
, 1) = TREE_OPERAND (op1
, 0);
498 TREE_OPERAND (cond
, 0)
499 = fold_build1_loc (elocus
, NOP_EXPR
, TREE_TYPE (decl
),
500 TREE_OPERAND (cond
, 0));
503 if (decl
== TREE_OPERAND (cond
, 0))
505 else if (decl
== TREE_OPERAND (cond
, 1))
508 swap_tree_comparison (TREE_CODE (cond
)));
509 TREE_OPERAND (cond
, 1) = TREE_OPERAND (cond
, 0);
510 TREE_OPERAND (cond
, 0) = decl
;
514 if (TREE_CODE (cond
) == NE_EXPR
515 || TREE_CODE (cond
) == EQ_EXPR
)
517 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl
)))
519 else if (operand_equal_p (TREE_OPERAND (cond
, 1),
520 TYPE_MIN_VALUE (TREE_TYPE (decl
)),
522 TREE_SET_CODE (cond
, TREE_CODE (cond
) == NE_EXPR
523 ? GT_EXPR
: LE_EXPR
);
524 else if (operand_equal_p (TREE_OPERAND (cond
, 1),
525 TYPE_MAX_VALUE (TREE_TYPE (decl
)),
527 TREE_SET_CODE (cond
, TREE_CODE (cond
) == NE_EXPR
528 ? LT_EXPR
: GE_EXPR
);
529 else if (code
!= CILK_SIMD
)
536 error_at (elocus
, "invalid controlling predicate");
541 if (incr
== NULL_TREE
)
543 error_at (elocus
, "missing increment expression");
548 bool incr_ok
= false;
550 if (EXPR_HAS_LOCATION (incr
))
551 elocus
= EXPR_LOCATION (incr
);
553 /* Check all the valid increment expressions: v++, v--, ++v, --v,
554 v = v + incr, v = incr + v and v = v - incr. */
555 switch (TREE_CODE (incr
))
557 case POSTINCREMENT_EXPR
:
558 case PREINCREMENT_EXPR
:
559 case POSTDECREMENT_EXPR
:
560 case PREDECREMENT_EXPR
:
561 if (TREE_OPERAND (incr
, 0) != decl
)
565 incr
= c_omp_for_incr_canonicalize_ptr (elocus
, decl
, incr
);
569 if (TREE_CODE (TREE_OPERAND (incr
, 0)) != SAVE_EXPR
570 || TREE_CODE (TREE_OPERAND (incr
, 1)) != MODIFY_EXPR
)
572 incr
= TREE_OPERAND (incr
, 1);
575 if (TREE_OPERAND (incr
, 0) != decl
)
577 if (TREE_OPERAND (incr
, 1) == decl
)
579 if (TREE_CODE (TREE_OPERAND (incr
, 1)) == PLUS_EXPR
580 && (TREE_OPERAND (TREE_OPERAND (incr
, 1), 0) == decl
581 || TREE_OPERAND (TREE_OPERAND (incr
, 1), 1) == decl
))
583 else if ((TREE_CODE (TREE_OPERAND (incr
, 1)) == MINUS_EXPR
584 || (TREE_CODE (TREE_OPERAND (incr
, 1))
585 == POINTER_PLUS_EXPR
))
586 && TREE_OPERAND (TREE_OPERAND (incr
, 1), 0) == decl
)
590 tree t
= check_omp_for_incr_expr (elocus
,
591 TREE_OPERAND (incr
, 1),
593 if (t
!= error_mark_node
)
596 t
= build2 (PLUS_EXPR
, TREE_TYPE (decl
), decl
, t
);
597 incr
= build2 (MODIFY_EXPR
, void_type_node
, decl
, t
);
607 error_at (elocus
, "invalid increment expression");
612 TREE_VEC_ELT (initv
, i
) = init
;
613 TREE_VEC_ELT (incrv
, i
) = incr
;
620 tree t
= make_node (code
);
622 TREE_TYPE (t
) = void_type_node
;
623 OMP_FOR_INIT (t
) = initv
;
624 OMP_FOR_COND (t
) = condv
;
625 OMP_FOR_INCR (t
) = incrv
;
626 OMP_FOR_BODY (t
) = body
;
627 OMP_FOR_PRE_BODY (t
) = pre_body
;
629 SET_EXPR_LOCATION (t
, locus
);
634 /* Right now we have 14 different combined constructs, this
635 function attempts to split or duplicate clauses for combined
636 constructs. CODE is the innermost construct in the combined construct,
637 and MASK allows to determine which constructs are combined together,
638 as every construct has at least one clause that no other construct
639 has (except for OMP_SECTIONS, but that can be only combined with parallel).
640 Combined constructs are:
641 #pragma omp parallel for
642 #pragma omp parallel sections
643 #pragma omp parallel for simd
645 #pragma omp distribute simd
646 #pragma omp distribute parallel for
647 #pragma omp distribute parallel for simd
648 #pragma omp teams distribute
649 #pragma omp teams distribute parallel for
650 #pragma omp teams distribute parallel for simd
651 #pragma omp target teams
652 #pragma omp target teams distribute
653 #pragma omp target teams distribute parallel for
654 #pragma omp target teams distribute parallel for simd */
657 c_omp_split_clauses (location_t loc
, enum tree_code code
,
658 omp_clause_mask mask
, tree clauses
, tree
*cclauses
)
661 enum c_omp_clause_split s
;
664 for (i
= 0; i
< C_OMP_CLAUSE_SPLIT_COUNT
; i
++)
666 /* Add implicit nowait clause on
667 #pragma omp parallel {for,for simd,sections}. */
668 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
673 cclauses
[C_OMP_CLAUSE_SPLIT_FOR
]
674 = build_omp_clause (loc
, OMP_CLAUSE_NOWAIT
);
677 cclauses
[C_OMP_CLAUSE_SPLIT_SECTIONS
]
678 = build_omp_clause (loc
, OMP_CLAUSE_NOWAIT
);
684 for (; clauses
; clauses
= next
)
686 next
= OMP_CLAUSE_CHAIN (clauses
);
688 switch (OMP_CLAUSE_CODE (clauses
))
690 /* First the clauses that are unique to some constructs. */
691 case OMP_CLAUSE_DEVICE
:
693 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
695 case OMP_CLAUSE_NUM_TEAMS
:
696 case OMP_CLAUSE_THREAD_LIMIT
:
697 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
699 case OMP_CLAUSE_DIST_SCHEDULE
:
700 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
702 case OMP_CLAUSE_COPYIN
:
703 case OMP_CLAUSE_NUM_THREADS
:
704 case OMP_CLAUSE_PROC_BIND
:
705 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
707 case OMP_CLAUSE_ORDERED
:
708 case OMP_CLAUSE_SCHEDULE
:
709 case OMP_CLAUSE_NOWAIT
:
710 s
= C_OMP_CLAUSE_SPLIT_FOR
;
712 case OMP_CLAUSE_SAFELEN
:
713 case OMP_CLAUSE_LINEAR
:
714 case OMP_CLAUSE_ALIGNED
:
715 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
717 /* Duplicate this to all of distribute, for and simd. */
718 case OMP_CLAUSE_COLLAPSE
:
719 if (code
== OMP_SIMD
)
721 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
722 OMP_CLAUSE_COLLAPSE
);
723 OMP_CLAUSE_COLLAPSE_EXPR (c
)
724 = OMP_CLAUSE_COLLAPSE_EXPR (clauses
);
725 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
];
726 cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] = c
;
728 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
730 if ((mask
& (OMP_CLAUSE_MASK_1
731 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
733 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
734 OMP_CLAUSE_COLLAPSE
);
735 OMP_CLAUSE_COLLAPSE_EXPR (c
)
736 = OMP_CLAUSE_COLLAPSE_EXPR (clauses
);
737 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_FOR
];
738 cclauses
[C_OMP_CLAUSE_SPLIT_FOR
] = c
;
739 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
742 s
= C_OMP_CLAUSE_SPLIT_FOR
;
745 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
747 /* Private clause is supported on all constructs but target,
748 it is enough to put it on the innermost one. For
749 #pragma omp {for,sections} put it on parallel though,
750 as that's what we did for OpenMP 3.1. */
751 case OMP_CLAUSE_PRIVATE
:
754 case OMP_SIMD
: s
= C_OMP_CLAUSE_SPLIT_SIMD
; break;
755 case OMP_FOR
: case OMP_SECTIONS
:
756 case OMP_PARALLEL
: s
= C_OMP_CLAUSE_SPLIT_PARALLEL
; break;
757 case OMP_DISTRIBUTE
: s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
; break;
758 case OMP_TEAMS
: s
= C_OMP_CLAUSE_SPLIT_TEAMS
; break;
759 default: gcc_unreachable ();
762 /* Firstprivate clause is supported on all constructs but
763 target and simd. Put it on the outermost of those and
764 duplicate on parallel. */
765 case OMP_CLAUSE_FIRSTPRIVATE
:
766 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
769 if ((mask
& ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
)
771 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
))) != 0)
773 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
774 OMP_CLAUSE_FIRSTPRIVATE
);
775 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
776 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
];
777 cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] = c
;
778 if ((mask
& (OMP_CLAUSE_MASK_1
779 << PRAGMA_OMP_CLAUSE_NUM_TEAMS
)) != 0)
780 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
782 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
786 #pragma omp parallel{, for{, simd}, sections}. */
787 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
789 else if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
792 /* This must be one of
793 #pragma omp {,target }teams distribute
794 #pragma omp target teams
795 #pragma omp {,target }teams distribute simd. */
796 gcc_assert (code
== OMP_DISTRIBUTE
798 || code
== OMP_SIMD
);
799 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
801 else if ((mask
& (OMP_CLAUSE_MASK_1
802 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
804 /* This must be #pragma omp distribute simd. */
805 gcc_assert (code
== OMP_SIMD
);
806 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
810 /* This must be #pragma omp for simd. */
811 gcc_assert (code
== OMP_SIMD
);
812 s
= C_OMP_CLAUSE_SPLIT_FOR
;
815 /* Lastprivate is allowed on for, sections and simd. In
816 parallel {for{, simd},sections} we actually want to put it on
817 parallel rather than for or sections. */
818 case OMP_CLAUSE_LASTPRIVATE
:
819 if (code
== OMP_FOR
|| code
== OMP_SECTIONS
)
821 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
823 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
825 s
= C_OMP_CLAUSE_SPLIT_FOR
;
828 gcc_assert (code
== OMP_SIMD
);
829 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
831 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
832 OMP_CLAUSE_LASTPRIVATE
);
833 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
834 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
836 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
838 s
= C_OMP_CLAUSE_SPLIT_FOR
;
839 OMP_CLAUSE_CHAIN (c
) = cclauses
[s
];
842 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
844 /* Shared and default clauses are allowed on private and teams. */
845 case OMP_CLAUSE_SHARED
:
846 case OMP_CLAUSE_DEFAULT
:
847 if (code
== OMP_TEAMS
)
849 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
852 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
855 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
856 OMP_CLAUSE_CODE (clauses
));
857 if (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_SHARED
)
858 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
860 OMP_CLAUSE_DEFAULT_KIND (c
)
861 = OMP_CLAUSE_DEFAULT_KIND (clauses
);
862 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
];
863 cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
] = c
;
866 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
868 /* Reduction is allowed on simd, for, parallel, sections and teams.
869 Duplicate it on all of them, but omit on for or sections if
870 parallel is present. */
871 case OMP_CLAUSE_REDUCTION
:
872 if (code
== OMP_SIMD
)
874 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
875 OMP_CLAUSE_REDUCTION
);
876 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
877 OMP_CLAUSE_REDUCTION_CODE (c
)
878 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
879 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
)
880 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses
);
881 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
];
882 cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] = c
;
884 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
886 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
889 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
890 OMP_CLAUSE_REDUCTION
);
891 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
892 OMP_CLAUSE_REDUCTION_CODE (c
)
893 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
894 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
)
895 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses
);
896 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
];
897 cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] = c
;
898 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
900 else if ((mask
& (OMP_CLAUSE_MASK_1
901 << PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
902 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
904 s
= C_OMP_CLAUSE_SPLIT_FOR
;
906 else if (code
== OMP_SECTIONS
)
907 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
909 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
912 /* FIXME: This is currently being discussed. */
913 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
915 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
917 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
922 OMP_CLAUSE_CHAIN (clauses
) = cclauses
[s
];
923 cclauses
[s
] = clauses
;
928 /* qsort callback to compare #pragma omp declare simd clauses. */
931 c_omp_declare_simd_clause_cmp (const void *p
, const void *q
)
933 tree a
= *(const tree
*) p
;
934 tree b
= *(const tree
*) q
;
935 if (OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_CODE (b
))
937 if (OMP_CLAUSE_CODE (a
) > OMP_CLAUSE_CODE (b
))
941 if (OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_SIMDLEN
942 && OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_INBRANCH
943 && OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_NOTINBRANCH
)
945 int c
= tree_to_shwi (OMP_CLAUSE_DECL (a
));
946 int d
= tree_to_shwi (OMP_CLAUSE_DECL (b
));
955 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
956 CLAUSES on FNDECL into argument indexes and sort them. */
959 c_omp_declare_simd_clauses_to_numbers (tree parms
, tree clauses
)
962 vec
<tree
> clvec
= vNULL
;
964 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
966 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_SIMDLEN
967 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_INBRANCH
968 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_NOTINBRANCH
)
970 tree decl
= OMP_CLAUSE_DECL (c
);
973 for (arg
= parms
, idx
= 0; arg
;
974 arg
= TREE_CHAIN (arg
), idx
++)
977 if (arg
== NULL_TREE
)
979 error_at (OMP_CLAUSE_LOCATION (c
),
980 "%qD is not an function argument", decl
);
983 OMP_CLAUSE_DECL (c
) = build_int_cst (integer_type_node
, idx
);
987 if (!clvec
.is_empty ())
989 unsigned int len
= clvec
.length (), i
;
990 clvec
.qsort (c_omp_declare_simd_clause_cmp
);
992 for (i
= 0; i
< len
; i
++)
993 OMP_CLAUSE_CHAIN (clvec
[i
]) = (i
< len
- 1) ? clvec
[i
+ 1] : NULL_TREE
;
999 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1002 c_omp_declare_simd_clauses_to_decls (tree fndecl
, tree clauses
)
1006 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1007 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_SIMDLEN
1008 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_INBRANCH
1009 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_NOTINBRANCH
)
1011 int idx
= tree_to_shwi (OMP_CLAUSE_DECL (c
)), i
;
1013 for (arg
= DECL_ARGUMENTS (fndecl
), i
= 0; arg
;
1014 arg
= TREE_CHAIN (arg
), i
++)
1018 OMP_CLAUSE_DECL (c
) = arg
;
1022 /* True if OpenMP sharing attribute of DECL is predetermined. */
1024 enum omp_clause_default_kind
1025 c_omp_predetermined_sharing (tree decl
)
1027 /* Variables with const-qualified type having no mutable member
1028 are predetermined shared. */
1029 if (TREE_READONLY (decl
))
1030 return OMP_CLAUSE_DEFAULT_SHARED
;
1032 return OMP_CLAUSE_DEFAULT_UNSPECIFIED
;