/* This file contains routines to construct GNU OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
		  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "c-common.h"
#include "c-pragma.h"
#include "gimple-expr.h"
#include "langhooks.h"
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}
/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}
/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  tree t = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
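
/* Note: unlike the constructs handled earlier, the barrier, taskwait and
   taskyield pragmas do not produce OMP_* tree nodes at all; each lowers
   directly to a builtin call, roughly (assuming the conventional libgomp
   entry points):

     #pragma omp barrier    ->  GOMP_barrier ()
     #pragma omp taskwait   ->  GOMP_taskwait ()
     #pragma omp taskyield  ->  GOMP_taskyield ()  */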
/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  */
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex types are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And let's not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build2_loc (loc, opcode, TREE_TYPE (lhs), rhs, lhs);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && TREE_CODE (rhs1) == VAR_DECL
      && TREE_CODE (lhs) == VAR_DECL
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
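
/* An illustrative sketch of the source forms handled above (the exact tree
   shapes are approximate; the front ends supply LHS/RHS/V/LHS1/RHS1 from
   the parsed statement):

     #pragma omp atomic update
       x += expr;          code = OMP_ATOMIC, opcode = PLUS_EXPR
     #pragma omp atomic read
       v = x;              code = OMP_ATOMIC_READ
     #pragma omp atomic capture
       v = x++;            code = OMP_ATOMIC_CAPTURE_OLD
     #pragma omp atomic capture
       v = ++x;            code = OMP_ATOMIC_CAPTURE_NEW  */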
/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
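
/* For example (a sketch): with iteration variable i and the increment
   expression "i + 4 * n", the recursion above rewrites the occurrence of i
   to 0 and folds, yielding the step "4 * n"; for "(long) (i - 2)" the
   conversion is looked through in the same way.  Anything not of one of
   these shapes comes back as error_mark_node.  */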
/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
				 sizetype, TREE_OPERAND (incr, 1));

      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}
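
/* A sketch of the rewrite done above: for a pointer iteration variable p,
   an increment parsed as "p++" arrives with the element size in
   TREE_OPERAND (incr, 1) and leaves as a MODIFY_EXPR whose RHS is a
   POINTER_PLUS_EXPR of p and the (sizetype) offset, with the offset
   negated first for "p--" and "--p".  This is the shape gimplify_omp_for
   expects.  */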
/* Validate and emit code for the OpenMP directive #pragma omp for.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree initv, tree condv, tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code != CILK_SIMD && code != CILK_FOR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code != CILK_SIMD && code != CILK_FOR)
		    cond_ok = false;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
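
/* As an example of the interface above (a sketch, not tied to a particular
   caller): for

     #pragma omp for collapse(2)
     for (i = 0; i < n; i++)
       for (j = n; j > 0; j--)
	 body;

   DECLV, INITV, CONDV and INCRV are TREE_VECs of length 2 holding
   {i, j}, {i = 0, j = n}, {i < n, j > 0} and {i++, j--} respectively.
   The loop above canonicalizes each condition and increment before
   wrapping everything in a single OMP_FOR node.  */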
/* This function attempts to split or duplicate clauses for combined
   constructs.  Right now there are 14 such combined constructs.  CODE is
   the innermost construct in the combined construct, and MASK allows us to
   determine which constructs are combined together, as every construct has
   at least one clause that no other construct has (except for OMP_SECTIONS,
   but that can only be combined with parallel).
   Combined constructs are:
   #pragma omp parallel for
   #pragma omp parallel sections
   #pragma omp parallel for simd
   #pragma omp for simd
   #pragma omp distribute simd
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd  */
void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_LINEAR:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Duplicate this to all of distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_COLLAPSE);
	      OMP_CLAUSE_COLLAPSE_EXPR (c)
		= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but target,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   target and simd.  Put it on the outermost of those and
	   duplicate on parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel and teams.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if (code == OMP_TEAMS)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  /* FIXME: This is currently being discussed.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}
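
/* For example (a sketch of the splitting above): given

     #pragma omp parallel for simd num_threads (4) schedule (static)
				   safelen (8) collapse (2) firstprivate (x)

   num_threads goes to cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL], schedule to
   ..._SPLIT_FOR, safelen to ..._SPLIT_SIMD, collapse is duplicated on the
   for and simd parts, firstprivate ends up on the parallel part, and an
   implicit nowait clause is added to the for part.  */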
/* qsort callback to compare #pragma omp declare simd clauses.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
	return -1;
      return 1;
    }
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
      int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
      if (c < d)
	return 1;
      if (c > d)
	return -1;
    }
  return 0;
}
/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree arg;
	  int idx;
	  for (arg = parms, idx = 0; arg;
	       arg = TREE_CHAIN (arg), idx++)
	    if (arg == decl)
	      break;
	  if (arg == NULL_TREE)
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
	}
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  clvec.release ();
  return clauses;
}
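
/* For instance (a sketch): for

     #pragma omp declare simd uniform (n) linear (p : 1) simdlen (8)
     void foo (int *p, int n);

   the uniform and linear clauses initially point at the PARM_DECLs for n
   and p; the loop above replaces those with the argument indexes 1 and 0,
   and the qsort call orders the clauses so later consumers can process
   them positionally.  Simdlen, inbranch and notinbranch are left
   untouched.  */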
/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
	tree arg;
	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
	     arg = TREE_CHAIN (arg), i++)
	  if (i == idx)
	    break;
	gcc_assert (arg);
	OMP_CLAUSE_DECL (c) = arg;
      }
}
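
/* This is the inverse of c_omp_declare_simd_clauses_to_numbers: once the
   clauses are attached to a specific FNDECL (e.g. an out-of-line
   definition), the stored indexes are mapped back onto that function's
   DECL_ARGUMENTS chain.  */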
/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}