/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
		  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "c-common.h"
#include "c-pragma.h"
#include "gimple-expr.h"
#include "langhooks.h"
#include "omp-low.h"
#include "gomp-constants.h"

/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);

  args->quick_push (t);
  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
	args->quick_push (build_int_cst (integer_type_node,
			  TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);
  add_stmt (stmt);

  vec_free (args);

  return stmt;
}
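
/* Illustrative example (not part of the original source): a directive such as

     #pragma acc wait (1, 2) async (3)

   arrives here with PARMS holding the wait arguments 1 and 2 and CLAUSES
   holding the async clause, and is lowered into a call to the GOACC_wait
   builtin of roughly the form

     GOACC_wait (3, 2, 1, 2);

   i.e. the async queue (or GOMP_ASYNC_SYNC when no async clause is given),
   the number of wait arguments, and then the arguments themselves.  */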

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  tree t = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
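
/* Illustrative note (not part of the original source): the finishers above
   simply wrap the already-parsed structured block in the matching tree node.
   For example

     #pragma omp master
       x++;

   is finished by c_finish_omp_master as build1 (OMP_MASTER, void_type_node,
   <the statement "x++;">) with the pragma's location attached; the actual
   lowering of these regions happens later in omp-low.c.  */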

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
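
/* Illustrative note (not part of the original source): these standalone
   directives become plain calls to libgomp entry points.  For example

     #pragma omp barrier

   is emitted as a call to the GOMP_barrier builtin at the pragma's location;
   taskwait and taskyield are handled identically with their respective
   builtins.  */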

/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS. 
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex types are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And let's not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build2_loc (loc, opcode, TREE_TYPE (lhs), rhs, lhs);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && TREE_CODE (rhs1) == VAR_DECL
      && TREE_CODE (lhs) == VAR_DECL
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
	      return error_mark_node;
	    }
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
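
/* Illustrative example (not part of the original source): for

     #pragma omp atomic capture
     v = x += expr;

   the parser calls this routine with CODE == OMP_ATOMIC_CAPTURE_NEW,
   OPCODE == PLUS_EXPR, LHS == x, RHS == expr and V == v; the routine builds
   x = x + expr through the usual build_modify_expr machinery, takes the
   result apart again, and wraps the address of x and the computed rhs in an
   OMP_ATOMIC_CAPTURE_NEW node whose value is then assigned to v.  */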

/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}
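
/* Illustrative note (not part of the original source): since the flush
   variable list is ignored here,

     #pragma omp flush (a, b)

   simply becomes a call to the __sync_synchronize builtin, i.e. a full
   memory barrier.  */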

/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
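
/* Illustrative example (not part of the original source): for an increment
   written as

     i = 5 + i;

   the right-hand side reaches this helper as PLUS_EXPR <5, i> with
   DECL == i; the recursion replaces the occurrence of i with 0 and folds,
   so the caller learns that the loop step is the constant 5 and can rebuild
   the increment in the canonical i = i + 5 form.  */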

/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
				 sizetype, TREE_OPERAND (incr, 1));

      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}
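
/* Illustrative example (not part of the original source): with int *p as
   the iteration variable, an increment of p++ carries the element size as
   its second operand, so it is rewritten here into roughly
   p = p + (sizetype) sizeof (int), i.e. a pointer-plus folded into a
   MODIFY_EXPR; for p-- the offset is negated first.  */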

/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree initv, tree condv, tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code != CILK_SIMD && code != CILK_FOR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code != CILK_SIMD && code != CILK_FOR)
		    cond_ok = false;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
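
/* Illustrative example (not part of the original source): for

     #pragma omp for
     for (i = 0; n > i; i += 2)
       body;

   the condition is rewritten into the canonical i < n form by swapping the
   comparison, the increment is accepted as i = i + 2, and the resulting
   OMP_FOR node carries the init, cond and incr vectors (one element per
   collapsed loop) together with BODY and PRE_BODY.  */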

/* Right now we have 14 different combined constructs; this
   function attempts to split or duplicate clauses for combined
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows us to determine which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can only be combined with parallel).
   Combined constructs are:
   #pragma omp parallel for
   #pragma omp parallel sections
   #pragma omp parallel for simd
   #pragma omp for simd
   #pragma omp distribute simd
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  gcc_assert (code != OACC_PARALLEL);
  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_LINEAR:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Duplicate this to all of distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_COLLAPSE);
	      OMP_CLAUSE_COLLAPSE_EXPR (c)
		= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but target,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   target and simd.  Put it on the outermost of those and
	   duplicate on parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel and teams.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if (code == OMP_TEAMS)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  /* FIXME: This is currently being discussed.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}
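
/* Illustrative example (not part of the original source): for

     #pragma omp parallel for simd num_threads (4) schedule (static) \
	     safelen (8) collapse (2) private (t)

   this routine leaves num_threads on the parallel part, schedule on the
   for part and safelen on the simd part, duplicates collapse onto both the
   for and simd parts, and puts private on the innermost construct (simd),
   filling the corresponding slots of the CCLAUSES array.  */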

/* qsort callback to compare #pragma omp declare simd clauses.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
	return -1;
      return 1;
    }
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
      int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
      if (c < d)
	return 1;
      if (c > d)
	return -1;
    }
  return 0;
}

/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree arg;
	  int idx;
	  for (arg = parms, idx = 0; arg;
	       arg = TREE_CHAIN (arg), idx++)
	    if (arg == decl)
	      break;
	  if (arg == NULL_TREE)
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
	}
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      clauses = clvec[0];
      for (i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  clvec.release ();
  return clauses;
}
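
/* Illustrative example (not part of the original source): given

     #pragma omp declare simd uniform (n) linear (p : 1)
     void foo (int *p, int n);

   the uniform and linear clauses initially point at the PARM_DECLs n and p;
   this routine rewrites them to the argument indexes 1 and 0 respectively
   and sorts the clause chain, while c_omp_declare_simd_clauses_to_decls
   below performs the inverse mapping once the clauses are attached to the
   function definition.  */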

/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
	tree arg;
	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
	     arg = TREE_CHAIN (arg), i++)
	  if (i == idx)
	    break;
	gcc_assert (arg);
	OMP_CLAUSE_DECL (c) = arg;
      }
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
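
/* Illustrative example (not part of the original source): with

     const int limit = 100;

   referenced inside a parallel region with default(none), TREE_READONLY is
   set on limit, so this routine reports OMP_CLAUSE_DEFAULT_SHARED and no
   explicit data-sharing clause is required for it.  */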